Rename fail! to panic!

https://github.com/rust-lang/rfcs/pull/221

The current terminology of "task failure" often causes problems when
writing or speaking about code. You often want to talk about the
possibility of an operation that returns a Result "failing", but cannot
because of the ambiguity with task failure. Instead, you have to speak
of "the failing case" or "when the operation does not succeed" or other
circumlocutions.

Likewise, we use a "Failure" header in rustdoc to describe when
operations may fail the task, but it would often be helpful to separate
out a section describing the "Err-producing" case.

We have been steadily moving away from task failure and toward Result as
an error-handling mechanism, so we should optimize our terminology
accordingly: Result-producing functions should be easy to describe.

To update your code, rename any call to `fail!` to `panic!` instead.
Assuming you have not created your own macro named `panic!`, this
will work on UNIX-based systems:

    grep -lZR 'fail!' . | xargs -0 -l sed -i -e 's/fail!/panic!/g'

You can of course also do this by hand.

[breaking-change]
This commit is contained in:
Steve Klabnik 2014-10-09 15:17:22 -04:00
parent 3bc545373d
commit 7828c3dd28
505 changed files with 1623 additions and 1618 deletions

View file

@@ -741,7 +741,7 @@ fn expand_arm(arm: ast::Arm, fld: &mut MacroExpander) -> ast::Arm {
// expand pats... they might contain macro uses:
let expanded_pats = arm.pats.move_map(|pat| fld.fold_pat(pat));
if expanded_pats.len() == 0 {
-fail!("encountered match arm with 0 patterns");
+panic!("encountered match arm with 0 patterns");
}
// all of the pats must have the same set of bindings, so use the
// first one to extract them and generate new names:
@@ -1621,7 +1621,7 @@ mod test {
// good lord, you can't make a path with 0 segments, can you?
let final_varref_ident = match varref.segments.last() {
Some(pathsegment) => pathsegment.identifier,
-None => fail!("varref with 0 path segments?")
+None => panic!("varref with 0 path segments?")
};
let varref_name = mtwt::resolve(final_varref_ident);
let varref_idents : Vec<ast::Ident>
@@ -1688,7 +1688,7 @@ foo_module!()
let cxbinds: &[&ast::Ident] = cxbinds.as_slice();
let cxbind = match cxbinds {
[b] => b,
-_ => fail!("expected just one binding for ext_cx")
+_ => panic!("expected just one binding for ext_cx")
};
let resolved_binding = mtwt::resolve(*cxbind);
let varrefs = crate_varrefs(&cr);

View file

@@ -211,7 +211,7 @@ fn resolve_internal(id: Ident,
resolvedthis
}
}
-IllegalCtxt => fail!("expected resolvable context, got IllegalCtxt")
+IllegalCtxt => panic!("expected resolvable context, got IllegalCtxt")
}
};
resolve_table.insert(key, resolved);
@@ -250,7 +250,7 @@ fn marksof_internal(ctxt: SyntaxContext,
loopvar = tl;
}
}
-IllegalCtxt => fail!("expected resolvable context, got IllegalCtxt")
+IllegalCtxt => panic!("expected resolvable context, got IllegalCtxt")
}
}
}
@@ -261,7 +261,7 @@ pub fn outer_mark(ctxt: SyntaxContext) -> Mrk {
with_sctable(|sctable| {
match (*sctable.table.borrow())[ctxt as uint] {
Mark(mrk, _) => mrk,
-_ => fail!("can't retrieve outer mark when outside is not a mark")
+_ => panic!("can't retrieve outer mark when outside is not a mark")
}
})
}
@@ -342,7 +342,7 @@ mod tests {
sc = tail;
continue;
}
-IllegalCtxt => fail!("expected resolvable context, got IllegalCtxt")
+IllegalCtxt => panic!("expected resolvable context, got IllegalCtxt")
}
}
}

View file

@@ -366,7 +366,7 @@ pub mod rt {
Some(ast) => ast,
None => {
error!("parse error");
-fail!()
+panic!()
}
}
}
@@ -598,7 +598,7 @@ fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
vec!(mk_name(cx, sp, ident.ident())));
}
-token::Interpolated(_) => fail!("quote! with interpolated token"),
+token::Interpolated(_) => panic!("quote! with interpolated token"),
_ => ()
}
@@ -635,7 +635,7 @@ fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
token::Dollar => "Dollar",
token::Underscore => "Underscore",
token::Eof => "Eof",
-_ => fail!(),
+_ => panic!(),
};
mk_token_path(cx, sp, name)
}
@@ -662,7 +662,7 @@ fn mk_tt(cx: &ExtCtxt, _: Span, tt: &ast::TokenTree) -> Vec<P<ast::Stmt>> {
.chain(mk_tt(cx, sp, &close.to_tt()).into_iter())
.collect()
},
-ast::TtSequence(..) => fail!("TtSequence in quote!"),
+ast::TtSequence(..) => panic!("TtSequence in quote!"),
ast::TtNonterminal(sp, ident) => {
// tt.extend($ident.to_tokens(ext_cx).into_iter())

View file

@@ -395,7 +395,7 @@ pub fn parse(sess: &ParseSess,
token::get_ident(name),
token::get_ident(bind))).to_string()
}
-_ => fail!()
+_ => panic!()
} }).collect::<Vec<String>>().connect(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
@@ -421,7 +421,7 @@ pub fn parse(sess: &ParseSess,
parse_nt(&mut rust_parser, name_string.get()))));
ei.idx += 1u;
}
-_ => fail!()
+_ => panic!()
}
cur_eis.push(ei);

View file

@@ -39,7 +39,7 @@ impl<'a> ParserAnyMacro<'a> {
/// silently drop anything. `allow_semi` is so that "optional"
/// semicolons at the end of normal expressions aren't complained
/// about e.g. the semicolon in `macro_rules! kapow( () => {
-/// fail!(); } )` doesn't get picked up by .parse_expr(), but it's
+/// panic!(); } )` doesn't get picked up by .parse_expr(), but it's
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();