diff --git a/src/librustc/middle/trans/_match.rs b/src/librustc/middle/trans/_match.rs index 084f0ba421e0..86dad3b7c1b2 100644 --- a/src/librustc/middle/trans/_match.rs +++ b/src/librustc/middle/trans/_match.rs @@ -173,11 +173,14 @@ use syntax::ast_util; use syntax::codemap::span; use syntax::print::pprust::pat_to_str; -fn macros() { include!("macros.rs"); } // FIXME(#3114): Macro import/export. +pub fn macros() { + // FIXME(#3114): Macro import/export. + include!("macros.rs"); +} // An option identifying a literal: either a unit-like struct or an // expression. -enum Lit { +pub enum Lit { UnitLikeStructLit(ast::node_id), // the node ID of the pattern ExprLit(@ast::expr), ConstLit(ast::def_id), // the def ID of the constant @@ -185,7 +188,7 @@ enum Lit { // An option identifying a branch (either a literal, a enum variant or a // range) -enum Opt { +pub enum Opt { lit(Lit), var(/* disr val */int, /* variant dids */{enm: def_id, var: def_id}), range(@ast::expr, @ast::expr), @@ -193,7 +196,7 @@ enum Opt { vec_len_ge(uint) } -fn opt_eq(tcx: ty::ctxt, a: &Opt, b: &Opt) -> bool { +pub fn opt_eq(tcx: ty::ctxt, a: &Opt, b: &Opt) -> bool { match (*a, *b) { (lit(a), lit(b)) => { match (a, b) { @@ -240,12 +243,12 @@ fn opt_eq(tcx: ty::ctxt, a: &Opt, b: &Opt) -> bool { } } -enum opt_result { +pub enum opt_result { single_result(Result), lower_bound(Result), range_result(Result, Result), } -fn trans_opt(bcx: block, o: &Opt) -> opt_result { +pub fn trans_opt(bcx: block, o: &Opt) -> opt_result { let _icx = bcx.insn_ctxt("match::trans_opt"); let ccx = bcx.ccx(); let mut bcx = bcx; @@ -279,7 +282,7 @@ fn trans_opt(bcx: block, o: &Opt) -> opt_result { } } -fn variant_opt(tcx: ty::ctxt, pat_id: ast::node_id) -> Opt { +pub fn variant_opt(tcx: ty::ctxt, pat_id: ast::node_id) -> Opt { match tcx.def_map.get(pat_id) { ast::def_variant(enum_id, var_id) => { let variants = ty::enum_variants(tcx, enum_id); @@ -299,7 +302,7 @@ fn variant_opt(tcx: ty::ctxt, pat_id: ast::node_id) -> Opt { } } 
-enum TransBindingMode { +pub enum TransBindingMode { TrByValue(/*ismove:*/ bool, /*llbinding:*/ ValueRef), TrByRef, TrByImplicitRef @@ -313,27 +316,27 @@ enum TransBindingMode { * - `trmode` is the trans binding mode * - `id` is the node id of the binding * - `ty` is the Rust type of the binding */ -struct BindingInfo { +pub struct BindingInfo { llmatch: ValueRef, trmode: TransBindingMode, id: ast::node_id, ty: ty::t, } -type BindingsMap = HashMap; +pub type BindingsMap = HashMap; -struct ArmData { +pub struct ArmData { bodycx: block, arm: &ast::arm, bindings_map: BindingsMap } -struct Match { +pub struct Match { pats: ~[@ast::pat], data: @ArmData } -fn match_to_str(bcx: block, m: &Match) -> ~str { +pub fn match_to_str(bcx: block, m: &Match) -> ~str { if bcx.sess().verbose() { // for many programs, this just take too long to serialize fmt!("%?", m.pats.map(|p| pat_to_str(*p, bcx.sess().intr()))) @@ -342,11 +345,11 @@ fn match_to_str(bcx: block, m: &Match) -> ~str { } } -fn matches_to_str(bcx: block, m: &[@Match]) -> ~str { +pub fn matches_to_str(bcx: block, m: &[@Match]) -> ~str { fmt!("%?", m.map(|n| match_to_str(bcx, *n))) } -fn has_nested_bindings(m: &[@Match], col: uint) -> bool { +pub fn has_nested_bindings(m: &[@Match], col: uint) -> bool { for vec::each(m) |br| { match br.pats[col].node { ast::pat_ident(_, _, Some(_)) => return true, @@ -356,10 +359,9 @@ fn has_nested_bindings(m: &[@Match], col: uint) -> bool { return false; } -fn expand_nested_bindings(bcx: block, m: &[@Match/&r], - col: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn expand_nested_bindings(bcx: block, m: &[@Match/&r], + col: uint, val: ValueRef) + -> ~[@Match/&r] { debug!("expand_nested_bindings(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -388,9 +390,9 @@ fn expand_nested_bindings(bcx: block, m: &[@Match/&r], } } -type enter_pat = fn(@ast::pat) -> Option<~[@ast::pat]>; +pub type enter_pat = fn(@ast::pat) -> Option<~[@ast::pat]>; -fn 
assert_is_binding_or_wild(bcx: block, p: @ast::pat) { +pub fn assert_is_binding_or_wild(bcx: block, p: @ast::pat) { if !pat_is_binding_or_wild(bcx.tcx().def_map, p) { bcx.sess().span_bug( p.span, @@ -399,10 +401,9 @@ fn assert_is_binding_or_wild(bcx: block, p: @ast::pat) { } } -fn enter_match(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef, e: enter_pat) - -> ~[@Match/&r] -{ +pub fn enter_match(bcx: block, dm: DefMap, m: &[@Match/&r], + col: uint, val: ValueRef, e: enter_pat) + -> ~[@Match/&r] { debug!("enter_match(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -442,10 +443,9 @@ fn enter_match(bcx: block, dm: DefMap, m: &[@Match/&r], return result; } -fn enter_default(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn enter_default(bcx: block, dm: DefMap, m: &[@Match/&r], + col: uint, val: ValueRef) + -> ~[@Match/&r] { debug!("enter_default(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -487,10 +487,9 @@ fn enter_default(bcx: block, dm: DefMap, m: &[@Match/&r], // so all patterns must either be records (resp. 
tuples) or // wildcards -fn enter_opt(bcx: block, m: &[@Match/&r], opt: &Opt, col: uint, - variant_size: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn enter_opt(bcx: block, m: &[@Match/&r], opt: &Opt, col: uint, + variant_size: uint, val: ValueRef) + -> ~[@Match/&r] { debug!("enter_opt(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -591,8 +590,13 @@ fn enter_opt(bcx: block, m: &[@Match/&r], opt: &Opt, col: uint, } } -fn enter_rec_or_struct(bcx: block, dm: DefMap, m: &[@Match/&r], col: uint, - fields: ~[ast::ident], val: ValueRef) -> ~[@Match/&r] { +pub fn enter_rec_or_struct(bcx: block, + dm: DefMap, + m: &[@Match/&r], + col: uint, + fields: ~[ast::ident], + val: ValueRef) + -> ~[@Match/&r] { debug!("enter_rec_or_struct(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -621,10 +625,9 @@ fn enter_rec_or_struct(bcx: block, dm: DefMap, m: &[@Match/&r], col: uint, } } -fn enter_tup(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef, n_elts: uint) - -> ~[@Match/&r] -{ +pub fn enter_tup(bcx: block, dm: DefMap, m: &[@Match/&r], + col: uint, val: ValueRef, n_elts: uint) + -> ~[@Match/&r] { debug!("enter_tup(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -646,10 +649,13 @@ fn enter_tup(bcx: block, dm: DefMap, m: &[@Match/&r], } } -fn enter_tuple_struct(bcx: block, dm: DefMap, m: &[@Match/&r], col: uint, - val: ValueRef, n_elts: uint) - -> ~[@Match/&r] -{ +pub fn enter_tuple_struct(bcx: block, + dm: DefMap, + m: &[@Match/&r], + col: uint, + val: ValueRef, + n_elts: uint) + -> ~[@Match/&r] { debug!("enter_tuple_struct(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -669,10 +675,12 @@ fn enter_tuple_struct(bcx: block, dm: DefMap, m: &[@Match/&r], col: uint, } } -fn enter_box(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn enter_box(bcx: block, + dm: DefMap, + m: &[@Match/&r], + col: uint, + val: ValueRef) + 
-> ~[@Match/&r] { debug!("enter_box(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -694,10 +702,12 @@ fn enter_box(bcx: block, dm: DefMap, m: &[@Match/&r], } } -fn enter_uniq(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn enter_uniq(bcx: block, + dm: DefMap, + m: &[@Match/&r], + col: uint, + val: ValueRef) + -> ~[@Match/&r] { debug!("enter_uniq(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -719,10 +729,12 @@ fn enter_uniq(bcx: block, dm: DefMap, m: &[@Match/&r], } } -fn enter_region(bcx: block, dm: DefMap, m: &[@Match/&r], - col: uint, val: ValueRef) - -> ~[@Match/&r] -{ +pub fn enter_region(bcx: block, + dm: DefMap, + m: &[@Match/&r], + col: uint, + val: ValueRef) + -> ~[@Match/&r] { debug!("enter_region(bcx=%s, m=%s, col=%u, val=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -747,7 +759,7 @@ fn enter_region(bcx: block, dm: DefMap, m: &[@Match/&r], // Returns the options in one column of matches. An option is something that // needs to be conditionally matched at runtime; for example, the discriminant // on a set of enum variants or a literal. 
-fn get_options(ccx: @crate_ctxt, m: &[@Match], col: uint) -> ~[Opt] { +pub fn get_options(ccx: @crate_ctxt, m: &[@Match], col: uint) -> ~[Opt] { fn add_to_set(tcx: ty::ctxt, set: &DVec, val: Opt) { if set.any(|l| opt_eq(tcx, l, &val)) {return;} set.push(val); @@ -806,11 +818,11 @@ fn get_options(ccx: @crate_ctxt, m: &[@Match], col: uint) -> ~[Opt] { return dvec::unwrap(move found); } -fn extract_variant_args(bcx: block, pat_id: ast::node_id, - vdefs: {enm: def_id, var: def_id}, - val: ValueRef) - -> {vals: ~[ValueRef], bcx: block} -{ +pub fn extract_variant_args(bcx: block, + pat_id: ast::node_id, + vdefs: {enm: def_id, var: def_id}, + val: ValueRef) + -> {vals: ~[ValueRef], bcx: block} { let _icx = bcx.insn_ctxt("match::extract_variant_args"); let ccx = bcx.fcx.ccx; let enum_ty_substs = match ty::get(node_id_type(bcx, pat_id)).sty { @@ -838,10 +850,12 @@ fn extract_variant_args(bcx: block, pat_id: ast::node_id, return {vals: args, bcx: bcx}; } -fn extract_vec_elems(bcx: block, pat_id: ast::node_id, - elem_count: uint, tail: bool, val: ValueRef) - -> {vals: ~[ValueRef], bcx: block} -{ +pub fn extract_vec_elems(bcx: block, + pat_id: ast::node_id, + elem_count: uint, + tail: bool, + val: ValueRef) + -> {vals: ~[ValueRef], bcx: block} { let _icx = bcx.insn_ctxt("match::extract_vec_elems"); let vt = tvec::vec_types(bcx, node_id_type(bcx, pat_id)); let unboxed = load_if_immediate(bcx, val, vt.vec_ty); @@ -874,8 +888,10 @@ fn extract_vec_elems(bcx: block, pat_id: ast::node_id, } // NB: This function does not collect fields from struct-like enum variants. 
-fn collect_record_or_struct_fields(bcx: block, m: &[@Match], col: uint) -> - ~[ast::ident] { +pub fn collect_record_or_struct_fields(bcx: block, + m: &[@Match], + col: uint) + -> ~[ast::ident] { let mut fields: ~[ast::ident] = ~[]; for vec::each(m) |br| { match /*bad*/copy br.pats[col].node { @@ -901,11 +917,11 @@ fn collect_record_or_struct_fields(bcx: block, m: &[@Match], col: uint) -> } } -fn root_pats_as_necessary(bcx: block, - m: &[@Match], - col: uint, - val: ValueRef) - -> block { +pub fn root_pats_as_necessary(bcx: block, + m: &[@Match], + col: uint, + val: ValueRef) + -> block { let mut bcx = bcx; for vec::each(m) |br| { let pat_id = br.pats[col].id; @@ -945,23 +961,23 @@ macro_rules! any_pat ( ) ) -fn any_box_pat(m: &[@Match], col: uint) -> bool { +pub fn any_box_pat(m: &[@Match], col: uint) -> bool { any_pat!(m, ast::pat_box(_)) } -fn any_uniq_pat(m: &[@Match], col: uint) -> bool { +pub fn any_uniq_pat(m: &[@Match], col: uint) -> bool { any_pat!(m, ast::pat_uniq(_)) } -fn any_region_pat(m: &[@Match], col: uint) -> bool { +pub fn any_region_pat(m: &[@Match], col: uint) -> bool { any_pat!(m, ast::pat_region(_)) } -fn any_tup_pat(m: &[@Match], col: uint) -> bool { +pub fn any_tup_pat(m: &[@Match], col: uint) -> bool { any_pat!(m, ast::pat_tup(_)) } -fn any_tuple_struct_pat(bcx: block, m: &[@Match], col: uint) -> bool { +pub fn any_tuple_struct_pat(bcx: block, m: &[@Match], col: uint) -> bool { vec::any(m, |br| { let pat = br.pats[col]; match pat.node { @@ -976,9 +992,9 @@ fn any_tuple_struct_pat(bcx: block, m: &[@Match], col: uint) -> bool { }) } -type mk_fail = fn@() -> BasicBlockRef; +pub type mk_fail = fn@() -> BasicBlockRef; -fn pick_col(m: &[@Match]) -> uint { +pub fn pick_col(m: &[@Match]) -> uint { fn score(p: @ast::pat) -> uint { match p.node { ast::pat_lit(_) | ast::pat_enum(_, _) | ast::pat_range(_, _) => 1u, @@ -1008,18 +1024,15 @@ fn pick_col(m: &[@Match]) -> uint { return best_col; } -enum branch_kind { no_branch, single, switch, compare, 
compare_vec_len, } - -impl branch_kind : cmp::Eq { - pure fn eq(&self, other: &branch_kind) -> bool { - ((*self) as uint) == ((*other) as uint) - } - pure fn ne(&self, other: &branch_kind) -> bool { !(*self).eq(other) } -} +#[deriving_eq] +pub enum branch_kind { no_branch, single, switch, compare, compare_vec_len, } // Compiles a comparison between two things. -fn compare_values(cx: block, lhs: ValueRef, rhs: ValueRef, rhs_t: ty::t) -> - Result { +pub fn compare_values(cx: block, + lhs: ValueRef, + rhs: ValueRef, + rhs_t: ty::t) + -> Result { let _icx = cx.insn_ctxt("compare_values"); if ty::type_is_scalar(rhs_t) { let rs = compare_scalar_types(cx, lhs, rhs, rhs_t, ast::eq); @@ -1059,11 +1072,10 @@ fn compare_values(cx: block, lhs: ValueRef, rhs: ValueRef, rhs_t: ty::t) -> } } -fn store_non_ref_bindings(bcx: block, - data: &ArmData, - opt_temp_cleanups: Option<&DVec>) - -> block -{ +pub fn store_non_ref_bindings(bcx: block, + data: &ArmData, + opt_temp_cleanups: Option<&DVec>) + -> block { /*! * * For each copy/move binding, copy the value from the value @@ -1099,9 +1111,9 @@ fn store_non_ref_bindings(bcx: block, return bcx; } -fn insert_lllocals(bcx: block, - data: &ArmData, - add_cleans: bool) -> block { +pub fn insert_lllocals(bcx: block, + data: &ArmData, + add_cleans: bool) -> block { /*! 
* * For each binding in `data.bindings_map`, adds an appropriate entry into @@ -1139,14 +1151,13 @@ fn insert_lllocals(bcx: block, return bcx; } -fn compile_guard(bcx: block, - guard_expr: @ast::expr, - data: &ArmData, - m: &[@Match], - vals: &[ValueRef], - chk: Option) - -> block -{ +pub fn compile_guard(bcx: block, + guard_expr: @ast::expr, + data: &ArmData, + m: &[@Match], + vals: &[ValueRef], + chk: Option) + -> block { debug!("compile_guard(bcx=%s, guard_expr=%s, m=%s, vals=%?)", bcx.to_str(), bcx.expr_to_str(guard_expr), @@ -1194,11 +1205,10 @@ fn compile_guard(bcx: block, } } -fn compile_submatch(bcx: block, - m: &[@Match], - vals: &[ValueRef], - chk: Option) -{ +pub fn compile_submatch(bcx: block, + m: &[@Match], + vals: &[ValueRef], + chk: Option) { debug!("compile_submatch(bcx=%s, m=%s, vals=%?)", bcx.to_str(), matches_to_str(bcx, m), @@ -1530,21 +1540,21 @@ fn compile_submatch(bcx: block, } } -fn trans_match(bcx: block, - match_expr: @ast::expr, - discr_expr: @ast::expr, - arms: ~[ast::arm], - dest: Dest) -> block { +pub fn trans_match(bcx: block, + match_expr: @ast::expr, + discr_expr: @ast::expr, + arms: ~[ast::arm], + dest: Dest) -> block { let _icx = bcx.insn_ctxt("match::trans_match"); do with_scope(bcx, match_expr.info(), ~"match") |bcx| { trans_match_inner(bcx, discr_expr, arms, dest) } } -fn trans_match_inner(scope_cx: block, - discr_expr: @ast::expr, - arms: &[ast::arm], - dest: Dest) -> block { +pub fn trans_match_inner(scope_cx: block, + discr_expr: @ast::expr, + arms: &[ast::arm], + dest: Dest) -> block { let _icx = scope_cx.insn_ctxt("match::trans_match_inner"); let mut bcx = scope_cx; let tcx = bcx.tcx(); @@ -1659,7 +1669,7 @@ fn trans_match_inner(scope_cx: block, } } -enum IrrefutablePatternBindingMode { +pub enum IrrefutablePatternBindingMode { // Stores the association between node ID and LLVM value in `lllocals`. BindLocal, // Stores the association between node ID and LLVM value in `llargs`. 
@@ -1667,12 +1677,12 @@ enum IrrefutablePatternBindingMode { } // Not match-related, but similar to the pattern-munging code above -fn bind_irrefutable_pat(bcx: block, - pat: @ast::pat, - val: ValueRef, - make_copy: bool, - binding_mode: IrrefutablePatternBindingMode) - -> block { +pub fn bind_irrefutable_pat(bcx: block, + pat: @ast::pat, + val: ValueRef, + make_copy: bool, + binding_mode: IrrefutablePatternBindingMode) + -> block { let _icx = bcx.insn_ctxt("match::bind_irrefutable_pat"); let ccx = bcx.fcx.ccx; let mut bcx = bcx; diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 7961350a69ec..864ffd5e8202 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -55,6 +55,7 @@ use middle::trans::expr; use middle::trans::foreign; use middle::trans::glue; use middle::trans::inline; +use middle::trans::machine; use middle::trans::meth; use middle::trans::monomorphize; use middle::trans::reachable; @@ -88,7 +89,7 @@ use syntax::visit; use syntax::visit::vt; use syntax::{ast, ast_util, codemap, ast_map}; -struct icx_popper { +pub struct icx_popper { ccx: @crate_ctxt, drop { if self.ccx.sess.count_llvm_insns() { @@ -97,17 +98,17 @@ struct icx_popper { } } -fn icx_popper(ccx: @crate_ctxt) -> icx_popper { +pub fn icx_popper(ccx: @crate_ctxt) -> icx_popper { icx_popper { ccx: ccx } } -trait get_insn_ctxt { +pub trait get_insn_ctxt { fn insn_ctxt(s: &str) -> icx_popper; } -impl @crate_ctxt: get_insn_ctxt { +pub impl @crate_ctxt: get_insn_ctxt { fn insn_ctxt(s: &str) -> icx_popper { debug!("new insn_ctxt: %s", s); if self.sess.count_llvm_insns() { @@ -117,27 +118,27 @@ impl @crate_ctxt: get_insn_ctxt { } } -impl block: get_insn_ctxt { +pub impl block: get_insn_ctxt { fn insn_ctxt(s: &str) -> icx_popper { self.ccx().insn_ctxt(s) } } -impl fn_ctxt: get_insn_ctxt { +pub impl fn_ctxt: get_insn_ctxt { fn insn_ctxt(s: &str) -> icx_popper { self.ccx.insn_ctxt(s) } } -fn log_fn_time(ccx: @crate_ctxt, +name: ~str, 
start: time::Timespec, - end: time::Timespec) { +pub fn log_fn_time(ccx: @crate_ctxt, +name: ~str, start: time::Timespec, + end: time::Timespec) { let elapsed = 1000 * ((end.sec - start.sec) as int) + ((end.nsec as int) - (start.nsec as int)) / 1000000; ccx.stats.fn_times.push({ident: name, time: elapsed}); } -fn decl_fn(llmod: ModuleRef, name: ~str, cc: lib::llvm::CallConv, - llty: TypeRef) -> ValueRef { +pub fn decl_fn(llmod: ModuleRef, name: ~str, cc: lib::llvm::CallConv, + llty: TypeRef) -> ValueRef { let llfn: ValueRef = str::as_c_str(name, |buf| { unsafe { llvm::LLVMGetOrInsertFunction(llmod, buf, llty) @@ -149,24 +150,25 @@ fn decl_fn(llmod: ModuleRef, name: ~str, cc: lib::llvm::CallConv, return llfn; } -fn decl_cdecl_fn(llmod: ModuleRef, +name: ~str, llty: TypeRef) -> ValueRef { +pub fn decl_cdecl_fn(llmod: ModuleRef, +name: ~str, llty: TypeRef) + -> ValueRef { return decl_fn(llmod, name, lib::llvm::CCallConv, llty); } // Only use this if you are going to actually define the function. It's // not valid to simply declare a function as internal. -fn decl_internal_cdecl_fn(llmod: ModuleRef, +name: ~str, llty: TypeRef) -> +pub fn decl_internal_cdecl_fn(llmod: ModuleRef, +name: ~str, llty: TypeRef) -> ValueRef { let llfn = decl_cdecl_fn(llmod, name, llty); lib::llvm::SetLinkage(llfn, lib::llvm::InternalLinkage); return llfn; } -fn get_extern_fn(externs: HashMap<~str, ValueRef>, - llmod: ModuleRef, - +name: ~str, - cc: lib::llvm::CallConv, - ty: TypeRef) -> ValueRef { +pub fn get_extern_fn(externs: HashMap<~str, ValueRef>, + llmod: ModuleRef, + +name: ~str, + cc: lib::llvm::CallConv, + ty: TypeRef) -> ValueRef { // XXX: Bad copy. if externs.contains_key(copy name) { return externs.get(name); } // XXX: Bad copy. 
@@ -175,8 +177,8 @@ fn get_extern_fn(externs: HashMap<~str, ValueRef>, return f; } -fn get_extern_const(externs: HashMap<~str, ValueRef>, llmod: ModuleRef, - +name: ~str, ty: TypeRef) -> ValueRef { +pub fn get_extern_const(externs: HashMap<~str, ValueRef>, llmod: ModuleRef, + +name: ~str, ty: TypeRef) -> ValueRef { unsafe { // XXX: Bad copy. if externs.contains_key(copy name) { return externs.get(name); } @@ -188,11 +190,11 @@ fn get_extern_const(externs: HashMap<~str, ValueRef>, llmod: ModuleRef, } } -fn get_simple_extern_fn(cx: block, - externs: HashMap<~str, ValueRef>, - llmod: ModuleRef, - +name: ~str, - n_args: int) -> ValueRef { +pub fn get_simple_extern_fn(cx: block, + externs: HashMap<~str, ValueRef>, + llmod: ModuleRef, + +name: ~str, + n_args: int) -> ValueRef { let _icx = cx.insn_ctxt("get_simple_extern_fn"); let ccx = cx.fcx.ccx; let inputs = vec::from_elem(n_args as uint, ccx.int_type); @@ -201,8 +203,8 @@ fn get_simple_extern_fn(cx: block, return get_extern_fn(externs, llmod, name, lib::llvm::CCallConv, t); } -fn trans_foreign_call(cx: block, externs: HashMap<~str, ValueRef>, - llmod: ModuleRef, +name: ~str, args: ~[ValueRef]) -> +pub fn trans_foreign_call(cx: block, externs: HashMap<~str, ValueRef>, + llmod: ModuleRef, +name: ~str, args: ~[ValueRef]) -> ValueRef { let _icx = cx.insn_ctxt("trans_foreign_call"); let n = args.len() as int; @@ -211,13 +213,13 @@ fn trans_foreign_call(cx: block, externs: HashMap<~str, ValueRef>, return Call(cx, llforeign, args); } -fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { +pub fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let _icx = cx.insn_ctxt("umax"); let cond = ICmp(cx, lib::llvm::IntULT, a, b); return Select(cx, cond, b, a); } -fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { +pub fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let _icx = cx.insn_ctxt("umin"); let cond = ICmp(cx, lib::llvm::IntULT, a, b); return Select(cx, cond, a, b); @@ -226,7 +228,7 @@ fn umin(cx: 
block, a: ValueRef, b: ValueRef) -> ValueRef { // Given a pointer p, returns a pointer sz(p) (i.e., inc'd by sz bytes). // The type of the returned pointer is always i8*. If you care about the // return type, use bump_ptr(). -fn ptr_offs(bcx: block, base: ValueRef, sz: ValueRef) -> ValueRef { +pub fn ptr_offs(bcx: block, base: ValueRef, sz: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("ptr_offs"); let raw = PointerCast(bcx, base, T_ptr(T_i8())); InBoundsGEP(bcx, raw, ~[sz]) @@ -234,7 +236,7 @@ fn ptr_offs(bcx: block, base: ValueRef, sz: ValueRef) -> ValueRef { // Increment a pointer by a given amount and then cast it to be a pointer // to a given type. -fn bump_ptr(bcx: block, t: ty::t, base: ValueRef, sz: ValueRef) -> +pub fn bump_ptr(bcx: block, t: ty::t, base: ValueRef, sz: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("bump_ptr"); let ccx = bcx.ccx(); @@ -246,9 +248,9 @@ fn bump_ptr(bcx: block, t: ty::t, base: ValueRef, sz: ValueRef) -> // Replacement for the LLVM 'GEP' instruction when field indexing into a enum. // @llblobptr is the data part of a enum value; its actual type // is meaningless, as it will be cast away. -fn GEP_enum(bcx: block, llblobptr: ValueRef, enum_id: ast::def_id, - variant_id: ast::def_id, ty_substs: ~[ty::t], - ix: uint) -> ValueRef { +pub fn GEP_enum(bcx: block, llblobptr: ValueRef, enum_id: ast::def_id, + variant_id: ast::def_id, ty_substs: ~[ty::t], + ix: uint) -> ValueRef { let _icx = bcx.insn_ctxt("GEP_enum"); let ccx = bcx.ccx(); let variant = ty::enum_variant_with_id(ccx.tcx, enum_id, variant_id); @@ -267,9 +269,9 @@ fn GEP_enum(bcx: block, llblobptr: ValueRef, enum_id: ast::def_id, // known. // // The runtime equivalent is box_body() in "rust_internal.h". 
-fn opaque_box_body(bcx: block, - body_t: ty::t, - boxptr: ValueRef) -> ValueRef { +pub fn opaque_box_body(bcx: block, + body_t: ty::t, + boxptr: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("opaque_box_body"); let ccx = bcx.ccx(); let boxptr = PointerCast(bcx, boxptr, T_ptr(T_box_header(ccx))); @@ -279,10 +281,10 @@ fn opaque_box_body(bcx: block, // malloc_raw_dyn: allocates a box to contain a given type, but with a // potentially dynamic size. -fn malloc_raw_dyn(bcx: block, - t: ty::t, - heap: heap, - size: ValueRef) -> Result { +pub fn malloc_raw_dyn(bcx: block, + t: ty::t, + heap: heap, + size: ValueRef) -> Result { let _icx = bcx.insn_ctxt("malloc_raw"); let ccx = bcx.ccx(); @@ -322,7 +324,7 @@ fn malloc_raw_dyn(bcx: block, * address space 0. Otherwise the resulting (non-box) pointer will be in the * wrong address space and thus be the wrong type. */ -fn non_gc_box_cast(bcx: block, val: ValueRef) -> ValueRef { +pub fn non_gc_box_cast(bcx: block, val: ValueRef) -> ValueRef { unsafe { debug!("non_gc_box_cast"); add_comment(bcx, ~"non_gc_box_cast"); @@ -336,13 +338,13 @@ fn non_gc_box_cast(bcx: block, val: ValueRef) -> ValueRef { // malloc_raw: expects an unboxed type and returns a pointer to // enough space for a box of that type. This includes a rust_opaque_box // header. 
-fn malloc_raw(bcx: block, t: ty::t, heap: heap) -> Result { +pub fn malloc_raw(bcx: block, t: ty::t, heap: heap) -> Result { malloc_raw_dyn(bcx, t, heap, llsize_of(bcx.ccx(), type_of(bcx.ccx(), t))) } // malloc_general_dyn: usefully wraps malloc_raw_dyn; allocates a box, // and pulls out the body -fn malloc_general_dyn(bcx: block, t: ty::t, heap: heap, size: ValueRef) +pub fn malloc_general_dyn(bcx: block, t: ty::t, heap: heap, size: ValueRef) -> {bcx: block, box: ValueRef, body: ValueRef} { let _icx = bcx.insn_ctxt("malloc_general"); let Result {bcx: bcx, val: llbox} = malloc_raw_dyn(bcx, t, heap, size); @@ -351,27 +353,27 @@ fn malloc_general_dyn(bcx: block, t: ty::t, heap: heap, size: ValueRef) return {bcx: bcx, box: llbox, body: body}; } -fn malloc_general(bcx: block, t: ty::t, heap: heap) +pub fn malloc_general(bcx: block, t: ty::t, heap: heap) -> {bcx: block, box: ValueRef, body: ValueRef} { malloc_general_dyn(bcx, t, heap, llsize_of(bcx.ccx(), type_of(bcx.ccx(), t))) } -fn malloc_boxed(bcx: block, t: ty::t) +pub fn malloc_boxed(bcx: block, t: ty::t) -> {bcx: block, box: ValueRef, body: ValueRef} { malloc_general(bcx, t, heap_shared) } -fn malloc_unique(bcx: block, t: ty::t) +pub fn malloc_unique(bcx: block, t: ty::t) -> {bcx: block, box: ValueRef, body: ValueRef} { malloc_general(bcx, t, heap_exchange) } // Type descriptor and type glue stuff -fn get_tydesc_simple(ccx: @crate_ctxt, t: ty::t) -> ValueRef { +pub fn get_tydesc_simple(ccx: @crate_ctxt, t: ty::t) -> ValueRef { get_tydesc(ccx, t).tydesc } -fn get_tydesc(ccx: @crate_ctxt, t: ty::t) -> @tydesc_info { +pub fn get_tydesc(ccx: @crate_ctxt, t: ty::t) -> @tydesc_info { match ccx.tydescs.find(t) { Some(inf) => inf, _ => { @@ -383,7 +385,7 @@ fn get_tydesc(ccx: @crate_ctxt, t: ty::t) -> @tydesc_info { } } -fn set_no_inline(f: ValueRef) { +pub fn set_no_inline(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, lib::llvm::NoInlineAttribute as c_ulonglong, @@ -391,7 +393,7 @@ fn set_no_inline(f: ValueRef) 
{ } } -fn set_no_unwind(f: ValueRef) { +pub fn set_no_unwind(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, lib::llvm::NoUnwindAttribute as c_ulonglong, @@ -401,7 +403,7 @@ fn set_no_unwind(f: ValueRef) { // Tell LLVM to emit the information necessary to unwind the stack for the // function f. -fn set_uwtable(f: ValueRef) { +pub fn set_uwtable(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, lib::llvm::UWTableAttribute as c_ulonglong, @@ -409,15 +411,15 @@ fn set_uwtable(f: ValueRef) { } } -fn set_inline_hint(f: ValueRef) { +pub fn set_inline_hint(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, lib::llvm::InlineHintAttribute as c_ulonglong, 0u as c_ulonglong); } } -fn set_inline_hint_if_appr(attrs: ~[ast::attribute], - llfn: ValueRef) { +pub fn set_inline_hint_if_appr(attrs: ~[ast::attribute], + llfn: ValueRef) { match attr::find_inline_attr(attrs) { attr::ia_hint => set_inline_hint(llfn), attr::ia_always => set_always_inline(llfn), @@ -426,20 +428,20 @@ fn set_inline_hint_if_appr(attrs: ~[ast::attribute], } } -fn set_always_inline(f: ValueRef) { +pub fn set_always_inline(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, lib::llvm::AlwaysInlineAttribute as c_ulonglong, 0u as c_ulonglong); } } -fn set_custom_stack_growth_fn(f: ValueRef) { +pub fn set_custom_stack_growth_fn(f: ValueRef) { unsafe { llvm::LLVMAddFunctionAttr(f, 0u as c_ulonglong, 1u as c_ulonglong); } } -fn set_glue_inlining(f: ValueRef, t: ty::t) { +pub fn set_glue_inlining(f: ValueRef, t: ty::t) { if ty::type_is_structural(t) { set_no_inline(f); } else { set_always_inline(f); } @@ -447,7 +449,7 @@ fn set_glue_inlining(f: ValueRef, t: ty::t) { // Double-check that we never ask LLVM to declare the same symbol twice. It // silently mangles such symbols, breaking our linkage model. -fn note_unique_llvm_symbol(ccx: @crate_ctxt, +sym: ~str) { +pub fn note_unique_llvm_symbol(ccx: @crate_ctxt, +sym: ~str) { // XXX: Bad copy. 
if ccx.all_llvm_symbols.contains_key(copy sym) { ccx.sess.bug(~"duplicate LLVM symbol: " + sym); @@ -456,8 +458,8 @@ fn note_unique_llvm_symbol(ccx: @crate_ctxt, +sym: ~str) { } -fn get_res_dtor(ccx: @crate_ctxt, did: ast::def_id, - parent_id: ast::def_id, substs: ~[ty::t]) +pub fn get_res_dtor(ccx: @crate_ctxt, did: ast::def_id, + parent_id: ast::def_id, substs: ~[ty::t]) -> ValueRef { let _icx = ccx.insn_ctxt("trans_res_dtor"); if !substs.is_empty() { @@ -480,7 +482,7 @@ fn get_res_dtor(ccx: @crate_ctxt, did: ast::def_id, } // Structural comparison: a rather involved form of glue. -fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: ~str) { +pub fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: ~str) { if cx.sess.opts.save_temps { let _: () = str::as_c_str(s, |buf| { unsafe { @@ -492,10 +494,10 @@ fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: ~str) { // Used only for creating scalar comparison glue. -enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, } +pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, } -fn compare_scalar_types(cx: block, lhs: ValueRef, rhs: ValueRef, - t: ty::t, op: ast::binop) -> Result { +pub fn compare_scalar_types(cx: block, lhs: ValueRef, rhs: ValueRef, + t: ty::t, op: ast::binop) -> Result { let f = |a| compare_scalar_values(cx, lhs, rhs, a, op); match ty::get(t).sty { @@ -521,8 +523,8 @@ fn compare_scalar_types(cx: block, lhs: ValueRef, rhs: ValueRef, // A helper function to do the actual comparison of scalar values. -fn compare_scalar_values(cx: block, lhs: ValueRef, rhs: ValueRef, - nt: scalar_type, op: ast::binop) -> ValueRef { +pub fn compare_scalar_values(cx: block, lhs: ValueRef, rhs: ValueRef, + nt: scalar_type, op: ast::binop) -> ValueRef { let _icx = cx.insn_ctxt("compare_scalar_values"); fn die(cx: block) -> ! 
{ cx.tcx().sess.bug(~"compare_scalar_values: must be a\ @@ -578,20 +580,20 @@ fn compare_scalar_values(cx: block, lhs: ValueRef, rhs: ValueRef, } } -type val_pair_fn = fn@(block, ValueRef, ValueRef) -> block; -type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> block; +pub type val_pair_fn = fn@(block, ValueRef, ValueRef) -> block; +pub type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> block; -fn load_inbounds(cx: block, p: ValueRef, idxs: &[uint]) -> ValueRef { +pub fn load_inbounds(cx: block, p: ValueRef, idxs: &[uint]) -> ValueRef { return Load(cx, GEPi(cx, p, idxs)); } -fn store_inbounds(cx: block, v: ValueRef, p: ValueRef, idxs: &[uint]) { +pub fn store_inbounds(cx: block, v: ValueRef, p: ValueRef, idxs: &[uint]) { Store(cx, v, GEPi(cx, p, idxs)); } // Iterates through the elements of a structural type. -fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, - f: val_and_ty_fn) -> block { +pub fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, + f: val_and_ty_fn) -> block { let _icx = cx.insn_ctxt("iter_structural_ty"); fn iter_variant(cx: block, a_tup: ValueRef, @@ -691,15 +693,15 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, return cx; } -fn cast_shift_expr_rhs(cx: block, op: ast::binop, - lhs: ValueRef, rhs: ValueRef) -> ValueRef { +pub fn cast_shift_expr_rhs(cx: block, op: ast::binop, + lhs: ValueRef, rhs: ValueRef) -> ValueRef { cast_shift_rhs(op, lhs, rhs, |a,b| Trunc(cx, a, b), |a,b| ZExt(cx, a, b)) } -fn cast_shift_const_rhs(op: ast::binop, - lhs: ValueRef, rhs: ValueRef) -> ValueRef { +pub fn cast_shift_const_rhs(op: ast::binop, + lhs: ValueRef, rhs: ValueRef) -> ValueRef { unsafe { cast_shift_rhs(op, lhs, rhs, |a, b| unsafe { llvm::LLVMConstTrunc(a, b) }, @@ -707,11 +709,11 @@ fn cast_shift_const_rhs(op: ast::binop, } } -fn cast_shift_rhs(op: ast::binop, - lhs: ValueRef, rhs: ValueRef, - trunc: fn(ValueRef, TypeRef) -> ValueRef, - zext: fn(ValueRef, TypeRef) -> ValueRef - ) -> ValueRef { +pub fn cast_shift_rhs(op: ast::binop, 
+ lhs: ValueRef, rhs: ValueRef, + trunc: fn(ValueRef, TypeRef) -> ValueRef, + zext: fn(ValueRef, TypeRef) -> ValueRef) + -> ValueRef { // Shifts may have any size int on the rhs unsafe { if ast_util::is_shift_binop(op) { @@ -734,8 +736,8 @@ fn cast_shift_rhs(op: ast::binop, } } -fn fail_if_zero(cx: block, span: span, divmod: ast::binop, - rhs: ValueRef, rhs_t: ty::t) -> block { +pub fn fail_if_zero(cx: block, span: span, divmod: ast::binop, + rhs: ValueRef, rhs_t: ty::t) -> block { let text = if divmod == ast::div { ~"divide by zero" } else { @@ -760,11 +762,11 @@ fn fail_if_zero(cx: block, span: span, divmod: ast::binop, } } -fn null_env_ptr(bcx: block) -> ValueRef { +pub fn null_env_ptr(bcx: block) -> ValueRef { C_null(T_opaque_box_ptr(bcx.ccx())) } -fn trans_external_path(ccx: @crate_ctxt, did: ast::def_id, t: ty::t) +pub fn trans_external_path(ccx: @crate_ctxt, did: ast::def_id, t: ty::t) -> ValueRef { let name = csearch::get_symbol(ccx.sess.cstore, did); match ty::get(t).sty { @@ -780,8 +782,8 @@ fn trans_external_path(ccx: @crate_ctxt, did: ast::def_id, t: ty::t) }; } -fn get_discrim_val(cx: @crate_ctxt, span: span, enum_did: ast::def_id, - variant_did: ast::def_id) -> ValueRef { +pub fn get_discrim_val(cx: @crate_ctxt, span: span, enum_did: ast::def_id, + variant_did: ast::def_id) -> ValueRef { // Can't use `discrims` from the crate context here because // those discriminants have an extra level of indirection, // and there's no LLVM constant load instruction. 
@@ -804,7 +806,7 @@ fn get_discrim_val(cx: @crate_ctxt, span: span, enum_did: ast::def_id, } } -fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef { +pub fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef { unsafe { let _icx = ccx.insn_ctxt("lookup_discriminant"); match ccx.discrims.find(vid) { @@ -825,7 +827,7 @@ fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef { } } -fn invoke(bcx: block, llfn: ValueRef, +llargs: ~[ValueRef]) -> block { +pub fn invoke(bcx: block, llfn: ValueRef, +llargs: ~[ValueRef]) -> block { let _icx = bcx.insn_ctxt("invoke_"); if bcx.unreachable { return bcx; } if need_invoke(bcx) { @@ -840,7 +842,7 @@ fn invoke(bcx: block, llfn: ValueRef, +llargs: ~[ValueRef]) -> block { } } -fn need_invoke(bcx: block) -> bool { +pub fn need_invoke(bcx: block) -> bool { if (bcx.ccx().sess.opts.debugging_opts & session::no_landing_pads != 0) { return false; } @@ -878,7 +880,7 @@ fn need_invoke(bcx: block) -> bool { } } -fn have_cached_lpad(bcx: block) -> bool { +pub fn have_cached_lpad(bcx: block) -> bool { let mut res = false; do in_lpad_scope_cx(bcx) |inf| { match inf.landing_pad { @@ -889,7 +891,7 @@ fn have_cached_lpad(bcx: block) -> bool { return res; } -fn in_lpad_scope_cx(bcx: block, f: fn(scope_info)) { +pub fn in_lpad_scope_cx(bcx: block, f: fn(scope_info)) { let mut bcx = bcx; loop { match bcx.kind { @@ -904,7 +906,7 @@ fn in_lpad_scope_cx(bcx: block, f: fn(scope_info)) { } } -fn get_landing_pad(bcx: block) -> BasicBlockRef { +pub fn get_landing_pad(bcx: block) -> BasicBlockRef { let _icx = bcx.insn_ctxt("get_landing_pad"); let mut cached = None, pad_bcx = bcx; // Guaranteed to be set below @@ -964,10 +966,10 @@ fn get_landing_pad(bcx: block) -> BasicBlockRef { // block, so an SSA value that is valid in the inner block may not be valid in // the outer block. In fact, the inner block may not even execute. Rather // than generate the full SSA form, we just use an alloca'd value. 
-fn add_root_cleanup(bcx: block, - root_info: RootInfo, - root_loc: ValueRef, - ty: ty::t) { +pub fn add_root_cleanup(bcx: block, + root_info: RootInfo, + root_loc: ValueRef, + ty: ty::t) { debug!("add_root_cleanup(bcx=%s, \ scope=%d, \ @@ -1006,7 +1008,7 @@ fn add_root_cleanup(bcx: block, } } -fn do_spill(bcx: block, v: ValueRef, t: ty::t) -> ValueRef { +pub fn do_spill(bcx: block, v: ValueRef, t: ty::t) -> ValueRef { if ty::type_is_bot(t) { return C_null(T_ptr(T_i8())); } @@ -1020,25 +1022,25 @@ fn do_spill(bcx: block, v: ValueRef, t: ty::t) -> ValueRef { // [Note-arg-mode] // ++ mode is temporary, due to how borrowck treats enums. With hope, // will go away anyway when we get rid of modes. -fn do_spill_noroot(++cx: block, v: ValueRef) -> ValueRef { +pub fn do_spill_noroot(++cx: block, v: ValueRef) -> ValueRef { let llptr = alloca(cx, val_ty(v)); Store(cx, v, llptr); return llptr; } -fn spill_if_immediate(cx: block, v: ValueRef, t: ty::t) -> ValueRef { +pub fn spill_if_immediate(cx: block, v: ValueRef, t: ty::t) -> ValueRef { let _icx = cx.insn_ctxt("spill_if_immediate"); if ty::type_is_immediate(t) { return do_spill(cx, v, t); } return v; } -fn load_if_immediate(cx: block, v: ValueRef, t: ty::t) -> ValueRef { +pub fn load_if_immediate(cx: block, v: ValueRef, t: ty::t) -> ValueRef { let _icx = cx.insn_ctxt("load_if_immediate"); if ty::type_is_immediate(t) { return Load(cx, v); } return v; } -fn trans_trace(bcx: block, sp_opt: Option, +trace_str: ~str) { +pub fn trans_trace(bcx: block, sp_opt: Option, +trace_str: ~str) { if !bcx.sess().trace() { return; } let _icx = bcx.insn_ctxt("trans_trace"); // XXX: Bad copy. 
@@ -1063,18 +1065,18 @@ fn trans_trace(bcx: block, sp_opt: Option, +trace_str: ~str) { Call(bcx, ccx.upcalls.trace, args); } -fn build_return(bcx: block) { +pub fn build_return(bcx: block) { let _icx = bcx.insn_ctxt("build_return"); Br(bcx, bcx.fcx.llreturn); } -fn ignore_lhs(_bcx: block, local: @ast::local) -> bool { +pub fn ignore_lhs(_bcx: block, local: @ast::local) -> bool { match local.node.pat.node { ast::pat_wild => true, _ => false } } -fn init_local(bcx: block, local: @ast::local) -> block { +pub fn init_local(bcx: block, local: @ast::local) -> block { debug!("init_local(bcx=%s, local.id=%?)", bcx.to_str(), local.node.id); @@ -1126,7 +1128,7 @@ fn init_local(bcx: block, local: @ast::local) -> block { _match::BindLocal); } -fn trans_stmt(cx: block, s: ast::stmt) -> block { +pub fn trans_stmt(cx: block, s: ast::stmt) -> block { let _icx = cx.insn_ctxt("trans_stmt"); debug!("trans_stmt(%s)", stmt_to_str(s, cx.tcx().sess.intr())); @@ -1162,8 +1164,8 @@ fn trans_stmt(cx: block, s: ast::stmt) -> block { // You probably don't want to use this one. See the // next three functions instead. -fn new_block(cx: fn_ctxt, parent: Option, +kind: block_kind, - is_lpad: bool, +name: ~str, opt_node_info: Option) +pub fn new_block(cx: fn_ctxt, parent: Option, +kind: block_kind, + is_lpad: bool, +name: ~str, opt_node_info: Option) -> block { let s = if cx.ccx.sess.opts.save_temps || cx.ccx.sess.opts.debuginfo { @@ -1188,7 +1190,7 @@ fn new_block(cx: fn_ctxt, parent: Option, +kind: block_kind, } } -fn simple_block_scope() -> block_kind { +pub fn simple_block_scope() -> block_kind { block_scope(scope_info { loop_break: None, loop_label: None, @@ -1199,20 +1201,24 @@ fn simple_block_scope() -> block_kind { } // Use this when you're at the top block of a function or the like. 
-fn top_scope_block(fcx: fn_ctxt, opt_node_info: Option) -> block { +pub fn top_scope_block(fcx: fn_ctxt, opt_node_info: Option) + -> block { return new_block(fcx, None, simple_block_scope(), false, ~"function top level", opt_node_info); } -fn scope_block(bcx: block, - opt_node_info: Option, - +n: ~str) -> block { +pub fn scope_block(bcx: block, + opt_node_info: Option, + +n: ~str) -> block { return new_block(bcx.fcx, Some(bcx), simple_block_scope(), bcx.is_lpad, n, opt_node_info); } -fn loop_scope_block(bcx: block, loop_break: block, loop_label: Option, - +n: ~str, opt_node_info: Option) -> block { +pub fn loop_scope_block(bcx: block, + loop_break: block, + loop_label: Option, + +n: ~str, + opt_node_info: Option) -> block { return new_block(bcx.fcx, Some(bcx), block_scope(scope_info { loop_break: Some(loop_break), loop_label: loop_label, @@ -1223,16 +1229,16 @@ fn loop_scope_block(bcx: block, loop_break: block, loop_label: Option, } // Use this when creating a block for the inside of a landing pad. -fn lpad_block(bcx: block, +n: ~str) -> block { +pub fn lpad_block(bcx: block, +n: ~str) -> block { new_block(bcx.fcx, Some(bcx), block_non_scope, true, n, None) } // Use this when you're making a general CFG BB within a scope. -fn sub_block(bcx: block, +n: ~str) -> block { +pub fn sub_block(bcx: block, +n: ~str) -> block { new_block(bcx.fcx, Some(bcx), block_non_scope, bcx.is_lpad, n, None) } -fn raw_block(fcx: fn_ctxt, is_lpad: bool, llbb: BasicBlockRef) -> block { +pub fn raw_block(fcx: fn_ctxt, is_lpad: bool, llbb: BasicBlockRef) -> block { mk_block(llbb, None, block_non_scope, is_lpad, None, fcx) } @@ -1244,14 +1250,14 @@ fn raw_block(fcx: fn_ctxt, is_lpad: bool, llbb: BasicBlockRef) -> block { // need to make sure those variables go out of scope when the block ends. We // do that by running a 'cleanup' function for each variable. // trans_block_cleanups runs all the cleanup functions for the block. 
-fn trans_block_cleanups(bcx: block, +cleanups: ~[cleanup]) -> block { +pub fn trans_block_cleanups(bcx: block, +cleanups: ~[cleanup]) -> block { trans_block_cleanups_(bcx, cleanups, false) } -fn trans_block_cleanups_(bcx: block, - +cleanups: ~[cleanup], - /* cleanup_cx: block, */ is_lpad: bool) -> - block { +pub fn trans_block_cleanups_(bcx: block, + +cleanups: ~[cleanup], + /* cleanup_cx: block, */ + is_lpad: bool) -> block { let _icx = bcx.insn_ctxt("trans_block_cleanups"); // NB: Don't short-circuit even if this block is unreachable because // GC-based cleanup needs to the see that the roots are live. @@ -1276,9 +1282,9 @@ fn trans_block_cleanups_(bcx: block, // In the last argument, Some(block) mean jump to this block, and none means // this is a landing pad and leaving should be accomplished with a resume // instruction. -fn cleanup_and_leave(bcx: block, - upto: Option, - leave: Option) { +pub fn cleanup_and_leave(bcx: block, + upto: Option, + leave: Option) { let _icx = bcx.insn_ctxt("cleanup_and_leave"); let mut cur = bcx, bcx = bcx; let is_lpad = leave == None; @@ -1320,13 +1326,12 @@ fn cleanup_and_leave(bcx: block, } } -fn cleanup_and_Br(bcx: block, upto: block, - target: BasicBlockRef) { +pub fn cleanup_and_Br(bcx: block, upto: block, target: BasicBlockRef) { let _icx = bcx.insn_ctxt("cleanup_and_Br"); cleanup_and_leave(bcx, Some(upto.llbb), Some(target)); } -fn leave_block(bcx: block, out_of: block) -> block { +pub fn leave_block(bcx: block, out_of: block) -> block { let _icx = bcx.insn_ctxt("leave_block"); let next_cx = sub_block(block_parent(out_of), ~"next"); if bcx.unreachable { Unreachable(next_cx); } @@ -1334,8 +1339,10 @@ fn leave_block(bcx: block, out_of: block) -> block { next_cx } -fn with_scope(bcx: block, opt_node_info: Option, - +name: ~str, f: fn(block) -> block) -> block { +pub fn with_scope(bcx: block, + opt_node_info: Option, + +name: ~str, + f: fn(block) -> block) -> block { let _icx = bcx.insn_ctxt("with_scope"); 
debug!("with_scope(bcx=%s, opt_node_info=%?, name=%s)", @@ -1347,11 +1354,10 @@ fn with_scope(bcx: block, opt_node_info: Option, leave_block(f(scope_cx), scope_cx) } -fn with_scope_result(bcx: block, - opt_node_info: Option, - +name: ~str, - f: fn(block) -> Result) - -> Result { +pub fn with_scope_result(bcx: block, + opt_node_info: Option, + +name: ~str, + f: fn(block) -> Result) -> Result { let _icx = bcx.insn_ctxt("with_scope_result"); let scope_cx = scope_block(bcx, opt_node_info, name); Br(bcx, scope_cx.llbb); @@ -1359,10 +1365,9 @@ fn with_scope_result(bcx: block, rslt(leave_block(bcx, scope_cx), val) } -fn with_scope_datumblock(bcx: block, opt_node_info: Option, - +name: ~str, f: fn(block) -> datum::DatumBlock) - -> datum::DatumBlock -{ +pub fn with_scope_datumblock(bcx: block, opt_node_info: Option, + +name: ~str, f: fn(block) -> datum::DatumBlock) + -> datum::DatumBlock { use middle::trans::datum::DatumBlock; let _icx = bcx.insn_ctxt("with_scope_result"); @@ -1372,7 +1377,7 @@ fn with_scope_datumblock(bcx: block, opt_node_info: Option, DatumBlock {bcx: leave_block(bcx, scope_cx), datum: datum} } -fn block_locals(b: ast::blk, it: fn(@ast::local)) { +pub fn block_locals(b: ast::blk, it: fn(@ast::local)) { for vec::each(b.node.stmts) |s| { match s.node { ast::stmt_decl(d, _) => { @@ -1390,7 +1395,7 @@ fn block_locals(b: ast::blk, it: fn(@ast::local)) { } } -fn alloc_local(cx: block, local: @ast::local) -> block { +pub fn alloc_local(cx: block, local: @ast::local) -> block { let _icx = cx.insn_ctxt("alloc_local"); let t = node_id_type(cx, local.node.id); let simple_name = match local.node.pat.node { @@ -1412,7 +1417,7 @@ fn alloc_local(cx: block, local: @ast::local) -> block { } -fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block { +pub fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block { let _icx = bcx.insn_ctxt("with_cond"); let next_cx = base::sub_block(bcx, ~"next"); let cond_cx = base::sub_block(bcx, ~"cond"); @@ 
-1422,8 +1427,8 @@ fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block { next_cx } -fn call_memcpy(cx: block, dst: ValueRef, src: ValueRef, - n_bytes: ValueRef) { +pub fn call_memcpy(cx: block, dst: ValueRef, src: ValueRef, + n_bytes: ValueRef) { // FIXME (Related to #1645, I think?): Provide LLVM with better // alignment information when the alignment is statically known (it must // be nothing more than a constant int, or LLVM complains -- not even a @@ -1443,7 +1448,7 @@ fn call_memcpy(cx: block, dst: ValueRef, src: ValueRef, Call(cx, memcpy, ~[dst_ptr, src_ptr, size, align, volatile]); } -fn memcpy_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) { +pub fn memcpy_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) { let _icx = bcx.insn_ctxt("memcpy_ty"); let ccx = bcx.ccx(); if ty::type_is_structural(t) { @@ -1454,7 +1459,7 @@ fn memcpy_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) { } } -fn zero_mem(cx: block, llptr: ValueRef, t: ty::t) { +pub fn zero_mem(cx: block, llptr: ValueRef, t: ty::t) { let _icx = cx.insn_ctxt("zero_mem"); let bcx = cx; let ccx = cx.ccx(); @@ -1467,7 +1472,7 @@ fn zero_mem(cx: block, llptr: ValueRef, t: ty::t) { // allocation for large data structures, and the generated code will be // awful. (A telltale sign of this is large quantities of // `mov [byte ptr foo],0` in the generated code.) 
-fn memzero(cx: block, llptr: ValueRef, llty: TypeRef) { +pub fn memzero(cx: block, llptr: ValueRef, llty: TypeRef) { let _icx = cx.insn_ctxt("memzero"); let ccx = cx.ccx(); @@ -1484,13 +1489,13 @@ fn memzero(cx: block, llptr: ValueRef, llty: TypeRef) { let llintrinsicfn = ccx.intrinsics.get(intrinsic_key); let llptr = PointerCast(cx, llptr, T_ptr(T_i8())); let llzeroval = C_u8(0); - let size = IntCast(cx, shape::llsize_of(ccx, llty), ccx.int_type); + let size = IntCast(cx, machine::llsize_of(ccx, llty), ccx.int_type); let align = C_i32(1i32); let volatile = C_bool(false); Call(cx, llintrinsicfn, ~[llptr, llzeroval, size, align, volatile]); } -fn alloc_ty(bcx: block, t: ty::t) -> ValueRef { +pub fn alloc_ty(bcx: block, t: ty::t) -> ValueRef { let _icx = bcx.insn_ctxt("alloc_ty"); let ccx = bcx.ccx(); let llty = type_of::type_of(ccx, t); @@ -1500,11 +1505,11 @@ fn alloc_ty(bcx: block, t: ty::t) -> ValueRef { return val; } -fn alloca(cx: block, t: TypeRef) -> ValueRef { +pub fn alloca(cx: block, t: TypeRef) -> ValueRef { alloca_maybe_zeroed(cx, t, false) } -fn alloca_maybe_zeroed(cx: block, t: TypeRef, zero: bool) -> ValueRef { +pub fn alloca_maybe_zeroed(cx: block, t: TypeRef, zero: bool) -> ValueRef { let _icx = cx.insn_ctxt("alloca"); if cx.unreachable { unsafe { @@ -1517,7 +1522,7 @@ fn alloca_maybe_zeroed(cx: block, t: TypeRef, zero: bool) -> ValueRef { return p; } -fn arrayalloca(cx: block, t: TypeRef, v: ValueRef) -> ValueRef { +pub fn arrayalloca(cx: block, t: TypeRef, v: ValueRef) -> ValueRef { let _icx = cx.insn_ctxt("arrayalloca"); if cx.unreachable { unsafe { @@ -1529,7 +1534,7 @@ fn arrayalloca(cx: block, t: TypeRef, v: ValueRef) -> ValueRef { } // Creates the standard set of basic blocks for a function -fn mk_standard_basic_blocks(llfn: ValueRef) -> +pub fn mk_standard_basic_blocks(llfn: ValueRef) -> {sa: BasicBlockRef, rt: BasicBlockRef} { unsafe { {sa: str::as_c_str(~"static_allocas", @@ -1546,13 +1551,13 @@ fn mk_standard_basic_blocks(llfn: ValueRef) 
-> // - create_llargs_for_fn_args. // - new_fn_ctxt // - trans_args -fn new_fn_ctxt_w_id(ccx: @crate_ctxt, - +path: path, - llfndecl: ValueRef, - id: ast::node_id, - impl_id: Option, - +param_substs: Option, - sp: Option) -> fn_ctxt { +pub fn new_fn_ctxt_w_id(ccx: @crate_ctxt, + +path: path, + llfndecl: ValueRef, + id: ast::node_id, + impl_id: Option, + +param_substs: Option, + sp: Option) -> fn_ctxt { let llbbs = mk_standard_basic_blocks(llfndecl); return @fn_ctxt_ { llfn: llfndecl, @@ -1576,11 +1581,11 @@ fn new_fn_ctxt_w_id(ccx: @crate_ctxt, }; } -fn new_fn_ctxt(ccx: @crate_ctxt, - +path: path, - llfndecl: ValueRef, - sp: Option) - -> fn_ctxt { +pub fn new_fn_ctxt(ccx: @crate_ctxt, + +path: path, + llfndecl: ValueRef, + sp: Option) + -> fn_ctxt { return new_fn_ctxt_w_id(ccx, path, llfndecl, -1, None, None, sp); } @@ -1598,9 +1603,9 @@ fn new_fn_ctxt(ccx: @crate_ctxt, // spaces that have been created for them (by code in the llallocas field of // the function's fn_ctxt). create_llargs_for_fn_args populates the llargs // field of the fn_ctxt with -fn create_llargs_for_fn_args(cx: fn_ctxt, - ty_self: self_arg, - args: ~[ast::arg]) -> ~[ValueRef] { +pub fn create_llargs_for_fn_args(cx: fn_ctxt, + ty_self: self_arg, + args: ~[ast::arg]) -> ~[ValueRef] { let _icx = cx.insn_ctxt("create_llargs_for_fn_args"); match ty_self { @@ -1631,11 +1636,11 @@ fn create_llargs_for_fn_args(cx: fn_ctxt, }) } -fn copy_args_to_allocas(fcx: fn_ctxt, - bcx: block, - args: &[ast::arg], - raw_llargs: &[ValueRef], - arg_tys: &[ty::arg]) -> block { +pub fn copy_args_to_allocas(fcx: fn_ctxt, + bcx: block, + args: &[ast::arg], + raw_llargs: &[ValueRef], + arg_tys: &[ty::arg]) -> block { let _icx = fcx.insn_ctxt("copy_args_to_allocas"); let tcx = bcx.tcx(); let mut bcx = bcx; @@ -1709,14 +1714,14 @@ fn copy_args_to_allocas(fcx: fn_ctxt, // Ties up the llstaticallocas -> llloadenv -> lltop edges, // and builds the return block. 
-fn finish_fn(fcx: fn_ctxt, lltop: BasicBlockRef) { +pub fn finish_fn(fcx: fn_ctxt, lltop: BasicBlockRef) { let _icx = fcx.insn_ctxt("finish_fn"); tie_up_header_blocks(fcx, lltop); let ret_cx = raw_block(fcx, false, fcx.llreturn); RetVoid(ret_cx); } -fn tie_up_header_blocks(fcx: fn_ctxt, lltop: BasicBlockRef) { +pub fn tie_up_header_blocks(fcx: fn_ctxt, lltop: BasicBlockRef) { let _icx = fcx.insn_ctxt("tie_up_header_blocks"); match fcx.llloadenv { Some(copy ll) => { @@ -1729,22 +1734,22 @@ fn tie_up_header_blocks(fcx: fn_ctxt, lltop: BasicBlockRef) { } } -enum self_arg { impl_self(ty::t), impl_owned_self(ty::t), no_self, } +pub enum self_arg { impl_self(ty::t), impl_owned_self(ty::t), no_self, } // trans_closure: Builds an LLVM function out of a source function. // If the function closes over its environment a closure will be // returned. -fn trans_closure(ccx: @crate_ctxt, - +path: path, - decl: ast::fn_decl, - body: ast::blk, - llfndecl: ValueRef, - ty_self: self_arg, - +param_substs: Option, - id: ast::node_id, - impl_id: Option, - maybe_load_env: fn(fn_ctxt), - finish: fn(block)) { +pub fn trans_closure(ccx: @crate_ctxt, + +path: path, + decl: ast::fn_decl, + body: ast::blk, + llfndecl: ValueRef, + ty_self: self_arg, + +param_substs: Option, + id: ast::node_id, + impl_id: Option, + maybe_load_env: fn(fn_ctxt), + finish: fn(block)) { ccx.stats.n_closures += 1; let _icx = ccx.insn_ctxt("trans_closure"); set_uwtable(llfndecl); @@ -1798,15 +1803,15 @@ fn trans_closure(ccx: @crate_ctxt, // trans_fn: creates an LLVM function corresponding to a source language // function. 
-fn trans_fn(ccx: @crate_ctxt, - +path: path, - decl: ast::fn_decl, - body: ast::blk, - llfndecl: ValueRef, - ty_self: self_arg, - +param_substs: Option, - id: ast::node_id, - impl_id: Option) { +pub fn trans_fn(ccx: @crate_ctxt, + +path: path, + decl: ast::fn_decl, + body: ast::blk, + llfndecl: ValueRef, + ty_self: self_arg, + +param_substs: Option, + id: ast::node_id, + impl_id: Option) { let do_time = ccx.sess.trans_stats(); let start = if do_time { time::get_time() } else { time::Timespec::new(0, 0) }; @@ -1828,14 +1833,14 @@ fn trans_fn(ccx: @crate_ctxt, } } -fn trans_enum_variant(ccx: @crate_ctxt, - enum_id: ast::node_id, - variant: ast::variant, - args: ~[ast::variant_arg], - disr: int, - is_degen: bool, - +param_substs: Option, - llfndecl: ValueRef) { +pub fn trans_enum_variant(ccx: @crate_ctxt, + enum_id: ast::node_id, + variant: ast::variant, + args: ~[ast::variant_arg], + disr: int, + is_degen: bool, + +param_substs: Option, + llfndecl: ValueRef) { let _icx = ccx.insn_ctxt("trans_enum_variant"); // Translate variant arguments to function arguments. let fn_args = do args.map |varg| { @@ -1894,11 +1899,11 @@ fn trans_enum_variant(ccx: @crate_ctxt, // NB: In theory this should be merged with the function above. But the AST // structures are completely different, so very little code would be shared. -fn trans_tuple_struct(ccx: @crate_ctxt, - fields: ~[@ast::struct_field], - ctor_id: ast::node_id, - +param_substs: Option, - llfndecl: ValueRef) { +pub fn trans_tuple_struct(ccx: @crate_ctxt, + fields: ~[@ast::struct_field], + ctor_id: ast::node_id, + +param_substs: Option, + llfndecl: ValueRef) { let _icx = ccx.insn_ctxt("trans_tuple_struct"); // Translate struct fields to function arguments. 
@@ -1942,14 +1947,14 @@ fn trans_tuple_struct(ccx: @crate_ctxt, finish_fn(fcx, lltop); } -fn trans_struct_dtor(ccx: @crate_ctxt, - +path: path, - body: ast::blk, - dtor_id: ast::node_id, - +psubsts: Option, - hash_id: Option, - parent_id: ast::def_id) - -> ValueRef { +pub fn trans_struct_dtor(ccx: @crate_ctxt, + +path: path, + body: ast::blk, + dtor_id: ast::node_id, + +psubsts: Option, + hash_id: Option, + parent_id: ast::def_id) + -> ValueRef { let tcx = ccx.tcx; /* Look up the parent class's def_id */ let mut class_ty = ty::lookup_item_type(tcx, parent_id).ty; @@ -1980,10 +1985,10 @@ fn trans_struct_dtor(ccx: @crate_ctxt, lldecl } -fn trans_enum_def(ccx: @crate_ctxt, enum_definition: ast::enum_def, - id: ast::node_id, tps: ~[ast::ty_param], degen: bool, - path: @ast_map::path, vi: @~[ty::VariantInfo], - i: &mut uint) { +pub fn trans_enum_def(ccx: @crate_ctxt, enum_definition: ast::enum_def, + id: ast::node_id, tps: ~[ast::ty_param], degen: bool, + path: @ast_map::path, vi: @~[ty::VariantInfo], + i: &mut uint) { for vec::each(enum_definition.variants) |variant| { let disr_val = vi[*i].disr_val; *i += 1; @@ -2015,7 +2020,7 @@ fn trans_enum_def(ccx: @crate_ctxt, enum_definition: ast::enum_def, } } -fn trans_item(ccx: @crate_ctxt, item: ast::item) { +pub fn trans_item(ccx: @crate_ctxt, item: ast::item) { let _icx = ccx.insn_ctxt("trans_item"); let path = match ccx.tcx.items.get(item.id) { ast_map::node_item(_, p) => p, @@ -2081,9 +2086,9 @@ fn trans_item(ccx: @crate_ctxt, item: ast::item) { } } -fn trans_struct_def(ccx: @crate_ctxt, struct_def: @ast::struct_def, - tps: ~[ast::ty_param], path: @ast_map::path, - id: ast::node_id) { +pub fn trans_struct_def(ccx: @crate_ctxt, struct_def: @ast::struct_def, + tps: ~[ast::ty_param], path: @ast_map::path, + id: ast::node_id) { // If there are type parameters, the destructor and constructor will be // monomorphized, so we don't translate them here. 
if tps.len() == 0u { @@ -2112,49 +2117,49 @@ fn trans_struct_def(ccx: @crate_ctxt, struct_def: @ast::struct_def, // separate modules in the compiled program. That's because modules exist // only as a convenience for humans working with the code, to organize names // and control visibility. -fn trans_mod(ccx: @crate_ctxt, m: ast::_mod) { +pub fn trans_mod(ccx: @crate_ctxt, m: ast::_mod) { let _icx = ccx.insn_ctxt("trans_mod"); for vec::each(m.items) |item| { trans_item(ccx, **item); } } -fn get_pair_fn_ty(llpairty: TypeRef) -> TypeRef { +pub fn get_pair_fn_ty(llpairty: TypeRef) -> TypeRef { // Bit of a kludge: pick the fn typeref out of the pair. return struct_elt(llpairty, 0u); } -fn register_fn(ccx: @crate_ctxt, - sp: span, - +path: path, - node_id: ast::node_id, - attrs: &[ast::attribute]) - -> ValueRef { +pub fn register_fn(ccx: @crate_ctxt, + sp: span, + +path: path, + node_id: ast::node_id, + attrs: &[ast::attribute]) + -> ValueRef { let t = ty::node_id_to_type(ccx.tcx, node_id); register_fn_full(ccx, sp, path, node_id, attrs, t) } -fn register_fn_full(ccx: @crate_ctxt, - sp: span, - +path: path, - node_id: ast::node_id, - attrs: &[ast::attribute], - node_type: ty::t) - -> ValueRef { +pub fn register_fn_full(ccx: @crate_ctxt, + sp: span, + +path: path, + node_id: ast::node_id, + attrs: &[ast::attribute], + node_type: ty::t) + -> ValueRef { let llfty = type_of_fn_from_ty(ccx, node_type); register_fn_fuller(ccx, sp, path, node_id, attrs, node_type, lib::llvm::CCallConv, llfty) } -fn register_fn_fuller(ccx: @crate_ctxt, - sp: span, - +path: path, - node_id: ast::node_id, - attrs: &[ast::attribute], - node_type: ty::t, - cc: lib::llvm::CallConv, - llfty: TypeRef) - -> ValueRef { +pub fn register_fn_fuller(ccx: @crate_ctxt, + sp: span, + +path: path, + node_id: ast::node_id, + attrs: &[ast::attribute], + node_type: ty::t, + cc: lib::llvm::CallConv, + llfty: TypeRef) + -> ValueRef { debug!("register_fn_fuller creating fn for item %d with path %s", node_id, 
ast_map::path_to_str(path, ccx.sess.parse_sess.interner)); @@ -2178,7 +2183,7 @@ fn register_fn_fuller(ccx: @crate_ctxt, llfn } -fn is_main_fn(sess: &Session, node_id: ast::node_id) -> bool { +pub fn is_main_fn(sess: &Session, node_id: ast::node_id) -> bool { match sess.main_fn { Some((main_id, _)) => node_id == main_id, None => false @@ -2187,7 +2192,7 @@ fn is_main_fn(sess: &Session, node_id: ast::node_id) -> bool { // Create a _rust_main(args: ~[str]) function which will be called from the // runtime rust_start function -fn create_main_wrapper(ccx: @crate_ctxt, _sp: span, main_llfn: ValueRef) { +pub fn create_main_wrapper(ccx: @crate_ctxt, _sp: span, main_llfn: ValueRef) { let llfn = create_main(ccx, main_llfn); create_entry_fn(ccx, llfn); @@ -2271,8 +2276,8 @@ fn create_main_wrapper(ccx: @crate_ctxt, _sp: span, main_llfn: ValueRef) { } } -fn fill_fn_pair(bcx: block, pair: ValueRef, llfn: ValueRef, - llenvptr: ValueRef) { +pub fn fill_fn_pair(bcx: block, pair: ValueRef, llfn: ValueRef, + llenvptr: ValueRef) { let ccx = bcx.ccx(); let code_cell = GEPi(bcx, pair, [0u, abi::fn_field_code]); Store(bcx, llfn, code_cell); @@ -2281,7 +2286,7 @@ fn fill_fn_pair(bcx: block, pair: ValueRef, llfn: ValueRef, Store(bcx, llenvblobptr, env_cell); } -fn item_path(ccx: @crate_ctxt, i: @ast::item) -> path { +pub fn item_path(ccx: @crate_ctxt, i: @ast::item) -> path { vec::append( /*bad*/copy *match ccx.tcx.items.get(i.id) { ast_map::node_item(_, p) => p, @@ -2293,11 +2298,11 @@ fn item_path(ccx: @crate_ctxt, i: @ast::item) -> path { /* If there's already a symbol for the dtor with and substs , return it; otherwise, create one and register it, returning it as well */ -fn get_dtor_symbol(ccx: @crate_ctxt, - +path: path, - id: ast::node_id, - +substs: Option) - -> ~str { +pub fn get_dtor_symbol(ccx: @crate_ctxt, + +path: path, + id: ast::node_id, + +substs: Option) + -> ~str { let t = ty::node_id_to_type(ccx.tcx, id); match ccx.item_symbols.find(id) { Some(ref s) => (/*bad*/copy 
*s), @@ -2331,7 +2336,7 @@ fn get_dtor_symbol(ccx: @crate_ctxt, } } -fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { +pub fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { debug!("get_item_val(id=`%?`)", id); let tcx = ccx.tcx; match ccx.item_vals.find(id) { @@ -2500,8 +2505,10 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { } } -fn register_method(ccx: @crate_ctxt, id: ast::node_id, pth: @ast_map::path, - m: @ast::method) -> ValueRef { +pub fn register_method(ccx: @crate_ctxt, + id: ast::node_id, + pth: @ast_map::path, + m: @ast::method) -> ValueRef { let mty = ty::node_id_to_type(ccx.tcx, id); let pth = vec::append(/*bad*/copy *pth, ~[path_name((ccx.names)(~"meth")), path_name(m.ident)]); @@ -2511,7 +2518,7 @@ fn register_method(ccx: @crate_ctxt, id: ast::node_id, pth: @ast_map::path, } // The constant translation pass. -fn trans_constant(ccx: @crate_ctxt, it: @ast::item) { +pub fn trans_constant(ccx: @crate_ctxt, it: @ast::item) { let _icx = ccx.insn_ctxt("trans_constant"); match it.node { ast::item_enum(ref enum_definition, _) => { @@ -2548,7 +2555,7 @@ fn trans_constant(ccx: @crate_ctxt, it: @ast::item) { } } -fn trans_constants(ccx: @crate_ctxt, crate: &ast::crate) { +pub fn trans_constants(ccx: @crate_ctxt, crate: &ast::crate) { visit::visit_crate( *crate, (), visit::mk_simple_visitor(@visit::SimpleVisitor { @@ -2557,18 +2564,18 @@ fn trans_constants(ccx: @crate_ctxt, crate: &ast::crate) { })); } -fn vp2i(cx: block, v: ValueRef) -> ValueRef { +pub fn vp2i(cx: block, v: ValueRef) -> ValueRef { let ccx = cx.ccx(); return PtrToInt(cx, v, ccx.int_type); } -fn p2i(ccx: @crate_ctxt, v: ValueRef) -> ValueRef { +pub fn p2i(ccx: @crate_ctxt, v: ValueRef) -> ValueRef { unsafe { return llvm::LLVMConstPtrToInt(v, ccx.int_type); } } -fn declare_intrinsics(llmod: ModuleRef) -> HashMap<~str, ValueRef> { +pub fn declare_intrinsics(llmod: ModuleRef) -> HashMap<~str, ValueRef> { let T_memcpy32_args: ~[TypeRef] = 
~[T_ptr(T_i8()), T_ptr(T_i8()), T_i32(), T_i32(), T_i1()]; let T_memcpy64_args: ~[TypeRef] = @@ -2761,8 +2768,8 @@ fn declare_intrinsics(llmod: ModuleRef) -> HashMap<~str, ValueRef> { return intrinsics; } -fn declare_dbg_intrinsics(llmod: ModuleRef, - intrinsics: HashMap<~str, ValueRef>) { +pub fn declare_dbg_intrinsics(llmod: ModuleRef, + intrinsics: HashMap<~str, ValueRef>) { let declare = decl_cdecl_fn(llmod, ~"llvm.dbg.declare", T_fn(~[T_metadata(), T_metadata()], T_void())); @@ -2774,7 +2781,7 @@ fn declare_dbg_intrinsics(llmod: ModuleRef, intrinsics.insert(~"llvm.dbg.value", value); } -fn trap(bcx: block) { +pub fn trap(bcx: block) { let v: ~[ValueRef] = ~[]; match bcx.ccx().intrinsics.find(~"llvm.trap") { Some(x) => { Call(bcx, x, v); }, @@ -2782,7 +2789,7 @@ fn trap(bcx: block) { } } -fn decl_gc_metadata(ccx: @crate_ctxt, llmod_id: ~str) { +pub fn decl_gc_metadata(ccx: @crate_ctxt, llmod_id: ~str) { if !ccx.sess.opts.gc || !ccx.uses_gc { return; } @@ -2800,7 +2807,7 @@ fn decl_gc_metadata(ccx: @crate_ctxt, llmod_id: ~str) { } } -fn create_module_map(ccx: @crate_ctxt) -> ValueRef { +pub fn create_module_map(ccx: @crate_ctxt) -> ValueRef { let elttype = T_struct(~[ccx.int_type, ccx.int_type]); let maptype = T_array(elttype, ccx.module_data.size() + 1u); let map = str::as_c_str(~"_rust_mod_map", |buf| { @@ -2826,8 +2833,8 @@ fn create_module_map(ccx: @crate_ctxt) -> ValueRef { } -fn decl_crate_map(sess: session::Session, mapmeta: link_meta, - llmod: ModuleRef) -> ValueRef { +pub fn decl_crate_map(sess: session::Session, mapmeta: link_meta, + llmod: ModuleRef) -> ValueRef { let targ_cfg = sess.targ_cfg; let int_type = T_int(targ_cfg); let mut n_subcrates = 1; @@ -2849,7 +2856,7 @@ fn decl_crate_map(sess: session::Session, mapmeta: link_meta, return map; } -fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { +pub fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { let mut subcrates: ~[ValueRef] = ~[]; let mut i = 1; let cstore = ccx.sess.cstore; @@ -2890,7 
+2897,7 @@ fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { } } -fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) -> encoder::encode_parms { +pub fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) -> encoder::encode_parms { let encode_inlined_item: encoder::encode_inlined_item = |ecx, ebml_w, path, ii| astencode::encode_inlined_item(ecx, ebml_w, path, ii, cx.maps); @@ -2908,7 +2915,7 @@ fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) -> encoder::encode_parms { }; } -fn write_metadata(cx: @crate_ctxt, crate: &ast::crate) { +pub fn write_metadata(cx: @crate_ctxt, crate: &ast::crate) { if !cx.sess.building_library { return; } let encode_parms = crate_ctxt_to_encode_parms(cx); let llmeta = C_bytes(encoder::encode_metadata(encode_parms, crate)); @@ -2936,17 +2943,17 @@ fn write_metadata(cx: @crate_ctxt, crate: &ast::crate) { } // Writes the current ABI version into the crate. -fn write_abi_version(ccx: @crate_ctxt) { +pub fn write_abi_version(ccx: @crate_ctxt) { mk_global(ccx, ~"rust_abi_version", C_uint(ccx, abi::abi_version), false); } -fn trans_crate(sess: session::Session, - crate: @ast::crate, - tcx: ty::ctxt, - output: &Path, - emap2: resolve::ExportMap2, - maps: astencode::maps) -> (ModuleRef, link_meta) { +pub fn trans_crate(sess: session::Session, + crate: @ast::crate, + tcx: ty::ctxt, + output: &Path, + emap2: resolve::ExportMap2, + maps: astencode::maps) -> (ModuleRef, link_meta) { let symbol_hasher = @hash::default_state(); let link_meta = diff --git a/src/librustc/middle/trans/build.rs b/src/librustc/middle/trans/build.rs index 060bef1a44c6..584e5d9f1848 100644 --- a/src/librustc/middle/trans/build.rs +++ b/src/librustc/middle/trans/build.rs @@ -25,19 +25,19 @@ use core::vec; use std::map::HashMap; use syntax::codemap; -fn terminate(cx: block, _: &str) { +pub fn terminate(cx: block, _: &str) { unsafe { cx.terminated = true; } } -fn check_not_terminated(cx: block) { +pub fn check_not_terminated(cx: block) { if cx.terminated { fail ~"already terminated!"; } } 
-fn B(cx: block) -> BuilderRef { +pub fn B(cx: block) -> BuilderRef { unsafe { let b = cx.fcx.ccx.builder.B; llvm::LLVMPositionBuilderAtEnd(b, cx.llbb); @@ -45,7 +45,7 @@ fn B(cx: block) -> BuilderRef { } } -fn count_insn(cx: block, category: &str) { +pub fn count_insn(cx: block, category: &str) { if cx.ccx().sess.count_llvm_insns() { let h = cx.ccx().stats.llvm_insns; @@ -96,7 +96,7 @@ fn count_insn(cx: block, category: &str) { // for (fail/break/return statements, call to diverging functions, etc), and // further instructions to the block should simply be ignored. -fn RetVoid(cx: block) { +pub fn RetVoid(cx: block) { unsafe { if cx.unreachable { return; } check_not_terminated(cx); @@ -106,7 +106,7 @@ fn RetVoid(cx: block) { } } -fn Ret(cx: block, V: ValueRef) { +pub fn Ret(cx: block, V: ValueRef) { unsafe { if cx.unreachable { return; } check_not_terminated(cx); @@ -116,7 +116,7 @@ fn Ret(cx: block, V: ValueRef) { } } -fn AggregateRet(cx: block, RetVals: ~[ValueRef]) { +pub fn AggregateRet(cx: block, RetVals: ~[ValueRef]) { if cx.unreachable { return; } check_not_terminated(cx); terminate(cx, "AggregateRet"); @@ -126,7 +126,7 @@ fn AggregateRet(cx: block, RetVals: ~[ValueRef]) { } } -fn Br(cx: block, Dest: BasicBlockRef) { +pub fn Br(cx: block, Dest: BasicBlockRef) { unsafe { if cx.unreachable { return; } check_not_terminated(cx); @@ -136,8 +136,8 @@ fn Br(cx: block, Dest: BasicBlockRef) { } } -fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef, - Else: BasicBlockRef) { +pub fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef, + Else: BasicBlockRef) { unsafe { if cx.unreachable { return; } check_not_terminated(cx); @@ -147,7 +147,7 @@ fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef, } } -fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint) +pub fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint) -> ValueRef { unsafe { if cx.unreachable { return _Undef(V); } @@ -157,14 +157,14 @@ fn Switch(cx: block, V: ValueRef, 
Else: BasicBlockRef, NumCases: uint) } } -fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) { +pub fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) { unsafe { if llvm::LLVMIsUndef(S) == lib::llvm::True { return; } llvm::LLVMAddCase(S, OnVal, Dest); } } -fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) { +pub fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) { unsafe { if cx.unreachable { return; } check_not_terminated(cx); @@ -176,15 +176,15 @@ fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) { // This is a really awful way to get a zero-length c-string, but better (and a // lot more efficient) than doing str::as_c_str("", ...) every time. -fn noname() -> *libc::c_char { +pub fn noname() -> *libc::c_char { unsafe { const cnull: uint = 0u; return cast::reinterpret_cast(&ptr::addr_of(&cnull)); } } -fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], - Then: BasicBlockRef, Catch: BasicBlockRef) { +pub fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], + Then: BasicBlockRef, Catch: BasicBlockRef) { if cx.unreachable { return; } check_not_terminated(cx); terminate(cx, "Invoke"); @@ -201,8 +201,8 @@ fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], } } -fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], - Then: BasicBlockRef, Catch: BasicBlockRef) { +pub fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], + Then: BasicBlockRef, Catch: BasicBlockRef) { if cx.unreachable { return; } check_not_terminated(cx); terminate(cx, "FastInvoke"); @@ -215,7 +215,7 @@ fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], } } -fn Unreachable(cx: block) { +pub fn Unreachable(cx: block) { unsafe { if cx.unreachable { return; } cx.unreachable = true; @@ -226,14 +226,14 @@ fn Unreachable(cx: block) { } } -fn _Undef(val: ValueRef) -> ValueRef { +pub fn _Undef(val: ValueRef) -> ValueRef { unsafe { return llvm::LLVMGetUndef(val_ty(val)); } } /* Arithmetic */ -fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { 
+pub fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "add"); @@ -241,7 +241,7 @@ fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nswadd"); @@ -249,7 +249,7 @@ fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nuwadd"); @@ -257,7 +257,7 @@ fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "fadd"); @@ -265,7 +265,7 @@ fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "sub"); @@ -273,7 +273,7 @@ fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nwsub"); @@ -281,7 +281,7 @@ fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nuwsub"); @@ -289,7 +289,7 @@ fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) 
-> ValueRef { } } -fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "sub"); @@ -297,7 +297,7 @@ fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "mul"); @@ -305,7 +305,7 @@ fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nswmul"); @@ -313,7 +313,7 @@ fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "nuwmul"); @@ -321,7 +321,7 @@ fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "fmul"); @@ -329,7 +329,7 @@ fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "udiv"); @@ -337,7 +337,7 @@ fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "sdiv"); 
@@ -345,7 +345,7 @@ fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "extractsdiv"); @@ -353,7 +353,7 @@ fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "fdiv"); @@ -361,7 +361,7 @@ fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "urem"); @@ -369,7 +369,7 @@ fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "srem"); @@ -377,7 +377,7 @@ fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "frem"); @@ -385,7 +385,7 @@ fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "shl"); @@ -393,7 +393,7 @@ fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef 
{ unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "lshr"); @@ -401,7 +401,7 @@ fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "ashr"); @@ -409,7 +409,7 @@ fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "and"); @@ -417,7 +417,7 @@ fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "or"); @@ -425,7 +425,7 @@ fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "xor"); @@ -433,7 +433,8 @@ fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) + -> ValueRef { unsafe { if cx.unreachable { return _Undef(LHS); } count_insn(cx, "binop"); @@ -441,7 +442,7 @@ fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Neg(cx: block, V: ValueRef) -> ValueRef { +pub fn Neg(cx: block, V: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(V); } count_insn(cx, "neg"); @@ -449,7 +450,7 @@ fn Neg(cx: block, V: ValueRef) -> ValueRef { } } -fn NSWNeg(cx: block, V: ValueRef) -> ValueRef { +pub fn NSWNeg(cx: block, V: ValueRef) -> ValueRef { 
unsafe { if cx.unreachable { return _Undef(V); } count_insn(cx, "nswneg"); @@ -457,14 +458,14 @@ fn NSWNeg(cx: block, V: ValueRef) -> ValueRef { } } -fn NUWNeg(cx: block, V: ValueRef) -> ValueRef { +pub fn NUWNeg(cx: block, V: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(V); } count_insn(cx, "nuwneg"); return llvm::LLVMBuildNUWNeg(B(cx), V, noname()); } } -fn FNeg(cx: block, V: ValueRef) -> ValueRef { +pub fn FNeg(cx: block, V: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(V); } count_insn(cx, "fneg"); @@ -472,7 +473,7 @@ fn FNeg(cx: block, V: ValueRef) -> ValueRef { } } -fn Not(cx: block, V: ValueRef) -> ValueRef { +pub fn Not(cx: block, V: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(V); } count_insn(cx, "not"); @@ -481,7 +482,7 @@ fn Not(cx: block, V: ValueRef) -> ValueRef { } /* Memory */ -fn Malloc(cx: block, Ty: TypeRef) -> ValueRef { +pub fn Malloc(cx: block, Ty: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_i8())); } count_insn(cx, "malloc"); @@ -489,7 +490,7 @@ fn Malloc(cx: block, Ty: TypeRef) -> ValueRef { } } -fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { +pub fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_i8())); } count_insn(cx, "arraymalloc"); @@ -497,7 +498,7 @@ fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { } } -fn Alloca(cx: block, Ty: TypeRef) -> ValueRef { +pub fn Alloca(cx: block, Ty: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(Ty)); } count_insn(cx, "alloca"); @@ -505,7 +506,7 @@ fn Alloca(cx: block, Ty: TypeRef) -> ValueRef { } } -fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { +pub fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(Ty)); } count_insn(cx, "arrayalloca"); @@ 
-513,7 +514,7 @@ fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef { } } -fn Free(cx: block, PointerVal: ValueRef) { +pub fn Free(cx: block, PointerVal: ValueRef) { unsafe { if cx.unreachable { return; } count_insn(cx, "free"); @@ -521,7 +522,7 @@ fn Free(cx: block, PointerVal: ValueRef) { } } -fn Load(cx: block, PointerVal: ValueRef) -> ValueRef { +pub fn Load(cx: block, PointerVal: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; if cx.unreachable { @@ -535,7 +536,7 @@ fn Load(cx: block, PointerVal: ValueRef) -> ValueRef { } } -fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) { +pub fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) { unsafe { if cx.unreachable { return; } debug!("Store %s -> %s", @@ -546,7 +547,7 @@ fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) { } } -fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef { +pub fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_nil())); } count_insn(cx, "gep"); @@ -559,13 +560,13 @@ fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef { // in C_i32() // // XXX: Use a small-vector optimization to avoid allocations here. 
-fn GEPi(cx: block, base: ValueRef, ixs: &[uint]) -> ValueRef { +pub fn GEPi(cx: block, base: ValueRef, ixs: &[uint]) -> ValueRef { let v = do vec::map(ixs) |i| { C_i32(*i as i32) }; count_insn(cx, "gepi"); return InBoundsGEP(cx, base, v); } -fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> +pub fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_nil())); } @@ -579,7 +580,7 @@ fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: &[ValueRef]) -> } } -fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef { +pub fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_nil())); } count_insn(cx, "structgep"); @@ -590,7 +591,7 @@ fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef { } } -fn GlobalString(cx: block, _Str: *libc::c_char) -> ValueRef { +pub fn GlobalString(cx: block, _Str: *libc::c_char) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_i8())); } count_insn(cx, "globalstring"); @@ -598,7 +599,7 @@ fn GlobalString(cx: block, _Str: *libc::c_char) -> ValueRef { } } -fn GlobalStringPtr(cx: block, _Str: *libc::c_char) -> ValueRef { +pub fn GlobalStringPtr(cx: block, _Str: *libc::c_char) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_ptr(T_i8())); } count_insn(cx, "globalstringptr"); @@ -607,7 +608,7 @@ fn GlobalStringPtr(cx: block, _Str: *libc::c_char) -> ValueRef { } /* Casts */ -fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "trunc"); @@ -615,7 +616,7 @@ fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn ZExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn ZExt(cx: block, Val: ValueRef, DestTy: 
TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "zext"); @@ -623,7 +624,7 @@ fn ZExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "sext"); @@ -631,7 +632,7 @@ fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "fptoui"); @@ -639,7 +640,7 @@ fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "fptosi"); @@ -647,7 +648,7 @@ fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "uitofp"); @@ -655,7 +656,7 @@ fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "sitofp"); @@ -663,7 +664,7 @@ fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return 
llvm::LLVMGetUndef(DestTy); } count_insn(cx, "fptrunc"); @@ -671,7 +672,7 @@ fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "fpext"); @@ -679,7 +680,7 @@ fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "ptrtoint"); @@ -687,7 +688,7 @@ fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "inttoptr"); @@ -695,7 +696,7 @@ fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "bitcast"); @@ -703,7 +704,7 @@ fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "zextorbitcast"); @@ -711,7 +712,7 @@ fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return 
llvm::LLVMGetUndef(DestTy); } count_insn(cx, "sextorbitcast"); @@ -719,7 +720,7 @@ fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "truncorbitcast"); @@ -727,7 +728,7 @@ fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef, _: *u8) +pub fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef, _: *u8) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } @@ -736,7 +737,7 @@ fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef, _: *u8) } } -fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "pointercast"); @@ -744,7 +745,7 @@ fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "intcast"); @@ -752,7 +753,7 @@ fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { } } -fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { +pub fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(DestTy); } count_insn(cx, "fpcast"); @@ -762,7 +763,7 @@ fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef { /* Comparisons */ -fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef) +pub fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef) -> ValueRef { 
unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_i1()); } @@ -771,7 +772,7 @@ fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef) } } -fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef) +pub fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_i1()); } @@ -781,7 +782,7 @@ fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef) } /* Miscellaneous instructions */ -fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef { +pub fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty); } count_insn(cx, "emptyphi"); @@ -789,7 +790,7 @@ fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef { } } -fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef]) +pub fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef]) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty); } @@ -803,7 +804,7 @@ fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef]) } } -fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { +pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { unsafe { if llvm::LLVMIsUndef(phi) == lib::llvm::True { return; } let valptr = cast::reinterpret_cast(&ptr::addr_of(&val)); @@ -812,7 +813,7 @@ fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { } } -fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef { +pub fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; let ty = val_ty(Fn); @@ -823,7 +824,7 @@ fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef { } } -fn add_span_comment(bcx: block, sp: span, text: ~str) { +pub fn add_span_comment(bcx: block, sp: span, text: ~str) { let ccx = bcx.ccx(); if !ccx.sess.no_asm_comments() { let s = text + ~" (" + ccx.sess.codemap.span_to_str(sp) @@ -833,7 +834,7 @@ fn add_span_comment(bcx: block, sp: 
span, text: ~str) { } } -fn add_comment(bcx: block, text: ~str) { +pub fn add_comment(bcx: block, text: ~str) { unsafe { let ccx = bcx.ccx(); if !ccx.sess.no_asm_comments() { @@ -852,7 +853,7 @@ fn add_comment(bcx: block, text: ~str) { } } -fn Call(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef { +pub fn Call(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } unsafe { count_insn(cx, "call"); @@ -867,7 +868,7 @@ fn Call(cx: block, Fn: ValueRef, Args: &[ValueRef]) -> ValueRef { } } -fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef { +pub fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } unsafe { count_insn(cx, "fastcall"); @@ -878,8 +879,8 @@ fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef { } } -fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef], - Conv: CallConv) -> ValueRef { +pub fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef], + Conv: CallConv) -> ValueRef { if cx.unreachable { return _UndefReturn(cx, Fn); } unsafe { count_insn(cx, "callwithconv"); @@ -890,7 +891,7 @@ fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef], } } -fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) -> +pub fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return _Undef(Then); } @@ -899,7 +900,7 @@ fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) -> } } -fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef { +pub fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(Ty); } count_insn(cx, "vaarg"); @@ -907,7 +908,7 @@ fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef { } } -fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) -> +pub fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) -> 
ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_nil()); } @@ -916,7 +917,7 @@ fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) -> } } -fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef, +pub fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef, Index: ValueRef) { unsafe { if cx.unreachable { return; } @@ -925,8 +926,8 @@ fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef, } } -fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef, - Mask: ValueRef) { +pub fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef, + Mask: ValueRef) { unsafe { if cx.unreachable { return; } count_insn(cx, "shufflevector"); @@ -934,7 +935,7 @@ fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef, } } -fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef { +pub fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_nil()); } count_insn(cx, "extractvalue"); @@ -943,8 +944,8 @@ fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef { } } -fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef, - Index: uint) { +pub fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef, + Index: uint) { unsafe { if cx.unreachable { return; } count_insn(cx, "insertvalue"); @@ -953,7 +954,7 @@ fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef, } } -fn IsNull(cx: block, Val: ValueRef) -> ValueRef { +pub fn IsNull(cx: block, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_i1()); } count_insn(cx, "isnull"); @@ -961,7 +962,7 @@ fn IsNull(cx: block, Val: ValueRef) -> ValueRef { } } -fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef { +pub fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef { unsafe { if cx.unreachable { return llvm::LLVMGetUndef(T_i1()); } count_insn(cx, "isnotnull"); @@ -969,7 +970,7 @@ fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef { } } -fn PtrDiff(cx: 
block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { +pub fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { unsafe { let ccx = cx.fcx.ccx; if cx.unreachable { return llvm::LLVMGetUndef(ccx.int_type); } @@ -978,7 +979,7 @@ fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef { } } -fn Trap(cx: block) { +pub fn Trap(cx: block) { unsafe { if cx.unreachable { return; } let b = B(cx); @@ -998,8 +999,8 @@ fn Trap(cx: block) { } } -fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef, - NumClauses: uint) -> ValueRef { +pub fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef, + NumClauses: uint) -> ValueRef { unsafe { check_not_terminated(cx); assert !cx.unreachable; @@ -1009,14 +1010,14 @@ fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef, } } -fn SetCleanup(cx: block, LandingPad: ValueRef) { +pub fn SetCleanup(cx: block, LandingPad: ValueRef) { unsafe { count_insn(cx, "setcleanup"); llvm::LLVMSetCleanup(LandingPad, lib::llvm::True); } } -fn Resume(cx: block, Exn: ValueRef) -> ValueRef { +pub fn Resume(cx: block, Exn: ValueRef) -> ValueRef { unsafe { check_not_terminated(cx); terminate(cx, "Resume"); @@ -1026,16 +1027,16 @@ fn Resume(cx: block, Exn: ValueRef) -> ValueRef { } // Atomic Operations -fn AtomicCmpXchg(cx: block, dst: ValueRef, - cmp: ValueRef, src: ValueRef, - order: AtomicOrdering) -> ValueRef { +pub fn AtomicCmpXchg(cx: block, dst: ValueRef, + cmp: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { unsafe { llvm::LLVMBuildAtomicCmpXchg(B(cx), dst, cmp, src, order) } } -fn AtomicRMW(cx: block, op: AtomicBinOp, - dst: ValueRef, src: ValueRef, - order: AtomicOrdering) -> ValueRef { +pub fn AtomicRMW(cx: block, op: AtomicBinOp, + dst: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { unsafe { llvm::LLVMBuildAtomicRMW(B(cx), op, dst, src, order) } diff --git a/src/librustc/middle/trans/cabi.rs b/src/librustc/middle/trans/cabi.rs index 426909fd726b..908c56776141 100644 --- 
a/src/librustc/middle/trans/cabi.rs +++ b/src/librustc/middle/trans/cabi.rs @@ -13,29 +13,26 @@ use middle::trans::base::*; use middle::trans::build::*; use middle::trans::common::*; -export ABIInfo, LLVMType, FnType; -export llvm_abi_info; - -trait ABIInfo { +pub trait ABIInfo { fn compute_info(&self, atys: &[TypeRef], rty: TypeRef, ret_def: bool) -> FnType; } -struct LLVMType { +pub struct LLVMType { cast: bool, ty: TypeRef } -struct FnType { +pub struct FnType { arg_tys: ~[LLVMType], ret_ty: LLVMType, attrs: ~[Option], sret: bool } -impl FnType { +pub impl FnType { fn decl_fn(&self, decl: fn(fnty: TypeRef) -> ValueRef) -> ValueRef { let atys = vec::map(self.arg_tys, |t| t.ty); let rty = self.ret_ty.ty; @@ -208,7 +205,7 @@ impl LLVM_ABIInfo: ABIInfo { } } -fn llvm_abi_info() -> ABIInfo { +pub fn llvm_abi_info() -> ABIInfo { return LLVM_ABIInfo as ABIInfo; } diff --git a/src/librustc/middle/trans/cabi_x86_64.rs b/src/librustc/middle/trans/cabi_x86_64.rs index 7125764e7479..e4a70241d1a6 100644 --- a/src/librustc/middle/trans/cabi_x86_64.rs +++ b/src/librustc/middle/trans/cabi_x86_64.rs @@ -17,8 +17,6 @@ use lib::llvm::{StructRetAttribute, ByValAttribute}; use middle::trans::common::*; use middle::trans::cabi::*; -export x86_64_abi_info; - enum x86_64_reg_class { no_class, integer_class, @@ -412,6 +410,6 @@ impl X86_64_ABIInfo: ABIInfo { } } -fn x86_64_abi_info() -> ABIInfo { +pub fn x86_64_abi_info() -> ABIInfo { return X86_64_ABIInfo as ABIInfo; } diff --git a/src/librustc/middle/trans/closure.rs b/src/librustc/middle/trans/closure.rs index d37f25ba2c83..f6a6c5af5ff5 100644 --- a/src/librustc/middle/trans/closure.rs +++ b/src/librustc/middle/trans/closure.rs @@ -23,6 +23,7 @@ use middle::trans::common::*; use middle::trans::datum::{Datum, INIT, ByRef, ByValue, FromLvalue}; use middle::trans::expr; use middle::trans::glue; +use middle::trans::machine; use middle::trans::type_of::*; use util::ppaux::ty_to_str; @@ -103,7 +104,7 @@ use 
syntax::print::pprust::expr_to_str; // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -enum EnvAction { +pub enum EnvAction { /// Copy the value from this llvm ValueRef into the environment. EnvStore, @@ -114,12 +115,12 @@ enum EnvAction { EnvRef } -struct EnvValue { +pub struct EnvValue { action: EnvAction, datum: Datum } -impl EnvAction { +pub impl EnvAction { fn to_str() -> ~str { match self { EnvStore => ~"EnvStore", @@ -129,21 +130,21 @@ impl EnvAction { } } -impl EnvValue { +pub impl EnvValue { fn to_str(ccx: @crate_ctxt) -> ~str { fmt!("%s(%s)", self.action.to_str(), self.datum.to_str(ccx)) } } -fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t { +pub fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t { let cbox_ty = tuplify_box_ty(tcx, cdata_ty); return ty::mk_imm_uniq(tcx, cbox_ty); } // Given a closure ty, emits a corresponding tuple ty -fn mk_closure_tys(tcx: ty::ctxt, - bound_values: ~[EnvValue]) - -> ty::t { +pub fn mk_closure_tys(tcx: ty::ctxt, + bound_values: ~[EnvValue]) + -> ty::t { // determine the types of the values in the env. Note that this // is the actual types that will be stored in the map, not the // logical types as the user sees them, so by-ref upvars must be @@ -159,9 +160,8 @@ fn mk_closure_tys(tcx: ty::ctxt, return cdata_ty; } -fn allocate_cbox(bcx: block, proto: ast::Proto, cdata_ty: ty::t) - -> Result -{ +pub fn allocate_cbox(bcx: block, proto: ast::Proto, cdata_ty: ty::t) + -> Result { let _icx = bcx.insn_ctxt("closure::allocate_cbox"); let ccx = bcx.ccx(), tcx = ccx.tcx; @@ -196,7 +196,7 @@ fn allocate_cbox(bcx: block, proto: ast::Proto, cdata_ty: ty::t) } } -type closure_result = { +pub type closure_result = { llbox: ValueRef, // llvalue of ptr to closure cdata_ty: ty::t, // type of the closure data bcx: block // final bcx @@ -206,9 +206,9 @@ type closure_result = { // construct a closure out of them. 
If copying is true, it is a // heap allocated closure that copies the upvars into environment. // Otherwise, it is stack allocated and copies pointers to the upvars. -fn store_environment(bcx: block, - bound_values: ~[EnvValue], - proto: ast::Proto) -> closure_result { +pub fn store_environment(bcx: block, + bound_values: ~[EnvValue], + proto: ast::Proto) -> closure_result { let _icx = bcx.insn_ctxt("closure::store_environment"); let ccx = bcx.ccx(), tcx = ccx.tcx; @@ -263,10 +263,10 @@ fn store_environment(bcx: block, // Given a context and a list of upvars, build a closure. This just // collects the upvars and packages them up for store_environment. -fn build_closure(bcx0: block, - cap_vars: ~[capture::capture_var], - proto: ast::Proto, - include_ret_handle: Option) -> closure_result { +pub fn build_closure(bcx0: block, + cap_vars: ~[capture::capture_var], + proto: ast::Proto, + include_ret_handle: Option) -> closure_result { let _icx = bcx0.insn_ctxt("closure::build_closure"); // If we need to, package up the iterator body to call let mut bcx = bcx0;; @@ -326,11 +326,11 @@ fn build_closure(bcx0: block, // Given an enclosing block context, a new function context, a closure type, // and a list of upvars, generate code to load and populate the environment // with the upvars and type descriptors. 
-fn load_environment(fcx: fn_ctxt, - cdata_ty: ty::t, - cap_vars: ~[capture::capture_var], - load_ret_handle: bool, - proto: ast::Proto) { +pub fn load_environment(fcx: fn_ctxt, + cdata_ty: ty::t, + cap_vars: ~[capture::capture_var], + load_ret_handle: bool, + proto: ast::Proto) { let _icx = fcx.insn_ctxt("closure::load_environment"); let llloadenv = match fcx.llloadenv { @@ -377,16 +377,15 @@ fn load_environment(fcx: fn_ctxt, } } -fn trans_expr_fn(bcx: block, - proto: ast::Proto, - +decl: ast::fn_decl, - +body: ast::blk, - outer_id: ast::node_id, - user_id: ast::node_id, - cap_clause: ast::capture_clause, - is_loop_body: Option>, - dest: expr::Dest) -> block -{ +pub fn trans_expr_fn(bcx: block, + proto: ast::Proto, + +decl: ast::fn_decl, + +body: ast::blk, + outer_id: ast::node_id, + user_id: ast::node_id, + cap_clause: ast::capture_clause, + is_loop_body: Option>, + dest: expr::Dest) -> block { /*! * * Translates the body of a closure expression. @@ -462,13 +461,11 @@ fn trans_expr_fn(bcx: block, return bcx; } -fn make_fn_glue( - cx: block, - v: ValueRef, - t: ty::t, - glue_fn: fn@(block, v: ValueRef, t: ty::t) -> block) - -> block -{ +pub fn make_fn_glue(cx: block, + v: ValueRef, + t: ty::t, + glue_fn: fn@(block, v: ValueRef, t: ty::t) -> block) + -> block { let _icx = cx.insn_ctxt("closure::make_fn_glue"); let bcx = cx; let tcx = cx.tcx(); @@ -487,12 +484,11 @@ fn make_fn_glue( } } -fn make_opaque_cbox_take_glue( +pub fn make_opaque_cbox_take_glue( bcx: block, proto: ast::Proto, cboxptr: ValueRef) // ptr to ptr to the opaque closure - -> block -{ + -> block { // Easy cases: let _icx = bcx.insn_ctxt("closure::make_opaque_cbox_take_glue"); match proto { @@ -521,7 +517,7 @@ fn make_opaque_cbox_take_glue( let sz = Load(bcx, GEPi(bcx, tydesc, [0u, abi::tydesc_field_size])); // Adjust sz to account for the rust_opaque_box header fields - let sz = Add(bcx, sz, shape::llsize_of(ccx, T_box_header(ccx))); + let sz = Add(bcx, sz, machine::llsize_of(ccx, 
T_box_header(ccx))); // Allocate memory, update original ptr, and copy existing data let opaque_tydesc = PointerCast(bcx, tydesc, T_ptr(T_i8())); @@ -547,7 +543,7 @@ fn make_opaque_cbox_take_glue( } } -fn make_opaque_cbox_drop_glue( +pub fn make_opaque_cbox_drop_glue( bcx: block, proto: ast::Proto, cboxptr: ValueRef) // ptr to the opaque closure @@ -568,7 +564,7 @@ fn make_opaque_cbox_drop_glue( } } -fn make_opaque_cbox_free_glue( +pub fn make_opaque_cbox_free_glue( bcx: block, proto: ast::Proto, cbox: ValueRef) // ptr to ptr to the opaque closure diff --git a/src/librustc/middle/trans/common.rs b/src/librustc/middle/trans/common.rs index 3babfbd8285f..7ab90dd1ef7d 100644 --- a/src/librustc/middle/trans/common.rs +++ b/src/librustc/middle/trans/common.rs @@ -61,8 +61,8 @@ use syntax::parse::token::ident_interner; use syntax::print::pprust::expr_to_str; use syntax::{ast, ast_map}; -type namegen = fn@(~str) -> ident; -fn new_namegen(intr: @ident_interner) -> namegen { +pub type namegen = fn@(~str) -> ident; +pub fn new_namegen(intr: @ident_interner) -> namegen { return fn@(prefix: ~str) -> ident { // XXX: Bad copies. return intr.gensym(@fmt!("%s_%u", @@ -71,23 +71,23 @@ fn new_namegen(intr: @ident_interner) -> namegen { }; } -type addrspace = c_uint; +pub type addrspace = c_uint; // Address spaces communicate to LLVM which destructors need to run for -// specifc types. +// specific types. // 0 is ignored by the GC, and is used for all non-GC'd pointers. // 1 is for opaque GC'd boxes. // >= 2 are for specific types (e.g. resources). 
-const default_addrspace: addrspace = 0; -const gc_box_addrspace: addrspace = 1; +pub const default_addrspace: addrspace = 0; +pub const gc_box_addrspace: addrspace = 1; -type addrspace_gen = fn@() -> addrspace; -fn new_addrspace_gen() -> addrspace_gen { +pub type addrspace_gen = fn@() -> addrspace; +pub fn new_addrspace_gen() -> addrspace_gen { let i = @mut 1; return fn@() -> addrspace { *i += 1; *i }; } -type tydesc_info = +pub type tydesc_info = {ty: ty::t, tydesc: ValueRef, size: ValueRef, @@ -124,7 +124,7 @@ type tydesc_info = * */ -type stats = +pub type stats = {mut n_static_tydescs: uint, mut n_glues_created: uint, mut n_null_glues: uint, @@ -137,7 +137,7 @@ type stats = llvm_insns: HashMap<~str, uint>, fn_times: @mut ~[{ident: ~str, time: int}]}; -struct BuilderRef_res { +pub struct BuilderRef_res { B: BuilderRef, drop { unsafe { @@ -146,14 +146,14 @@ struct BuilderRef_res { } } -fn BuilderRef_res(B: BuilderRef) -> BuilderRef_res { +pub fn BuilderRef_res(B: BuilderRef) -> BuilderRef_res { BuilderRef_res { B: B } } // Crate context. Every crate we compile has one of these. -struct crate_ctxt { +pub struct crate_ctxt { sess: session::Session, llmod: ModuleRef, td: target_data, @@ -225,24 +225,24 @@ struct crate_ctxt { } // Types used for llself. -struct ValSelfData { +pub struct ValSelfData { v: ValueRef, t: ty::t, is_owned: bool } -enum local_val { local_mem(ValueRef), local_imm(ValueRef), } +pub enum local_val { local_mem(ValueRef), local_imm(ValueRef), } // Here `self_ty` is the real type of the self parameter to this method. It // will only be set in the case of default methods. 
-struct param_substs { +pub struct param_substs { tys: ~[ty::t], vtables: Option, bounds: @~[ty::param_bounds], self_ty: Option } -fn param_substs_to_str(tcx: ty::ctxt, substs: ¶m_substs) -> ~str { +pub fn param_substs_to_str(tcx: ty::ctxt, substs: ¶m_substs) -> ~str { fmt!("param_substs {tys:%?, vtables:%?, bounds:%?}", substs.tys.map(|t| ty_to_str(tcx, *t)), substs.vtables.map(|vs| vs.map(|v| v.to_str(tcx))), @@ -251,7 +251,7 @@ fn param_substs_to_str(tcx: ty::ctxt, substs: ¶m_substs) -> ~str { // Function context. Every LLVM function we create will have one of // these. -struct fn_ctxt_ { +pub struct fn_ctxt_ { // The ValueRef returned from a call to llvm::LLVMAddFunction; the // address of the first instruction in the sequence of // instructions for this function that will go in the .text @@ -319,7 +319,7 @@ struct fn_ctxt_ { pub type fn_ctxt = @fn_ctxt_; -fn warn_not_to_commit(ccx: @crate_ctxt, msg: ~str) { +pub fn warn_not_to_commit(ccx: @crate_ctxt, msg: ~str) { if !ccx.do_not_commit_warning_issued { ccx.do_not_commit_warning_issued = true; ccx.sess.warn(msg + ~" -- do not commit like this!"); @@ -327,52 +327,32 @@ fn warn_not_to_commit(ccx: @crate_ctxt, msg: ~str) { } // Heap selectors. Indicate which heap something should go on. 
-enum heap { +pub enum heap { heap_shared, heap_exchange, } -enum cleantype { +#[deriving_eq] +pub enum cleantype { normal_exit_only, normal_exit_and_unwind } -enum cleanup { +pub enum cleanup { clean(fn@(block) -> block, cleantype), clean_temp(ValueRef, fn@(block) -> block, cleantype), } -impl cleantype : cmp::Eq { - pure fn eq(&self, other: &cleantype) -> bool { - match (*self) { - normal_exit_only => { - match (*other) { - normal_exit_only => true, - _ => false - } - } - normal_exit_and_unwind => { - match (*other) { - normal_exit_and_unwind => true, - _ => false - } - } - } - } - pure fn ne(&self, other: &cleantype) -> bool { !(*self).eq(other) } -} - // Used to remember and reuse existing cleanup paths // target: none means the path ends in an resume instruction -type cleanup_path = {target: Option, - dest: BasicBlockRef}; +pub type cleanup_path = {target: Option, dest: BasicBlockRef}; -fn scope_clean_changed(scope_info: scope_info) { +pub fn scope_clean_changed(scope_info: scope_info) { if scope_info.cleanup_paths.len() > 0u { scope_info.cleanup_paths = ~[]; } scope_info.landing_pad = None; } -fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype { +pub fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype { if ty::type_needs_unwind_cleanup(cx, ty) { normal_exit_and_unwind } else { @@ -386,7 +366,7 @@ fn cleanup_type(cx: ty::ctxt, ty: ty::t) -> cleantype { // but have trouble knowing where non-immediates are on the stack. For // non-immediates, we must add an additional level of indirection, which // allows us to alloca a pointer with the right addrspace. 
-fn root_for_cleanup(bcx: block, v: ValueRef, t: ty::t) +pub fn root_for_cleanup(bcx: block, v: ValueRef, t: ty::t) -> {root: ValueRef, rooted: bool} { let ccx = bcx.ccx(); @@ -401,7 +381,7 @@ fn root_for_cleanup(bcx: block, v: ValueRef, t: ty::t) } } -fn add_clean(bcx: block, val: ValueRef, t: ty::t) { +pub fn add_clean(bcx: block, val: ValueRef, t: ty::t) { if !ty::type_needs_drop(bcx.tcx(), t) { return; } debug!("add_clean(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, val), @@ -416,7 +396,7 @@ fn add_clean(bcx: block, val: ValueRef, t: ty::t) { } } -fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) { +pub fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) { if !ty::type_needs_drop(cx.tcx(), ty) { return; } debug!("add_clean_temp_immediate(%s, %s, %s)", cx.to_str(), val_str(cx.ccx().tn, val), @@ -429,7 +409,7 @@ fn add_clean_temp_immediate(cx: block, val: ValueRef, ty: ty::t) { scope_clean_changed(scope_info); } } -fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) { +pub fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) { if !ty::type_needs_drop(bcx.tcx(), t) { return; } debug!("add_clean_temp_mem(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, val), @@ -443,7 +423,7 @@ fn add_clean_temp_mem(bcx: block, val: ValueRef, t: ty::t) { scope_clean_changed(scope_info); } } -fn add_clean_frozen_root(bcx: block, val: ValueRef, t: ty::t) { +pub fn add_clean_frozen_root(bcx: block, val: ValueRef, t: ty::t) { debug!("add_clean_frozen_root(%s, %s, %s)", bcx.to_str(), val_str(bcx.ccx().tn, val), ty_to_str(bcx.ccx().tcx, t)); @@ -468,7 +448,7 @@ fn add_clean_frozen_root(bcx: block, val: ValueRef, t: ty::t) { scope_clean_changed(scope_info); } } -fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) { +pub fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) { let free_fn = match heap { heap_shared => { let f: @fn(block) -> block = |a| glue::trans_free(a, ptr); @@ -490,7 +470,7 @@ fn add_clean_free(cx: block, ptr: 
ValueRef, heap: heap) { // to a system where we can also cancel the cleanup on local variables, but // this will be more involved. For now, we simply zero out the local, and the // drop glue checks whether it is zero. -fn revoke_clean(cx: block, val: ValueRef) { +pub fn revoke_clean(cx: block, val: ValueRef) { do in_scope_cx(cx) |scope_info| { let cleanup_pos = vec::position( scope_info.cleanups, @@ -509,14 +489,14 @@ fn revoke_clean(cx: block, val: ValueRef) { } } -fn block_cleanups(bcx: block) -> ~[cleanup] { +pub fn block_cleanups(bcx: block) -> ~[cleanup] { match bcx.kind { block_non_scope => ~[], block_scope(ref inf) => /*bad*/copy inf.cleanups } } -enum block_kind { +pub enum block_kind { // A scope at the end of which temporary values created inside of it are // cleaned up. May correspond to an actual block in the language, but also // to an implicit scope, for example, calls introduce an implicit scope in @@ -530,7 +510,7 @@ enum block_kind { block_non_scope, } -struct scope_info { +pub struct scope_info { loop_break: Option, loop_label: Option, // A list of functions that must be run at when leaving this @@ -544,32 +524,32 @@ struct scope_info { mut landing_pad: Option, } -trait get_node_info { +pub trait get_node_info { fn info() -> Option; } -impl @ast::expr: get_node_info { +pub impl @ast::expr: get_node_info { fn info() -> Option { Some({id: self.id, span: self.span}) } } -impl ast::blk: get_node_info { +pub impl ast::blk: get_node_info { fn info() -> Option { Some({id: self.node.id, span: self.span}) } } // XXX: Work around a trait parsing bug. remove after snapshot -type optional_boxed_ast_expr = Option<@ast::expr>; +pub type optional_boxed_ast_expr = Option<@ast::expr>; -impl optional_boxed_ast_expr: get_node_info { +pub impl optional_boxed_ast_expr: get_node_info { fn info() -> Option { self.chain_ref(|s| s.info()) } } -type node_info = { +pub type node_info = { id: ast::node_id, span: span }; @@ -579,7 +559,7 @@ type node_info = { // code. 
Each basic block we generate is attached to a function, typically // with many basic blocks per function. All the basic blocks attached to a // function are organized as a directed graph. -struct block_ { +pub struct block_ { // The BasicBlockRef returned from a call to // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic // block to the function pointed to by llfn. We insert @@ -600,8 +580,8 @@ struct block_ { fcx: fn_ctxt } -fn block_(llbb: BasicBlockRef, parent: Option, -kind: block_kind, - is_lpad: bool, node_info: Option, fcx: fn_ctxt) +pub fn block_(llbb: BasicBlockRef, parent: Option, -kind: block_kind, + is_lpad: bool, node_info: Option, fcx: fn_ctxt) -> block_ { block_ { @@ -618,49 +598,49 @@ fn block_(llbb: BasicBlockRef, parent: Option, -kind: block_kind, /* This must be enum and not type, or trans goes into an infinite loop (#2572) */ -enum block = @block_; +pub enum block = @block_; -fn mk_block(llbb: BasicBlockRef, parent: Option, -kind: block_kind, +pub fn mk_block(llbb: BasicBlockRef, parent: Option, -kind: block_kind, is_lpad: bool, node_info: Option, fcx: fn_ctxt) -> block { block(@block_(llbb, parent, move kind, is_lpad, node_info, fcx)) } // First two args are retptr, env -const first_real_arg: uint = 2u; +pub const first_real_arg: uint = 2u; -struct Result { +pub struct Result { bcx: block, val: ValueRef } -fn rslt(bcx: block, val: ValueRef) -> Result { +pub fn rslt(bcx: block, val: ValueRef) -> Result { Result {bcx: bcx, val: val} } -impl Result { +pub impl Result { fn unpack(bcx: &mut block) -> ValueRef { *bcx = self.bcx; return self.val; } } -fn ty_str(tn: type_names, t: TypeRef) -> @str { +pub fn ty_str(tn: type_names, t: TypeRef) -> @str { return lib::llvm::type_to_str(tn, t); } -fn val_ty(v: ValueRef) -> TypeRef { +pub fn val_ty(v: ValueRef) -> TypeRef { unsafe { return llvm::LLVMTypeOf(v); } } -fn val_str(tn: type_names, v: ValueRef) -> @str { +pub fn val_str(tn: type_names, v: ValueRef) -> @str { return ty_str(tn, val_ty(v)); } 
// Returns the nth element of the given LLVM structure type. -fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef { +pub fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef { unsafe { let elt_count = llvm::LLVMCountStructElementTypes(llstructty) as uint; assert (n < elt_count); @@ -672,7 +652,7 @@ fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef { } } -fn in_scope_cx(cx: block, f: fn(scope_info)) { +pub fn in_scope_cx(cx: block, f: fn(scope_info)) { let mut cur = cx; loop { match cur.kind { @@ -688,7 +668,7 @@ fn in_scope_cx(cx: block, f: fn(scope_info)) { } } -fn block_parent(cx: block) -> block { +pub fn block_parent(cx: block) -> block { match cx.parent { Some(b) => b, None => cx.sess().bug(fmt!("block_parent called on root block %?", @@ -698,7 +678,7 @@ fn block_parent(cx: block) -> block { // Accessors -impl block { +pub impl block { pure fn ccx() -> @crate_ctxt { self.fcx.ccx } pure fn tcx() -> ty::ctxt { self.fcx.ccx.tcx } pure fn sess() -> Session { self.fcx.ccx.sess } @@ -753,35 +733,35 @@ impl block { } // LLVM type constructors. 
-fn T_void() -> TypeRef { +pub fn T_void() -> TypeRef { unsafe { return llvm::LLVMVoidType(); } } -fn T_nil() -> TypeRef { +pub fn T_nil() -> TypeRef { return T_struct(~[]) } -fn T_metadata() -> TypeRef { unsafe { return llvm::LLVMMetadataType(); } } +pub fn T_metadata() -> TypeRef { unsafe { return llvm::LLVMMetadataType(); } } -fn T_i1() -> TypeRef { unsafe { return llvm::LLVMInt1Type(); } } +pub fn T_i1() -> TypeRef { unsafe { return llvm::LLVMInt1Type(); } } -fn T_i8() -> TypeRef { unsafe { return llvm::LLVMInt8Type(); } } +pub fn T_i8() -> TypeRef { unsafe { return llvm::LLVMInt8Type(); } } -fn T_i16() -> TypeRef { unsafe { return llvm::LLVMInt16Type(); } } +pub fn T_i16() -> TypeRef { unsafe { return llvm::LLVMInt16Type(); } } -fn T_i32() -> TypeRef { unsafe { return llvm::LLVMInt32Type(); } } +pub fn T_i32() -> TypeRef { unsafe { return llvm::LLVMInt32Type(); } } -fn T_i64() -> TypeRef { unsafe { return llvm::LLVMInt64Type(); } } +pub fn T_i64() -> TypeRef { unsafe { return llvm::LLVMInt64Type(); } } -fn T_f32() -> TypeRef { unsafe { return llvm::LLVMFloatType(); } } +pub fn T_f32() -> TypeRef { unsafe { return llvm::LLVMFloatType(); } } -fn T_f64() -> TypeRef { unsafe { return llvm::LLVMDoubleType(); } } +pub fn T_f64() -> TypeRef { unsafe { return llvm::LLVMDoubleType(); } } -fn T_bool() -> TypeRef { return T_i1(); } +pub fn T_bool() -> TypeRef { return T_i1(); } -fn T_int(targ_cfg: @session::config) -> TypeRef { +pub fn T_int(targ_cfg: @session::config) -> TypeRef { return match targ_cfg.arch { session::arch_x86 => T_i32(), session::arch_x86_64 => T_i64(), @@ -789,7 +769,7 @@ fn T_int(targ_cfg: @session::config) -> TypeRef { }; } -fn T_int_ty(cx: @crate_ctxt, t: ast::int_ty) -> TypeRef { +pub fn T_int_ty(cx: @crate_ctxt, t: ast::int_ty) -> TypeRef { match t { ast::ty_i => cx.int_type, ast::ty_char => T_char(), @@ -800,7 +780,7 @@ fn T_int_ty(cx: @crate_ctxt, t: ast::int_ty) -> TypeRef { } } -fn T_uint_ty(cx: @crate_ctxt, t: ast::uint_ty) -> TypeRef { +pub 
fn T_uint_ty(cx: @crate_ctxt, t: ast::uint_ty) -> TypeRef { match t { ast::ty_u => cx.int_type, ast::ty_u8 => T_i8(), @@ -810,7 +790,7 @@ fn T_uint_ty(cx: @crate_ctxt, t: ast::uint_ty) -> TypeRef { } } -fn T_float_ty(cx: @crate_ctxt, t: ast::float_ty) -> TypeRef { +pub fn T_float_ty(cx: @crate_ctxt, t: ast::float_ty) -> TypeRef { match t { ast::ty_f => cx.float_type, ast::ty_f32 => T_f32(), @@ -818,7 +798,7 @@ fn T_float_ty(cx: @crate_ctxt, t: ast::float_ty) -> TypeRef { } } -fn T_float(targ_cfg: @session::config) -> TypeRef { +pub fn T_float(targ_cfg: @session::config) -> TypeRef { return match targ_cfg.arch { session::arch_x86 => T_f64(), session::arch_x86_64 => T_f64(), @@ -826,13 +806,13 @@ fn T_float(targ_cfg: @session::config) -> TypeRef { }; } -fn T_char() -> TypeRef { return T_i32(); } +pub fn T_char() -> TypeRef { return T_i32(); } -fn T_size_t(targ_cfg: @session::config) -> TypeRef { +pub fn T_size_t(targ_cfg: @session::config) -> TypeRef { return T_int(targ_cfg); } -fn T_fn(inputs: ~[TypeRef], output: TypeRef) -> TypeRef { +pub fn T_fn(inputs: ~[TypeRef], output: TypeRef) -> TypeRef { unsafe { return llvm::LLVMFunctionType(output, to_ptr(inputs), inputs.len() as c_uint, @@ -840,23 +820,23 @@ fn T_fn(inputs: ~[TypeRef], output: TypeRef) -> TypeRef { } } -fn T_fn_pair(cx: @crate_ctxt, tfn: TypeRef) -> TypeRef { +pub fn T_fn_pair(cx: @crate_ctxt, tfn: TypeRef) -> TypeRef { return T_struct(~[T_ptr(tfn), T_opaque_cbox_ptr(cx)]); } -fn T_ptr(t: TypeRef) -> TypeRef { +pub fn T_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, default_addrspace); } } -fn T_root(t: TypeRef, addrspace: addrspace) -> TypeRef { +pub fn T_root(t: TypeRef, addrspace: addrspace) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, addrspace); } } -fn T_struct(elts: ~[TypeRef]) -> TypeRef { +pub fn T_struct(elts: ~[TypeRef]) -> TypeRef { unsafe { return llvm::LLVMStructType(to_ptr(elts), elts.len() as c_uint, @@ -864,14 +844,14 @@ fn T_struct(elts: ~[TypeRef]) -> 
TypeRef { } } -fn T_named_struct(name: ~str) -> TypeRef { +pub fn T_named_struct(name: ~str) -> TypeRef { unsafe { let c = llvm::LLVMGetGlobalContext(); return str::as_c_str(name, |buf| llvm::LLVMStructCreateNamed(c, buf)); } } -fn set_struct_body(t: TypeRef, elts: ~[TypeRef]) { +pub fn set_struct_body(t: TypeRef, elts: ~[TypeRef]) { unsafe { llvm::LLVMStructSetBody(t, to_ptr(elts), @@ -880,15 +860,15 @@ fn set_struct_body(t: TypeRef, elts: ~[TypeRef]) { } } -fn T_empty_struct() -> TypeRef { return T_struct(~[]); } +pub fn T_empty_struct() -> TypeRef { return T_struct(~[]); } // A vtable is, in reality, a vtable pointer followed by zero or more pointers // to tydescs and other vtables that it closes over. But the types and number // of those are rarely known to the code that needs to manipulate them, so // they are described by this opaque type. -fn T_vtable() -> TypeRef { T_array(T_ptr(T_i8()), 1u) } +pub fn T_vtable() -> TypeRef { T_array(T_ptr(T_i8()), 1u) } -fn T_task(targ_cfg: @session::config) -> TypeRef { +pub fn T_task(targ_cfg: @session::config) -> TypeRef { let t = T_named_struct(~"task"); // Refcount @@ -910,7 +890,7 @@ fn T_task(targ_cfg: @session::config) -> TypeRef { return t; } -fn T_tydesc_field(cx: @crate_ctxt, field: uint) -> TypeRef { +pub fn T_tydesc_field(cx: @crate_ctxt, field: uint) -> TypeRef { // Bit of a kludge: pick the fn typeref out of the tydesc.. 
unsafe { @@ -925,7 +905,7 @@ fn T_tydesc_field(cx: @crate_ctxt, field: uint) -> TypeRef { } } -fn T_generic_glue_fn(cx: @crate_ctxt) -> TypeRef { +pub fn T_generic_glue_fn(cx: @crate_ctxt) -> TypeRef { let s = @"glue_fn"; match name_has_type(cx.tn, s) { Some(t) => return t, @@ -936,7 +916,7 @@ fn T_generic_glue_fn(cx: @crate_ctxt) -> TypeRef { return t; } -fn T_tydesc(targ_cfg: @session::config) -> TypeRef { +pub fn T_tydesc(targ_cfg: @session::config) -> TypeRef { let tydesc = T_named_struct(~"tydesc"); let tydescpp = T_ptr(T_ptr(tydesc)); let pvoid = T_ptr(T_i8()); @@ -953,32 +933,32 @@ fn T_tydesc(targ_cfg: @session::config) -> TypeRef { return tydesc; } -fn T_array(t: TypeRef, n: uint) -> TypeRef { +pub fn T_array(t: TypeRef, n: uint) -> TypeRef { unsafe { return llvm::LLVMArrayType(t, n as c_uint); } } // Interior vector. -fn T_vec2(targ_cfg: @session::config, t: TypeRef) -> TypeRef { +pub fn T_vec2(targ_cfg: @session::config, t: TypeRef) -> TypeRef { return T_struct(~[T_int(targ_cfg), // fill T_int(targ_cfg), // alloc T_array(t, 0u)]); // elements } -fn T_vec(ccx: @crate_ctxt, t: TypeRef) -> TypeRef { +pub fn T_vec(ccx: @crate_ctxt, t: TypeRef) -> TypeRef { return T_vec2(ccx.sess.targ_cfg, t); } // Note that the size of this one is in bytes. -fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef { +pub fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef { return T_vec2(targ_cfg, T_i8()); } // Let T be the content of a box @T. tuplify_box_ty(t) returns the // representation of @T as a tuple (i.e., the ty::t version of what T_box() // returns). 
-fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { +pub fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { let ptr = ty::mk_ptr( tcx, ty::mt {ty: ty::mk_nil(tcx), mutbl: ast::m_imm} @@ -988,58 +968,58 @@ fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { t]); } -fn T_box_header_fields(cx: @crate_ctxt) -> ~[TypeRef] { +pub fn T_box_header_fields(cx: @crate_ctxt) -> ~[TypeRef] { let ptr = T_ptr(T_i8()); return ~[cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr]; } -fn T_box_header(cx: @crate_ctxt) -> TypeRef { +pub fn T_box_header(cx: @crate_ctxt) -> TypeRef { return T_struct(T_box_header_fields(cx)); } -fn T_box(cx: @crate_ctxt, t: TypeRef) -> TypeRef { +pub fn T_box(cx: @crate_ctxt, t: TypeRef) -> TypeRef { return T_struct(vec::append(T_box_header_fields(cx), ~[t])); } -fn T_box_ptr(t: TypeRef) -> TypeRef { +pub fn T_box_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, gc_box_addrspace); } } -fn T_opaque_box(cx: @crate_ctxt) -> TypeRef { +pub fn T_opaque_box(cx: @crate_ctxt) -> TypeRef { return T_box(cx, T_i8()); } -fn T_opaque_box_ptr(cx: @crate_ctxt) -> TypeRef { +pub fn T_opaque_box_ptr(cx: @crate_ctxt) -> TypeRef { return T_box_ptr(T_opaque_box(cx)); } -fn T_unique(cx: @crate_ctxt, t: TypeRef) -> TypeRef { +pub fn T_unique(cx: @crate_ctxt, t: TypeRef) -> TypeRef { return T_struct(vec::append(T_box_header_fields(cx), ~[t])); } -fn T_unique_ptr(t: TypeRef) -> TypeRef { +pub fn T_unique_ptr(t: TypeRef) -> TypeRef { unsafe { return llvm::LLVMPointerType(t, gc_box_addrspace); } } -fn T_port(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { +pub fn T_port(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { return T_struct(~[cx.int_type]); // Refcount } -fn T_chan(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { +pub fn T_chan(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { return T_struct(~[cx.int_type]); // Refcount } -fn T_taskptr(cx: @crate_ctxt) -> TypeRef { return T_ptr(cx.task_type); } +pub fn T_taskptr(cx: @crate_ctxt) -> TypeRef { return T_ptr(cx.task_type); 
} // This type must never be used directly; it must always be cast away. -fn T_typaram(tn: type_names) -> TypeRef { +pub fn T_typaram(tn: type_names) -> TypeRef { let s = @"typaram"; match name_has_type(tn, s) { Some(t) => return t, @@ -1050,19 +1030,21 @@ fn T_typaram(tn: type_names) -> TypeRef { return t; } -fn T_typaram_ptr(tn: type_names) -> TypeRef { return T_ptr(T_typaram(tn)); } +pub fn T_typaram_ptr(tn: type_names) -> TypeRef { + return T_ptr(T_typaram(tn)); +} -fn T_opaque_cbox_ptr(cx: @crate_ctxt) -> TypeRef { +pub fn T_opaque_cbox_ptr(cx: @crate_ctxt) -> TypeRef { // closures look like boxes (even when they are fn~ or fn&) // see trans_closure.rs return T_opaque_box_ptr(cx); } -fn T_enum_discrim(cx: @crate_ctxt) -> TypeRef { +pub fn T_enum_discrim(cx: @crate_ctxt) -> TypeRef { return cx.int_type; } -fn T_opaque_enum(cx: @crate_ctxt) -> TypeRef { +pub fn T_opaque_enum(cx: @crate_ctxt) -> TypeRef { let s = @"opaque_enum"; match name_has_type(cx.tn, s) { Some(t) => return t, @@ -1073,15 +1055,15 @@ fn T_opaque_enum(cx: @crate_ctxt) -> TypeRef { return t; } -fn T_opaque_enum_ptr(cx: @crate_ctxt) -> TypeRef { +pub fn T_opaque_enum_ptr(cx: @crate_ctxt) -> TypeRef { return T_ptr(T_opaque_enum(cx)); } -fn T_captured_tydescs(cx: @crate_ctxt, n: uint) -> TypeRef { +pub fn T_captured_tydescs(cx: @crate_ctxt, n: uint) -> TypeRef { return T_struct(vec::from_elem::(n, T_ptr(cx.tydesc_type))); } -fn T_opaque_trait(cx: @crate_ctxt, vstore: ty::vstore) -> TypeRef { +pub fn T_opaque_trait(cx: @crate_ctxt, vstore: ty::vstore) -> TypeRef { match vstore { ty::vstore_box => { T_struct(~[T_ptr(cx.tydesc_type), T_opaque_box_ptr(cx)]) @@ -1095,60 +1077,62 @@ fn T_opaque_trait(cx: @crate_ctxt, vstore: ty::vstore) -> TypeRef { } } -fn T_opaque_port_ptr() -> TypeRef { return T_ptr(T_i8()); } +pub fn T_opaque_port_ptr() -> TypeRef { return T_ptr(T_i8()); } -fn T_opaque_chan_ptr() -> TypeRef { return T_ptr(T_i8()); } +pub fn T_opaque_chan_ptr() -> TypeRef { return T_ptr(T_i8()); } // 
LLVM constant constructors. -fn C_null(t: TypeRef) -> ValueRef { +pub fn C_null(t: TypeRef) -> ValueRef { unsafe { return llvm::LLVMConstNull(t); } } -fn C_integral(t: TypeRef, u: u64, sign_extend: Bool) -> ValueRef { +pub fn C_integral(t: TypeRef, u: u64, sign_extend: Bool) -> ValueRef { unsafe { return llvm::LLVMConstInt(t, u, sign_extend); } } -fn C_floating(s: ~str, t: TypeRef) -> ValueRef { +pub fn C_floating(s: ~str, t: TypeRef) -> ValueRef { unsafe { return str::as_c_str(s, |buf| llvm::LLVMConstRealOfString(t, buf)); } } -fn C_nil() -> ValueRef { +pub fn C_nil() -> ValueRef { return C_struct(~[]); } -fn C_bool(b: bool) -> ValueRef { +pub fn C_bool(b: bool) -> ValueRef { C_integral(T_bool(), if b { 1u64 } else { 0u64 }, False) } -fn C_i32(i: i32) -> ValueRef { +pub fn C_i32(i: i32) -> ValueRef { return C_integral(T_i32(), i as u64, True); } -fn C_i64(i: i64) -> ValueRef { +pub fn C_i64(i: i64) -> ValueRef { return C_integral(T_i64(), i as u64, True); } -fn C_int(cx: @crate_ctxt, i: int) -> ValueRef { +pub fn C_int(cx: @crate_ctxt, i: int) -> ValueRef { return C_integral(cx.int_type, i as u64, True); } -fn C_uint(cx: @crate_ctxt, i: uint) -> ValueRef { +pub fn C_uint(cx: @crate_ctxt, i: uint) -> ValueRef { return C_integral(cx.int_type, i as u64, False); } -fn C_u8(i: uint) -> ValueRef { return C_integral(T_i8(), i as u64, False); } +pub fn C_u8(i: uint) -> ValueRef { + return C_integral(T_i8(), i as u64, False); +} // This is a 'c-like' raw string, which differs from // our boxed-and-length-annotated strings. -fn C_cstr(cx: @crate_ctxt, +s: ~str) -> ValueRef { +pub fn C_cstr(cx: @crate_ctxt, +s: ~str) -> ValueRef { unsafe { match cx.const_cstr_cache.find(s) { Some(llval) => return llval, @@ -1173,7 +1157,7 @@ fn C_cstr(cx: @crate_ctxt, +s: ~str) -> ValueRef { // NB: Do not use `do_spill_noroot` to make this into a constant string, or // you will be kicked off fast isel. See issue #4352 for an example of this. 
-fn C_estr_slice(cx: @crate_ctxt, +s: ~str) -> ValueRef { +pub fn C_estr_slice(cx: @crate_ctxt, +s: ~str) -> ValueRef { unsafe { let len = str::len(s); let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s), T_ptr(T_i8())); @@ -1182,7 +1166,7 @@ fn C_estr_slice(cx: @crate_ctxt, +s: ~str) -> ValueRef { } // Returns a Plain Old LLVM String: -fn C_postr(s: ~str) -> ValueRef { +pub fn C_postr(s: ~str) -> ValueRef { unsafe { return do str::as_c_str(s) |buf| { llvm::LLVMConstString(buf, str::len(s) as c_uint, False) @@ -1190,7 +1174,7 @@ fn C_postr(s: ~str) -> ValueRef { } } -fn C_zero_byte_arr(size: uint) -> ValueRef { +pub fn C_zero_byte_arr(size: uint) -> ValueRef { unsafe { let mut i = 0u; let mut elts: ~[ValueRef] = ~[]; @@ -1201,7 +1185,7 @@ fn C_zero_byte_arr(size: uint) -> ValueRef { } } -fn C_struct(elts: &[ValueRef]) -> ValueRef { +pub fn C_struct(elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstStruct(ptr, len as c_uint, False) @@ -1209,7 +1193,7 @@ fn C_struct(elts: &[ValueRef]) -> ValueRef { } } -fn C_packed_struct(elts: &[ValueRef]) -> ValueRef { +pub fn C_packed_struct(elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstStruct(ptr, len as c_uint, True) @@ -1217,7 +1201,7 @@ fn C_packed_struct(elts: &[ValueRef]) -> ValueRef { } } -fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef { +pub fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef { unsafe { do vec::as_imm_buf(elts) |ptr, len| { llvm::LLVMConstNamedStruct(T, ptr, len as c_uint) @@ -1225,14 +1209,14 @@ fn C_named_struct(T: TypeRef, elts: &[ValueRef]) -> ValueRef { } } -fn C_array(ty: TypeRef, elts: ~[ValueRef]) -> ValueRef { +pub fn C_array(ty: TypeRef, elts: ~[ValueRef]) -> ValueRef { unsafe { return llvm::LLVMConstArray(ty, vec::raw::to_ptr(elts), elts.len() as c_uint); } } -fn C_bytes(bytes: ~[u8]) -> ValueRef { +pub fn C_bytes(bytes: ~[u8]) -> ValueRef { unsafe { return llvm::LLVMConstString( 
cast::reinterpret_cast(&vec::raw::to_ptr(bytes)), @@ -1240,7 +1224,7 @@ fn C_bytes(bytes: ~[u8]) -> ValueRef { } } -fn C_bytes_plus_null(bytes: ~[u8]) -> ValueRef { +pub fn C_bytes_plus_null(bytes: ~[u8]) -> ValueRef { unsafe { return llvm::LLVMConstString( cast::reinterpret_cast(&vec::raw::to_ptr(bytes)), @@ -1248,7 +1232,7 @@ fn C_bytes_plus_null(bytes: ~[u8]) -> ValueRef { } } -fn C_shape(ccx: @crate_ctxt, +bytes: ~[u8]) -> ValueRef { +pub fn C_shape(ccx: @crate_ctxt, +bytes: ~[u8]) -> ValueRef { unsafe { let llshape = C_bytes_plus_null(bytes); let name = fmt!("shape%u", (ccx.names)(~"shape").repr); @@ -1262,14 +1246,15 @@ fn C_shape(ccx: @crate_ctxt, +bytes: ~[u8]) -> ValueRef { } } -fn get_param(fndecl: ValueRef, param: uint) -> ValueRef { +pub fn get_param(fndecl: ValueRef, param: uint) -> ValueRef { unsafe { llvm::LLVMGetParam(fndecl, param as c_uint) } } // Used to identify cached monomorphized functions and vtables -enum mono_param_id { +#[deriving_eq] +pub enum mono_param_id { mono_precise(ty::t, Option<~[mono_id]>), mono_any, mono_repr(uint /* size */, @@ -1278,43 +1263,16 @@ enum mono_param_id { datum::DatumMode), } -struct mono_id_ { +#[deriving_eq] +pub struct mono_id_ { def: ast::def_id, params: ~[mono_param_id], impl_did_opt: Option } -type mono_id = @mono_id_; +pub type mono_id = @mono_id_; -impl mono_param_id : cmp::Eq { - pure fn eq(&self, other: &mono_param_id) -> bool { - match (self, other) { - (&mono_precise(ty_a, ref ids_a), - &mono_precise(ty_b, ref ids_b)) => { - ty_a == ty_b && ids_a == ids_b - } - (&mono_any, &mono_any) => true, - (&mono_repr(size_a, align_a, is_float_a, mode_a), - &mono_repr(size_b, align_b, is_float_b, mode_b)) => { - size_a == size_b && align_a == align_b && - is_float_a == is_float_b && mode_a == mode_b - } - (&mono_precise(*), _) => false, - (&mono_any, _) => false, - (&mono_repr(*), _) => false - } - } - pure fn ne(&self, other: &mono_param_id) -> bool { !(*self).eq(other) } -} - -impl mono_id_ : cmp::Eq { - pure 
fn eq(&self, other: &mono_id_) -> bool { - (*self).def == (*other).def && (*self).params == (*other).params - } - pure fn ne(&self, other: &mono_id_) -> bool { !(*self).eq(other) } -} - -impl mono_param_id : to_bytes::IterBytes { +pub impl mono_param_id : to_bytes::IterBytes { pure fn iter_bytes(&self, +lsb0: bool, f: to_bytes::Cb) { match /*bad*/copy *self { mono_precise(t, mids) => @@ -1328,29 +1286,29 @@ impl mono_param_id : to_bytes::IterBytes { } } -impl mono_id_ : to_bytes::IterBytes { +pub impl mono_id_ : to_bytes::IterBytes { pure fn iter_bytes(&self, +lsb0: bool, f: to_bytes::Cb) { to_bytes::iter_bytes_2(&self.def, &self.params, lsb0, f); } } -fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { +pub fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let cond = build::ICmp(cx, lib::llvm::IntULT, a, b); return build::Select(cx, cond, b, a); } -fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { +pub fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef { let cond = build::ICmp(cx, lib::llvm::IntULT, a, b); return build::Select(cx, cond, a, b); } -fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef { +pub fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef { let mask = build::Sub(cx, align, C_int(cx.ccx(), 1)); let bumped = build::Add(cx, off, mask); return build::And(cx, bumped, build::Not(cx, mask)); } -fn path_str(sess: session::Session, p: path) -> ~str { +pub fn path_str(sess: session::Session, p: path) -> ~str { let mut r = ~"", first = true; for vec::each(p) |e| { match *e { @@ -1364,7 +1322,7 @@ fn path_str(sess: session::Session, p: path) -> ~str { r } -fn monomorphize_type(bcx: block, t: ty::t) -> ty::t { +pub fn monomorphize_type(bcx: block, t: ty::t) -> ty::t { match /*bad*/copy bcx.fcx.param_substs { Some(substs) => { ty::subst_tps(bcx.tcx(), substs.tys, substs.self_ty, t) @@ -1373,17 +1331,17 @@ fn monomorphize_type(bcx: block, t: ty::t) -> ty::t { } } -fn node_id_type(bcx: block, id: 
ast::node_id) -> ty::t { +pub fn node_id_type(bcx: block, id: ast::node_id) -> ty::t { let tcx = bcx.tcx(); let t = ty::node_id_to_type(tcx, id); monomorphize_type(bcx, t) } -fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t { +pub fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t { node_id_type(bcx, ex.id) } -fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] { +pub fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] { let tcx = bcx.tcx(); let params = ty::node_id_to_type_params(tcx, id); match /*bad*/copy bcx.fcx.param_substs { @@ -1396,23 +1354,22 @@ fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] { } } -fn node_vtables(bcx: block, id: ast::node_id) -> Option { +pub fn node_vtables(bcx: block, id: ast::node_id) + -> Option { let raw_vtables = bcx.ccx().maps.vtable_map.find(id); raw_vtables.map( - |vts| meth::resolve_vtables_in_fn_ctxt(bcx.fcx, *vts)) + |vts| resolve_vtables_in_fn_ctxt(bcx.fcx, *vts)) } -fn resolve_vtables_in_fn_ctxt(fcx: fn_ctxt, vts: typeck::vtable_res) - -> typeck::vtable_res -{ +pub fn resolve_vtables_in_fn_ctxt(fcx: fn_ctxt, vts: typeck::vtable_res) + -> typeck::vtable_res { @vec::map(*vts, |d| resolve_vtable_in_fn_ctxt(fcx, copy *d)) } // Apply the typaram substitutions in the fn_ctxt to a vtable. This should // eliminate any vtable_params. 
-fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, +vt: typeck::vtable_origin) - -> typeck::vtable_origin -{ +pub fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, +vt: typeck::vtable_origin) + -> typeck::vtable_origin { let tcx = fcx.ccx.tcx; match vt { typeck::vtable_static(trait_id, tys, sub) => { @@ -1443,10 +1400,9 @@ fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, +vt: typeck::vtable_origin) } } -fn find_vtable(tcx: ty::ctxt, ps: ¶m_substs, +pub fn find_vtable(tcx: ty::ctxt, ps: ¶m_substs, n_param: uint, n_bound: uint) - -> typeck::vtable_origin -{ + -> typeck::vtable_origin { debug!("find_vtable_in_fn_ctxt(n_param=%u, n_bound=%u, ps=%?)", n_param, n_bound, param_substs_to_str(tcx, ps)); @@ -1459,7 +1415,7 @@ fn find_vtable(tcx: ty::ctxt, ps: ¶m_substs, /*bad*/ copy ps.vtables.get()[vtable_off] } -fn dummy_substs(+tps: ~[ty::t]) -> ty::substs { +pub fn dummy_substs(+tps: ~[ty::t]) -> ty::substs { substs { self_r: Some(ty::re_bound(ty::br_self)), self_ty: None, @@ -1467,13 +1423,13 @@ fn dummy_substs(+tps: ~[ty::t]) -> ty::substs { } } -fn struct_field(index: uint) -> [uint * 3] { +pub fn struct_field(index: uint) -> [uint * 3] { //! The GEPi sequence to access a field of a record/struct. [0, 0, index] } -fn struct_dtor() -> [uint * 2] { +pub fn struct_dtor() -> [uint * 2] { //! The GEPi sequence to access the dtor of a struct. 
[0, 1] diff --git a/src/librustc/middle/trans/consts.rs b/src/librustc/middle/trans/consts.rs index 01533e162830..947e67c9e43f 100644 --- a/src/librustc/middle/trans/consts.rs +++ b/src/librustc/middle/trans/consts.rs @@ -15,11 +15,12 @@ use middle::trans::base::get_insn_ctxt; use middle::trans::common::*; use middle::trans::consts; use middle::trans::expr; +use middle::trans::machine; use middle::ty; use syntax::{ast, ast_util, codemap, ast_map}; -fn const_lit(cx: @crate_ctxt, e: @ast::expr, lit: ast::lit) +pub fn const_lit(cx: @crate_ctxt, e: @ast::expr, lit: ast::lit) -> ValueRef { let _icx = cx.insn_ctxt("trans_lit"); match lit.node { @@ -58,7 +59,7 @@ fn const_lit(cx: @crate_ctxt, e: @ast::expr, lit: ast::lit) } } -fn const_ptrcast(cx: @crate_ctxt, a: ValueRef, t: TypeRef) -> ValueRef { +pub fn const_ptrcast(cx: @crate_ctxt, a: ValueRef, t: TypeRef) -> ValueRef { unsafe { let b = llvm::LLVMConstPointerCast(a, T_ptr(t)); assert cx.const_globals.insert(b as int, a); @@ -66,20 +67,20 @@ fn const_ptrcast(cx: @crate_ctxt, a: ValueRef, t: TypeRef) -> ValueRef { } } -fn const_vec(cx: @crate_ctxt, e: @ast::expr, es: &[@ast::expr]) +pub fn const_vec(cx: @crate_ctxt, e: @ast::expr, es: &[@ast::expr]) -> (ValueRef, ValueRef, TypeRef) { unsafe { let vec_ty = ty::expr_ty(cx.tcx, e); let unit_ty = ty::sequence_element_type(cx.tcx, vec_ty); let llunitty = type_of::type_of(cx, unit_ty); let v = C_array(llunitty, es.map(|e| const_expr(cx, *e))); - let unit_sz = shape::llsize_of(cx, llunitty); + let unit_sz = machine::llsize_of(cx, llunitty); let sz = llvm::LLVMConstMul(C_uint(cx, es.len()), unit_sz); return (v, sz, llunitty); } } -fn const_deref(cx: @crate_ctxt, v: ValueRef) -> ValueRef { +pub fn const_deref(cx: @crate_ctxt, v: ValueRef) -> ValueRef { unsafe { let v = match cx.const_globals.find(v as int) { Some(v) => v, @@ -91,7 +92,8 @@ fn const_deref(cx: @crate_ctxt, v: ValueRef) -> ValueRef { } } -fn const_get_elt(cx: @crate_ctxt, v: ValueRef, us: &[c_uint]) -> ValueRef { 
+pub fn const_get_elt(cx: @crate_ctxt, v: ValueRef, us: &[c_uint]) + -> ValueRef { unsafe { let r = do vec::as_imm_buf(us) |p, len| { llvm::LLVMConstExtractValue(v, p, len as c_uint) @@ -104,7 +106,7 @@ fn const_get_elt(cx: @crate_ctxt, v: ValueRef, us: &[c_uint]) -> ValueRef { } } -fn const_autoderef(cx: @crate_ctxt, ty: ty::t, v: ValueRef) +pub fn const_autoderef(cx: @crate_ctxt, ty: ty::t, v: ValueRef) -> (ty::t, ValueRef) { let mut t1 = ty; let mut v1 = v; @@ -120,7 +122,7 @@ fn const_autoderef(cx: @crate_ctxt, ty: ty::t, v: ValueRef) } } -fn get_const_val(cx: @crate_ctxt, def_id: ast::def_id) -> ValueRef { +pub fn get_const_val(cx: @crate_ctxt, def_id: ast::def_id) -> ValueRef { if !ast_util::is_local(def_id) { cx.tcx.sess.bug(~"cross-crate constants"); } @@ -137,7 +139,7 @@ fn get_const_val(cx: @crate_ctxt, def_id: ast::def_id) -> ValueRef { cx.const_values.get(def_id.node) } -fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef { +pub fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef { unsafe { let _icx = cx.insn_ctxt("const_expr"); return match /*bad*/copy e.node { @@ -244,7 +246,7 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef { ty::vstore_slice(_) => { let unit_ty = ty::sequence_element_type(cx.tcx, bt); let llunitty = type_of::type_of(cx, unit_ty); - let unit_sz = shape::llsize_of(cx, llunitty); + let unit_sz = machine::llsize_of(cx, llunitty); (const_deref(cx, const_get_elt(cx, bv, [0])), llvm::LLVMConstUDiv(const_get_elt(cx, bv, [1]), @@ -450,7 +452,7 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef { Some(ast::def_variant(tid, vid)) => { let ety = ty::expr_ty(cx.tcx, e); let degen = ty::enum_is_univariant(cx.tcx, tid); - let size = shape::static_size_of_enum(cx, ety); + let size = machine::static_size_of_enum(cx, ety); let discrim = base::get_discrim_val(cx, e.span, tid, vid); let c_args = C_struct(args.map(|a| const_expr(cx, *a))); @@ -474,7 +476,7 @@ fn const_expr(cx: @crate_ctxt, e: @ast::expr) -> ValueRef { } 
} -fn trans_const(ccx: @crate_ctxt, _e: @ast::expr, id: ast::node_id) { +pub fn trans_const(ccx: @crate_ctxt, _e: @ast::expr, id: ast::node_id) { unsafe { let _icx = ccx.insn_ctxt("trans_const"); let g = base::get_item_val(ccx, id); diff --git a/src/librustc/middle/trans/debuginfo.rs b/src/librustc/middle/trans/debuginfo.rs index 6b368555637b..127f5d5d3397 100644 --- a/src/librustc/middle/trans/debuginfo.rs +++ b/src/librustc/middle/trans/debuginfo.rs @@ -17,7 +17,7 @@ use middle::pat_util::*; use middle::trans::base; use middle::trans::build::B; use middle::trans::common::*; -use middle::trans::shape; +use middle::trans::machine; use middle::trans::type_of; use middle::trans; use middle::ty; @@ -33,13 +33,6 @@ use syntax::codemap::{span, CharPos}; use syntax::parse::token::ident_interner; use syntax::{ast, codemap, ast_util, ast_map}; -export create_local_var; -export create_function; -export create_arg; -export update_source_pos; -export debug_ctxt; -export mk_ctxt; - const LLVMDebugVersion: int = (9 << 16); const DW_LANG_RUST: int = 0x9000; @@ -111,13 +104,13 @@ fn add_named_metadata(cx: @crate_ctxt, name: ~str, val: ValueRef) { //////////////// -type debug_ctxt = { +pub type debug_ctxt = { llmetadata: metadata_cache, names: namegen, crate_file: ~str }; -fn mk_ctxt(+crate: ~str, intr: @ident_interner) -> debug_ctxt { +pub fn mk_ctxt(+crate: ~str, intr: @ident_interner) -> debug_ctxt { {llmetadata: map::HashMap(), names: new_namegen(intr), crate_file: crate} @@ -313,8 +306,8 @@ fn create_block(cx: block) -> @metadata { fn size_and_align_of(cx: @crate_ctxt, t: ty::t) -> (int, int) { let llty = type_of::type_of(cx, t); - (shape::llsize_of_real(cx, llty) as int, - shape::llalign_of_pref(cx, llty) as int) + (machine::llsize_of_real(cx, llty) as int, + machine::llalign_of_pref(cx, llty) as int) } fn create_basic_type(cx: @crate_ctxt, t: ty::t, span: span) @@ -654,7 +647,7 @@ fn create_var(type_tag: int, context: ValueRef, +name: ~str, file: ValueRef, return 
llmdnode(lldata); } -fn create_local_var(bcx: block, local: @ast::local) +pub fn create_local_var(bcx: block, local: @ast::local) -> @metadata { unsafe { let cx = bcx.ccx(); @@ -705,7 +698,7 @@ fn create_local_var(bcx: block, local: @ast::local) } } -fn create_arg(bcx: block, arg: ast::arg, sp: span) +pub fn create_arg(bcx: block, arg: ast::arg, sp: span) -> Option<@metadata> { unsafe { let fcx = bcx.fcx, cx = fcx.ccx; @@ -752,7 +745,7 @@ fn create_arg(bcx: block, arg: ast::arg, sp: span) } } -fn update_source_pos(cx: block, s: span) { +pub fn update_source_pos(cx: block, s: span) { if !cx.sess().opts.debuginfo { return; } @@ -769,7 +762,7 @@ fn update_source_pos(cx: block, s: span) { } } -fn create_function(fcx: fn_ctxt) -> @metadata { +pub fn create_function(fcx: fn_ctxt) -> @metadata { let cx = fcx.ccx; let dbg_cx = (/*bad*/copy cx.dbg_cx).get(); diff --git a/src/librustc/middle/trans/foreign.rs b/src/librustc/middle/trans/foreign.rs index 1ebd497a61d5..e725e484a894 100644 --- a/src/librustc/middle/trans/foreign.rs +++ b/src/librustc/middle/trans/foreign.rs @@ -40,9 +40,6 @@ use syntax::{ast, ast_util}; use syntax::{attr, ast_map}; use syntax::parse::token::special_idents; -export link_name, trans_foreign_mod, register_foreign_fn, trans_foreign_fn, - trans_intrinsic; - fn abi_info(arch: session::arch) -> cabi::ABIInfo { return match arch { arch_x86_64 => x86_64_abi_info(), @@ -50,7 +47,7 @@ fn abi_info(arch: session::arch) -> cabi::ABIInfo { } } -fn link_name(ccx: @crate_ctxt, i: @ast::foreign_item) -> ~str { +pub fn link_name(ccx: @crate_ctxt, i: @ast::foreign_item) -> ~str { match attr::first_attr_value_str_by_name(i.attrs, ~"link_name") { None => ccx.sess.str_of(i.ident), option::Some(ref ln) => (/*bad*/copy *ln) @@ -206,8 +203,9 @@ fn build_wrap_fn_(ccx: @crate_ctxt, // stack pointer appropriately to avoid a round of copies. (In fact, the shim // function itself is unnecessary). We used to do this, in fact, and will // perhaps do so in the future. 
-fn trans_foreign_mod(ccx: @crate_ctxt, - foreign_mod: ast::foreign_mod, abi: ast::foreign_abi) { +pub fn trans_foreign_mod(ccx: @crate_ctxt, + foreign_mod: ast::foreign_mod, + abi: ast::foreign_abi) { let _icx = ccx.insn_ctxt("foreign::trans_foreign_mod"); @@ -332,10 +330,12 @@ fn trans_foreign_mod(ccx: @crate_ctxt, } } -fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, - +path: ast_map::path, +substs: param_substs, - ref_id: Option) -{ +pub fn trans_intrinsic(ccx: @crate_ctxt, + decl: ValueRef, + item: @ast::foreign_item, + +path: ast_map::path, + +substs: param_substs, + ref_id: Option) { debug!("trans_intrinsic(item.ident=%s)", ccx.sess.str_of(item.ident)); // XXX: Bad copy. @@ -433,7 +433,7 @@ fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, ~"size_of" => { let tp_ty = substs.tys[0]; let lltp_ty = type_of::type_of(ccx, tp_ty); - Store(bcx, C_uint(ccx, shape::llsize_of_real(ccx, lltp_ty)), + Store(bcx, C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty)), fcx.llretptr); } ~"move_val" => { @@ -464,13 +464,13 @@ fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, ~"min_align_of" => { let tp_ty = substs.tys[0]; let lltp_ty = type_of::type_of(ccx, tp_ty); - Store(bcx, C_uint(ccx, shape::llalign_of_min(ccx, lltp_ty)), + Store(bcx, C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty)), fcx.llretptr); } ~"pref_align_of"=> { let tp_ty = substs.tys[0]; let lltp_ty = type_of::type_of(ccx, tp_ty); - Store(bcx, C_uint(ccx, shape::llalign_of_pref(ccx, lltp_ty)), + Store(bcx, C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)), fcx.llretptr); } ~"get_tydesc" => { @@ -839,9 +839,12 @@ fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, finish_fn(fcx, lltop); } -fn trans_foreign_fn(ccx: @crate_ctxt, +path: ast_map::path, - decl: ast::fn_decl, body: ast::blk, llwrapfn: ValueRef, - id: ast::node_id) { +pub fn trans_foreign_fn(ccx: @crate_ctxt, + +path: ast_map::path, + 
decl: ast::fn_decl, + body: ast::blk, + llwrapfn: ValueRef, + id: ast::node_id) { let _icx = ccx.insn_ctxt("foreign::build_foreign_fn"); fn build_rust_fn(ccx: @crate_ctxt, +path: ast_map::path, @@ -930,12 +933,12 @@ fn trans_foreign_fn(ccx: @crate_ctxt, +path: ast_map::path, build_wrap_fn(ccx, llshimfn, llwrapfn, tys) } -fn register_foreign_fn(ccx: @crate_ctxt, - sp: span, - +path: ast_map::path, - node_id: ast::node_id, - attrs: &[ast::attribute]) - -> ValueRef { +pub fn register_foreign_fn(ccx: @crate_ctxt, + sp: span, + +path: ast_map::path, + node_id: ast::node_id, + attrs: &[ast::attribute]) + -> ValueRef { let _icx = ccx.insn_ctxt("foreign::register_foreign_fn"); let t = ty::node_id_to_type(ccx.tcx, node_id); let (llargtys, llretty, ret_ty) = c_arg_and_ret_lltys(ccx, node_id); diff --git a/src/librustc/middle/trans/meth.rs b/src/librustc/middle/trans/meth.rs index 0ee3e3e451a9..bb815df6b75a 100644 --- a/src/librustc/middle/trans/meth.rs +++ b/src/librustc/middle/trans/meth.rs @@ -39,7 +39,10 @@ use syntax::ast_util::local_def; use syntax::print::pprust::expr_to_str; use syntax::{ast, ast_map}; -fn macros() { include!("macros.rs"); } // FIXME(#3114): Macro import/export. +pub fn macros() { + // FIXME(#3114): Macro import/export. + include!("macros.rs"); +} /** The main "translation" pass for methods. Generates code @@ -47,9 +50,9 @@ for non-monomorphized methods only. Other methods will be generated once they are invoked with specific type parameters, see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`. 
*/ -fn trans_impl(ccx: @crate_ctxt, +path: path, name: ast::ident, - methods: ~[@ast::method], tps: ~[ast::ty_param], - self_ty: Option, id: ast::node_id) { +pub fn trans_impl(ccx: @crate_ctxt, +path: path, name: ast::ident, + methods: ~[@ast::method], tps: ~[ast::ty_param], + self_ty: Option, id: ast::node_id) { let _icx = ccx.insn_ctxt("impl::trans_impl"); if tps.len() > 0u { return; } let sub_path = vec::append_one(path, path_name(name)); @@ -93,13 +96,13 @@ Translates a (possibly monomorphized) method body. - `llfn`: the LLVM ValueRef for the method - `impl_id`: the node ID of the impl this method is inside */ -fn trans_method(ccx: @crate_ctxt, - +path: path, - method: &ast::method, - +param_substs: Option, - base_self_ty: Option, - llfn: ValueRef, - impl_id: ast::def_id) { +pub fn trans_method(ccx: @crate_ctxt, + +path: path, + method: &ast::method, + +param_substs: Option, + base_self_ty: Option, + llfn: ValueRef, + impl_id: ast::def_id) { // figure out how self is being passed let self_arg = match method.self_ty.node { ast::sty_static => { @@ -148,9 +151,9 @@ fn trans_method(ccx: @crate_ctxt, Some(impl_id)); } -fn trans_self_arg(bcx: block, - base: @ast::expr, - mentry: typeck::method_map_entry) -> Result { +pub fn trans_self_arg(bcx: block, + base: @ast::expr, + mentry: typeck::method_map_entry) -> Result { let _icx = bcx.insn_ctxt("impl::trans_self_arg"); let mut temp_cleanups = ~[]; @@ -172,9 +175,11 @@ fn trans_self_arg(bcx: block, return result; } -fn trans_method_callee(bcx: block, callee_id: ast::node_id, - self: @ast::expr, mentry: typeck::method_map_entry) -> - Callee { +pub fn trans_method_callee(bcx: block, + callee_id: ast::node_id, + self: @ast::expr, + mentry: typeck::method_map_entry) + -> Callee { let _icx = bcx.insn_ctxt("impl::trans_method_callee"); // Replace method_self with method_static here. 
@@ -243,7 +248,7 @@ fn trans_method_callee(bcx: block, callee_id: ast::node_id, }) => { match bcx.fcx.param_substs { Some(ref substs) => { - let vtbl = base::find_vtable(bcx.tcx(), substs, p, b); + let vtbl = find_vtable(bcx.tcx(), substs, p, b); trans_monomorphized_callee(bcx, callee_id, self, mentry, trait_id, off, vtbl) } @@ -265,11 +270,11 @@ fn trans_method_callee(bcx: block, callee_id: ast::node_id, } } -fn trans_static_method_callee(bcx: block, - method_id: ast::def_id, - trait_id: ast::def_id, - callee_id: ast::node_id) -> FnData -{ +pub fn trans_static_method_callee(bcx: block, + method_id: ast::def_id, + trait_id: ast::def_id, + callee_id: ast::node_id) + -> FnData { let _icx = bcx.insn_ctxt("impl::trans_static_method_callee"); let ccx = bcx.ccx(); @@ -348,13 +353,13 @@ fn trans_static_method_callee(bcx: block, } } -fn method_from_methods(ms: ~[@ast::method], name: ast::ident) +pub fn method_from_methods(ms: ~[@ast::method], name: ast::ident) -> Option { ms.find(|m| m.ident == name).map(|m| local_def(m.id)) } -fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id, - name: ast::ident) -> ast::def_id { +pub fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id, + name: ast::ident) -> ast::def_id { if impl_id.crate == ast::local_crate { match ccx.tcx.items.get(impl_id.node) { ast_map::node_item(@ast::item { @@ -370,8 +375,8 @@ fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id, } } -fn method_with_name_or_default(ccx: @crate_ctxt, impl_id: ast::def_id, - name: ast::ident) -> ast::def_id { +pub fn method_with_name_or_default(ccx: @crate_ctxt, impl_id: ast::def_id, + name: ast::ident) -> ast::def_id { if impl_id.crate == ast::local_crate { match ccx.tcx.items.get(impl_id.node) { ast_map::node_item(@ast::item { @@ -404,8 +409,8 @@ fn method_with_name_or_default(ccx: @crate_ctxt, impl_id: ast::def_id, } } -fn method_ty_param_count(ccx: @crate_ctxt, m_id: ast::def_id, - i_id: ast::def_id) -> uint { +pub fn method_ty_param_count(ccx: @crate_ctxt, 
m_id: ast::def_id, + i_id: ast::def_id) -> uint { debug!("method_ty_param_count: m_id: %?, i_id: %?", m_id, i_id); if m_id.crate == ast::local_crate { match ccx.tcx.items.find(m_id.node) { @@ -431,15 +436,14 @@ fn method_ty_param_count(ccx: @crate_ctxt, m_id: ast::def_id, } } -fn trans_monomorphized_callee(bcx: block, - callee_id: ast::node_id, - base: @ast::expr, - mentry: typeck::method_map_entry, - trait_id: ast::def_id, - n_method: uint, - +vtbl: typeck::vtable_origin) - -> Callee -{ +pub fn trans_monomorphized_callee(bcx: block, + callee_id: ast::node_id, + base: @ast::expr, + mentry: typeck::method_map_entry, + trait_id: ast::def_id, + n_method: uint, + +vtbl: typeck::vtable_origin) + -> Callee { let _icx = bcx.insn_ctxt("impl::trans_monomorphized_callee"); return match vtbl { typeck::vtable_static(impl_did, rcvr_substs, rcvr_origins) => { @@ -495,13 +499,12 @@ fn trans_monomorphized_callee(bcx: block, } -fn combine_impl_and_methods_tps(bcx: block, - mth_did: ast::def_id, - impl_did: ast::def_id, - callee_id: ast::node_id, - +rcvr_substs: ~[ty::t]) - -> ~[ty::t] -{ +pub fn combine_impl_and_methods_tps(bcx: block, + mth_did: ast::def_id, + impl_did: ast::def_id, + callee_id: ast::node_id, + +rcvr_substs: ~[ty::t]) + -> ~[ty::t] { /*! * * Creates a concatenated set of substitutions which includes @@ -534,13 +537,12 @@ fn combine_impl_and_methods_tps(bcx: block, return ty_substs; } -fn combine_impl_and_methods_origins(bcx: block, - mth_did: ast::def_id, - impl_did: ast::def_id, - callee_id: ast::node_id, - rcvr_origins: typeck::vtable_res) - -> typeck::vtable_res -{ +pub fn combine_impl_and_methods_origins(bcx: block, + mth_did: ast::def_id, + impl_did: ast::def_id, + callee_id: ast::node_id, + rcvr_origins: typeck::vtable_res) + -> typeck::vtable_res { /*! * * Similar to `combine_impl_and_methods_tps`, but for vtables. 
@@ -576,14 +578,13 @@ fn combine_impl_and_methods_origins(bcx: block, } -fn trans_trait_callee(bcx: block, - callee_id: ast::node_id, - n_method: uint, - self_expr: @ast::expr, - vstore: ty::vstore, - explicit_self: ast::self_ty_) - -> Callee -{ +pub fn trans_trait_callee(bcx: block, + callee_id: ast::node_id, + n_method: uint, + self_expr: @ast::expr, + vstore: ty::vstore, + explicit_self: ast::self_ty_) + -> Callee { //! // // Create a method callee where the method is coming from a trait @@ -614,14 +615,13 @@ fn trans_trait_callee(bcx: block, explicit_self) } -fn trans_trait_callee_from_llval(bcx: block, - callee_ty: ty::t, - n_method: uint, - llpair: ValueRef, - vstore: ty::vstore, - explicit_self: ast::self_ty_) - -> Callee -{ +pub fn trans_trait_callee_from_llval(bcx: block, + callee_ty: ty::t, + n_method: uint, + llpair: ValueRef, + vstore: ty::vstore, + explicit_self: ast::self_ty_) + -> Callee { //! // // Same as `trans_trait_callee()` above, except that it is given @@ -743,7 +743,9 @@ fn trans_trait_callee_from_llval(bcx: block, }; } -fn vtable_id(ccx: @crate_ctxt, +origin: typeck::vtable_origin) -> mono_id { +pub fn vtable_id(ccx: @crate_ctxt, + +origin: typeck::vtable_origin) + -> mono_id { match origin { typeck::vtable_static(impl_id, substs, sub_vtables) => { monomorphize::make_mono_id( @@ -770,7 +772,9 @@ fn vtable_id(ccx: @crate_ctxt, +origin: typeck::vtable_origin) -> mono_id { } } -fn get_vtable(ccx: @crate_ctxt, +origin: typeck::vtable_origin) -> ValueRef { +pub fn get_vtable(ccx: @crate_ctxt, + +origin: typeck::vtable_origin) + -> ValueRef { // XXX: Bad copy. 
let hash_id = vtable_id(ccx, copy origin); match ccx.vtables.find(hash_id) { @@ -784,7 +788,7 @@ fn get_vtable(ccx: @crate_ctxt, +origin: typeck::vtable_origin) -> ValueRef { } } -fn make_vtable(ccx: @crate_ctxt, ptrs: ~[ValueRef]) -> ValueRef { +pub fn make_vtable(ccx: @crate_ctxt, ptrs: ~[ValueRef]) -> ValueRef { unsafe { let _icx = ccx.insn_ctxt("impl::make_vtable"); let tbl = C_struct(ptrs); @@ -799,8 +803,11 @@ fn make_vtable(ccx: @crate_ctxt, ptrs: ~[ValueRef]) -> ValueRef { } } -fn make_impl_vtable(ccx: @crate_ctxt, impl_id: ast::def_id, substs: ~[ty::t], - vtables: typeck::vtable_res) -> ValueRef { +pub fn make_impl_vtable(ccx: @crate_ctxt, + impl_id: ast::def_id, + substs: ~[ty::t], + vtables: typeck::vtable_res) + -> ValueRef { let _icx = ccx.insn_ctxt("impl::make_impl_vtable"); let tcx = ccx.tcx; @@ -840,13 +847,12 @@ fn make_impl_vtable(ccx: @crate_ctxt, impl_id: ast::def_id, substs: ~[ty::t], })) } -fn trans_trait_cast(bcx: block, - val: @ast::expr, - id: ast::node_id, - dest: expr::Dest, - vstore: ty::vstore) - -> block -{ +pub fn trans_trait_cast(bcx: block, + val: @ast::expr, + id: ast::node_id, + dest: expr::Dest, + vstore: ty::vstore) + -> block { let mut bcx = bcx; let _icx = bcx.insn_ctxt("impl::trans_cast"); diff --git a/src/librustc/middle/trans/monomorphize.rs b/src/librustc/middle/trans/monomorphize.rs index 9924e5d97cc0..d7deb2a4da7e 100644 --- a/src/librustc/middle/trans/monomorphize.rs +++ b/src/librustc/middle/trans/monomorphize.rs @@ -351,7 +351,7 @@ pub fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: ~[ty::t], { let llty = type_of::type_of(ccx, subst); let size = machine::llbitsize_of_real(ccx, llty); - let align = shape::llalign_of_pref(ccx, llty); + let align = machine::llalign_of_pref(ccx, llty); let mode = datum::appropriate_mode(subst); // FIXME(#3547)---scalars and floats are diff --git a/src/librustc/middle/trans/reachable.rs b/src/librustc/middle/trans/reachable.rs index 29051837e4a0..6b266f1d9639 100644 --- 
a/src/librustc/middle/trans/reachable.rs +++ b/src/librustc/middle/trans/reachable.rs @@ -29,9 +29,7 @@ use syntax::attr; use syntax::print::pprust::expr_to_str; use syntax::{visit, ast_util, ast_map}; -export map, find_reachable; - -type map = HashMap; +pub type map = HashMap; struct ctx { exp_map2: resolve::ExportMap2, @@ -40,8 +38,8 @@ struct ctx { rmap: map } -fn find_reachable(crate_mod: _mod, exp_map2: resolve::ExportMap2, - tcx: ty::ctxt, method_map: typeck::method_map) -> map { +pub fn find_reachable(crate_mod: _mod, exp_map2: resolve::ExportMap2, + tcx: ty::ctxt, method_map: typeck::method_map) -> map { let rmap = HashMap(); let cx = ctx { exp_map2: exp_map2, diff --git a/src/librustc/middle/trans/reflect.rs b/src/librustc/middle/trans/reflect.rs index ba9ff3a4f98b..f773b09d5dec 100644 --- a/src/librustc/middle/trans/reflect.rs +++ b/src/librustc/middle/trans/reflect.rs @@ -19,8 +19,8 @@ use middle::trans::common::*; use middle::trans::datum::*; use middle::trans::expr::SaveIn; use middle::trans::glue; +use middle::trans::machine; use middle::trans::meth; -use middle::trans::shape; use middle::trans::type_of::*; use util::ppaux::ty_to_str; @@ -28,7 +28,7 @@ use std::map::HashMap; use syntax::ast::def_id; use syntax::ast; -enum reflector = { +pub enum reflector = { visitor_val: ValueRef, visitor_methods: @~[ty::method], final_bcx: block, @@ -36,7 +36,7 @@ enum reflector = { mut bcx: block }; -impl reflector { +pub impl reflector { fn c_uint(u: uint) -> ValueRef { C_uint(self.bcx.ccx(), u) @@ -62,8 +62,8 @@ impl reflector { fn c_size_and_align(t: ty::t) -> ~[ValueRef] { let tr = type_of::type_of(self.bcx.ccx(), t); - let s = shape::llsize_of_real(self.bcx.ccx(), tr); - let a = shape::llalign_of_min(self.bcx.ccx(), tr); + let s = machine::llsize_of_real(self.bcx.ccx(), tr); + let a = machine::llalign_of_min(self.bcx.ccx(), tr); return ~[self.c_uint(s), self.c_uint(a)]; } @@ -310,9 +310,11 @@ impl reflector { } // Emit a sequence of calls to 
visit_ty::visit_foo -fn emit_calls_to_trait_visit_ty(bcx: block, t: ty::t, - visitor_val: ValueRef, - visitor_trait_id: def_id) -> block { +pub fn emit_calls_to_trait_visit_ty(bcx: block, + t: ty::t, + visitor_val: ValueRef, + visitor_trait_id: def_id) + -> block { use syntax::parse::token::special_idents::tydesc; let final = sub_block(bcx, ~"final"); assert bcx.ccx().tcx.intrinsic_defs.contains_key(tydesc); @@ -330,7 +332,7 @@ fn emit_calls_to_trait_visit_ty(bcx: block, t: ty::t, return final; } -fn ast_proto_constant(proto: ast::Proto) -> uint { +pub fn ast_proto_constant(proto: ast::Proto) -> uint { match proto { ast::ProtoBare => 0u, ast::ProtoUniq => 2u, @@ -338,3 +340,4 @@ fn ast_proto_constant(proto: ast::Proto) -> uint { ast::ProtoBorrowed => 4u, } } + diff --git a/src/librustc/middle/trans/shape.rs b/src/librustc/middle/trans/shape.rs index 3403df6c1cc6..a4cd967115be 100644 --- a/src/librustc/middle/trans/shape.rs +++ b/src/librustc/middle/trans/shape.rs @@ -34,10 +34,13 @@ use syntax::util::interner; use ty_ctxt = middle::ty::ctxt; -type ctxt = {mut next_tag_id: u16, pad: u16, pad2: u32}; +pub type ctxt = {mut next_tag_id: u16, pad: u16, pad2: u32}; -fn mk_global(ccx: @crate_ctxt, name: ~str, llval: ValueRef, internal: bool) -> - ValueRef { +pub fn mk_global(ccx: @crate_ctxt, + name: ~str, + llval: ValueRef, + internal: bool) + -> ValueRef { unsafe { let llglobal = do str::as_c_str(name) |buf| { llvm::LLVMAddGlobal(ccx.llmod, val_ty(llval), buf) @@ -54,7 +57,7 @@ fn mk_global(ccx: @crate_ctxt, name: ~str, llval: ValueRef, internal: bool) -> } } -fn mk_ctxt(llmod: ModuleRef) -> ctxt { +pub fn mk_ctxt(llmod: ModuleRef) -> ctxt { unsafe { let llshapetablesty = trans::common::T_named_struct(~"shapes"); let _llshapetables = str::as_c_str(~"shapes", |buf| { @@ -69,11 +72,11 @@ fn mk_ctxt(llmod: ModuleRef) -> ctxt { Although these two functions are never called, they are here for a VERY GOOD REASON. 
See #3670 */ -fn add_u16(dest: &mut ~[u8], val: u16) { +pub fn add_u16(dest: &mut ~[u8], val: u16) { *dest += ~[(val & 0xffu16) as u8, (val >> 8u16) as u8]; } -fn add_substr(dest: &mut ~[u8], src: ~[u8]) { +pub fn add_substr(dest: &mut ~[u8], src: ~[u8]) { add_u16(&mut *dest, vec::len(src) as u16); *dest += src; } diff --git a/src/librustc/middle/trans/tvec.rs b/src/librustc/middle/trans/tvec.rs index 54e6d25718b7..65d16effed39 100644 --- a/src/librustc/middle/trans/tvec.rs +++ b/src/librustc/middle/trans/tvec.rs @@ -31,7 +31,7 @@ use syntax::print::pprust::{expr_to_str}; // containing an unboxed vector. This expands a boxed vector type into such an // expanded type. It doesn't respect mutability, but that doesn't matter at // this point. -fn expand_boxed_vec_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { +pub fn expand_boxed_vec_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { let unit_ty = ty::sequence_element_type(tcx, t); let unboxed_vec_ty = ty::mk_mut_unboxed_vec(tcx, unit_ty); match ty::get(t).sty { @@ -46,35 +46,35 @@ fn expand_boxed_vec_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { } } -fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef { +pub fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("tvec::get_fill"); Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_fill])) } -fn set_fill(bcx: block, vptr: ValueRef, fill: ValueRef) { +pub fn set_fill(bcx: block, vptr: ValueRef, fill: ValueRef) { Store(bcx, fill, GEPi(bcx, vptr, [0u, abi::vec_elt_fill])); } -fn get_alloc(bcx: block, vptr: ValueRef) -> ValueRef { +pub fn get_alloc(bcx: block, vptr: ValueRef) -> ValueRef { Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_alloc])) } -fn get_bodyptr(bcx: block, vptr: ValueRef) -> ValueRef { +pub fn get_bodyptr(bcx: block, vptr: ValueRef) -> ValueRef { base::non_gc_box_cast(bcx, GEPi(bcx, vptr, [0u, abi::box_field_body])) } -fn get_dataptr(bcx: block, vptr: ValueRef) -> ValueRef { +pub fn get_dataptr(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = 
bcx.insn_ctxt("tvec::get_dataptr"); GEPi(bcx, vptr, [0u, abi::vec_elt_elems, 0u]) } -fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef { +pub fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("tvec::pointer_add"); let old_ty = val_ty(ptr); let bptr = PointerCast(bcx, ptr, T_ptr(T_i8())); return PointerCast(bcx, InBoundsGEP(bcx, bptr, ~[bytes]), old_ty); } -fn alloc_raw(bcx: block, unit_ty: ty::t, - fill: ValueRef, alloc: ValueRef, heap: heap) -> Result { +pub fn alloc_raw(bcx: block, unit_ty: ty::t, + fill: ValueRef, alloc: ValueRef, heap: heap) -> Result { let _icx = bcx.insn_ctxt("tvec::alloc_uniq"); let ccx = bcx.ccx(); @@ -87,12 +87,16 @@ fn alloc_raw(bcx: block, unit_ty: ty::t, Store(bcx, alloc, GEPi(bcx, body, [0u, abi::vec_elt_alloc])); return rslt(bcx, box); } -fn alloc_uniq_raw(bcx: block, unit_ty: ty::t, - fill: ValueRef, alloc: ValueRef) -> Result { +pub fn alloc_uniq_raw(bcx: block, unit_ty: ty::t, + fill: ValueRef, alloc: ValueRef) -> Result { alloc_raw(bcx, unit_ty, fill, alloc, heap_exchange) } -fn alloc_vec(bcx: block, unit_ty: ty::t, elts: uint, heap: heap) -> Result { +pub fn alloc_vec(bcx: block, + unit_ty: ty::t, + elts: uint, + heap: heap) + -> Result { let _icx = bcx.insn_ctxt("tvec::alloc_uniq"); let ccx = bcx.ccx(); let llunitty = type_of::type_of(ccx, unit_ty); @@ -106,7 +110,7 @@ fn alloc_vec(bcx: block, unit_ty: ty::t, elts: uint, heap: heap) -> Result { return rslt(bcx, vptr); } -fn duplicate_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> Result { +pub fn duplicate_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> Result { let _icx = bcx.insn_ctxt("tvec::duplicate_uniq"); let fill = get_fill(bcx, get_bodyptr(bcx, vptr)); @@ -123,7 +127,7 @@ fn duplicate_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> Result { return rslt(bcx, newptr); } -fn make_drop_glue_unboxed(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> +pub fn make_drop_glue_unboxed(bcx: block, vptr: 
ValueRef, vec_ty: ty::t) -> block { let _icx = bcx.insn_ctxt("tvec::make_drop_glue_unboxed"); let tcx = bcx.tcx(), unit_ty = ty::sequence_element_type(tcx, vec_ty); @@ -132,14 +136,14 @@ fn make_drop_glue_unboxed(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> } else { bcx } } -struct VecTypes { +pub struct VecTypes { vec_ty: ty::t, unit_ty: ty::t, llunit_ty: TypeRef, llunit_size: ValueRef } -impl VecTypes { +pub impl VecTypes { fn to_str(ccx: @crate_ctxt) -> ~str { fmt!("VecTypes {vec_ty=%s, unit_ty=%s, llunit_ty=%s, llunit_size=%s}", ty_to_str(ccx.tcx, self.vec_ty), @@ -149,11 +153,11 @@ impl VecTypes { } } -fn trans_fixed_vstore(bcx: block, - vstore_expr: @ast::expr, - content_expr: @ast::expr, - dest: expr::Dest) -> block -{ +pub fn trans_fixed_vstore(bcx: block, + vstore_expr: @ast::expr, + content_expr: @ast::expr, + dest: expr::Dest) + -> block { //! // // [...] allocates a fixed-size array and moves it around "by value". @@ -178,11 +182,11 @@ fn trans_fixed_vstore(bcx: block, }; } -fn trans_slice_vstore(bcx: block, - vstore_expr: @ast::expr, - content_expr: @ast::expr, - dest: expr::Dest) -> block -{ +pub fn trans_slice_vstore(bcx: block, + vstore_expr: @ast::expr, + content_expr: @ast::expr, + dest: expr::Dest) + -> block { //! // // &[...] allocates memory on the stack and writes the values into it, @@ -237,11 +241,11 @@ fn trans_slice_vstore(bcx: block, return bcx; } -fn trans_lit_str(bcx: block, - lit_expr: @ast::expr, - lit_str: @~str, - dest: Dest) -> block -{ +pub fn trans_lit_str(bcx: block, + lit_expr: @ast::expr, + lit_str: @~str, + dest: Dest) + -> block { //! // // Literal strings translate to slices into static memory. This is @@ -275,10 +279,10 @@ fn trans_lit_str(bcx: block, } -fn trans_uniq_or_managed_vstore(bcx: block, - heap: heap, - vstore_expr: @ast::expr, - content_expr: @ast::expr) -> DatumBlock { +pub fn trans_uniq_or_managed_vstore(bcx: block, + heap: heap, + vstore_expr: @ast::expr, + content_expr: @ast::expr) -> DatumBlock { //! 
// // @[...] or ~[...] (also @"..." or ~"...") allocate boxes in the @@ -334,12 +338,12 @@ fn trans_uniq_or_managed_vstore(bcx: block, return immediate_rvalue_bcx(bcx, val, vt.vec_ty); } -fn write_content(bcx: block, - vt: &VecTypes, - vstore_expr: @ast::expr, - content_expr: @ast::expr, - dest: Dest) -> block -{ +pub fn write_content(bcx: block, + vt: &VecTypes, + vstore_expr: @ast::expr, + content_expr: @ast::expr, + dest: Dest) + -> block { let _icx = bcx.insn_ctxt("tvec::write_content"); let mut bcx = bcx; @@ -436,12 +440,12 @@ fn write_content(bcx: block, } } -fn vec_types_from_expr(bcx: block, vec_expr: @ast::expr) -> VecTypes { +pub fn vec_types_from_expr(bcx: block, vec_expr: @ast::expr) -> VecTypes { let vec_ty = node_id_type(bcx, vec_expr.id); vec_types(bcx, vec_ty) } -fn vec_types(bcx: block, vec_ty: ty::t) -> VecTypes { +pub fn vec_types(bcx: block, vec_ty: ty::t) -> VecTypes { let ccx = bcx.ccx(); let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); let llunit_ty = type_of::type_of(ccx, unit_ty); @@ -453,7 +457,7 @@ fn vec_types(bcx: block, vec_ty: ty::t) -> VecTypes { llunit_size: llunit_size} } -fn elements_required(bcx: block, content_expr: @ast::expr) -> uint { +pub fn elements_required(bcx: block, content_expr: @ast::expr) -> uint { //! Figure out the number of elements we need to store this content match /*bad*/copy content_expr.node { @@ -469,9 +473,9 @@ fn elements_required(bcx: block, content_expr: @ast::expr) -> uint { } } -fn get_base_and_len(bcx: block, - llval: ValueRef, - vec_ty: ty::t) -> (ValueRef, ValueRef) { +pub fn get_base_and_len(bcx: block, + llval: ValueRef, + vec_ty: ty::t) -> (ValueRef, ValueRef) { //! // // Converts a vector into the slice pair. 
The vector should be stored in @@ -507,12 +511,12 @@ fn get_base_and_len(bcx: block, } } -type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> Result; +pub type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> Result; -type iter_vec_block = fn(block, ValueRef, ty::t) -> block; +pub type iter_vec_block = fn(block, ValueRef, ty::t) -> block; -fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, - fill: ValueRef, f: iter_vec_block) -> block { +pub fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, + fill: ValueRef, f: iter_vec_block) -> block { let _icx = bcx.insn_ctxt("tvec::iter_vec_raw"); let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); @@ -542,15 +546,15 @@ fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, } -fn iter_vec_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t, - fill: ValueRef, f: iter_vec_block) -> block { +pub fn iter_vec_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t, + fill: ValueRef, f: iter_vec_block) -> block { let _icx = bcx.insn_ctxt("tvec::iter_vec_uniq"); let data_ptr = get_dataptr(bcx, get_bodyptr(bcx, vptr)); iter_vec_raw(bcx, data_ptr, vec_ty, fill, f) } -fn iter_vec_unboxed(bcx: block, body_ptr: ValueRef, vec_ty: ty::t, - f: iter_vec_block) -> block { +pub fn iter_vec_unboxed(bcx: block, body_ptr: ValueRef, vec_ty: ty::t, + f: iter_vec_block) -> block { let _icx = bcx.insn_ctxt("tvec::iter_vec_unboxed"); let fill = get_fill(bcx, body_ptr); let dataptr = get_dataptr(bcx, body_ptr); diff --git a/src/librustc/middle/trans/type_of.rs b/src/librustc/middle/trans/type_of.rs index 6a62e622526d..a4b7dc323e8f 100644 --- a/src/librustc/middle/trans/type_of.rs +++ b/src/librustc/middle/trans/type_of.rs @@ -14,6 +14,7 @@ use lib::llvm::{TypeRef}; use middle::trans::common::*; use middle::trans::common; use middle::trans::expr; +use middle::trans::machine; use util::ppaux; use std::map::HashMap; @@ -29,7 +30,7 @@ export type_of_glue_fn; export type_of_non_gc_box; export type_of_rooted; -fn 
type_of_explicit_arg(ccx: @crate_ctxt, arg: ty::arg) -> TypeRef { +pub fn type_of_explicit_arg(ccx: @crate_ctxt, arg: ty::arg) -> TypeRef { let llty = type_of(ccx, arg.ty); match ty::resolved_mode(ccx.tcx, arg.mode) { ast::by_val => llty, @@ -44,12 +45,13 @@ fn type_of_explicit_arg(ccx: @crate_ctxt, arg: ty::arg) -> TypeRef { } } -fn type_of_explicit_args(ccx: @crate_ctxt, inputs: ~[ty::arg]) -> ~[TypeRef] { +pub fn type_of_explicit_args(ccx: @crate_ctxt, inputs: ~[ty::arg]) + -> ~[TypeRef] { inputs.map(|arg| type_of_explicit_arg(ccx, *arg)) } -fn type_of_fn(cx: @crate_ctxt, inputs: ~[ty::arg], - output: ty::t) -> TypeRef { +pub fn type_of_fn(cx: @crate_ctxt, inputs: ~[ty::arg], + output: ty::t) -> TypeRef { unsafe { let mut atys: ~[TypeRef] = ~[]; @@ -66,11 +68,11 @@ fn type_of_fn(cx: @crate_ctxt, inputs: ~[ty::arg], } // Given a function type and a count of ty params, construct an llvm type -fn type_of_fn_from_ty(cx: @crate_ctxt, fty: ty::t) -> TypeRef { +pub fn type_of_fn_from_ty(cx: @crate_ctxt, fty: ty::t) -> TypeRef { type_of_fn(cx, ty::ty_fn_args(fty), ty::ty_fn_ret(fty)) } -fn type_of_non_gc_box(cx: @crate_ctxt, t: ty::t) -> TypeRef { +pub fn type_of_non_gc_box(cx: @crate_ctxt, t: ty::t) -> TypeRef { assert !ty::type_needs_infer(t); let t_norm = ty::normalize_ty(cx.tcx, t); @@ -91,7 +93,7 @@ fn type_of_non_gc_box(cx: @crate_ctxt, t: ty::t) -> TypeRef { } } -fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { +pub fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { debug!("type_of %?: %?", t, ty::get(t)); // Check the cache. 
@@ -234,14 +236,14 @@ fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { return llty; } -fn fill_type_of_enum(cx: @crate_ctxt, did: ast::def_id, t: ty::t, - llty: TypeRef) { +pub fn fill_type_of_enum(cx: @crate_ctxt, did: ast::def_id, t: ty::t, + llty: TypeRef) { debug!("type_of_enum %?: %?", t, ty::get(t)); let lltys = { let degen = ty::enum_is_univariant(cx.tcx, did); - let size = shape::static_size_of_enum(cx, t); + let size = machine::static_size_of_enum(cx, t); if !degen { ~[T_enum_discrim(cx), T_array(T_i8(), size)] } @@ -257,13 +259,12 @@ fn fill_type_of_enum(cx: @crate_ctxt, did: ast::def_id, t: ty::t, } // Want refinements! (Or case classes, I guess -enum named_ty { a_struct, an_enum } +pub enum named_ty { a_struct, an_enum } -fn llvm_type_name(cx: @crate_ctxt, - what: named_ty, - did: ast::def_id, - tps: ~[ty::t] - ) -> ~str { +pub fn llvm_type_name(cx: @crate_ctxt, + what: named_ty, + did: ast::def_id, + tps: ~[ty::t]) -> ~str { let name = match what { a_struct => { "~struct" } an_enum => { "~enum" } @@ -280,7 +281,7 @@ fn llvm_type_name(cx: @crate_ctxt, ); } -fn type_of_dtor(ccx: @crate_ctxt, self_ty: ty::t) -> TypeRef { +pub fn type_of_dtor(ccx: @crate_ctxt, self_ty: ty::t) -> TypeRef { unsafe { T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), // output pointer T_ptr(type_of(ccx, self_ty))], // self arg @@ -288,14 +289,14 @@ fn type_of_dtor(ccx: @crate_ctxt, self_ty: ty::t) -> TypeRef { } } -fn type_of_rooted(ccx: @crate_ctxt, t: ty::t) -> TypeRef { +pub fn type_of_rooted(ccx: @crate_ctxt, t: ty::t) -> TypeRef { let addrspace = base::get_tydesc(ccx, t).addrspace; debug!("type_of_rooted %s in addrspace %u", ty_to_str(ccx.tcx, t), addrspace as uint); return T_root(type_of(ccx, t), addrspace); } -fn type_of_glue_fn(ccx: @crate_ctxt, t: ty::t) -> TypeRef { +pub fn type_of_glue_fn(ccx: @crate_ctxt, t: ty::t) -> TypeRef { let tydescpp = T_ptr(T_ptr(ccx.tydesc_type)); let llty = T_ptr(type_of(ccx, t)); return T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, 
llty], diff --git a/src/librustc/middle/trans/type_use.rs b/src/librustc/middle/trans/type_use.rs index b2e6ab05e228..c19db4a75b9d 100644 --- a/src/librustc/middle/trans/type_use.rs +++ b/src/librustc/middle/trans/type_use.rs @@ -44,15 +44,14 @@ use syntax::ast_map; use syntax::ast_util; use syntax::visit; -type type_uses = uint; // Bitmask -const use_repr: uint = 1u; /* Dependency on size/alignment/mode and - take/drop glue */ -const use_tydesc: uint = 2u; /* Takes the tydesc, or compares */ +pub type type_uses = uint; // Bitmask +pub const use_repr: uint = 1u; /* Dependency on size/alignment/mode and + take/drop glue */ +pub const use_tydesc: uint = 2u; /* Takes the tydesc, or compares */ -type ctx = {ccx: @crate_ctxt, - uses: ~[mut type_uses]}; +pub type ctx = {ccx: @crate_ctxt, uses: ~[mut type_uses]}; -fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) +pub fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) -> ~[type_uses] { match ccx.type_use_cache.find(fn_id) { Some(uses) => return uses, @@ -175,7 +174,7 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) uses } -fn type_needs(cx: ctx, use_: uint, ty: ty::t) { +pub fn type_needs(cx: ctx, use_: uint, ty: ty::t) { // Optimization -- don't descend type if all params already have this use for vec::each_mut(cx.uses) |u| { if *u & use_ != use_ { @@ -185,8 +184,10 @@ fn type_needs(cx: ctx, use_: uint, ty: ty::t) { } } -fn type_needs_inner(cx: ctx, use_: uint, ty: ty::t, - enums_seen: @List) { +pub fn type_needs_inner(cx: ctx, + use_: uint, + ty: ty::t, + enums_seen: @List) { do ty::maybe_walk_ty(ty) |ty| { if ty::type_has_params(ty) { match ty::get(ty).sty { @@ -220,11 +221,11 @@ fn type_needs_inner(cx: ctx, use_: uint, ty: ty::t, } } -fn node_type_needs(cx: ctx, use_: uint, id: node_id) { +pub fn node_type_needs(cx: ctx, use_: uint, id: node_id) { type_needs(cx, use_, ty::node_id_to_type(cx.ccx.tcx, id)); } -fn mark_for_method_call(cx: ctx, e_id: node_id, callee_id: node_id) { 
+pub fn mark_for_method_call(cx: ctx, e_id: node_id, callee_id: node_id) { do option::iter(&cx.ccx.maps.method_map.find(e_id)) |mth| { match mth.origin { typeck::method_static(did) => { @@ -247,7 +248,7 @@ fn mark_for_method_call(cx: ctx, e_id: node_id, callee_id: node_id) { } } -fn mark_for_expr(cx: ctx, e: @expr) { +pub fn mark_for_expr(cx: ctx, e: @expr) { match e.node { expr_vstore(_, _) | expr_vec(_, _) | @@ -347,7 +348,7 @@ fn mark_for_expr(cx: ctx, e: @expr) { } } -fn handle_body(cx: ctx, body: blk) { +pub fn handle_body(cx: ctx, body: blk) { let v = visit::mk_vt(@visit::Visitor { visit_expr: |e, cx, v| { visit::visit_expr(e, cx, v); @@ -372,3 +373,4 @@ fn handle_body(cx: ctx, body: blk) { }); (v.visit_block)(body, cx, v); } + diff --git a/src/librustc/middle/trans/uniq.rs b/src/librustc/middle/trans/uniq.rs index 58853224891d..194a9c4ea09b 100644 --- a/src/librustc/middle/trans/uniq.rs +++ b/src/librustc/middle/trans/uniq.rs @@ -20,9 +20,7 @@ use middle::trans::glue; use syntax::ast; -export make_free_glue, autoderef, duplicate; - -fn make_free_glue(bcx: block, vptrptr: ValueRef, box_ty: ty::t) +pub fn make_free_glue(bcx: block, vptrptr: ValueRef, box_ty: ty::t) -> block { let _icx = bcx.insn_ctxt("uniq::make_free_glue"); let box_datum = immediate_rvalue(Load(bcx, vptrptr), box_ty); @@ -36,7 +34,7 @@ fn make_free_glue(bcx: block, vptrptr: ValueRef, box_ty: ty::t) } } -fn duplicate(bcx: block, src_box: ValueRef, src_ty: ty::t) -> Result { +pub fn duplicate(bcx: block, src_box: ValueRef, src_ty: ty::t) -> Result { let _icx = bcx.insn_ctxt("uniq::duplicate"); // Load the body of the source (*src) diff --git a/src/librustc/rustc.rc b/src/librustc/rustc.rc index c61a84c9082b..676f4d0238fd 100644 --- a/src/librustc/rustc.rc +++ b/src/librustc/rustc.rc @@ -57,41 +57,23 @@ pub mod middle { pub mod datum; pub mod callee; pub mod expr; - #[legacy_exports] pub mod common; - #[legacy_exports] pub mod consts; - #[legacy_exports] pub mod type_of; - #[legacy_exports] pub 
mod build; - #[legacy_exports] pub mod base; - #[legacy_exports] pub mod _match; - #[legacy_exports] pub mod uniq; - #[legacy_exports] pub mod closure; - #[legacy_exports] pub mod tvec; - #[legacy_exports] pub mod meth; - #[legacy_exports] pub mod cabi; - #[legacy_exports] pub mod cabi_x86_64; - #[legacy_exports] pub mod foreign; - #[legacy_exports] pub mod reflect; - #[legacy_exports] pub mod shape; - #[legacy_exports] pub mod debuginfo; - #[legacy_exports] pub mod type_use; - #[legacy_exports] pub mod reachable; pub mod machine; }