diff --git a/src/comp/middle/shape.rs b/src/comp/middle/shape.rs new file mode 100644 index 000000000000..c9abfac24487 --- /dev/null +++ b/src/comp/middle/shape.rs @@ -0,0 +1,538 @@ +// A "shape" is a compact encoding of a type that is used by interpreted glue. +// This substitutes for the runtime tags used by e.g. MLs. + +import lib::llvm::True; +import lib::llvm::llvm::ModuleRef; +import lib::llvm::llvm::TypeRef; +import lib::llvm::llvm::ValueRef; +import middle::trans; +import middle::trans_common::crate_ctxt; +import middle::trans::llsize_of; +import middle::trans_common::val_ty; +import middle::trans_common; +import middle::trans_common::C_bytes; +import middle::trans_common::C_int; +import middle::trans_common::C_named_struct; +import middle::trans_common::C_struct; +import middle::trans_common::C_uint; +import middle::trans_common::T_i8; +import middle::trans_common::T_ptr; +import middle::ty; +import middle::ty::field; +import middle::ty::mt; +import syntax::ast; +import syntax::codemap::span; +import syntax::util::interner; +import util::common; + +import std::ivec; +import std::map::hashmap; +import std::option::none; +import std::option::some; +import std::str; + +import ty_ctxt = middle::ty::ctxt; + +type res_info = { did: ast::def_id, t: ty::t }; + +type ctxt = { + mutable next_tag_id: u16, + pad: u16, + tag_id_to_index: hashmap[ast::def_id,u16], + mutable tag_order: ast::def_id[], + resources: interner::interner[res_info], + llshapetablesty: TypeRef, + llshapetables: ValueRef +}; + +const shape_u8 : u8 = 0u8; +const shape_u16 : u8 = 1u8; +const shape_u32 : u8 = 2u8; +const shape_u64 : u8 = 3u8; +const shape_i8 : u8 = 4u8; +const shape_i16 : u8 = 5u8; +const shape_i32 : u8 = 6u8; +const shape_i64 : u8 = 7u8; +const shape_f32 : u8 = 8u8; +const shape_f64 : u8 = 9u8; +const shape_evec : u8 = 10u8; +const shape_ivec : u8 = 11u8; +const shape_tag : u8 = 12u8; +const shape_box : u8 = 13u8; +const shape_port : u8 = 14u8; +const shape_chan : u8 = 15u8; +const 
shape_task : u8 = 16u8; +const shape_struct : u8 = 17u8; +const shape_fn : u8 = 18u8; +const shape_obj : u8 = 19u8; +const shape_res : u8 = 20u8; +const shape_var : u8 = 21u8; + +// FIXME: This is a bad API in trans_common. +fn C_u8(n : u8) -> ValueRef { ret trans_common::C_u8(n as uint); } + +fn fake_span() -> span { ret { lo: 0u, hi: 0u }; } + +fn hash_res_info(ri : &res_info) -> uint { + let h = 5381u; + h *= 33u; h += (ri.did.crate as uint); + h *= 33u; h += (ri.did.node as uint); + h *= 33u; h += (ri.t as uint); + ret h; +} + +fn eq_res_info(a : &res_info, b : &res_info) -> bool { + ret a.did.crate == b.did.crate && a.did.node == b.did.node && a.t == b.t; +} + +fn mk_global(ccx : &@crate_ctxt, name : &str, llval : ValueRef) -> ValueRef { + let llglobal = lib::llvm::llvm::LLVMAddGlobal(ccx.llmod, val_ty(llval), + str::buf(name)); + lib::llvm::llvm::LLVMSetInitializer(llglobal, llval); + lib::llvm::llvm::LLVMSetGlobalConstant(llglobal, True); + lib::llvm::llvm::LLVMSetLinkage(llglobal, lib::llvm::LLVMInternalLinkage + as lib::llvm::llvm::Linkage); + ret llglobal; +} + + +// Computes a set of variants of a tag that are guaranteed to have size and +// alignment at least as large as any other variant of the tag. This is an +// important performance optimization. +// +// TODO: Use this in dynamic_size_of() as well. + +fn largest_variants(ccx : &@crate_ctxt, tag_id : &ast::def_id) -> uint[] { + // Compute the minimum and maximum size and alignment for each variant. + // + // TODO: We could do better here; e.g. we know that any variant that + // contains (T,T) must be as least as large as any variant that contains + // just T. 
+ let ranges = ~[]; + let variants = ty::tag_variants(ccx.tcx, tag_id); + for variant : ty::variant_info in variants { + let bounded = true; + let { a: min_size, b: min_align } = { a: 0u, b: 0u }; + for elem_t : ty::t in variant.args { + if ty::type_contains_params(ccx.tcx, elem_t) { + // TODO: We could do better here; this causes us to + // conservatively assume that (int, T) has minimum size 0, + // when in fact it has minimum size sizeof(int). + bounded = false; + } else { + let llty = trans::type_of(ccx, fake_span(), elem_t); + min_size += trans::llsize_of_real(ccx, llty); + min_align += trans::llalign_of_real(ccx, llty); + } + } + + ranges += ~[{ size: { min: min_size, bounded: bounded }, + align: { min: min_align, bounded: bounded } }]; + } + + // Initialize the candidate set to contain all variants. + let candidates = ~[mutable]; + for variant in variants { candidates += ~[mutable true]; } + + // Do a pairwise comparison among all variants still in the candidate set. + // Throw out any variant that we know has size and alignment at least as + // small as some other variant. + let i = 0u; + while i < ivec::len(ranges) - 1u { + if candidates.(i) { + let j = i + 1u; + while (j < ivec::len(ranges)) { + if candidates.(j) { + if ranges.(i).size.bounded && ranges.(i).align.bounded && + ranges.(j).size.bounded && + ranges.(j).align.bounded { + if ranges.(i).size >= ranges.(j).size && + ranges.(i).align >= ranges.(j).align { + // Throw out j. + candidates.(j) = false; + } else if ranges.(j).size >= ranges.(i).size && + ranges.(j).align >= ranges.(j).align { + // Throw out i. + candidates.(i) = false; + } + } + } + j += 1u; + } + } + i += 1u; + } + + // Return the resulting set. + let result = ~[]; + i = 0u; + while i < ivec::len(candidates) { + if candidates.(i) { result += ~[i]; } + i += 1u; + } + ret result; +} + +// Computes the static size of a tag, without using mk_imm_tup(), which is +// bad for performance. +// +// TODO: Migrate trans over to use this. 
// Rounds `size` up to the next multiple of `align`, which must be a power
// of two (and at least 1, as asserted). `(size + align - 1) & !(align - 1)`
// written with the -1 folded into the left operand.
fn round_up(size : u16, align : u8) -> u16 {
    assert align >= 1u8;
    let alignment = align as u16;
    ret ((size-1u16) + alignment) & !(alignment-1u16);
}

// Size/alignment pair as stored in the shape info tables (u16 size, u8 align).
type size_align = { size: u16, align: u8 };

// Computes the static size and alignment of the tag `did`, looking only at
// the variants in `largest_variants` (see largest_variants() above) — any
// other variant is known to be no larger.
fn compute_static_tag_size(ccx : &@crate_ctxt, largest_variants : &uint[],
                           did : &ast::def_id) -> size_align {
    let max_size = 0u16; let max_align = 1u8;
    let variants = ty::tag_variants(ccx.tcx, did);
    for vid : uint in largest_variants {
        // We increment a "virtual data pointer" to compute the size.
        let lltys = ~[];
        for typ : ty::t in variants.(vid).args {
            lltys += ~[trans::type_of(ccx, fake_span(), typ)];
        }

        // Let LLVM lay out the variant's payload as a struct and ask the
        // target data for its real size and preferred alignment.
        let llty = trans_common::T_struct(lltys);
        let dp = trans::llsize_of_real(ccx, llty) as u16;
        let variant_align = trans::llalign_of_real(ccx, llty) as u8;

        if max_size < dp { max_size = dp; }
        if max_align < variant_align { max_align = variant_align; }
    }

    // Add space for the tag if applicable.
    // FIXME (issue #792): This is wrong. If the tag starts with an 8 byte
    // aligned quantity, we don't align it.
    if ivec::len(variants) > 1u {
        max_size += 4u16;   // 4-byte discriminant word
        max_align = 4u8;
    }

    ret { size: max_size, align: max_align };
}

// Classification of a tag for shape purposes: a single nullary variant
// (tk_unit), multiple nullary variants (tk_enum — just a discriminant), or
// anything carrying data (tk_complex).
tag tag_kind {
    tk_unit;
    tk_enum;
    tk_complex;
}

fn tag_kind(ccx : &@crate_ctxt, did : &ast::def_id) -> tag_kind {
    let variants = ty::tag_variants(ccx.tcx, did);
    // NOTE(review): a zero-variant tag is classified tk_complex — presumably
    // so it goes through the general tag path; confirm against callers.
    if ivec::len(variants) == 0u { ret tk_complex; }
    for v : ty::variant_info in variants {
        if ivec::len(v.args) > 0u { ret tk_complex; }
    }
    if ivec::len(variants) == 1u { ret tk_unit; }
    ret tk_enum;
}


// Returns the code corresponding to the pointer size on this architecture.
+fn s_int(tcx : &ty_ctxt) -> u8 { + ret shape_i32; // TODO: x86-64 +} + +fn s_uint(tcx : &ty_ctxt) -> u8 { + ret shape_u32; // TODO: x86-64 +} + +fn s_float(tcx : &ty_ctxt) -> u8 { + ret shape_f32; // TODO: x86-64 +} + +fn mk_ctxt(llmod : ModuleRef) -> ctxt { + let llshapetablesty = trans_common::T_named_struct("shapes"); + let llshapetables = + lib::llvm::llvm::LLVMAddGlobal(llmod, llshapetablesty, + str::buf("shapes")); + + ret { + mutable next_tag_id: 0u16, + pad: 0u16, + tag_id_to_index: common::new_def_hash(), + mutable tag_order: ~[], + resources: interner::mk(hash_res_info, eq_res_info), + llshapetablesty: llshapetablesty, + llshapetables: llshapetables + }; +} + +fn add_bool(dest : &mutable u8[], val : bool) { + dest += ~[if val { 1u8 } else { 0u8 }]; +} + +fn add_u16(dest : &mutable u8[], val : u16) { + dest += ~[(val & 0xffu16) as u8, (val >> 8u16) as u8]; +} + +fn add_substr(dest : &mutable u8[], src : &u8[]) { + add_u16(dest, ivec::len(src) as u16); + dest += src; +} + +fn shape_of(ccx : &@crate_ctxt, t : ty::t) -> u8[] { + let s = ~[]; + + alt ty::struct(ccx.tcx, t) { + ty::ty_nil. | ty::ty_bool. | ty::ty_machine(ast::ty_u8.) { + s += ~[shape_u8]; + } + + ty::ty_bot. { fail "bot ty in shape_of"; } + + ty::ty_int. { s += ~[s_int(ccx.tcx)]; } + ty::ty_float. { s += ~[s_float(ccx.tcx)]; } + + ty::ty_uint. | ty::ty_ptr(_) | ty::ty_type. | ty::ty_native(_) { + s += ~[s_uint(ccx.tcx)]; + } + + ty::ty_machine(ast::ty_i8.) { s += ~[shape_i8]; } + ty::ty_machine(ast::ty_u16.) { s += ~[shape_u16]; } + ty::ty_machine(ast::ty_i16.) { s += ~[shape_i16]; } + ty::ty_machine(ast::ty_u32.) | ty::ty_char. { s += ~[shape_u32]; } + ty::ty_machine(ast::ty_i32.) { s += ~[shape_i32]; } + + ty::ty_str. { s += ~[shape_evec, 1u8, 1u8, 0u8, shape_u8]; } + ty::ty_istr. { s += ~[shape_ivec, 1u8, 1u8, 0u8, shape_u8]; } + + ty::ty_tag(did, tps) { + alt tag_kind(ccx, did) { + tk_unit. { + // FIXME: For now we do this. + s += ~[shape_u32]; + } + tk_enum. 
{ s += ~[shape_u32]; } + tk_complex. { + s += ~[shape_tag]; + + let sub = ~[]; + + let id; + alt ccx.shape_cx.tag_id_to_index.find(did) { + none. { + id = ccx.shape_cx.next_tag_id; + ccx.shape_cx.tag_id_to_index.insert(did, id); + ccx.shape_cx.tag_order += ~[did]; + ccx.shape_cx.next_tag_id += 1u16; + } + some(existing_id) { id = existing_id; } + } + add_u16(sub, id as u16); + + add_u16(sub, ivec::len(tps) as u16); + for tp : ty::t in tps { + let subshape = shape_of(ccx, tp); + add_u16(sub, ivec::len(subshape) as u16); + sub += subshape; + } + + s += sub; + } + } + } + + ty::ty_box(mt) { + s += ~[shape_box]; + add_substr(s, shape_of(ccx, mt.ty)); + } + ty::ty_vec(mt) { + s += ~[shape_evec]; + add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty)); + add_substr(s, shape_of(ccx, mt.ty)); + } + ty::ty_ivec(mt) { + s += ~[shape_ivec]; + add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty)); + add_size_hint(ccx, s, mt.ty); + add_substr(s, shape_of(ccx, mt.ty)); + } + ty::ty_port(t) { + s += ~[shape_port]; + add_substr(s, shape_of(ccx, t)); + } + ty::ty_chan(t) { s += ~[shape_chan]; } + ty::ty_task. 
{ s += ~[shape_task]; } + + ty::ty_rec(fields) { + s += ~[shape_struct]; + let sub = ~[]; + for f : field in fields { sub += shape_of(ccx, f.mt.ty); } + add_substr(s, sub); + } + + ty::ty_fn(_,_,_,_,_) { s += ~[shape_fn]; } + ty::ty_native_fn(_,_,_) { s += ~[shape_u32]; } + ty::ty_obj(_) { s += ~[shape_obj]; } + + ty::ty_res(did, raw_subt, tps) { + let subt = ty::substitute_type_params(ccx.tcx, tps, raw_subt); + let ri = { did: did, t: subt }; + let id = interner::intern(ccx.shape_cx.resources, ri); + + s += ~[shape_res]; + add_u16(s, id as u16); + add_u16(s, ivec::len(tps) as u16); + + let sub = ~[]; + for tp : ty::t in tps { add_substr(s, sub); } + add_substr(s, sub); + + add_substr(s, shape_of(ccx, subt)); + + } + + ty::ty_var(n) { fail "shape_of ty_var"; } + ty::ty_param(n,_) { s += ~[shape_var, n as u8]; } + } + + ret s; +} + +fn add_size_hint(ccx : &@crate_ctxt, s : &mutable u8[], typ : ty::t) { + if (ty::type_has_dynamic_size(ccx.tcx, typ)) { + s += ~[ 0u8, 0u8, 0u8 ]; + ret; + } + + let llty = trans::type_of(ccx, fake_span(), typ); + add_u16(s, trans::llsize_of_real(ccx, llty) as u16); + s += ~[ trans::llalign_of_real(ccx, llty) as u8 ]; +} + +// FIXME: We might discover other variants as we traverse these. Handle this. +fn shape_of_variant(ccx : &@crate_ctxt, v : &ty::variant_info) -> u8[] { + let s = ~[]; + for t : ty::t in v.args { s += shape_of(ccx, t); } + ret s; +} + +fn gen_tag_shapes(ccx : &@crate_ctxt) -> ValueRef { + // Loop over all the tag variants and write their shapes into a data + // buffer. As we do this, it's possible for us to discover new tags, so we + // must do this first. 
+ let i = 0u; + let data = ~[]; let offsets = ~[]; + while (i < ivec::len(ccx.shape_cx.tag_order)) { + let did = ccx.shape_cx.tag_order.(i); + let variants = ty::tag_variants(ccx.tcx, did); + + for v : ty::variant_info in variants { + offsets += ~[ivec::len(data) as u16]; + + let variant_shape = shape_of_variant(ccx, v); + add_substr(data, variant_shape); + } + + i += 1u; + } + + // Now calculate the sizes of the header space (which contains offsets to + // info records for each tag) and the info space (which contains offsets + // to each variant shape). As we do so, build up the header. + + let header = ~[]; let info = ~[]; + let header_sz = 2u16 * ccx.shape_cx.next_tag_id; + let data_sz = ivec::len(data) as u16; + + let info_sz = 0u16; + for did_ : ast::def_id in ccx.shape_cx.tag_order { + let did = did_; // Satisfy alias checker. + let variants = ty::tag_variants(ccx.tcx, did); + add_u16(header, header_sz + info_sz); + info_sz += 2u16 * ((ivec::len(variants) as u16) + 2u16) + 3u16; + } + + // Construct the info tables, which contain offsets to the shape of each + // variant. Also construct the largest-variant table for each tag, which + // contains the variants that the size-of operation needs to look at. + + let lv_table = ~[]; + i = 0u; + for did_ : ast::def_id in ccx.shape_cx.tag_order { + let did = did_; // Satisfy alias checker. + let variants = ty::tag_variants(ccx.tcx, did); + add_u16(info, ivec::len(variants) as u16); + + // Construct the largest-variants table. + add_u16(info, header_sz + info_sz + data_sz + + (ivec::len(lv_table) as u16)); + + let lv = largest_variants(ccx, did); + add_u16(lv_table, ivec::len(lv) as u16); + for v : uint in lv { add_u16(lv_table, v as u16); } + + // Determine whether the tag has dynamic size. 
+ let dynamic = false; + for variant : ty::variant_info in variants { + for typ : ty::t in variant.args { + if ty::type_has_dynamic_size(ccx.tcx, typ) { dynamic = true; } + } + } + + // If we can, write in the static size and alignment of the tag. + // Otherwise, write a placeholder. + let size_align; + if dynamic { + size_align = { size: 0u16, align: 0u8 }; + } else { + size_align = compute_static_tag_size(ccx, lv, did); + } + add_u16(info, size_align.size); + info += ~[size_align.align]; + + // Now write in the offset of each variant. + for v : ty::variant_info in variants { + add_u16(info, header_sz + info_sz + offsets.(i)); + i += 1u; + } + } + + assert (i == ivec::len(offsets)); + assert (header_sz == (ivec::len(header) as u16)); + assert (info_sz == (ivec::len(info) as u16)); + assert (data_sz == (ivec::len(data) as u16)); + + header += info; + header += data; + header += lv_table; + + ret mk_global(ccx, "tag_shapes", C_bytes(header)); +} + +fn gen_resource_shapes(ccx : &@crate_ctxt) -> ValueRef { + let dtors = ~[]; + let i = 0u; + let len = interner::len(ccx.shape_cx.resources); + while i < len { + let ri = interner::get(ccx.shape_cx.resources, i); + dtors += ~[trans_common::get_res_dtor(ccx, fake_span(), ri.did, + ri.t)]; + i += 1u; + } + + ret mk_global(ccx, "resource_shapes", C_struct(dtors)); +} + +fn gen_shape_tables(ccx : &@crate_ctxt) { + let lltagstable = gen_tag_shapes(ccx); + let llresourcestable = gen_resource_shapes(ccx); + trans_common::set_struct_body(ccx.shape_cx.llshapetablesty, + ~[val_ty(lltagstable), + val_ty(llresourcestable)]); + + let lltables = C_named_struct(ccx.shape_cx.llshapetablesty, + ~[lltagstable, llresourcestable]); + lib::llvm::llvm::LLVMSetInitializer(ccx.shape_cx.llshapetables, lltables); + lib::llvm::llvm::LLVMSetGlobalConstant(ccx.shape_cx.llshapetables, True); + lib::llvm::llvm::LLVMSetLinkage(ccx.shape_cx.llshapetables, + lib::llvm::LLVMInternalLinkage as + lib::llvm::llvm::Linkage); +} + diff --git 
a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs index 72535c0d0d32..817e82718ecc 100644 --- a/src/comp/middle/trans.rs +++ b/src/comp/middle/trans.rs @@ -426,6 +426,11 @@ fn llsize_of_real(cx: &@crate_ctxt, t: TypeRef) -> uint { ret llvm::LLVMStoreSizeOfType(cx.td.lltd, t); } +// Returns the real alignment of the given type for the current target. +fn llalign_of_real(cx: &@crate_ctxt, t: TypeRef) -> uint { + ret llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, t); +} + fn llsize_of(t: TypeRef) -> ValueRef { ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMSizeOf(t), T_int(), False); @@ -1424,23 +1429,7 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: &ast::def_id, let val = GEP_tup_like(cx, tup_ty, rs, ~[0, 1]); cx = val.bcx; // Find and call the actual destructor. - let dtor_pair = - if did.crate == ast::local_crate { - alt ccx.fn_pairs.find(did.node) { - some(x) { x } - _ { ccx.tcx.sess.bug("internal error in trans_res_drop") } - } - } else { - let params = - csearch::get_type_param_count(ccx.sess.get_cstore(), did); - let f_t = - type_of_fn(ccx, cx.sp, ast::proto_fn, - ~[{mode: ty::mo_alias(false), ty: inner_t}], - ty::mk_nil(ccx.tcx), params); - get_extern_const(ccx.externs, ccx.llmod, - csearch::get_symbol(ccx.sess.get_cstore(), did), - T_fn_pair(*ccx, f_t)) - }; + let dtor_pair = trans_common::get_res_dtor(ccx, cx.sp, did, inner_t); let dtor_addr = cx.build.Load(cx.build.GEP(dtor_pair, ~[C_int(0), C_int(abi::fn_field_code)])); @@ -8002,7 +7991,8 @@ fn trans_crate(sess: &session::session, crate: &@ast::crate, tcx: &ty::ctxt, upcall::declare_upcalls(tn, tydesc_type, taskptr_type, llmod), rust_object_type: T_rust_object(), tydesc_type: tydesc_type, - task_type: task_type}; + task_type: task_type, + shape_cx: shape::mk_ctxt(llmod)}; let cx = new_local_ctxt(ccx); collect_items(ccx, crate); collect_tag_ctors(ccx, crate); diff --git a/src/comp/middle/trans_common.rs b/src/comp/middle/trans_common.rs index 097be3357fcb..32b25a8a062c 100644 --- 
a/src/comp/middle/trans_common.rs +++ b/src/comp/middle/trans_common.rs @@ -4,6 +4,7 @@ */ import std::int; +import std::ivec; import std::str; import std::uint; import std::str::rustrt::sbuf; @@ -13,6 +14,7 @@ import std::option; import std::option::some; import std::option::none; import std::fs; +import std::unsafe; import syntax::ast; import driver::session; import middle::ty; @@ -139,7 +141,8 @@ type crate_ctxt = { upcalls: @upcall::upcalls, rust_object_type: TypeRef, tydesc_type: TypeRef, - task_type: TypeRef + task_type: TypeRef, + shape_cx: shape::ctxt }; type local_ctxt = @@ -314,6 +317,25 @@ fn revoke_clean(cx: &@block_ctxt, val: ValueRef) { std::ivec::len(sc_cx.cleanups)); } +fn get_res_dtor(ccx : &@crate_ctxt, sp : &span, did : &ast::def_id, + inner_t : ty::t) -> ValueRef { + if did.crate == ast::local_crate { + alt ccx.fn_pairs.find(did.node) { + some(x) { ret x; } + _ { ccx.tcx.sess.bug("get_res_dtor: can't find resource dtor!"); } + } + } + + let params = csearch::get_type_param_count(ccx.sess.get_cstore(), did); + let f_t = trans::type_of_fn(ccx, sp, ast::proto_fn, + ~[{ mode: ty::mo_alias(false), ty: inner_t }], + ty::mk_nil(ccx.tcx), params); + ret trans::get_extern_const(ccx.externs, ccx.llmod, + csearch::get_symbol(ccx.sess.get_cstore(), + did), + T_fn_pair(*ccx, f_t)); +} + tag block_kind { @@ -846,3 +868,9 @@ fn C_array(ty: TypeRef, elts: &ValueRef[]) -> ValueRef { ret llvm::LLVMConstArray(ty, std::ivec::to_ptr(elts), std::ivec::len(elts)); } + +fn C_bytes(bytes : &u8[]) -> ValueRef { + ret llvm::LLVMConstString(unsafe::reinterpret_cast(ivec::to_ptr(bytes)), + ivec::len(bytes), False); +} + diff --git a/src/comp/middle/ty.rs b/src/comp/middle/ty.rs index 1c14a0be2c09..556c9e36bea4 100644 --- a/src/comp/middle/ty.rs +++ b/src/comp/middle/ty.rs @@ -170,6 +170,7 @@ export type_is_fp; export type_is_integral; export type_is_native; export type_is_nil; +export type_is_pod; export type_is_scalar; export type_is_sequence; export type_is_signed; @@ 
-1340,6 +1341,50 @@ fn type_owns_heap_mem(cx: &ctxt, ty: &t) -> bool { ret result; } +// Whether a type is Plain Old Data (i.e. can be safely memmoved). +fn type_is_pod(cx : &ctxt, ty : &t) -> bool { + let result = true; + alt struct(cx, ty) { + // Scalar types + ty_nil. | ty_bot. | ty_bool. | ty_int. | ty_float. | ty_uint. | + ty_machine(_) | ty_char. | ty_type. | ty_native(_) | ty_ptr(_) { + result = true; + } + + // Boxed types + ty_str. | ty_istr. | ty_box(_) | ty_vec(_) | ty_ivec(_) | + ty_fn(_,_,_,_,_) | ty_native_fn(_,_,_) | ty_obj(_) | ty_port(_) | + ty_chan(_) | ty_task. { result = false; } + + // Structural types + ty_tag(did, tps) { + let variants = tag_variants(cx, did); + for variant : variant_info in variants { + let tup_ty = mk_imm_tup(cx, variant.args); + + // Perform any type parameter substitutions. + tup_ty = substitute_type_params(cx, tps, tup_ty); + if !type_is_pod(cx, tup_ty) { result = false; } + } + } + ty_rec(flds) { + for f : field in flds { + if !type_is_pod(cx, f.mt.ty) { result = false; } + } + } + ty_res(_, inner, tps) { + result = type_is_pod(cx, + substitute_type_params(cx, tps, inner)); + } + ty_constr(subt, _) { result = type_is_pod(cx, subt); } + + ty_var(_) { fail "ty_var in type_is_pod"; } + ty_param(_,_) { result = false; } + } + + ret result; +} + fn type_param(cx: &ctxt, ty: &t) -> option::t[uint] { alt struct(cx, ty) { ty_param(id,_) { ret some(id); } diff --git a/src/comp/rustc.rc b/src/comp/rustc.rc index ce1cb27eb646..3181d3480718 100644 --- a/src/comp/rustc.rc +++ b/src/comp/rustc.rc @@ -29,6 +29,7 @@ mod middle { mod alias; mod kind; mod freevars; + mod shape; mod tstate { mod ck; diff --git a/src/comp/syntax/util/interner.rs b/src/comp/syntax/util/interner.rs index 9474ddd16d2c..7e7a9ca71073 100644 --- a/src/comp/syntax/util/interner.rs +++ b/src/comp/syntax/util/interner.rs @@ -20,6 +20,7 @@ fn mk[@T](hasher: hashfn[T], eqer: eqfn[T]) -> interner[T] { let m = map::mk_hashmap[T, uint](hasher, eqer); ret {map: m, 
mutable vect: ~[], hasher: hasher, eqer: eqer}; } + fn intern[@T](itr: &interner[T], val: &T) -> uint { alt itr.map.find(val) { some(idx) { ret idx; } @@ -31,5 +32,8 @@ fn intern[@T](itr: &interner[T], val: &T) -> uint { } } } + fn get[T](itr: &interner[T], idx: uint) -> T { ret itr.vect.(idx); } +fn len[T](itr : &interner[T]) -> uint { ret ivec::len(itr.vect); } +