about summary refs log tree commit diff
path: root/src/comp
diff options
context:
space:
mode:
author     Patrick Walton <[email protected]>  2011-04-22 17:00:46 -0700
committer  Patrick Walton <[email protected]>  2011-04-22 17:00:46 -0700
commit     e0eccaddb2bad87993505fffc523a47b68018e64 (patch)
tree       9cbc2786de5bf19b959c22c5b4606a3d2809c90e /src/comp
parent     rustc: Move the type serialization logic to an Encode module (diff)
download   rust-e0eccaddb2bad87993505fffc523a47b68018e64.tar.xz
           rust-e0eccaddb2bad87993505fffc523a47b68018e64.zip
rustc: Thread the type store through everything that needs to access type structures
Diffstat (limited to 'src/comp')
-rw-r--r--  src/comp/front/creader.rs    |   2
-rw-r--r--  src/comp/middle/metadata.rs  |  27
-rw-r--r--  src/comp/middle/trans.rs     | 354
-rw-r--r--  src/comp/middle/ty.rs        | 298
-rw-r--r--  src/comp/middle/typeck.rs    | 114
5 files changed, 412 insertions, 383 deletions
diff --git a/src/comp/front/creader.rs b/src/comp/front/creader.rs
index 857ed2d3..99dbf6ad 100644
--- a/src/comp/front/creader.rs
+++ b/src/comp/front/creader.rs
@@ -546,7 +546,7 @@ fn get_tag_variants(session.session sess,
auto item = find_item(did._1, items);
auto ctor_ty = item_type(item, external_crate_id, tystore);
let vec[ty.t] arg_tys = vec();
- alt (ty.struct(ctor_ty)) {
+ alt (ty.struct(tystore, ctor_ty)) {
case (ty.ty_fn(_, ?args, _)) {
for (ty.arg a in args) {
arg_tys += vec(a.ty);
diff --git a/src/comp/middle/metadata.rs b/src/comp/middle/metadata.rs
index 7f6ccc82..bc4bce12 100644
--- a/src/comp/middle/metadata.rs
+++ b/src/comp/middle/metadata.rs
@@ -52,11 +52,12 @@ const uint tag_index_table = 0x15u;
mod Encode {
type ctxt = rec(
- fn(ast.def_id) -> str ds // Callback to translate defs to strs.
+ fn(ast.def_id) -> str ds, // Callback to translate defs to strs.
+ @ty.type_store tystore // The type store.
);
fn ty_str(@ctxt cx, ty.t t) -> str {
- ret sty_str(cx, ty.struct(t));
+ ret sty_str(cx, ty.struct(cx.tystore, t));
}
fn mt_str(@ctxt cx, &ty.mt mt) -> str {
@@ -332,11 +333,11 @@ fn encode_variant_id(&ebml.writer ebml_w, ast.def_id vid) {
ebml.end_tag(ebml_w);
}
-fn encode_type(&ebml.writer ebml_w, ty.t typ) {
+fn encode_type(@trans.crate_ctxt cx, &ebml.writer ebml_w, ty.t typ) {
ebml.start_tag(ebml_w, tag_items_data_item_type);
auto f = def_to_str;
- auto ty_str_ctxt = @rec(ds=f);
+ auto ty_str_ctxt = @rec(ds=f, tystore=cx.tystore);
ebml_w.writer.write(_str.bytes(Encode.ty_str(ty_str_ctxt, typ)));
ebml.end_tag(ebml_w);
@@ -379,7 +380,7 @@ fn encode_tag_variant_info(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_def_id(ebml_w, variant.node.id);
encode_kind(ebml_w, 'v' as u8);
encode_tag_id(ebml_w, did);
- encode_type(ebml_w, trans.node_ann_type(cx, variant.node.ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, variant.node.ann));
if (_vec.len[ast.variant_arg](variant.node.args) > 0u) {
encode_symbol(cx, ebml_w, variant.node.id);
}
@@ -396,7 +397,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
ebml.start_tag(ebml_w, tag_items_data_item);
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 'c' as u8);
- encode_type(ebml_w, trans.node_ann_type(cx, ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, ann));
encode_symbol(cx, ebml_w, did);
ebml.end_tag(ebml_w);
}
@@ -405,7 +406,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 'f' as u8);
encode_type_param_count(ebml_w, tps);
- encode_type(ebml_w, trans.node_ann_type(cx, ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, ann));
encode_symbol(cx, ebml_w, did);
ebml.end_tag(ebml_w);
}
@@ -426,7 +427,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 'y' as u8);
encode_type_param_count(ebml_w, tps);
- encode_type(ebml_w, trans.node_ann_type(cx, ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, ann));
ebml.end_tag(ebml_w);
}
case (ast.item_tag(?id, ?variants, ?tps, ?did, ?ann)) {
@@ -434,7 +435,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 't' as u8);
encode_type_param_count(ebml_w, tps);
- encode_type(ebml_w, trans.node_ann_type(cx, ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, ann));
for (ast.variant v in variants) {
encode_variant_id(ebml_w, v.node.id);
}
@@ -448,7 +449,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_kind(ebml_w, 'o' as u8);
encode_type_param_count(ebml_w, tps);
auto fn_ty = trans.node_ann_type(cx, ann);
- encode_type(ebml_w, fn_ty);
+ encode_type(cx, ebml_w, fn_ty);
encode_symbol(cx, ebml_w, odid.ctor);
ebml.end_tag(ebml_w);
@@ -456,7 +457,7 @@ fn encode_info_for_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
encode_def_id(ebml_w, odid.ty);
encode_kind(ebml_w, 'y' as u8);
encode_type_param_count(ebml_w, tps);
- encode_type(ebml_w, ty.ty_fn_ret(fn_ty));
+ encode_type(cx, ebml_w, ty.ty_fn_ret(cx.tystore, fn_ty));
ebml.end_tag(ebml_w);
}
}
@@ -469,13 +470,13 @@ fn encode_info_for_native_item(@trans.crate_ctxt cx, &ebml.writer ebml_w,
case (ast.native_item_ty(_, ?did)) {
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 'T' as u8);
- encode_type(ebml_w, ty.mk_native(cx.tystore));
+ encode_type(cx, ebml_w, ty.mk_native(cx.tystore));
}
case (ast.native_item_fn(_, _, _, ?tps, ?did, ?ann)) {
encode_def_id(ebml_w, did);
encode_kind(ebml_w, 'F' as u8);
encode_type_param_count(ebml_w, tps);
- encode_type(ebml_w, trans.node_ann_type(cx, ann));
+ encode_type(cx, ebml_w, trans.node_ann_type(cx, ann));
encode_symbol(cx, ebml_w, did);
}
}
diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs
index 8ab2c3c0..cf16d317 100644
--- a/src/comp/middle/trans.rs
+++ b/src/comp/middle/trans.rs
@@ -185,7 +185,7 @@ fn mangle_name_by_type(@crate_ctxt ccx, vec[str] path, ty.t t) -> str {
ccx.sha.reset();
auto f = metadata.def_to_str;
- auto cx = @rec(ds=f);
+ auto cx = @rec(ds=f, tystore=ccx.tystore);
ccx.sha.input_str(metadata.Encode.ty_str(cx, t));
ret sep() + "rust" + sep()
@@ -558,9 +558,9 @@ fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
//
// TODO: Enforce via a predicate.
fn type_of(@crate_ctxt cx, ty.t t) -> TypeRef {
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.tystore, t)) {
log_err "type_of() called on a type with dynamic size: " +
- ty.ty_to_str(t);
+ ty.ty_to_str(cx.tystore, t);
fail;
}
@@ -570,7 +570,7 @@ fn type_of(@crate_ctxt cx, ty.t t) -> TypeRef {
fn type_of_explicit_args(@crate_ctxt cx, vec[ty.arg] inputs) -> vec[TypeRef] {
let vec[TypeRef] atys = vec();
for (ty.arg arg in inputs) {
- if (ty.type_has_dynamic_size(arg.ty)) {
+ if (ty.type_has_dynamic_size(cx.tystore, arg.ty)) {
check (arg.mode == ast.alias);
atys += vec(T_typaram_ptr(cx.tn));
} else {
@@ -605,7 +605,7 @@ fn type_of_fn_full(@crate_ctxt cx,
let vec[TypeRef] atys = vec();
// Arg 0: Output pointer.
- if (ty.type_has_dynamic_size(output)) {
+ if (ty.type_has_dynamic_size(cx.tystore, output)) {
atys += vec(T_typaram_ptr(cx.tn));
} else {
atys += vec(T_ptr(type_of_inner(cx, output)));
@@ -686,7 +686,7 @@ fn type_of_inner(@crate_ctxt cx, ty.t t) -> TypeRef {
let TypeRef llty = 0 as TypeRef;
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.tystore, t)) {
case (ty.ty_native) { llty = T_ptr(T_i8()); }
case (ty.ty_nil) { llty = T_nil(); }
case (ty.ty_bool) { llty = T_bool(); }
@@ -710,7 +710,7 @@ fn type_of_inner(@crate_ctxt cx, ty.t t) -> TypeRef {
case (ty.ty_char) { llty = T_char(); }
case (ty.ty_str) { llty = T_ptr(T_str()); }
case (ty.ty_tag(_, _)) {
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.tystore, t)) {
llty = T_opaque_tag(cx.tn);
} else {
auto size = static_size_of_tag(cx, t);
@@ -786,13 +786,14 @@ fn type_of_inner(@crate_ctxt cx, ty.t t) -> TypeRef {
}
check (llty as int != 0);
- llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(cx.tystore, t)),
+ llty);
cx.lltypes.insert(t, llty);
ret llty;
}
fn type_of_arg(@local_ctxt cx, &ty.arg arg) -> TypeRef {
- alt (ty.struct(arg.ty)) {
+ alt (ty.struct(cx.ccx.tystore, arg.ty)) {
case (ty.ty_param(_)) {
if (arg.mode == ast.alias) {
ret T_typaram_ptr(cx.ccx.tn);
@@ -814,7 +815,7 @@ fn type_of_arg(@local_ctxt cx, &ty.arg arg) -> TypeRef {
fn type_of_ty_param_count_and_ty(@local_ctxt lcx,
ty.ty_param_count_and_ty tpt) -> TypeRef {
- alt (ty.struct(tpt._1)) {
+ alt (ty.struct(lcx.ccx.tystore, tpt._1)) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
auto llfnty = type_of_fn(lcx.ccx, proto, inputs, output, tpt._0);
ret T_fn_pair(lcx.ccx.tn, llfnty);
@@ -1123,14 +1124,14 @@ fn llalign_of(TypeRef t) -> ValueRef {
}
fn size_of(@block_ctxt cx, ty.t t) -> result {
- if (!ty.type_has_dynamic_size(t)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
ret res(cx, llsize_of(type_of(cx.fcx.lcx.ccx, t)));
}
ret dynamic_size_of(cx, t);
}
fn align_of(@block_ctxt cx, ty.t t) -> result {
- if (!ty.type_has_dynamic_size(t)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
ret res(cx, llalign_of(type_of(cx.fcx.lcx.ccx, t)));
}
ret dynamic_align_of(cx, t);
@@ -1151,7 +1152,7 @@ fn array_alloca(@block_ctxt cx, TypeRef t, ValueRef n) -> ValueRef {
// types.
fn simplify_type(@crate_ctxt ccx, ty.t typ) -> ty.t {
fn simplifier(@crate_ctxt ccx, ty.t typ) -> ty.t {
- alt (ty.struct(typ)) {
+ alt (ty.struct(ccx.tystore, typ)) {
case (ty.ty_box(_)) {
ret ty.mk_imm_box(ccx.tystore, ty.mk_nil(ccx.tystore));
}
@@ -1164,7 +1165,7 @@ fn simplify_type(@crate_ctxt ccx, ty.t typ) -> ty.t {
// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(@crate_ctxt cx, ty.t t) -> uint {
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.tystore, t)) {
log_err "dynamically sized type passed to static_size_of_tag()";
fail;
}
@@ -1175,7 +1176,7 @@ fn static_size_of_tag(@crate_ctxt cx, ty.t t) -> uint {
auto tid;
let vec[ty.t] subtys;
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.tystore, t)) {
case (ty.ty_tag(?tid_, ?subtys_)) {
tid = tid_;
subtys = subtys_;
@@ -1235,7 +1236,7 @@ fn dynamic_size_of(@block_ctxt cx, ty.t t) -> result {
ret res(bcx, off);
}
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_param(?p)) {
auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
@@ -1290,7 +1291,7 @@ fn dynamic_size_of(@block_ctxt cx, ty.t t) -> result {
}
fn dynamic_align_of(@block_ctxt cx, ty.t t) -> result {
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_param(?p)) {
auto aptr = field_of_tydesc(cx, t, abi.tydesc_field_align);
ret res(aptr.bcx, aptr.bcx.build.Load(aptr.val));
@@ -1330,11 +1331,11 @@ fn dynamic_align_of(@block_ctxt cx, ty.t t) -> result {
fn GEP_tup_like(@block_ctxt cx, ty.t t,
ValueRef base, vec[int] ixs) -> result {
- check (ty.type_is_tup_like(t));
+ check (ty.type_is_tup_like(cx.fcx.lcx.ccx.tystore, t));
// It might be a static-known type. Handle this.
- if (! ty.type_has_dynamic_size(t)) {
+ if (! ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
let vec[ValueRef] v = vec();
for (int i in ixs) {
v += vec(C_int(i));
@@ -1359,7 +1360,7 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
// elements of the type and splitting the Xth off. Return the prefix as
// well as the innermost Xth type.
- fn split_type(ty.t t, vec[int] ixs, uint n)
+ fn split_type(@crate_ctxt ccx, ty.t t, vec[int] ixs, uint n)
-> rec(vec[ty.t] prefix, ty.t target) {
let uint len = _vec.len[int](ixs);
@@ -1375,7 +1376,7 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
// *single* structure, the first index (in GEP-ese) should just be
// 0, to yield the pointee.
check (ixs.(n) == 0);
- ret split_type(t, ixs, n+1u);
+ ret split_type(ccx, t, ixs, n+1u);
}
check (n < len);
@@ -1384,11 +1385,12 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
let vec[ty.t] prefix = vec();
let int i = 0;
while (i < ix) {
- _vec.push[ty.t](prefix, ty.get_element_type(t, i as uint));
+ _vec.push[ty.t](prefix,
+ ty.get_element_type(ccx.tystore, t, i as uint));
i += 1 ;
}
- auto selected = ty.get_element_type(t, i as uint);
+ auto selected = ty.get_element_type(ccx.tystore, t, i as uint);
if (n == len-1u) {
// We are at the innermost index.
@@ -1398,7 +1400,7 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
// Not the innermost index; call self recursively to dig deeper.
// Once we get an inner result, append it current prefix and
// return to caller.
- auto inner = split_type(selected, ixs, n+1u);
+ auto inner = split_type(ccx, selected, ixs, n+1u);
prefix += inner.prefix;
ret rec(prefix=prefix with inner);
}
@@ -1408,7 +1410,7 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
// the tuple parens are associative so it doesn't matter that we've
// flattened the incoming structure.
- auto s = split_type(t, ixs, 0u);
+ auto s = split_type(cx.fcx.lcx.ccx, t, ixs, 0u);
auto prefix_ty = ty.mk_imm_tup(cx.fcx.lcx.ccx.tystore, s.prefix);
auto bcx = cx;
auto sz = size_of(bcx, prefix_ty);
@@ -1416,7 +1418,7 @@ fn GEP_tup_like(@block_ctxt cx, ty.t t,
auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
auto bumped = bcx.build.GEP(raw, vec(sz.val));
- if (ty.type_has_dynamic_size(s.target)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, s.target)) {
ret res(bcx, bumped);
}
@@ -1460,7 +1462,7 @@ fn GEP_tag(@block_ctxt cx,
// Cast the blob pointer to the appropriate type, if we need to (i.e. if
// the blob pointer isn't dynamically sized).
let ValueRef llunionptr;
- if (!ty.type_has_dynamic_size(tup_ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, tup_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, tup_ty);
llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
} else {
@@ -1472,7 +1474,7 @@ fn GEP_tag(@block_ctxt cx,
// Cast the result to the appropriate type, if necessary.
auto val;
- if (!ty.type_has_dynamic_size(elem_ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, elem_ty)) {
auto llelemty = type_of(rslt.bcx.fcx.lcx.ccx, elem_ty);
val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
} else {
@@ -1529,7 +1531,7 @@ fn linearize_ty_params(@block_ctxt cx, ty.t t) ->
mutable vec[uint] defs);
fn linearizer(@rr r, ty.t t) {
- alt(ty.struct(t)) {
+ alt(ty.struct(r.cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_param(?pid)) {
let bool seen = false;
for (uint d in r.defs) {
@@ -1552,22 +1554,22 @@ fn linearize_ty_params(@block_ctxt cx, ty.t t) ->
mutable defs = param_defs);
auto f = bind linearizer(x, _);
- ty.walk_ty(f, t);
+ ty.walk_ty(cx.fcx.lcx.ccx.tystore, f, t);
ret tup(x.defs, x.vals);
}
fn get_tydesc(&@block_ctxt cx, ty.t t) -> result {
// Is the supplied type a type param? If so, return the passed-in tydesc.
- alt (ty.type_param(t)) {
+ alt (ty.type_param(cx.fcx.lcx.ccx.tystore, t)) {
case (some[uint](?id)) { ret res(cx, cx.fcx.lltydescs.(id)); }
case (none[uint]) { /* fall through */ }
}
// Does it contain a type param? If so, generate a derived tydesc.
- let uint n_params = ty.count_ty_params(t);
+ let uint n_params = ty.count_ty_params(cx.fcx.lcx.ccx.tystore, t);
- if (ty.count_ty_params(t) > 0u) {
+ if (ty.count_ty_params(cx.fcx.lcx.ccx.tystore, t) > 0u) {
auto tys = linearize_ty_params(cx, t);
check (n_params == _vec.len[uint](tys._0));
@@ -1633,7 +1635,7 @@ fn declare_tydesc(@local_ctxt cx, ty.t t) {
auto llsize;
auto llalign;
- if (!ty.type_has_dynamic_size(t)) {
+ if (!ty.type_has_dynamic_size(ccx.tystore, t)) {
auto llty = type_of(ccx, t);
llsize = llsize_of(llty);
llalign = llalign_of(llty);
@@ -1646,7 +1648,8 @@ fn declare_tydesc(@local_ctxt cx, ty.t t) {
auto glue_fn_ty = T_ptr(T_glue_fn(ccx.tn));
- auto name = sanitize(ccx.names.next("tydesc_" + ty.ty_to_str(t)));
+ auto name = sanitize(ccx.names.next("tydesc_" +
+ ty.ty_to_str(cx.ccx.tystore, t)));
auto gvar = llvm.LLVMAddGlobal(ccx.llmod, T_tydesc(ccx.tn),
_str.buf(name));
auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(ccx.tn)))),
@@ -1719,7 +1722,7 @@ fn make_generic_glue(@local_ctxt cx,
// passed by value.
auto llty;
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.ccx.tystore, t)) {
llty = T_ptr(T_i8());
} else {
llty = T_ptr(type_of(cx.ccx, t));
@@ -1766,10 +1769,10 @@ fn make_generic_glue(@local_ctxt cx,
fn make_take_glue(@block_ctxt cx, ValueRef v, ty.t t) {
// NB: v is an *alias* of type t here, not a direct value.
auto bcx;
- if (ty.type_is_boxed(t)) {
+ if (ty.type_is_boxed(cx.fcx.lcx.ccx.tystore, t)) {
bcx = incr_refcnt_of_boxed(cx, cx.build.Load(v)).bcx;
- } else if (ty.type_is_structural(t)) {
+ } else if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, t)) {
bcx = iter_structural_ty(cx, v, t,
bind take_ty(_, _, _)).bcx;
} else {
@@ -1800,7 +1803,7 @@ fn incr_refcnt_of_boxed(@block_ctxt cx, ValueRef box_ptr) -> result {
fn make_drop_glue(@block_ctxt cx, ValueRef v0, ty.t t) {
// NB: v0 is an *alias* of type t here, not a direct value.
auto rslt;
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_str) {
auto v = cx.build.Load(v0);
rslt = decr_refcnt_and_if_zero
@@ -1941,13 +1944,13 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v0, ty.t t) {
}
case (_) {
- if (ty.type_is_structural(t)) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, t)) {
rslt = iter_structural_ty(cx, v0, t,
bind drop_ty(_, _, _));
- } else if (ty.type_is_scalar(t) ||
- ty.type_is_native(t) ||
- ty.type_is_nil(t)) {
+ } else if (ty.type_is_scalar(cx.fcx.lcx.ccx.tystore, t) ||
+ ty.type_is_native(cx.fcx.lcx.ccx.tystore, t) ||
+ ty.type_is_nil(cx.fcx.lcx.ccx.tystore, t)) {
rslt = res(cx, C_nil());
}
}
@@ -2009,10 +2012,10 @@ fn make_cmp_glue(@block_ctxt cx,
auto lhs = load_if_immediate(cx, lhs0, t);
auto rhs = load_if_immediate(cx, rhs0, t);
- if (ty.type_is_scalar(t)) {
+ if (ty.type_is_scalar(cx.fcx.lcx.ccx.tystore, t)) {
make_scalar_cmp_glue(cx, lhs, rhs, t, llop);
- } else if (ty.type_is_box(t)) {
+ } else if (ty.type_is_box(cx.fcx.lcx.ccx.tystore, t)) {
lhs = cx.build.GEP(lhs, vec(C_int(0), C_int(abi.box_rc_field_body)));
rhs = cx.build.GEP(rhs, vec(C_int(0), C_int(abi.box_rc_field_body)));
auto rslt = call_cmp_glue(cx, lhs, rhs, t, llop);
@@ -2020,8 +2023,8 @@ fn make_cmp_glue(@block_ctxt cx,
rslt.bcx.build.Store(rslt.val, cx.fcx.llretptr);
rslt.bcx.build.RetVoid();
- } else if (ty.type_is_structural(t)
- || ty.type_is_sequence(t)) {
+ } else if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, t)
+ || ty.type_is_sequence(cx.fcx.lcx.ccx.tystore, t)) {
auto scx = new_sub_block_ctxt(cx, "structural compare start");
auto next = new_sub_block_ctxt(cx, "structural compare end");
@@ -2055,7 +2058,7 @@ fn make_cmp_glue(@block_ctxt cx,
llvm.LLVMSetValueName(flag, _str.buf("flag"));
auto r;
- if (ty.type_is_sequence(t)) {
+ if (ty.type_is_sequence(cx.fcx.lcx.ccx.tystore, t)) {
// If we hit == all the way through the minimum-shared-length
// section, default to judging the relative sequence lengths.
@@ -2094,7 +2097,8 @@ fn make_cmp_glue(@block_ctxt cx,
// be i8, because the data part of a vector always has type
// i8[]. So we need to cast it to the proper type.
- if (!ty.type_has_dynamic_size(t)) {
+ if (!ty.type_has_dynamic_size(last_cx.fcx.lcx.ccx.tystore,
+ t)) {
auto llelemty = T_ptr(type_of(last_cx.fcx.lcx.ccx, t));
av = cx.build.PointerCast(av, llelemty);
bv = cx.build.PointerCast(bv, llelemty);
@@ -2116,7 +2120,7 @@ fn make_cmp_glue(@block_ctxt cx,
ret res(cnt_cx, C_nil());
}
- if (ty.type_is_structural(t)) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, t)) {
r = iter_structural_ty_full(r.bcx, lhs, rhs, t,
bind inner(next, false, flag, llop,
_, _, _, _));
@@ -2143,31 +2147,34 @@ fn make_cmp_glue(@block_ctxt cx,
} else {
// FIXME: compare obj, fn by pointer?
trans_fail(cx, none[common.span],
- "attempt to compare values of type " + ty.ty_to_str(t));
+ "attempt to compare values of type " +
+ ty.ty_to_str(cx.fcx.lcx.ccx.tystore, t));
}
}
// A helper function to create scalar comparison glue.
fn make_scalar_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs, ty.t t,
ValueRef llop) {
- if (ty.type_is_fp(t)) {
+ if (ty.type_is_fp(cx.fcx.lcx.ccx.tystore, t)) {
make_fp_cmp_glue(cx, lhs, rhs, t, llop);
ret;
}
- if (ty.type_is_integral(t) || ty.type_is_bool(t)) {
+ if (ty.type_is_integral(cx.fcx.lcx.ccx.tystore, t) ||
+ ty.type_is_bool(cx.fcx.lcx.ccx.tystore, t)) {
make_integral_cmp_glue(cx, lhs, rhs, t, llop);
ret;
}
- if (ty.type_is_nil(t)) {
+ if (ty.type_is_nil(cx.fcx.lcx.ccx.tystore, t)) {
cx.build.Store(C_bool(true), cx.fcx.llretptr);
cx.build.RetVoid();
ret;
}
trans_fail(cx, none[common.span],
- "attempt to compare values of type " + ty.ty_to_str(t));
+ "attempt to compare values of type " +
+ ty.ty_to_str(cx.fcx.lcx.ccx.tystore, t));
}
// A helper function to create floating point comparison glue.
@@ -2246,8 +2253,8 @@ fn compare_integral_values(@block_ctxt cx, ValueRef lhs, ValueRef rhs,
// A helper function to create integral comparison glue.
fn make_integral_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs,
ty.t intype, ValueRef llop) {
- auto r = compare_integral_values(cx, lhs, rhs, ty.type_is_signed(intype),
- llop);
+ auto r = compare_integral_values(cx, lhs, rhs,
+ ty.type_is_signed(cx.fcx.lcx.ccx.tystore, intype), llop);
r.bcx.build.Store(r.val, r.bcx.fcx.llretptr);
r.bcx.build.RetVoid();
}
@@ -2271,7 +2278,7 @@ fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[variant_info] {
auto ctor_ty = node_ann_type(cx, variant.node.ann);
let vec[ty.t] arg_tys = vec();
if (_vec.len[ast.variant_arg](variant.node.args) > 0u) {
- for (ty.arg a in ty.ty_fn_args(ctor_ty)) {
+ for (ty.arg a in ty.ty_fn_args(cx.tystore, ctor_ty)) {
arg_tys += vec(a.ty);
}
}
@@ -2356,7 +2363,7 @@ fn iter_structural_ty_full(@block_ctxt cx,
ret res(next_cx, r.val);
}
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_tup(?args)) {
let int i = 0;
for (ty.mt arg in args) {
@@ -2430,7 +2437,7 @@ fn iter_structural_ty_full(@block_ctxt cx,
if (_vec.len[ty.t](variant.args) > 0u) {
// N-ary variant.
auto fn_ty = variant.ctor_ty;
- alt (ty.struct(fn_ty)) {
+ alt (ty.struct(bcx.fcx.lcx.ccx.tystore, fn_ty)) {
case (ty.ty_fn(_, ?args, _)) {
auto j = 0;
for (ty.arg a in args) {
@@ -2570,7 +2577,7 @@ fn iter_sequence_inner(@block_ctxt cx,
ValueRef dst,
ValueRef src) -> result {
auto llptrty;
- if (!ty.type_has_dynamic_size(elt_ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, elt_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, elt_ty);
llptrty = T_ptr(llty);
} else {
@@ -2605,7 +2612,7 @@ fn iter_sequence(@block_ctxt cx,
C_int(abi.vec_elt_fill)));
auto llunit_ty;
- if (ty.type_has_dynamic_size(elt_ty)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, elt_ty)) {
llunit_ty = T_i8();
} else {
llunit_ty = type_of(cx.fcx.lcx.ccx, elt_ty);
@@ -2626,7 +2633,7 @@ fn iter_sequence(@block_ctxt cx,
ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
}
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_vec(?elt)) {
ret iter_sequence_body(cx, v, elt.ty, f, false);
}
@@ -2701,7 +2708,7 @@ fn call_cmp_glue(@block_ctxt cx, ValueRef lhs, ValueRef rhs, ty.t t,
}
fn take_ty(@block_ctxt cx, ValueRef v, ty.t t) -> result {
- if (!ty.type_is_scalar(t)) {
+ if (!ty.type_is_scalar(cx.fcx.lcx.ccx.tystore, t)) {
call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue);
}
ret res(cx, C_nil());
@@ -2723,7 +2730,7 @@ fn drop_ty(@block_ctxt cx,
ValueRef v,
ty.t t) -> result {
- if (!ty.type_is_scalar(t)) {
+ if (!ty.type_is_scalar(cx.fcx.lcx.ccx.tystore, t)) {
call_tydesc_glue(cx, v, t, abi.tydesc_field_drop_glue);
}
ret res(cx, C_nil());
@@ -2753,7 +2760,7 @@ fn memcpy_ty(@block_ctxt cx,
ValueRef dst,
ValueRef src,
ty.t t) -> result {
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
auto llszptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
auto llsz = llszptr.bcx.build.Load(llszptr.val);
ret call_memcpy(llszptr.bcx, dst, src, llsz);
@@ -2773,21 +2780,22 @@ fn copy_ty(@block_ctxt cx,
ValueRef dst,
ValueRef src,
ty.t t) -> result {
- if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
+ if (ty.type_is_scalar(cx.fcx.lcx.ccx.tystore, t) ||
+ ty.type_is_native(cx.fcx.lcx.ccx.tystore, t)) {
ret res(cx, cx.build.Store(src, dst));
- } else if (ty.type_is_nil(t)) {
+ } else if (ty.type_is_nil(cx.fcx.lcx.ccx.tystore, t)) {
ret res(cx, C_nil());
- } else if (ty.type_is_boxed(t)) {
+ } else if (ty.type_is_boxed(cx.fcx.lcx.ccx.tystore, t)) {
auto r = take_ty(cx, src, t);
if (action == DROP_EXISTING) {
r = drop_ty(r.bcx, r.bcx.build.Load(dst), t);
}
ret res(r.bcx, r.bcx.build.Store(src, dst));
- } else if (ty.type_is_structural(t) ||
- ty.type_has_dynamic_size(t)) {
+ } else if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, t) ||
+ ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
auto r = take_ty(cx, src, t);
if (action == DROP_EXISTING) {
r = drop_ty(r.bcx, dst, t);
@@ -2796,7 +2804,7 @@ fn copy_ty(@block_ctxt cx,
}
cx.fcx.lcx.ccx.sess.bug("unexpected type in trans.copy_ty: " +
- ty.ty_to_str(t));
+ ty.ty_to_str(cx.fcx.lcx.ccx.tystore, t));
fail;
}
@@ -2853,7 +2861,7 @@ fn trans_lit(@crate_ctxt cx, &ast.lit lit, &ast.ann ann) -> ValueRef {
}
fn target_type(@crate_ctxt cx, ty.t t) -> ty.t {
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.tystore, t)) {
case (ty.ty_int) {
auto struct_ty = ty.mk_mach(cx.tystore,
cx.sess.get_targ_cfg().int_type);
@@ -2917,7 +2925,7 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
case (ast.neg) {
sub = autoderef(sub.bcx, sub.val,
ty.expr_ty(cx.fcx.lcx.ccx.tystore, e));
- if(ty.struct(e_ty) == ty.ty_float) {
+ if(ty.struct(cx.fcx.lcx.ccx.tystore, e_ty) == ty.ty_float) {
ret res(sub.bcx, sub.bcx.build.FNeg(sub.val));
}
else {
@@ -2944,7 +2952,7 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
// Cast the body type to the type of the value. This is needed to
// make tags work, since tags have a different LLVM type depending
// on whether they're boxed or not.
- if (!ty.type_has_dynamic_size(e_ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, e_ty)) {
auto llety = T_ptr(type_of(sub.bcx.fcx.lcx.ccx, e_ty));
body = sub.bcx.build.PointerCast(body, llety);
}
@@ -2974,7 +2982,7 @@ fn trans_compare(@block_ctxt cx0, ast.binop op, ty.t t0,
auto rhs = rhs_r.val;
cx = rhs_r.bcx;
- auto t = autoderefed_ty(t0);
+ auto t = autoderefed_ty(cx.fcx.lcx.ccx, t0);
// Determine the operation we need.
// FIXME: Use or-patterns when we have them.
@@ -3008,7 +3016,7 @@ fn trans_vec_append(@block_ctxt cx, ty.t t,
auto elt_ty = ty.sequence_element_type(cx.fcx.lcx.ccx.tystore, t);
auto skip_null = C_bool(false);
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_str) { skip_null = C_bool(true); }
case (_) { }
}
@@ -3048,7 +3056,7 @@ fn trans_eager_binop(@block_ctxt cx, ast.binop op, ty.t intype,
ValueRef lhs, ValueRef rhs) -> result {
auto is_float = false;
- alt (ty.struct(intype)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, intype)) {
case (ty.ty_float) {
is_float = true;
}
@@ -3059,7 +3067,7 @@ fn trans_eager_binop(@block_ctxt cx, ast.binop op, ty.t intype,
alt (op) {
case (ast.add) {
- if (ty.type_is_sequence(intype)) {
+ if (ty.type_is_sequence(cx.fcx.lcx.ccx.tystore, intype)) {
ret trans_vec_add(cx, intype, lhs, rhs);
}
if (is_float) {
@@ -3091,7 +3099,7 @@ fn trans_eager_binop(@block_ctxt cx, ast.binop op, ty.t intype,
if (is_float) {
ret res(cx, cx.build.FDiv(lhs, rhs));
}
- if (ty.type_is_signed(intype)) {
+ if (ty.type_is_signed(cx.fcx.lcx.ccx.tystore, intype)) {
ret res(cx, cx.build.SDiv(lhs, rhs));
} else {
ret res(cx, cx.build.UDiv(lhs, rhs));
@@ -3101,7 +3109,7 @@ fn trans_eager_binop(@block_ctxt cx, ast.binop op, ty.t intype,
if (is_float) {
ret res(cx, cx.build.FRem(lhs, rhs));
}
- if (ty.type_is_signed(intype)) {
+ if (ty.type_is_signed(cx.fcx.lcx.ccx.tystore, intype)) {
ret res(cx, cx.build.SRem(lhs, rhs));
} else {
ret res(cx, cx.build.URem(lhs, rhs));
@@ -3126,7 +3134,7 @@ fn autoderef(@block_ctxt cx, ValueRef v, ty.t t) -> result {
let ty.t t1 = t;
while (true) {
- alt (ty.struct(t1)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t1)) {
case (ty.ty_box(?mt)) {
auto body = cx.build.GEP(v1,
vec(C_int(0),
@@ -3137,7 +3145,8 @@ fn autoderef(@block_ctxt cx, ValueRef v, ty.t t) -> result {
// to cast this pointer, since statically-sized tag types have
// different types depending on whether they're behind a box
// or not.
- if (!ty.type_has_dynamic_size(mt.ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore,
+ mt.ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, mt.ty);
v1 = cx.build.PointerCast(body, T_ptr(llty));
} else {
@@ -3153,11 +3162,11 @@ fn autoderef(@block_ctxt cx, ValueRef v, ty.t t) -> result {
}
}
-fn autoderefed_ty(ty.t t) -> ty.t {
+fn autoderefed_ty(@crate_ctxt ccx, ty.t t) -> ty.t {
let ty.t t1 = t;
while (true) {
- alt (ty.struct(t1)) {
+ alt (ty.struct(ccx.tystore, t1)) {
case (ty.ty_box(?mt)) {
t1 = mt.ty;
}
@@ -3227,8 +3236,7 @@ fn trans_binary(@block_ctxt cx, ast.binop op,
auto rhty = ty.expr_ty(cx.fcx.lcx.ccx.tystore, b);
rhs = autoderef(rhs.bcx, rhs.val, rhty);
ret trans_eager_binop(rhs.bcx, op,
- autoderefed_ty(lhty),
- lhs.val, rhs.val);
+ autoderefed_ty(cx.fcx.lcx.ccx, lhty), lhs.val, rhs.val);
}
}
fail;
@@ -3303,11 +3311,11 @@ fn trans_if(@block_ctxt cx, @ast.expr cond,
// if expression can have a non-nil type.
// FIXME: This isn't quite right, particularly re: dynamic types
auto expr_ty = ty.expr_ty(cx.fcx.lcx.ccx.tystore, elexpr);
- if (ty.type_has_dynamic_size(expr_ty)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, expr_ty)) {
expr_llty = T_typaram_ptr(cx.fcx.lcx.ccx.tn);
} else {
expr_llty = type_of(else_res.bcx.fcx.lcx.ccx, expr_ty);
- if (ty.type_is_structural(expr_ty)) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, expr_ty)) {
expr_llty = T_ptr(expr_llty);
}
}
@@ -3832,11 +3840,11 @@ fn trans_alt(@block_ctxt cx, @ast.expr expr,
// FIXME: This isn't quite right, particularly re: dynamic types
auto expr_ty = ty.ann_to_type(ann);
auto expr_llty;
- if (ty.type_has_dynamic_size(expr_ty)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, expr_ty)) {
expr_llty = T_typaram_ptr(cx.fcx.lcx.ccx.tn);
} else {
expr_llty = type_of(cx.fcx.lcx.ccx, expr_ty);
- if (ty.type_is_structural(expr_ty)) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore, expr_ty)) {
expr_llty = T_ptr(expr_llty);
}
}
@@ -3993,7 +4001,7 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
auto v_tyt = ty.lookup_item_type(cx.fcx.lcx.ccx.sess,
cx.fcx.lcx.ccx.tystore,
cx.fcx.lcx.ccx.type_cache, vid);
- alt (ty.struct(v_tyt._1)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, v_tyt._1)) {
case (ty.ty_fn(_, _, _)) {
// N-ary variant.
ret lval_generic_fn(cx, v_tyt, vid, ann);
@@ -4009,7 +4017,8 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
auto lltagblob = alloc_result.val;
auto lltagty;
- if (ty.type_has_dynamic_size(tag_ty)) {
+ if (ty.type_has_dynamic_size(
+ cx.fcx.lcx.ccx.tystore, tag_ty)) {
lltagty = T_opaque_tag(cx.fcx.lcx.ccx.tn);
} else {
lltagty = type_of(cx.fcx.lcx.ccx, tag_ty);
@@ -4053,9 +4062,9 @@ fn trans_field(@block_ctxt cx, &ast.span sp, ValueRef v, ty.t t0,
&ast.ident field, &ast.ann ann) -> lval_result {
auto r = autoderef(cx, v, t0);
- auto t = autoderefed_ty(t0);
+ auto t = autoderefed_ty(cx.fcx.lcx.ccx, t0);
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_tup(_)) {
let uint ix = ty.field_num(cx.fcx.lcx.ccx.sess, sp, field);
auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
@@ -4134,7 +4143,7 @@ fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
auto elt;
- if (ty.type_has_dynamic_size(unit_ty)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, unit_ty)) {
body = next_cx.build.PointerCast(body, T_ptr(T_array(T_i8(), 0u)));
elt = next_cx.build.GEP(body, vec(C_int(0), scaled_ix));
} else {
@@ -4201,15 +4210,16 @@ fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
auto llsrctype = val_ty(e_res.val);
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto lldsttype = type_of(cx.fcx.lcx.ccx, t);
- if (!ty.type_is_fp(t)) {
+ if (!ty.type_is_fp(cx.fcx.lcx.ccx.tystore, t)) {
// TODO: native-to-native casts
- if (ty.type_is_native(ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
+ if (ty.type_is_native(cx.fcx.lcx.ccx.tystore,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
e_res.val = e_res.bcx.build.PtrToInt(e_res.val, lldsttype);
- } else if (ty.type_is_native(t)) {
+ } else if (ty.type_is_native(cx.fcx.lcx.ccx.tystore, t)) {
e_res.val = e_res.bcx.build.IntToPtr(e_res.val, lldsttype);
} else if (llvm.LLVMGetIntTypeWidth(lldsttype) >
llvm.LLVMGetIntTypeWidth(llsrctype)) {
- if (ty.type_is_signed(t)) {
+ if (ty.type_is_signed(cx.fcx.lcx.ccx.tystore, t)) {
// Widening signed cast.
e_res.val =
e_res.bcx.build.SExtOrBitCast(e_res.val,
@@ -4265,11 +4275,11 @@ fn trans_bind_thunk(@local_ctxt cx,
C_int(abi.fn_field_box)));
lltargetclosure = bcx.build.Load(lltargetclosure);
- auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
- auto outgoing_args = ty.ty_fn_args(outgoing_fty);
+ auto outgoing_ret_ty = ty.ty_fn_ret(cx.ccx.tystore, outgoing_fty);
+ auto outgoing_args = ty.ty_fn_args(cx.ccx.tystore, outgoing_fty);
auto llretptr = fcx.llretptr;
- if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
+ if (ty.type_has_dynamic_size(cx.ccx.tystore, outgoing_ret_ty)) {
llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.ccx.tn));
}
@@ -4318,7 +4328,8 @@ fn trans_bind_thunk(@local_ctxt cx,
if (out_arg.mode == ast.val) {
val = bcx.build.Load(val);
- } else if (ty.count_ty_params(out_arg.ty) > 0u) {
+ } else if (ty.count_ty_params(cx.ccx.tystore,
+ out_arg.ty) > 0u) {
check (out_arg.mode == ast.alias);
val = bcx.build.PointerCast(val, llout_arg_ty);
}
@@ -4331,7 +4342,7 @@ fn trans_bind_thunk(@local_ctxt cx,
case (none[@ast.expr]) {
let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);
- if (ty.count_ty_params(out_arg.ty) > 0u) {
+ if (ty.count_ty_params(cx.ccx.tystore, out_arg.ty) > 0u) {
check (out_arg.mode == ast.alias);
passed_arg = bcx.build.PointerCast(passed_arg,
llout_arg_ty);
@@ -4353,7 +4364,8 @@ fn trans_bind_thunk(@local_ctxt cx,
// Cast the outgoing function to the appropriate type (see the comments in
// trans_bind below for why this is necessary).
auto lltargetty = type_of_fn(bcx.fcx.lcx.ccx,
- ty.ty_fn_proto(outgoing_fty),
+ ty.ty_fn_proto(bcx.fcx.lcx.ccx.tystore,
+ outgoing_fty),
outgoing_args,
outgoing_ret_ty,
ty_param_count);
@@ -4476,10 +4488,10 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
// function has, which type_of() doesn't, as only we know which
// item the function refers to.
auto llfnty = type_of_fn(bcx.fcx.lcx.ccx,
- ty.ty_fn_proto(outgoing_fty),
- ty.ty_fn_args(outgoing_fty),
- ty.ty_fn_ret(outgoing_fty),
- ty_param_count);
+ ty.ty_fn_proto(bcx.fcx.lcx.ccx.tystore, outgoing_fty),
+ ty.ty_fn_args(bcx.fcx.lcx.ccx.tystore, outgoing_fty),
+ ty.ty_fn_ret(bcx.fcx.lcx.ccx.tystore, outgoing_fty),
+ ty_param_count);
auto llclosurety = T_ptr(T_fn_pair(bcx.fcx.lcx.ccx.tn, llfnty));
// Store thunk-target.
@@ -4574,14 +4586,14 @@ fn trans_args(@block_ctxt cx,
ty.t fn_ty)
-> tup(@block_ctxt, vec[ValueRef], ValueRef) {
- let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
+ let vec[ty.arg] args = ty.ty_fn_args(cx.fcx.lcx.ccx.tystore, fn_ty);
let vec[ValueRef] llargs = vec();
let vec[ValueRef] lltydescs = vec();
let @block_ctxt bcx = cx;
// Arg 0: Output pointer.
- auto retty = ty.ty_fn_ret(fn_ty);
+ auto retty = ty.ty_fn_ret(cx.fcx.lcx.ccx.tystore, fn_ty);
auto llretslot_res = alloc_ty(bcx, retty);
bcx = llretslot_res.bcx;
auto llretslot = llretslot_res.val;
@@ -4589,16 +4601,16 @@ fn trans_args(@block_ctxt cx,
alt (gen) {
case (some[generic_info](?g)) {
lltydescs = g.tydescs;
- args = ty.ty_fn_args(g.item_type);
- retty = ty.ty_fn_ret(g.item_type);
+ args = ty.ty_fn_args(cx.fcx.lcx.ccx.tystore, g.item_type);
+ retty = ty.ty_fn_ret(cx.fcx.lcx.ccx.tystore, g.item_type);
}
case (_) {
}
}
- if (ty.type_has_dynamic_size(retty)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, retty)) {
llargs += vec(bcx.build.PointerCast
(llretslot, T_typaram_ptr(cx.fcx.lcx.ccx.tn)));
- } else if (ty.count_ty_params(retty) != 0u) {
+ } else if (ty.count_ty_params(cx.fcx.lcx.ccx.tystore, retty) != 0u) {
// It's possible that the callee has some generic-ness somewhere in
// its return value -- say a method signature within an obj or a fn
// type deep in a structure -- which the caller has a concrete view
@@ -4651,7 +4663,8 @@ fn trans_args(@block_ctxt cx,
auto mode = args.(i).mode;
auto val;
- if (ty.type_is_structural(ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
auto re = trans_expr(bcx, e);
val = re.val;
bcx = re.bcx;
@@ -4661,7 +4674,8 @@ fn trans_args(@block_ctxt cx,
lv = trans_lval(bcx, e);
} else {
auto r = trans_expr(bcx, e);
- if (type_is_immediate(ty.expr_ty(cx.fcx.lcx.ccx.tystore,
+ if (type_is_immediate(cx.fcx.lcx.ccx,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore,
e))) {
lv = lval_val(r.bcx, r.val);
} else {
@@ -4684,12 +4698,12 @@ fn trans_args(@block_ctxt cx,
bcx = re.bcx;
}
- if (ty.count_ty_params(args.(i).ty) > 0u) {
+ if (ty.count_ty_params(cx.fcx.lcx.ccx.tystore, args.(i).ty) > 0u) {
auto lldestty = arg_tys.(i);
if (mode == ast.val) {
// FIXME: we'd prefer to use &&, but rustboot doesn't like it
- if (ty.type_is_structural(ty.expr_ty(cx.fcx.lcx.ccx.tystore,
- e))) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
lldestty = T_ptr(lldestty);
}
}
@@ -4699,8 +4713,8 @@ fn trans_args(@block_ctxt cx,
if (mode == ast.val) {
// FIXME: we'd prefer to use &&, but rustboot doesn't like it
- if (ty.type_is_structural(ty.expr_ty(cx.fcx.lcx.ccx.tystore,
- e))) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore, e))) {
// Until here we've been treating structures by pointer;
// we are now passing it as an arg, so need to load it.
val = bcx.build.Load(val);
@@ -4783,7 +4797,7 @@ fn trans_call(@block_ctxt cx, @ast.expr f,
bcx.build.FastCall(faddr, llargs);
auto retval = C_nil();
- if (!ty.type_is_nil(ret_ty)) {
+ if (!ty.type_is_nil(cx.fcx.lcx.ccx.tystore, ret_ty)) {
retval = load_if_immediate(bcx, llretslot, ret_ty);
// Retval doesn't correspond to anything really tangible in the frame,
// but it's a ref all the same, so we put a note here to drop it when
@@ -4823,7 +4837,7 @@ fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
&ast.ann ann) -> result {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty = t;
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_vec(?mt)) {
unit_ty = mt.ty;
}
@@ -4875,7 +4889,7 @@ fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
// (5) "src_res" is derived from "unit_ty", which is not behind a box.
auto dst_val;
- if (!ty.type_has_dynamic_size(unit_ty)) {
+ if (!ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, unit_ty)) {
auto llunit_ty = type_of(cx.fcx.lcx.ccx, unit_ty);
dst_val = bcx.build.PointerCast(dst_res.val, T_ptr(llunit_ty));
} else {
@@ -4917,7 +4931,7 @@ fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
}
let vec[ty.field] ty_fields = vec();
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_rec(?flds)) { ty_fields = flds; }
}
@@ -5120,8 +5134,10 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
// pointer (or need one), perform load/store operations based on the
// immediate-ness of the type.
-fn type_is_immediate(ty.t t) -> bool {
- ret ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t);
+fn type_is_immediate(@crate_ctxt ccx, ty.t t) -> bool {
+ ret ty.type_is_scalar(ccx.tystore, t) ||
+ ty.type_is_boxed(ccx.tystore, t) ||
+ ty.type_is_native(ccx.tystore, t);
}
fn do_spill(@block_ctxt cx, ValueRef v) -> ValueRef {
@@ -5132,14 +5148,14 @@ fn do_spill(@block_ctxt cx, ValueRef v) -> ValueRef {
}
fn spill_if_immediate(@block_ctxt cx, ValueRef v, ty.t t) -> ValueRef {
- if (type_is_immediate(t)) {
+ if (type_is_immediate(cx.fcx.lcx.ccx, t)) {
ret do_spill(cx, v);
}
ret v;
}
fn load_if_immediate(@block_ctxt cx, ValueRef v, ty.t t) -> ValueRef {
- if (type_is_immediate(t)) {
+ if (type_is_immediate(cx.fcx.lcx.ccx, t)) {
ret cx.build.Load(v);
}
ret v;
@@ -5170,10 +5186,10 @@ fn trans_log(int lvl, @block_ctxt cx, @ast.expr e) -> result {
auto sub = trans_expr(log_cx, e);
auto e_ty = ty.expr_ty(cx.fcx.lcx.ccx.tystore, e);
- if (ty.type_is_fp(e_ty)) {
+ if (ty.type_is_fp(cx.fcx.lcx.ccx.tystore, e_ty)) {
let TypeRef tr;
let bool is32bit = false;
- alt (ty.struct(e_ty)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, e_ty)) {
case (ty.ty_machine(util.common.ty_f32)) {
tr = T_f32();
is32bit = true;
@@ -5199,7 +5215,7 @@ fn trans_log(int lvl, @block_ctxt cx, @ast.expr e) -> result {
uval.bcx.build.Br(after_cx.llbb);
}
} else {
- alt (ty.struct(e_ty)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, e_ty)) {
case (ty.ty_str) {
auto v = vp2i(sub.bcx, sub.val);
trans_upcall(sub.bcx,
@@ -5284,8 +5300,8 @@ fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
auto llarg = r.val;
bcx = r.bcx;
- if (ty.type_is_structural(ty.expr_ty(cx.fcx.lcx.ccx.tystore,
- x))) {
+ if (ty.type_is_structural(cx.fcx.lcx.ccx.tystore,
+ ty.expr_ty(cx.fcx.lcx.ccx.tystore, x))) {
// Until here we've been treating structures by pointer; we
// are now passing it as an arg, so need to load it.
llarg = bcx.build.Load(llarg);
@@ -5385,7 +5401,7 @@ fn trans_port(@block_ctxt cx, ast.ann ann) -> result {
auto t = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty;
- alt (ty.struct(t)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, t)) {
case (ty.ty_port(?t)) {
unit_ty = t;
}
@@ -5440,7 +5456,7 @@ fn trans_send(@block_ctxt cx, @ast.expr lhs, @ast.expr rhs,
auto chan_ty = node_ann_type(cx.fcx.lcx.ccx, ann);
auto unit_ty;
- alt (ty.struct(chan_ty)) {
+ alt (ty.struct(cx.fcx.lcx.ccx.tystore, chan_ty)) {
case (ty.ty_chan(?t)) {
unit_ty = t;
}
@@ -5532,7 +5548,7 @@ fn init_local(@block_ctxt cx, @ast.local local) -> result {
fn zero_alloca(@block_ctxt cx, ValueRef llptr, ty.t t) -> result {
auto bcx = cx;
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
auto llsz = size_of(bcx, t);
bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;
} else {
@@ -5665,7 +5681,7 @@ fn llallocas_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
fn alloc_ty(@block_ctxt cx, ty.t t) -> result {
auto val = C_int(0);
- if (ty.type_has_dynamic_size(t)) {
+ if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tystore, t)) {
// NB: we have to run this particular 'size_of' in a
// block_ctxt built on the llallocas block for the fn,
@@ -5721,7 +5737,7 @@ fn trans_block(@block_ctxt cx, &ast.block b) -> result {
ret r;
} else {
auto r_ty = ty.expr_ty(cx.fcx.lcx.ccx.tystore, e);
- if (!ty.type_is_nil(r_ty)) {
+ if (!ty.type_is_nil(cx.fcx.lcx.ccx.tystore, r_ty)) {
// The value resulting from the block gets copied into an
// alloca created in an outer scope and its refcount
// bumped so that it can escape this block. This means
@@ -5917,8 +5933,8 @@ fn is_terminated(@block_ctxt cx) -> bool {
ret llvm.LLVMIsATerminatorInst(inst) as int != 0;
}
-fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
- alt (ty.struct(ty.ann_to_type(ann))) {
+fn arg_tys_of_fn(@crate_ctxt ccx, ast.ann ann) -> vec[ty.arg] {
+ alt (ty.struct(ccx.tystore, ty.ann_to_type(ann))) {
case (ty.ty_fn(_, ?arg_tys, _)) {
ret arg_tys;
}
@@ -5926,8 +5942,8 @@ fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
fail;
}
-fn ret_ty_of_fn_ty(ty.t t) -> ty.t {
- alt (ty.struct(t)) {
+fn ret_ty_of_fn_ty(@crate_ctxt ccx, ty.t t) -> ty.t {
+ alt (ty.struct(ccx.tystore, t)) {
case (ty.ty_fn(_, _, ?ret_ty)) {
ret ret_ty;
}
@@ -5936,8 +5952,8 @@ fn ret_ty_of_fn_ty(ty.t t) -> ty.t {
}
-fn ret_ty_of_fn(ast.ann ann) -> ty.t {
- ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
+fn ret_ty_of_fn(@crate_ctxt ccx, ast.ann ann) -> ty.t {
+ ret ret_ty_of_fn_ty(ccx, ty.ann_to_type(ann));
}
fn populate_fn_ctxt_from_llself(@fn_ctxt fcx, self_vt llself) {
@@ -5978,7 +5994,7 @@ fn populate_fn_ctxt_from_llself(@fn_ctxt fcx, self_vt llself) {
// If we can (i.e. the type is statically sized), then cast the resulting
// fields pointer to the appropriate LLVM type. If not, just leave it as
// i8 *.
- if (!ty.type_has_dynamic_size(fields_tup_ty)) {
+ if (!ty.type_has_dynamic_size(fcx.lcx.ccx.tystore, fields_tup_ty)) {
auto llfields_ty = type_of(fcx.lcx.ccx, fields_tup_ty);
obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
} else {
@@ -6017,7 +6033,7 @@ fn trans_fn(@local_ctxt cx, &ast._fn f, ast.def_id fid,
auto fcx = new_fn_ctxt(cx, llfndecl);
create_llargs_for_fn_args(fcx, f.proto,
- ty_self, ret_ty_of_fn(ann),
+ ty_self, ret_ty_of_fn(cx.ccx, ann),
f.decl.inputs, ty_params);
copy_any_self_to_alloca(fcx, ty_self);
@@ -6030,7 +6046,7 @@ fn trans_fn(@local_ctxt cx, &ast._fn f, ast.def_id fid,
}
}
- copy_args_to_allocas(fcx, f.decl.inputs, arg_tys_of_fn(ann));
+ copy_args_to_allocas(fcx, f.decl.inputs, arg_tys_of_fn(fcx.lcx.ccx, ann));
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
@@ -6063,7 +6079,7 @@ fn trans_vtbl(@local_ctxt cx,
for (@ast.method m in meths) {
auto llfnty = T_nil();
- alt (ty.struct(node_ann_type(cx.ccx, m.node.ann))) {
+ alt (ty.struct(cx.ccx.tystore, node_ann_type(cx.ccx, m.node.ann))) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
llfnty = type_of_fn_full(cx.ccx, proto,
some[TypeRef](llself_ty),
@@ -6102,7 +6118,7 @@ fn trans_dtor(@local_ctxt cx,
&@ast.method dtor) -> ValueRef {
auto llfnty = T_nil();
- alt (ty.struct(node_ann_type(cx.ccx, dtor.node.ann))) {
+ alt (ty.struct(cx.ccx.tystore, node_ann_type(cx.ccx, dtor.node.ann))) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
llfnty = type_of_fn_full(cx.ccx, proto,
some[TypeRef](llself_ty),
@@ -6141,16 +6157,16 @@ fn trans_obj(@local_ctxt cx, &ast._obj ob, ast.def_id oid,
auto fcx = new_fn_ctxt(cx, llctor_decl);
create_llargs_for_fn_args(fcx, ast.proto_fn,
none[tup(TypeRef, ty.t)],
- ret_ty_of_fn(ann),
+ ret_ty_of_fn(cx.ccx, ann),
fn_args, ty_params);
- let vec[ty.arg] arg_tys = arg_tys_of_fn(ann);
+ let vec[ty.arg] arg_tys = arg_tys_of_fn(cx.ccx, ann);
copy_args_to_allocas(fcx, fn_args, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
- auto self_ty = ret_ty_of_fn(ann);
+ auto self_ty = ret_ty_of_fn(cx.ccx, ann);
auto llself_ty = type_of(ccx, self_ty);
auto pair = bcx.fcx.llretptr;
auto vtbl = trans_vtbl(cx, llself_ty, self_ty, ob, ty_params);
@@ -6284,7 +6300,7 @@ fn trans_tag_variant(@local_ctxt cx, ast.def_id tag_id,
create_llargs_for_fn_args(fcx, ast.proto_fn,
none[tup(TypeRef, ty.t)],
- ret_ty_of_fn(variant.node.ann),
+ ret_ty_of_fn(cx.ccx, variant.node.ann),
fn_args, ty_params);
let vec[ty.t] ty_param_substs = vec();
@@ -6294,7 +6310,7 @@ fn trans_tag_variant(@local_ctxt cx, ast.def_id tag_id,
i += 1u;
}
- auto arg_tys = arg_tys_of_fn(variant.node.ann);
+ auto arg_tys = arg_tys_of_fn(cx.ccx, variant.node.ann);
copy_args_to_allocas(fcx, fn_args, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
@@ -6326,8 +6342,8 @@ fn trans_tag_variant(@local_ctxt cx, ast.def_id tag_id,
auto arg_ty = arg_tys.(i).ty;
auto llargval;
- if (ty.type_is_structural(arg_ty) ||
- ty.type_has_dynamic_size(arg_ty)) {
+ if (ty.type_is_structural(cx.ccx.tystore, arg_ty) ||
+ ty.type_has_dynamic_size(cx.ccx.tystore, arg_ty)) {
llargval = llargptr;
} else {
llargval = bcx.build.Load(llargptr);
@@ -6426,7 +6442,7 @@ fn decl_fn_and_pair(@crate_ctxt ccx,
auto llfty;
auto llpairty;
- alt (ty.struct(node_ann_type(ccx, ann))) {
+ alt (ty.struct(ccx.tystore, node_ann_type(ccx, ann))) {
case (ty.ty_fn(?proto, ?inputs, ?output)) {
llfty = type_of_fn(ccx, proto, inputs, output,
_vec.len[ast.ty_param](ty_params));
@@ -6485,7 +6501,7 @@ fn native_fn_ty_param_count(@crate_ctxt cx, &ast.def_id id) -> uint {
fn native_fn_wrapper_type(@crate_ctxt cx, uint ty_param_count, ty.t x)
-> TypeRef {
- alt (ty.struct(x)) {
+ alt (ty.struct(cx.tystore, x)) {
case (ty.ty_native_fn(?abi, ?args, ?out)) {
ret type_of_fn(cx, ast.proto_fn, args, out, ty_param_count);
}
@@ -6522,9 +6538,10 @@ fn decl_native_fn_and_pair(@crate_ctxt ccx,
auto item = ccx.native_items.get(id);
auto fn_type = node_ann_type(ccx, ann); // NB: has no type params
- auto abi = ty.ty_fn_abi(fn_type);
- auto llfnty = type_of_native_fn(ccx, abi, ty.ty_fn_args(fn_type),
- ty.ty_fn_ret(fn_type), num_ty_param);
+ auto abi = ty.ty_fn_abi(ccx.tystore, fn_type);
+ auto llfnty = type_of_native_fn(ccx, abi,
+ ty.ty_fn_args(ccx.tystore, fn_type),
+ ty.ty_fn_ret(ccx.tystore, fn_type), num_ty_param);
let vec[ValueRef] call_args = vec();
auto arg_n = 3u;
@@ -6555,7 +6572,7 @@ fn decl_native_fn_and_pair(@crate_ctxt ccx,
&mutable vec[ValueRef] args,
ValueRef v,
ty.t t) {
- if (ty.type_is_integral(t)) {
+ if (ty.type_is_integral(cx.fcx.lcx.ccx.tystore, t)) {
auto lldsttype = T_int();
auto llsrctype = type_of(cx.fcx.lcx.ccx, t);
if (llvm.LLVMGetIntTypeWidth(lldsttype) >
@@ -6564,7 +6581,7 @@ fn decl_native_fn_and_pair(@crate_ctxt ccx,
} else {
args += vec(cx.build.TruncOrBitCast(v, T_int()));
}
- } else if (ty.type_is_fp(t)) {
+ } else if (ty.type_is_fp(cx.fcx.lcx.ccx.tystore, t)) {
args += vec(cx.build.FPToSI(v, T_int()));
} else {
args += vec(vp2i(cx, v));
@@ -6573,7 +6590,7 @@ fn decl_native_fn_and_pair(@crate_ctxt ccx,
auto r;
auto rptr;
- auto args = ty.ty_fn_args(fn_type);
+ auto args = ty.ty_fn_args(ccx.tystore, fn_type);
if (abi == ast.native_abi_llvm) {
let vec[ValueRef] call_args = vec();
let vec[TypeRef] call_arg_tys = vec();
@@ -6585,7 +6602,8 @@ fn decl_native_fn_and_pair(@crate_ctxt ccx,
i += 1u;
}
auto llnativefnty = T_fn(call_arg_tys,
- type_of(ccx, ty.ty_fn_ret(fn_type)));
+ type_of(ccx,
+ ty.ty_fn_ret(ccx.tystore, fn_type)));
auto llnativefn = get_extern_fn(ccx.externs, ccx.llmod, name,
lib.llvm.LLVMCCallConv, llnativefnty);
r = bcx.build.Call(llnativefn, call_args);
diff --git a/src/comp/middle/ty.rs b/src/comp/middle/ty.rs
index 29b2aeee..86a0db1f 100644
--- a/src/comp/middle/ty.rs
+++ b/src/comp/middle/ty.rs
@@ -215,10 +215,10 @@ fn mk_native(@type_store ts) -> t { ret gen_ty(ts, ty_native); }
// Returns the one-level-deep type structure of the given type.
-fn struct(t typ) -> sty { ret typ.struct; }
+fn struct(@type_store tystore, t typ) -> sty { ret typ.struct; }
// Returns the canonical name of the given type.
-fn cname(t typ) -> option.t[str] { ret typ.cname; }
+fn cname(@type_store tystore, t typ) -> option.t[str] { ret typ.cname; }
// Stringification
@@ -234,9 +234,10 @@ fn path_to_str(&ast.path pth) -> str {
ret result;
}
-fn ty_to_str(&t typ) -> str {
+fn ty_to_str(@type_store ts, &t typ) -> str {
- fn fn_input_to_str(&rec(ast.mode mode, t ty) input) -> str {
+ fn fn_input_to_str(@type_store tystore,
+ &rec(ast.mode mode, t ty) input) -> str {
auto s;
if (mode_is_alias(input.mode)) {
s = "&";
@@ -244,13 +245,14 @@ fn ty_to_str(&t typ) -> str {
s = "";
}
- ret s + ty_to_str(input.ty);
+ ret s + ty_to_str(tystore, input.ty);
}
- fn fn_to_str(ast.proto proto,
+ fn fn_to_str(@type_store tystore,
+ ast.proto proto,
option.t[ast.ident] ident,
vec[arg] inputs, t output) -> str {
- auto f = fn_input_to_str;
+ auto f = bind fn_input_to_str(tystore, _);
auto s;
alt (proto) {
@@ -274,22 +276,22 @@ fn ty_to_str(&t typ) -> str {
s += _str.connect(_vec.map[arg,str](f, inputs), ", ");
s += ")";
- if (struct(output) != ty_nil) {
- s += " -> " + ty_to_str(output);
+ if (struct(tystore, output) != ty_nil) {
+ s += " -> " + ty_to_str(tystore, output);
}
ret s;
}
- fn method_to_str(&method m) -> str {
- ret fn_to_str(m.proto, some[ast.ident](m.ident),
+ fn method_to_str(@type_store tystore, &method m) -> str {
+ ret fn_to_str(tystore, m.proto, some[ast.ident](m.ident),
m.inputs, m.output) + ";";
}
- fn field_to_str(&field f) -> str {
- ret mt_to_str(f.mt) + " " + f.ident;
+ fn field_to_str(@type_store tystore, &field f) -> str {
+ ret mt_to_str(tystore, f.mt) + " " + f.ident;
}
- fn mt_to_str(&mt m) -> str {
+ fn mt_to_str(@type_store tystore, &mt m) -> str {
auto mstr;
alt (m.mut) {
case (ast.mut) { mstr = "mutable "; }
@@ -297,34 +299,34 @@ fn ty_to_str(&t typ) -> str {
case (ast.maybe_mut) { mstr = "mutable? "; }
}
- ret mstr + ty_to_str(m.ty);
+ ret mstr + ty_to_str(tystore, m.ty);
}
auto s = "";
- alt (struct(typ)) {
- case (ty_native) { s += "native"; }
- case (ty_nil) { s += "()"; }
- case (ty_bool) { s += "bool"; }
- case (ty_int) { s += "int"; }
- case (ty_float) { s += "float"; }
- case (ty_uint) { s += "uint"; }
- case (ty_machine(?tm)) { s += common.ty_mach_to_str(tm); }
- case (ty_char) { s += "char"; }
- case (ty_str) { s += "str"; }
- case (ty_box(?tm)) { s += "@" + mt_to_str(tm); }
- case (ty_vec(?tm)) { s += "vec[" + mt_to_str(tm) + "]"; }
- case (ty_port(?t)) { s += "port[" + ty_to_str(t) + "]"; }
- case (ty_chan(?t)) { s += "chan[" + ty_to_str(t) + "]"; }
- case (ty_type) { s += "type"; }
+ alt (struct(ts, typ)) {
+ case (ty_native) { s += "native"; }
+ case (ty_nil) { s += "()"; }
+ case (ty_bool) { s += "bool"; }
+ case (ty_int) { s += "int"; }
+ case (ty_float) { s += "float"; }
+ case (ty_uint) { s += "uint"; }
+ case (ty_machine(?tm)) { s += common.ty_mach_to_str(tm); }
+ case (ty_char) { s += "char"; }
+ case (ty_str) { s += "str"; }
+ case (ty_box(?tm)) { s += "@" + mt_to_str(ts, tm); }
+ case (ty_vec(?tm)) { s += "vec[" + mt_to_str(ts, tm) + "]"; }
+ case (ty_port(?t)) { s += "port[" + ty_to_str(ts, t) + "]"; }
+ case (ty_chan(?t)) { s += "chan[" + ty_to_str(ts, t) + "]"; }
+ case (ty_type) { s += "type"; }
case (ty_tup(?elems)) {
- auto f = mt_to_str;
+ auto f = bind mt_to_str(ts, _);
auto strs = _vec.map[mt,str](f, elems);
s += "tup(" + _str.connect(strs, ",") + ")";
}
case (ty_rec(?elems)) {
- auto f = field_to_str;
+ auto f = bind field_to_str(ts, _);
auto strs = _vec.map[field,str](f, elems);
s += "rec(" + _str.connect(strs, ",") + ")";
}
@@ -334,27 +336,27 @@ fn ty_to_str(&t typ) -> str {
s += "<tag#" + util.common.istr(id._0) + ":" +
util.common.istr(id._1) + ">";
if (_vec.len[t](tps) > 0u) {
- auto f = ty_to_str;
+ auto f = bind ty_to_str(ts, _);
auto strs = _vec.map[t,str](f, tps);
s += "[" + _str.connect(strs, ",") + "]";
}
}
case (ty_fn(?proto, ?inputs, ?output)) {
- s += fn_to_str(proto, none[ast.ident], inputs, output);
+ s += fn_to_str(ts, proto, none[ast.ident], inputs, output);
}
case (ty_native_fn(_, ?inputs, ?output)) {
- s += fn_to_str(ast.proto_fn, none[ast.ident], inputs, output);
+ s += fn_to_str(ts, ast.proto_fn, none[ast.ident], inputs, output);
}
case (ty_obj(?meths)) {
- alt (cname(typ)) {
+ alt (cname(ts, typ)) {
case (some[str](?cs)) {
s += cs;
}
case (_) {
- auto f = method_to_str;
+ auto f = bind method_to_str(ts, _);
auto m = _vec.map[method,str](f, meths);
s += "obj {\n\t" + _str.connect(m, "\n\t") + "\n}";
}
@@ -386,8 +388,8 @@ fn ty_to_str(&t typ) -> str {
type ty_walk = fn(t);
-fn walk_ty(ty_walk walker, t ty) {
- alt (struct(ty)) {
+fn walk_ty(@type_store tystore, ty_walk walker, t ty) {
+ alt (struct(tystore, ty)) {
case (ty_nil) { /* no-op */ }
case (ty_bool) { /* no-op */ }
case (ty_int) { /* no-op */ }
@@ -398,44 +400,44 @@ fn walk_ty(ty_walk walker, t ty) {
case (ty_str) { /* no-op */ }
case (ty_type) { /* no-op */ }
case (ty_native) { /* no-op */ }
- case (ty_box(?tm)) { walk_ty(walker, tm.ty); }
- case (ty_vec(?tm)) { walk_ty(walker, tm.ty); }
- case (ty_port(?subty)) { walk_ty(walker, subty); }
- case (ty_chan(?subty)) { walk_ty(walker, subty); }
+ case (ty_box(?tm)) { walk_ty(tystore, walker, tm.ty); }
+ case (ty_vec(?tm)) { walk_ty(tystore, walker, tm.ty); }
+ case (ty_port(?subty)) { walk_ty(tystore, walker, subty); }
+ case (ty_chan(?subty)) { walk_ty(tystore, walker, subty); }
case (ty_tag(?tid, ?subtys)) {
for (t subty in subtys) {
- walk_ty(walker, subty);
+ walk_ty(tystore, walker, subty);
}
}
case (ty_tup(?mts)) {
for (mt tm in mts) {
- walk_ty(walker, tm.ty);
+ walk_ty(tystore, walker, tm.ty);
}
}
case (ty_rec(?fields)) {
for (field fl in fields) {
- walk_ty(walker, fl.mt.ty);
+ walk_ty(tystore, walker, fl.mt.ty);
}
}
case (ty_fn(?proto, ?args, ?ret_ty)) {
for (arg a in args) {
- walk_ty(walker, a.ty);
+ walk_ty(tystore, walker, a.ty);
}
- walk_ty(walker, ret_ty);
+ walk_ty(tystore, walker, ret_ty);
}
case (ty_native_fn(?abi, ?args, ?ret_ty)) {
for (arg a in args) {
- walk_ty(walker, a.ty);
+ walk_ty(tystore, walker, a.ty);
}
- walk_ty(walker, ret_ty);
+ walk_ty(tystore, walker, ret_ty);
}
case (ty_obj(?methods)) {
let vec[method] new_methods = vec();
for (method m in methods) {
for (arg a in m.inputs) {
- walk_ty(walker, a.ty);
+ walk_ty(tystore, walker, a.ty);
}
- walk_ty(walker, m.output);
+ walk_ty(tystore, walker, m.output);
}
}
case (ty_var(_)) { /* no-op */ }
@@ -451,7 +453,7 @@ type ty_fold = fn(t) -> t;
fn fold_ty(@type_store tystore, ty_fold fld, t ty_0) -> t {
auto ty = ty_0;
- alt (struct(ty)) {
+ alt (struct(tystore, ty)) {
case (ty_nil) { /* no-op */ }
case (ty_bool) { /* no-op */ }
case (ty_int) { /* no-op */ }
@@ -552,13 +554,13 @@ fn fold_ty(@type_store tystore, ty_fold fld, t ty_0) -> t {
// Type utilities
fn rename(@type_store tystore, t typ, str new_cname) -> t {
- ret gen_ty_full(tystore, struct(typ), some[str](new_cname));
+ ret gen_ty_full(tystore, struct(tystore, typ), some[str](new_cname));
}
// Returns a type with the structural part taken from `struct_ty` and the
// canonical name from `cname_ty`.
fn copy_cname(@type_store tystore, t struct_ty, t cname_ty) -> t {
- ret gen_ty_full(tystore, struct(struct_ty), cname_ty.cname);
+ ret gen_ty_full(tystore, struct(tystore, struct_ty), cname_ty.cname);
}
// FIXME: remove me when == works on these tags.
@@ -570,24 +572,24 @@ fn mode_is_alias(ast.mode m) -> bool {
fail;
}
-fn type_is_nil(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_nil(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_nil) { ret true; }
case (_) { ret false; }
}
fail;
}
-fn type_is_bool(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_bool(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_bool) { ret true; }
case (_) { ret false; }
}
}
-fn type_is_structural(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_structural(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_tup(_)) { ret true; }
case (ty_rec(_)) { ret true; }
case (ty_tag(_,_)) { ret true; }
@@ -598,8 +600,8 @@ fn type_is_structural(t ty) -> bool {
fail;
}
-fn type_is_sequence(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_sequence(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_str) { ret true; }
case (ty_vec(_)) { ret true; }
case (_) { ret false; }
@@ -608,7 +610,7 @@ fn type_is_sequence(t ty) -> bool {
}
fn sequence_element_type(@type_store tystore, t ty) -> t {
- alt (struct(ty)) {
+ alt (struct(tystore, ty)) {
case (ty_str) { ret mk_mach(tystore, common.ty_u8); }
case (ty_vec(?mt)) { ret mt.ty; }
}
@@ -616,8 +618,8 @@ fn sequence_element_type(@type_store tystore, t ty) -> t {
}
-fn type_is_tup_like(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_tup_like(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_box(_)) { ret true; }
case (ty_tup(_)) { ret true; }
case (ty_rec(_)) { ret true; }
@@ -627,9 +629,9 @@ fn type_is_tup_like(t ty) -> bool {
fail;
}
-fn get_element_type(t ty, uint i) -> t {
- check (type_is_tup_like(ty));
- alt (struct(ty)) {
+fn get_element_type(@type_store tystore, t ty, uint i) -> t {
+ check (type_is_tup_like(tystore, ty));
+ alt (struct(tystore, ty)) {
case (ty_tup(?mts)) {
ret mts.(i).ty;
}
@@ -640,16 +642,16 @@ fn get_element_type(t ty, uint i) -> t {
fail;
}
-fn type_is_box(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_box(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_box(_)) { ret true; }
case (_) { ret false; }
}
fail;
}
-fn type_is_boxed(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_boxed(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_str) { ret true; }
case (ty_vec(_)) { ret true; }
case (ty_box(_)) { ret true; }
@@ -660,8 +662,8 @@ fn type_is_boxed(t ty) -> bool {
fail;
}
-fn type_is_scalar(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_scalar(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_nil) { ret true; }
case (ty_bool) { ret true; }
case (ty_int) { ret true; }
@@ -678,34 +680,36 @@ fn type_is_scalar(t ty) -> bool {
// FIXME: should we just return true for native types in
// type_is_scalar?
-fn type_is_native(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_native(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_native) { ret true; }
case (_) { ret false; }
}
fail;
}
-fn type_has_dynamic_size(t ty) -> bool {
- alt (struct(ty)) {
+fn type_has_dynamic_size(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_tup(?mts)) {
auto i = 0u;
while (i < _vec.len[mt](mts)) {
- if (type_has_dynamic_size(mts.(i).ty)) { ret true; }
+ if (type_has_dynamic_size(tystore, mts.(i).ty)) { ret true; }
i += 1u;
}
}
case (ty_rec(?fields)) {
auto i = 0u;
while (i < _vec.len[field](fields)) {
- if (type_has_dynamic_size(fields.(i).mt.ty)) { ret true; }
+ if (type_has_dynamic_size(tystore, fields.(i).mt.ty)) {
+ ret true;
+ }
i += 1u;
}
}
case (ty_tag(_, ?subtys)) {
auto i = 0u;
while (i < _vec.len[t](subtys)) {
- if (type_has_dynamic_size(subtys.(i))) { ret true; }
+ if (type_has_dynamic_size(tystore, subtys.(i))) { ret true; }
i += 1u;
}
}
@@ -715,8 +719,8 @@ fn type_has_dynamic_size(t ty) -> bool {
ret false;
}
-fn type_is_integral(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_integral(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_int) { ret true; }
case (ty_uint) { ret true; }
case (ty_machine(?m)) {
@@ -739,8 +743,8 @@ fn type_is_integral(t ty) -> bool {
fail;
}
-fn type_is_fp(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_fp(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_machine(?tm)) {
alt (tm) {
case (common.ty_f32) { ret true; }
@@ -756,8 +760,8 @@ fn type_is_fp(t ty) -> bool {
fail;
}
-fn type_is_signed(t ty) -> bool {
- alt (struct(ty)) {
+fn type_is_signed(@type_store tystore, t ty) -> bool {
+ alt (struct(tystore, ty)) {
case (ty_int) { ret true; }
case (ty_machine(?tm)) {
alt (tm) {
@@ -773,8 +777,8 @@ fn type_is_signed(t ty) -> bool {
fail;
}
-fn type_param(t ty) -> option.t[uint] {
- alt (struct(ty)) {
+fn type_param(@type_store tystore, t ty) -> option.t[uint] {
+ alt (struct(tystore, ty)) {
case (ty_param(?id)) { ret some[uint](id); }
case (_) { /* fall through */ }
}
@@ -1217,15 +1221,15 @@ fn eq_ty_full(&t a, &t b) -> bool {
}
// Check canonical names.
- alt (cname(a)) {
+ alt (a.cname) {
case (none[str]) {
- alt (cname(b)) {
+ alt (b.cname) {
case (none[str]) { /* ok */ }
case (_) { ret false; }
}
}
case (some[str](?s_a)) {
- alt (cname(b)) {
+ alt (b.cname) {
case (some[str](?s_b)) {
if (!_str.eq(s_a, s_b)) { ret false; }
}
@@ -1235,7 +1239,7 @@ fn eq_ty_full(&t a, &t b) -> bool {
}
// Check structures.
- ret equal_type_structures(struct(a), struct(b));
+ ret equal_type_structures(a.struct, b.struct);
}
// This is the equality function the public should use. It works as long as
@@ -1300,9 +1304,9 @@ fn triv_ann(t typ) -> ast.ann {
}
// Returns the number of distinct type parameters in the given type.
-fn count_ty_params(t ty) -> uint {
- fn counter(@mutable vec[uint] param_indices, t ty) {
- alt (struct(ty)) {
+fn count_ty_params(@type_store tystore, t ty) -> uint {
+ fn counter(@type_store tystore, @mutable vec[uint] param_indices, t ty) {
+ alt (struct(tystore, ty)) {
case (ty_param(?param_idx)) {
auto seen = false;
for (uint other_param_idx in *param_indices) {
@@ -1320,59 +1324,59 @@ fn count_ty_params(t ty) -> uint {
let vec[uint] v = vec(); // FIXME: typechecker botch
let @mutable vec[uint] param_indices = @mutable v;
- auto f = bind counter(param_indices, _);
- walk_ty(f, ty);
+ auto f = bind counter(tystore, param_indices, _);
+ walk_ty(tystore, f, ty);
ret _vec.len[uint](*param_indices);
}
-fn type_contains_vars(t typ) -> bool {
- fn checker(@mutable bool flag, t typ) {
- alt (struct(typ)) {
+fn type_contains_vars(@type_store tystore, t typ) -> bool {
+ fn checker(@type_store tystore, @mutable bool flag, t typ) {
+ alt (struct(tystore, typ)) {
case (ty_var(_)) { *flag = true; }
case (_) { /* fall through */ }
}
}
let @mutable bool flag = @mutable false;
- auto f = bind checker(flag, _);
- walk_ty(f, typ);
+ auto f = bind checker(tystore, flag, _);
+ walk_ty(tystore, f, typ);
ret *flag;
}
// Type accessors for substructures of types
-fn ty_fn_args(t fty) -> vec[arg] {
- alt (struct(fty)) {
+fn ty_fn_args(@type_store tystore, t fty) -> vec[arg] {
+ alt (struct(tystore, fty)) {
case (ty.ty_fn(_, ?a, _)) { ret a; }
case (ty.ty_native_fn(_, ?a, _)) { ret a; }
}
fail;
}
-fn ty_fn_proto(t fty) -> ast.proto {
- alt (struct(fty)) {
+fn ty_fn_proto(@type_store tystore, t fty) -> ast.proto {
+ alt (struct(tystore, fty)) {
case (ty.ty_fn(?p, _, _)) { ret p; }
}
fail;
}
-fn ty_fn_abi(t fty) -> ast.native_abi {
- alt (struct(fty)) {
+fn ty_fn_abi(@type_store tystore, t fty) -> ast.native_abi {
+ alt (struct(tystore, fty)) {
case (ty.ty_native_fn(?a, _, _)) { ret a; }
}
fail;
}
-fn ty_fn_ret(t fty) -> t {
- alt (struct(fty)) {
+fn ty_fn_ret(@type_store tystore, t fty) -> t {
+ alt (struct(tystore, fty)) {
case (ty.ty_fn(_, _, ?r)) { ret r; }
case (ty.ty_native_fn(_, _, ?r)) { ret r; }
}
fail;
}
-fn is_fn_ty(t fty) -> bool {
- alt (struct(fty)) {
+fn is_fn_ty(@type_store tystore, t fty) -> bool {
+ alt (struct(tystore, fty)) {
case (ty.ty_fn(_, _, _)) { ret true; }
case (ty.ty_native_fn(_, _, _)) { ret true; }
case (_) { ret false; }
@@ -1684,8 +1688,8 @@ mod Unify {
// something we'll probably need to develop over time.
// Simple structural type comparison.
- fn struct_cmp(t expected, t actual) -> result {
- if (struct(expected) == struct(actual)) {
+ fn struct_cmp(@ctxt cx, t expected, t actual) -> result {
+ if (struct(cx.tystore, expected) == struct(cx.tystore, actual)) {
ret ures_ok(expected);
}
@@ -1850,7 +1854,7 @@ mod Unify {
a_meth.inputs, a_meth.output);
alt (r) {
case (ures_ok(?tfn)) {
- alt (struct(tfn)) {
+ alt (struct(cx.tystore, tfn)) {
case (ty_fn(?proto, ?ins, ?out)) {
result_meths += vec(rec(inputs = ins,
output = out
@@ -1890,12 +1894,12 @@ mod Unify {
// Fast path.
if (eq_ty(expected, actual)) { ret ures_ok(expected); }
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
// If the RHS is a variable type, then just do the appropriate
// binding.
case (ty.ty_var(?actual_id)) {
auto actual_n = get_or_create_set(cx, actual_id);
- alt (struct(expected)) {
+ alt (struct(cx.tystore, expected)) {
case (ty.ty_var(?expected_id)) {
auto expected_n = get_or_create_set(cx, expected_id);
UFind.union(cx.sets, expected_n, actual_n);
@@ -1931,7 +1935,7 @@ mod Unify {
ret ures_ok(result_ty);
}
case (ty.ty_bound_param(?actual_id)) {
- alt (struct(expected)) {
+ alt (struct(cx.tystore, expected)) {
case (ty.ty_local(_)) {
log_err "TODO: bound param unifying with local";
fail;
@@ -1945,21 +1949,21 @@ mod Unify {
case (_) { /* empty */ }
}
- alt (struct(expected)) {
- case (ty.ty_nil) { ret struct_cmp(expected, actual); }
- case (ty.ty_bool) { ret struct_cmp(expected, actual); }
- case (ty.ty_int) { ret struct_cmp(expected, actual); }
- case (ty.ty_uint) { ret struct_cmp(expected, actual); }
- case (ty.ty_machine(_)) { ret struct_cmp(expected, actual); }
- case (ty.ty_float) { ret struct_cmp(expected, actual); }
- case (ty.ty_char) { ret struct_cmp(expected, actual); }
- case (ty.ty_str) { ret struct_cmp(expected, actual); }
- case (ty.ty_type) { ret struct_cmp(expected, actual); }
- case (ty.ty_native) { ret struct_cmp(expected, actual); }
- case (ty.ty_param(_)) { ret struct_cmp(expected, actual); }
+ alt (struct(cx.tystore, expected)) {
+ case (ty.ty_nil) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_bool) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_int) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_uint) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_machine(_)) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_float) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_char) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_str) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_type) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_native) { ret struct_cmp(cx, expected, actual); }
+ case (ty.ty_param(_)) { ret struct_cmp(cx, expected, actual); }
case (ty.ty_tag(?expected_id, ?expected_tps)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_tag(?actual_id, ?actual_tps)) {
if (expected_id._0 != actual_id._0 ||
expected_id._1 != actual_id._1) {
@@ -2001,7 +2005,7 @@ mod Unify {
}
case (ty.ty_box(?expected_mt)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_box(?actual_mt)) {
auto mut;
alt (unify_mut(expected_mt.mut, actual_mt.mut)) {
@@ -2033,7 +2037,7 @@ mod Unify {
}
case (ty.ty_vec(?expected_mt)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_vec(?actual_mt)) {
auto mut;
alt (unify_mut(expected_mt.mut, actual_mt.mut)) {
@@ -2065,7 +2069,7 @@ mod Unify {
}
case (ty.ty_port(?expected_sub)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_port(?actual_sub)) {
auto result = unify_step(cx,
expected_sub,
@@ -2087,7 +2091,7 @@ mod Unify {
}
case (ty.ty_chan(?expected_sub)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_chan(?actual_sub)) {
auto result = unify_step(cx,
expected_sub,
@@ -2109,7 +2113,7 @@ mod Unify {
}
case (ty.ty_tup(?expected_elems)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_tup(?actual_elems)) {
auto expected_len = _vec.len[ty.mt](expected_elems);
auto actual_len = _vec.len[ty.mt](actual_elems);
@@ -2163,7 +2167,7 @@ mod Unify {
}
case (ty.ty_rec(?expected_fields)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_rec(?actual_fields)) {
auto expected_len = _vec.len[field](expected_fields);
auto actual_len = _vec.len[field](actual_fields);
@@ -2227,7 +2231,7 @@ mod Unify {
}
case (ty.ty_fn(?ep, ?expected_inputs, ?expected_output)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_fn(?ap, ?actual_inputs, ?actual_output)) {
ret unify_fn(cx, ep, ap,
expected, actual,
@@ -2243,7 +2247,7 @@ mod Unify {
case (ty.ty_native_fn(?e_abi, ?expected_inputs,
?expected_output)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_native_fn(?a_abi, ?actual_inputs,
?actual_output)) {
ret unify_native_fn(cx, e_abi, a_abi,
@@ -2258,7 +2262,7 @@ mod Unify {
}
case (ty.ty_obj(?expected_meths)) {
- alt (struct(actual)) {
+ alt (struct(cx.tystore, actual)) {
case (ty.ty_obj(?actual_meths)) {
ret unify_obj(cx, expected, actual,
expected_meths, actual_meths);
@@ -2311,7 +2315,7 @@ mod Unify {
// Performs type binding substitution.
fn substitute(@ctxt cx, vec[t] set_types, t typ) -> t {
fn substituter(@ctxt cx, vec[t] types, t typ) -> t {
- alt (struct(typ)) {
+ alt (struct(cx.tystore, typ)) {
case (ty_var(?id)) {
alt (cx.var_ids.find(id)) {
case (some[uint](?n)) {
@@ -2443,8 +2447,8 @@ fn type_err_to_str(&ty.type_err err) -> str {
fn substitute_type_params(@type_store tystore,
vec[t] bindings,
t typ) -> t {
- fn replacer(vec[t] bindings, t typ) -> t {
- alt (struct(typ)) {
+ fn replacer(@type_store tystore, vec[t] bindings, t typ) -> t {
+ alt (struct(tystore, typ)) {
case (ty_bound_param(?param_index)) {
ret bindings.(param_index);
}
@@ -2452,14 +2456,14 @@ fn substitute_type_params(@type_store tystore,
}
}
- auto f = bind replacer(bindings, _);
+ auto f = bind replacer(tystore, bindings, _);
ret fold_ty(tystore, f, typ);
}
// Converts type parameters in a type to bound type parameters.
fn bind_params_in_type(@type_store tystore, t typ) -> t {
fn binder(@type_store tystore, t typ) -> t {
- alt (struct(typ)) {
+ alt (struct(tystore, typ)) {
case (ty_bound_param(?index)) {
log_err "bind_params_in_type() called on type that already " +
"has bound params in it";
diff --git a/src/comp/middle/typeck.rs b/src/comp/middle/typeck.rs
index b6729989..f86e2c4f 100644
--- a/src/comp/middle/typeck.rs
+++ b/src/comp/middle/typeck.rs
@@ -81,7 +81,7 @@ fn substitute_ty_params(&@crate_ctxt ccx,
vec[ty.t] supplied,
&span sp) -> ty.t {
fn substituter(@crate_ctxt ccx, vec[ty.t] supplied, ty.t typ) -> ty.t {
- alt (struct(typ)) {
+ alt (struct(ccx.tystore, typ)) {
case (ty.ty_bound_param(?pid)) { ret supplied.(pid); }
case (_) { ret typ; }
}
@@ -686,10 +686,10 @@ mod Collect {
ret @fold.respan[ast.native_item_](sp, item);
}
- fn get_ctor_obj_methods(ty.t t) -> vec[method] {
- alt (struct(t)) {
+ fn get_ctor_obj_methods(&@env e, ty.t t) -> vec[method] {
+ alt (struct(e.cx.tystore, t)) {
case (ty.ty_fn(_,_,?tobj)) {
- alt (struct(tobj)) {
+ alt (struct(e.cx.tystore, tobj)) {
case (ty.ty_obj(?tm)) {
ret tm;
}
@@ -712,7 +712,7 @@ mod Collect {
ast.obj_def_ids odid, ast.ann a) -> @ast.item {
check (e.cx.type_cache.contains_key(odid.ctor));
auto t = e.cx.type_cache.get(odid.ctor)._1;
- let vec[method] meth_tys = get_ctor_obj_methods(t);
+ let vec[method] meth_tys = get_ctor_obj_methods(e, t);
let vec[@ast.method] methods = vec();
let vec[ast.obj_field] fields = vec();
@@ -853,7 +853,8 @@ mod Unify {
alt (fcx.locals.find(id)) {
case (none[ty.t]) { ret none[ty.t]; }
case (some[ty.t](?existing_type)) {
- if (ty.type_contains_vars(existing_type)) {
+ if (ty.type_contains_vars(fcx.ccx.tystore,
+ existing_type)) {
// Not fully resolved yet. The writeback phase
// will mop up.
ret none[ty.t];
@@ -921,10 +922,10 @@ tag autoderef_kind {
NO_AUTODEREF;
}
-fn strip_boxes(ty.t t) -> ty.t {
+fn strip_boxes(@ty.type_store tystore, ty.t t) -> ty.t {
auto t1 = t;
while (true) {
- alt (struct(t1)) {
+ alt (struct(tystore, t1)) {
case (ty.ty_box(?inner)) { t1 = inner.ty; }
case (_) { ret t1; }
}
@@ -942,11 +943,11 @@ fn add_boxes(@crate_ctxt ccx, uint n, ty.t t) -> ty.t {
}
-fn count_boxes(ty.t t) -> uint {
+fn count_boxes(@ty.type_store tystore, ty.t t) -> uint {
auto n = 0u;
auto t1 = t;
while (true) {
- alt (struct(t1)) {
+ alt (struct(tystore, t1)) {
case (ty.ty_box(?inner)) { n += 1u; t1 = inner.ty; }
case (_) { ret n; }
}
@@ -984,9 +985,9 @@ mod Demand {
auto implicit_boxes = 0u;
if (adk == AUTODEREF_OK) {
- expected_1 = strip_boxes(expected_1);
- actual_1 = strip_boxes(actual_1);
- implicit_boxes = count_boxes(actual);
+ expected_1 = strip_boxes(fcx.ccx.tystore, expected_1);
+ actual_1 = strip_boxes(fcx.ccx.tystore, actual_1);
+ implicit_boxes = count_boxes(fcx.ccx.tystore, actual);
}
let vec[mutable ty.t] ty_param_substs =
@@ -1010,9 +1011,9 @@ mod Demand {
case (ures_err(?err, ?expected, ?actual)) {
fcx.ccx.sess.span_err(sp, "mismatched types: expected "
- + ty_to_str(expected) + " but found "
- + ty_to_str(actual) + " (" +
- ty.type_err_to_str(err) + ")");
+ + ty_to_str(fcx.ccx.tystore, expected) + " but found "
+ + ty_to_str(fcx.ccx.tystore, actual) + " ("
+ + ty.type_err_to_str(err) + ")");
// TODO: In the future, try returning "expected", reporting
// the error, and continue.
@@ -1040,7 +1041,7 @@ fn variant_arg_types(@crate_ctxt ccx, &span sp, ast.def_id vid,
auto tpt = ty.lookup_item_type(ccx.sess, ccx.tystore, ccx.type_cache,
vid);
- alt (struct(tpt._1)) {
+ alt (struct(ccx.tystore, tpt._1)) {
case (ty.ty_fn(_, ?ins, _)) {
// N-ary variant.
for (ty.arg arg in ins) {
@@ -1109,7 +1110,7 @@ mod Pushdown {
// Take the variant's type parameters out of the expected
// type.
auto tag_tps;
- alt (struct(expected)) {
+ alt (struct(fcx.ccx.tystore, expected)) {
case (ty.ty_tag(_, ?tps)) { tag_tps = tps; }
case (_) {
log_err "tag pattern type not actually a tag?!";
@@ -1159,7 +1160,7 @@ mod Pushdown {
auto t = Demand.simple(fcx, e.span, expected,
ann_to_type(ann));
let vec[@ast.expr] es_1 = vec();
- alt (struct(t)) {
+ alt (struct(fcx.ccx.tystore, t)) {
case (ty.ty_vec(?mt)) {
for (@ast.expr e_0 in es_0) {
es_1 += vec(pushdown_expr(fcx, mt.ty, e_0));
@@ -1176,7 +1177,7 @@ mod Pushdown {
auto t = Demand.simple(fcx, e.span, expected,
ann_to_type(ann));
let vec[ast.elt] elts_1 = vec();
- alt (struct(t)) {
+ alt (struct(fcx.ccx.tystore, t)) {
case (ty.ty_tup(?mts)) {
auto i = 0u;
for (ast.elt elt_0 in es_0) {
@@ -1200,7 +1201,7 @@ mod Pushdown {
auto t = Demand.simple(fcx, e.span, expected,
ann_to_type(ann));
let vec[ast.field] fields_1 = vec();
- alt (struct(t)) {
+ alt (struct(fcx.ccx.tystore, t)) {
case (ty.ty_rec(?field_mts)) {
alt (base_0) {
case (none[@ast.expr]) {
@@ -1413,7 +1414,7 @@ mod Pushdown {
auto t = Demand.simple(fcx, e.span, expected,
ann_to_type(ann));
let @ast.expr es_1;
- alt (struct(t)) {
+ alt (struct(fcx.ccx.tystore, t)) {
case (ty.ty_chan(?subty)) {
auto pt = ty.mk_port(fcx.ccx.tystore, subty);
es_1 = pushdown_expr(fcx, pt, es);
@@ -1515,7 +1516,7 @@ fn writeback_local(&option.t[@fn_ctxt] env, &span sp, @ast.local local)
fn resolve_local_types_in_annotation(&option.t[@fn_ctxt] env, ast.ann ann)
-> ast.ann {
fn resolver(@fn_ctxt fcx, ty.t typ) -> ty.t {
- alt (struct(typ)) {
+ alt (struct(fcx.ccx.tystore, typ)) {
case (ty.ty_local(?lid)) { ret fcx.locals.get(lid); }
case (_) { ret typ; }
}
@@ -1604,7 +1605,7 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
fcx.ccx.type_cache, vdef._0);
auto ann = instantiate_path(fcx, p, tpt, pat.span);
- alt (struct(t)) {
+ alt (struct(fcx.ccx.tystore, t)) {
// N-ary variants have function types.
case (ty.ty_fn(_, ?args, ?tag_ty)) {
auto arg_len = _vec.len[arg](args);
@@ -1692,7 +1693,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto rt_0 = next_ty_var(fcx.ccx);
auto t_0;
- alt (struct(expr_ty(fcx.ccx.tystore, f_0))) {
+ alt (struct(fcx.ccx.tystore, expr_ty(fcx.ccx.tystore, f_0))) {
case (ty.ty_fn(?proto, _, _)) {
t_0 = ty.mk_fn(fcx.ccx.tystore, proto, arg_tys_0, rt_0);
}
@@ -1774,7 +1775,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
expr_ty(fcx.ccx.tystore, lhs_1),
rhs_0, AUTODEREF_OK);
- auto t = strip_boxes(lhs_t0);
+ auto t = strip_boxes(fcx.ccx.tystore, lhs_t0);
alt (binop) {
case (ast.eq) { t = ty.mk_bool(fcx.ccx.tystore); }
case (ast.lt) { t = ty.mk_bool(fcx.ccx.tystore); }
@@ -1801,7 +1802,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
rec(ty=oper_t, mut=mut));
}
case (ast.deref) {
- alt (struct(oper_t)) {
+ alt (struct(fcx.ccx.tystore, oper_t)) {
case (ty.ty_box(?inner)) {
oper_t = inner.ty;
}
@@ -1809,11 +1810,11 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
fcx.ccx.sess.span_err
(expr.span,
"dereferencing non-box type: "
- + ty_to_str(oper_t));
+ + ty_to_str(fcx.ccx.tystore, oper_t));
}
}
}
- case (_) { oper_t = strip_boxes(oper_t); }
+ case (_) { oper_t = strip_boxes(fcx.ccx.tystore, oper_t); }
}
auto ann = triv_ann(oper_t);
@@ -1971,7 +1972,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto chan_t = ty.mk_chan(fcx.ccx.tystore, rhs_t);
auto lhs_1 = Pushdown.pushdown_expr(fcx, chan_t, lhs_0);
auto item_t;
- alt (struct(expr_ty(fcx.ccx.tystore, lhs_1))) {
+ alt (struct(fcx.ccx.tystore, expr_ty(fcx.ccx.tystore, lhs_1))) {
case (ty.ty_chan(?it)) {
item_t = it;
}
@@ -1994,7 +1995,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto port_t = ty.mk_port(fcx.ccx.tystore, lhs_t1);
auto rhs_1 = Pushdown.pushdown_expr(fcx, port_t, rhs_0);
auto item_t;
- alt (struct(expr_ty(fcx.ccx.tystore, rhs_0))) {
+ alt (struct(fcx.ccx.tystore, expr_ty(fcx.ccx.tystore, rhs_0))) {
case (ty.ty_port(?it)) {
item_t = it;
}
@@ -2164,7 +2165,8 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto proto_1;
let vec[ty.arg] arg_tys_1 = vec();
auto rt_1;
- alt (struct(expr_ty(fcx.ccx.tystore, result._0))) {
+ alt (struct(fcx.ccx.tystore,
+ expr_ty(fcx.ccx.tystore, result._0))) {
case (ty.ty_fn(?proto, ?arg_tys, ?rt)) {
proto_1 = proto;
rt_1 = rt;
@@ -2202,7 +2204,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
// Pull the return type out of the type of the function.
auto rt_1 = ty.mk_nil(fcx.ccx.tystore); // FIXME: typestate botch
- alt (struct(expr_ty(fcx.ccx.tystore, f_1))) {
+ alt (struct(fcx.ccx.tystore, expr_ty(fcx.ccx.tystore, f_1))) {
case (ty.ty_fn(_,_,?rt)) { rt_1 = rt; }
case (ty.ty_native_fn(_, _, ?rt)) { rt_1 = rt; }
case (_) {
@@ -2234,7 +2236,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
// Grab this method's type out of the current object type
// this_obj_ty is an ty.t
- alt (struct(this_obj_ty)) {
+ alt (struct(fcx.ccx.tystore, this_obj_ty)) {
case (ty.ty_obj(?methods)) {
for (ty.method method in methods) {
if (method.ident == id) {
@@ -2258,9 +2260,9 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto args_1 = result._1;
// Check the return type
- alt (struct(expr_ty(fcx.ccx.tystore, f_1))) {
+ alt (struct(fcx.ccx.tystore, expr_ty(fcx.ccx.tystore, f_1))) {
case (ty.ty_fn(_,_,?rt)) {
- alt (struct(rt)) {
+ alt (struct(fcx.ccx.tystore, rt)) {
case (ty.ty_nil) {
// This is acceptable
}
@@ -2286,12 +2288,14 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
auto e_1 = check_expr(fcx, e);
auto t_1 = ast_ty_to_ty_crate(fcx.ccx, t);
// FIXME: there are more forms of cast to support, eventually.
- if (! (type_is_scalar(expr_ty(fcx.ccx.tystore, e_1)) &&
- type_is_scalar(t_1))) {
+ if (! (type_is_scalar(fcx.ccx.tystore,
+ expr_ty(fcx.ccx.tystore, e_1)) &&
+ type_is_scalar(fcx.ccx.tystore, t_1))) {
fcx.ccx.sess.span_err(expr.span,
"non-scalar cast: " +
- ty_to_str(expr_ty(fcx.ccx.tystore, e_1)) + " as " +
- ty_to_str(t_1));
+ ty_to_str(fcx.ccx.tystore,
+ expr_ty(fcx.ccx.tystore, e_1)) + " as " +
+ ty_to_str(fcx.ccx.tystore, t_1));
}
auto ann = triv_ann(t_1);
@@ -2374,7 +2378,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
let vec[field] base_fields = vec();
- alt (struct(bexpr_t)) {
+ alt (struct(fcx.ccx.tystore, bexpr_t)) {
case (ty.ty_rec(?flds)) {
base_fields = flds;
}
@@ -2412,8 +2416,9 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
case (ast.expr_field(?base, ?field, _)) {
auto base_1 = check_expr(fcx, base);
- auto base_t = strip_boxes(expr_ty(fcx.ccx.tystore, base_1));
- alt (struct(base_t)) {
+ auto base_t = strip_boxes(fcx.ccx.tystore,
+ expr_ty(fcx.ccx.tystore, base_1));
+ alt (struct(fcx.ccx.tystore, base_t)) {
case (ty.ty_tup(?args)) {
let uint ix = ty.field_num(fcx.ccx.sess,
expr.span, field);
@@ -2462,25 +2467,26 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
case (_) {
fcx.ccx.sess.span_unimpl(expr.span,
"base type for expr_field in typeck.check_expr: " +
- ty_to_str(base_t));
+ ty_to_str(fcx.ccx.tystore, base_t));
}
}
}
case (ast.expr_index(?base, ?idx, _)) {
auto base_1 = check_expr(fcx, base);
- auto base_t = strip_boxes(expr_ty(fcx.ccx.tystore, base_1));
+ auto base_t = strip_boxes(fcx.ccx.tystore,
+ expr_ty(fcx.ccx.tystore, base_1));
auto idx_1 = check_expr(fcx, idx);
auto idx_t = expr_ty(fcx.ccx.tystore, idx_1);
- alt (struct(base_t)) {
+ alt (struct(fcx.ccx.tystore, base_t)) {
case (ty.ty_vec(?mt)) {
- if (! type_is_integral(idx_t)) {
+ if (! type_is_integral(fcx.ccx.tystore, idx_t)) {
fcx.ccx.sess.span_err
(idx.span,
"non-integral type of vec index: "
- + ty_to_str(idx_t));
+ + ty_to_str(fcx.ccx.tystore, idx_t));
}
auto ann = triv_ann(mt.ty);
ret @fold.respan[ast.expr_](expr.span,
@@ -2489,11 +2495,11 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
ann));
}
case (ty.ty_str) {
- if (! type_is_integral(idx_t)) {
+ if (! type_is_integral(fcx.ccx.tystore, idx_t)) {
fcx.ccx.sess.span_err
(idx.span,
"non-integral type of str index: "
- + ty_to_str(idx_t));
+ + ty_to_str(fcx.ccx.tystore, idx_t));
}
auto ann = triv_ann(ty.mk_mach(fcx.ccx.tystore,
common.ty_u8));
@@ -2506,7 +2512,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
fcx.ccx.sess.span_err
(expr.span,
"vector-indexing bad type: "
- + ty_to_str(base_t));
+ + ty_to_str(fcx.ccx.tystore, base_t));
}
}
}
@@ -2521,7 +2527,7 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
case (ast.expr_chan(?x, _)) {
auto expr_1 = check_expr(fcx, x);
auto port_t = expr_ty(fcx.ccx.tystore, expr_1);
- alt (struct(port_t)) {
+ alt (struct(fcx.ccx.tystore, port_t)) {
case (ty.ty_port(?subtype)) {
auto ct = ty.mk_chan(fcx.ccx.tystore, subtype);
auto ann = triv_ann(ct);
@@ -2530,8 +2536,8 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
case (_) {
fcx.ccx.sess.span_err(expr.span,
- "bad port type: "
- + ty_to_str(port_t));
+ "bad port type: " + ty_to_str(fcx.ccx.tystore,
+ port_t));
}
}
}