aboutsummaryrefslogtreecommitdiff
path: root/src/comp/middle
diff options
context:
space:
mode:
Diffstat (limited to 'src/comp/middle')
-rw-r--r--src/comp/middle/fold.rs407
-rw-r--r--src/comp/middle/resolve.rs142
-rw-r--r--src/comp/middle/trans.rs3331
-rw-r--r--src/comp/middle/ty.rs679
-rw-r--r--src/comp/middle/typeck.rs844
5 files changed, 3920 insertions, 1483 deletions
diff --git a/src/comp/middle/fold.rs b/src/comp/middle/fold.rs
index ca10e79f..d7660460 100644
--- a/src/comp/middle/fold.rs
+++ b/src/comp/middle/fold.rs
@@ -10,6 +10,7 @@ import util.common.ty_mach;
import util.common.append;
import front.ast;
+import front.ast.fn_decl;
import front.ast.ident;
import front.ast.path;
import front.ast.mutability;
@@ -20,6 +21,7 @@ import front.ast.block;
import front.ast.item;
import front.ast.view_item;
import front.ast.meta_item;
+import front.ast.native_item;
import front.ast.arg;
import front.ast.pat;
import front.ast.decl;
@@ -28,6 +30,7 @@ import front.ast.def;
import front.ast.def_id;
import front.ast.ann;
+import std._uint;
import std._vec;
type ast_fold[ENV] =
@@ -56,6 +59,7 @@ type ast_fold[ENV] =
vec[ast.ty_method] meths) -> @ty) fold_ty_obj,
(fn(&ENV e, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty) fold_ty_fn,
@@ -72,7 +76,8 @@ type ast_fold[ENV] =
vec[ast.elt] es, ann a) -> @expr) fold_expr_tup,
(fn(&ENV e, &span sp,
- vec[ast.field] fields, ann a) -> @expr) fold_expr_rec,
+ vec[ast.field] fields,
+ option.t[@expr] base, ann a) -> @expr) fold_expr_rec,
(fn(&ENV e, &span sp,
@expr f, vec[@expr] args,
@@ -108,6 +113,10 @@ type ast_fold[ENV] =
ann a) -> @expr) fold_expr_for,
(fn(&ENV e, &span sp,
+ @decl decl, @expr seq, &block body,
+ ann a) -> @expr) fold_expr_for_each,
+
+ (fn(&ENV e, &span sp,
@expr cond, &block body,
ann a) -> @expr) fold_expr_while,
@@ -144,6 +153,29 @@ type ast_fold[ENV] =
&option.t[def] d,
ann a) -> @expr) fold_expr_path,
+ (fn(&ENV e, &span sp,
+ &path p, vec[@expr] args,
+ option.t[@expr] body,
+ @expr expanded,
+ ann a) -> @expr) fold_expr_ext,
+
+ (fn(&ENV e, &span sp) -> @expr) fold_expr_fail,
+
+ (fn(&ENV e, &span sp,
+ &option.t[@expr] rv) -> @expr) fold_expr_ret,
+
+ (fn(&ENV e, &span sp,
+ &option.t[@expr] rv) -> @expr) fold_expr_put,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_be,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_log,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_check_expr,
+
// Decl folds.
(fn(&ENV e, &span sp,
@ast.local local) -> @decl) fold_decl_local,
@@ -157,6 +189,9 @@ type ast_fold[ENV] =
ann a) -> @pat) fold_pat_wild,
(fn(&ENV e, &span sp,
+ @ast.lit lit, ann a) -> @pat) fold_pat_lit,
+
+ (fn(&ENV e, &span sp,
ident i, def_id did, ann a) -> @pat) fold_pat_bind,
(fn(&ENV e, &span sp,
@@ -170,15 +205,6 @@ type ast_fold[ENV] =
@decl decl) -> @stmt) fold_stmt_decl,
(fn(&ENV e, &span sp,
- &option.t[@expr] rv) -> @stmt) fold_stmt_ret,
-
- (fn(&ENV e, &span sp,
- @expr e) -> @stmt) fold_stmt_log,
-
- (fn(&ENV e, &span sp,
- @expr e) -> @stmt) fold_stmt_check_expr,
-
- (fn(&ENV e, &span sp,
@expr e) -> @stmt) fold_stmt_expr,
// Item folds.
@@ -192,13 +218,24 @@ type ast_fold[ENV] =
def_id id, ann a) -> @item) fold_item_fn,
(fn(&ENV e, &span sp, ident ident,
+ &ast.fn_decl decl,
+ vec[ast.ty_param] ty_params,
+ def_id id, ann a) -> @native_item) fold_native_item_fn,
+
+ (fn(&ENV e, &span sp, ident ident,
&ast._mod m, def_id id) -> @item) fold_item_mod,
(fn(&ENV e, &span sp, ident ident,
+ &ast.native_mod m, def_id id) -> @item) fold_item_native_mod,
+
+ (fn(&ENV e, &span sp, ident ident,
@ty t, vec[ast.ty_param] ty_params,
def_id id, ann a) -> @item) fold_item_ty,
(fn(&ENV e, &span sp, ident ident,
+ def_id id) -> @native_item) fold_native_item_ty,
+
+ (fn(&ENV e, &span sp, ident ident,
vec[ast.variant] variants,
vec[ast.ty_param] ty_params,
def_id id) -> @item) fold_item_tag,
@@ -220,23 +257,30 @@ type ast_fold[ENV] =
(fn(&ENV e, &span sp,
&ast.block_) -> block) fold_block,
+ (fn(&ENV e, &fn_decl decl,
+ ast.proto proto,
+ &block body) -> ast._fn) fold_fn,
+
(fn(&ENV e, ast.effect effect,
- bool is_iter,
vec[arg] inputs,
- @ty output, &block body) -> ast._fn) fold_fn,
+ @ty output) -> ast.fn_decl) fold_fn_decl,
(fn(&ENV e, &ast._mod m) -> ast._mod) fold_mod,
+ (fn(&ENV e, &ast.native_mod m) -> ast.native_mod) fold_native_mod,
+
(fn(&ENV e, &span sp,
&ast._mod m) -> @ast.crate) fold_crate,
(fn(&ENV e,
vec[ast.obj_field] fields,
- vec[@ast.method] methods) -> ast._obj) fold_obj,
+ vec[@ast.method] methods,
+ option.t[block] dtor) -> ast._obj) fold_obj,
// Env updates.
(fn(&ENV e, @ast.crate c) -> ENV) update_env_for_crate,
(fn(&ENV e, @item i) -> ENV) update_env_for_item,
+ (fn(&ENV e, @native_item i) -> ENV) update_env_for_native_item,
(fn(&ENV e, @view_item i) -> ENV) update_env_for_view_item,
(fn(&ENV e, &block b) -> ENV) update_env_for_block,
(fn(&ENV e, @stmt s) -> ENV) update_env_for_stmt,
@@ -312,11 +356,13 @@ fn fold_ty[ENV](&ENV env, ast_fold[ENV] fld, @ty t) -> @ty {
case (ast.ty_obj(?meths)) {
let vec[ast.ty_method] meths_ = vec();
for (ast.ty_method m in meths) {
- auto tfn = fold_ty_fn(env_, fld, t.span, m.inputs, m.output);
+ auto tfn = fold_ty_fn(env_, fld, t.span, m.proto,
+ m.inputs, m.output);
alt (tfn.node) {
- case (ast.ty_fn(?ins, ?out)) {
+ case (ast.ty_fn(?p, ?ins, ?out)) {
append[ast.ty_method]
- (meths_, rec(inputs=ins, output=out with m));
+ (meths_, rec(proto=p, inputs=ins, output=out
+ with m));
}
}
}
@@ -333,13 +379,14 @@ fn fold_ty[ENV](&ENV env, ast_fold[ENV] fld, @ty t) -> @ty {
ret fld.fold_ty_mutable(env_, t.span, ty_);
}
- case (ast.ty_fn(?inputs, ?output)) {
- ret fold_ty_fn(env_, fld, t.span, inputs, output);
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
+ ret fold_ty_fn(env_, fld, t.span, proto, inputs, output);
}
}
}
fn fold_ty_fn[ENV](&ENV env, ast_fold[ENV] fld, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty {
auto output_ = fold_ty(env, fld, output);
@@ -349,7 +396,7 @@ fn fold_ty_fn[ENV](&ENV env, ast_fold[ENV] fld, &span sp,
auto input_ = rec(ty=ty_ with input);
inputs_ += vec(input_);
}
- ret fld.fold_ty_fn(env, sp, inputs_, output_);
+ ret fld.fold_ty_fn(env, sp, proto, inputs_, output_);
}
fn fold_decl[ENV](&ENV env, ast_fold[ENV] fld, @decl d) -> @decl {
@@ -397,6 +444,9 @@ fn fold_pat[ENV](&ENV env, ast_fold[ENV] fld, @ast.pat p) -> @ast.pat {
alt (p.node) {
case (ast.pat_wild(?t)) { ret fld.fold_pat_wild(env_, p.span, t); }
+ case (ast.pat_lit(?lt, ?t)) {
+ ret fld.fold_pat_lit(env_, p.span, lt, t);
+ }
case (ast.pat_bind(?id, ?did, ?t)) {
ret fld.fold_pat_bind(env_, p.span, id, did, t);
}
@@ -449,12 +499,19 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
ret fld.fold_expr_tup(env_, e.span, elts, t);
}
- case (ast.expr_rec(?fs, ?t)) {
+ case (ast.expr_rec(?fs, ?base, ?t)) {
let vec[ast.field] fields = vec();
+ let option.t[@expr] b = none[@expr];
for (ast.field f in fs) {
fields += fold_rec_field(env, fld, f);
}
- ret fld.fold_expr_rec(env_, e.span, fields, t);
+ alt (base) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?eb)) {
+ b = some[@expr](fold_expr(env_, fld, eb));
+ }
+ }
+ ret fld.fold_expr_rec(env_, e.span, fields, b, t);
}
case (ast.expr_call(?f, ?args, ?t)) {
@@ -521,6 +578,13 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
ret fld.fold_expr_for(env_, e.span, ddecl, sseq, bbody, t);
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, ?t)) {
+ auto ddecl = fold_decl(env_, fld, decl);
+ auto sseq = fold_expr(env_, fld, seq);
+ auto bbody = fold_block(env_, fld, body);
+ ret fld.fold_expr_for_each(env_, e.span, ddecl, sseq, bbody, t);
+ }
+
case (ast.expr_while(?cnd, ?body, ?t)) {
auto ccnd = fold_expr(env_, fld, cnd);
auto bbody = fold_block(env_, fld, body);
@@ -574,9 +638,59 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
auto p_ = fold_path(env_, fld, p);
ret fld.fold_expr_path(env_, e.span, p_, r, t);
}
+
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, ?t)) {
+ // Only fold the expanded expression, not the
+ // expressions involved in syntax extension
+ auto exp = fold_expr(env_, fld, expanded);
+ ret fld.fold_expr_ext(env_, e.span, p, args, body,
+ exp, t);
+ }
+
+ case (ast.expr_fail) {
+ ret fld.fold_expr_fail(env_, e.span);
+ }
+
+ case (ast.expr_ret(?oe)) {
+ auto oee = none[@expr];
+ alt (oe) {
+ case (some[@expr](?x)) {
+ oee = some(fold_expr(env_, fld, x));
+ }
+ case (_) { /* fall through */ }
+ }
+ ret fld.fold_expr_ret(env_, e.span, oee);
+ }
+
+ case (ast.expr_put(?oe)) {
+ auto oee = none[@expr];
+ alt (oe) {
+ case (some[@expr](?x)) {
+ oee = some(fold_expr(env_, fld, x));
+ }
+ case (_) { /* fall through */ }
+ }
+ ret fld.fold_expr_put(env_, e.span, oee);
+ }
+
+ case (ast.expr_be(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_be(env_, e.span, ee);
+ }
+
+ case (ast.expr_log(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_log(env_, e.span, ee);
+ }
+
+ case (ast.expr_check_expr(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_check_expr(env_, e.span, ee);
+ }
+
}
- ret e;
+ fail;
}
@@ -594,37 +708,12 @@ fn fold_stmt[ENV](&ENV env, ast_fold[ENV] fld, &@stmt s) -> @stmt {
ret fld.fold_stmt_decl(env_, s.span, dd);
}
- case (ast.stmt_ret(?oe)) {
- auto oee = none[@expr];
- alt (oe) {
- case (some[@expr](?e)) {
- oee = some(fold_expr(env_, fld, e));
- }
- case (_) { /* fall through */ }
- }
- ret fld.fold_stmt_ret(env_, s.span, oee);
- }
-
- case (ast.stmt_log(?e)) {
- auto ee = fold_expr(env_, fld, e);
- ret fld.fold_stmt_log(env_, s.span, ee);
- }
-
- case (ast.stmt_check_expr(?e)) {
- auto ee = fold_expr(env_, fld, e);
- ret fld.fold_stmt_check_expr(env_, s.span, ee);
- }
-
- case (ast.stmt_fail) {
- ret s;
- }
-
case (ast.stmt_expr(?e)) {
auto ee = fold_expr(env_, fld, e);
ret fld.fold_stmt_expr(env_, s.span, ee);
}
}
- ret s;
+ fail;
}
fn fold_block[ENV](&ENV env, ast_fold[ENV] fld, &block blk) -> block {
@@ -666,17 +755,22 @@ fn fold_arg[ENV](&ENV env, ast_fold[ENV] fld, &arg a) -> arg {
ret rec(ty=ty with a);
}
-
-fn fold_fn[ENV](&ENV env, ast_fold[ENV] fld, &ast._fn f) -> ast._fn {
-
+fn fold_fn_decl[ENV](&ENV env, ast_fold[ENV] fld,
+ &ast.fn_decl decl) -> ast.fn_decl {
let vec[ast.arg] inputs = vec();
- for (ast.arg a in f.inputs) {
+ for (ast.arg a in decl.inputs) {
inputs += fold_arg(env, fld, a);
}
- auto output = fold_ty[ENV](env, fld, f.output);
+ auto output = fold_ty[ENV](env, fld, decl.output);
+ ret fld.fold_fn_decl(env, decl.effect, inputs, output);
+}
+
+fn fold_fn[ENV](&ENV env, ast_fold[ENV] fld, &ast._fn f) -> ast._fn {
+ auto decl = fold_fn_decl(env, fld, f.decl);
+
auto body = fold_block[ENV](env, fld, f.body);
- ret fld.fold_fn(env, f.effect, f.is_iter, inputs, output, body);
+ ret fld.fold_fn(env, decl, f.proto, body);
}
@@ -701,6 +795,13 @@ fn fold_obj[ENV](&ENV env, ast_fold[ENV] fld, &ast._obj ob) -> ast._obj {
for (ast.obj_field f in ob.fields) {
fields += fold_obj_field(env, fld, f);
}
+ let option.t[block] dtor = none[block];
+ alt (ob.dtor) {
+ case (none[block]) { }
+ case (some[block](?b)) {
+ dtor = some[block](fold_block[ENV](env, fld, b));
+ }
+ }
let vec[ast.ty_param] tp = vec();
for (@ast.method m in ob.methods) {
// Fake-up an ast.item for this method.
@@ -715,7 +816,7 @@ fn fold_obj[ENV](&ENV env, ast_fold[ENV] fld, &ast._obj ob) -> ast._obj {
let ENV _env = fld.update_env_for_item(env, i);
append[@ast.method](meths, fold_method(_env, fld, m));
}
- ret fld.fold_obj(env, fields, meths);
+ ret fld.fold_obj(env, fields, meths, dtor);
}
fn fold_view_item[ENV](&ENV env, ast_fold[ENV] fld, @view_item vi)
@@ -768,6 +869,11 @@ fn fold_item[ENV](&ENV env, ast_fold[ENV] fld, @item i) -> @item {
ret fld.fold_item_mod(env_, i.span, ident, mm_, id);
}
+ case (ast.item_native_mod(?ident, ?mm, ?id)) {
+ let ast.native_mod mm_ = fold_native_mod[ENV](env_, fld, mm);
+ ret fld.fold_item_native_mod(env_, i.span, ident, mm_, id);
+ }
+
case (ast.item_ty(?ident, ?ty, ?params, ?id, ?ann)) {
let @ast.ty ty_ = fold_ty[ENV](env_, fld, ty);
ret fld.fold_item_ty(env_, i.span, ident, ty_, params, id, ann);
@@ -798,7 +904,6 @@ fn fold_item[ENV](&ENV env, ast_fold[ENV] fld, @item i) -> @item {
fail;
}
-
fn fold_mod[ENV](&ENV e, ast_fold[ENV] fld, &ast._mod m) -> ast._mod {
let vec[@view_item] view_items = vec();
@@ -818,7 +923,50 @@ fn fold_mod[ENV](&ENV e, ast_fold[ENV] fld, &ast._mod m) -> ast._mod {
}
ret fld.fold_mod(e, rec(view_items=view_items, items=items, index=index));
- }
+}
+
+fn fold_native_item[ENV](&ENV env, ast_fold[ENV] fld,
+ @native_item i) -> @native_item {
+ let ENV env_ = fld.update_env_for_native_item(env, i);
+
+ if (!fld.keep_going(env_)) {
+ ret i;
+ }
+ alt (i.node) {
+ case (ast.native_item_ty(?ident, ?id)) {
+ ret fld.fold_native_item_ty(env_, i.span, ident, id);
+ }
+ case (ast.native_item_fn(?ident, ?fn_decl, ?ty_params, ?id, ?ann)) {
+ auto d = fold_fn_decl[ENV](env_, fld, fn_decl);
+ ret fld.fold_native_item_fn(env_, i.span, ident, d,
+ ty_params, id, ann);
+ }
+ }
+}
+
+fn fold_native_mod[ENV](&ENV e, ast_fold[ENV] fld,
+ &ast.native_mod m) -> ast.native_mod {
+ let vec[@view_item] view_items = vec();
+ let vec[@native_item] items = vec();
+ auto index = new_str_hash[ast.native_mod_index_entry]();
+
+ for (@view_item vi in m.view_items) {
+ auto new_vi = fold_view_item[ENV](e, fld, vi);
+ append[@view_item](view_items, new_vi);
+ }
+
+ for (@native_item i in m.items) {
+ auto new_item = fold_native_item[ENV](e, fld, i);
+ append[@native_item](items, new_item);
+ ast.index_native_item(index, new_item);
+ }
+
+ ret fld.fold_native_mod(e, rec(native_name=m.native_name,
+ abi=m.abi,
+ view_items=view_items,
+ items=items,
+ index=index));
+}
fn fold_crate[ENV](&ENV env, ast_fold[ENV] fld, @ast.crate c) -> @ast.crate {
let ENV env_ = fld.update_env_for_crate(env, c);
@@ -894,9 +1042,10 @@ fn identity_fold_ty_obj[ENV](&ENV env, &span sp,
}
fn identity_fold_ty_fn[ENV](&ENV env, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty {
- ret @respan(sp, ast.ty_fn(inputs, output));
+ ret @respan(sp, ast.ty_fn(proto, inputs, output));
}
fn identity_fold_ty_path[ENV](&ENV env, &span sp, ast.path p,
@@ -922,8 +1071,9 @@ fn identity_fold_expr_tup[ENV](&ENV env, &span sp,
}
fn identity_fold_expr_rec[ENV](&ENV env, &span sp,
- vec[ast.field] fields, ann a) -> @expr {
- ret @respan(sp, ast.expr_rec(fields, a));
+ vec[ast.field] fields,
+ option.t[@expr] base, ann a) -> @expr {
+ ret @respan(sp, ast.expr_rec(fields, base, a));
}
fn identity_fold_expr_call[ENV](&ENV env, &span sp, @expr f,
@@ -971,6 +1121,12 @@ fn identity_fold_expr_for[ENV](&ENV env, &span sp,
ret @respan(sp, ast.expr_for(d, seq, body, a));
}
+fn identity_fold_expr_for_each[ENV](&ENV env, &span sp,
+ @decl d, @expr seq,
+ &block body, ann a) -> @expr {
+ ret @respan(sp, ast.expr_for_each(d, seq, body, a));
+}
+
fn identity_fold_expr_while[ENV](&ENV env, &span sp,
@expr cond, &block body, ann a) -> @expr {
ret @respan(sp, ast.expr_while(cond, body, a));
@@ -1019,6 +1175,40 @@ fn identity_fold_expr_path[ENV](&ENV env, &span sp,
ret @respan(sp, ast.expr_path(p, d, a));
}
+fn identity_fold_expr_ext[ENV](&ENV env, &span sp,
+ &path p, vec[@expr] args,
+ option.t[@expr] body,
+ @expr expanded,
+ ann a) -> @expr {
+ ret @respan(sp, ast.expr_ext(p, args, body, expanded, a));
+}
+
+fn identity_fold_expr_fail[ENV](&ENV env, &span sp) -> @expr {
+ ret @respan(sp, ast.expr_fail);
+}
+
+fn identity_fold_expr_ret[ENV](&ENV env, &span sp,
+ &option.t[@expr] rv) -> @expr {
+ ret @respan(sp, ast.expr_ret(rv));
+}
+
+fn identity_fold_expr_put[ENV](&ENV env, &span sp,
+ &option.t[@expr] rv) -> @expr {
+ ret @respan(sp, ast.expr_put(rv));
+}
+
+fn identity_fold_expr_be[ENV](&ENV env, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_be(x));
+}
+
+fn identity_fold_expr_log[ENV](&ENV e, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_log(x));
+}
+
+fn identity_fold_expr_check_expr[ENV](&ENV e, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_check_expr(x));
+}
+
// Decl identities.
@@ -1038,6 +1228,10 @@ fn identity_fold_pat_wild[ENV](&ENV e, &span sp, ann a) -> @pat {
ret @respan(sp, ast.pat_wild(a));
}
+fn identity_fold_pat_lit[ENV](&ENV e, &span sp, @ast.lit lit, ann a) -> @pat {
+ ret @respan(sp, ast.pat_lit(lit, a));
+}
+
fn identity_fold_pat_bind[ENV](&ENV e, &span sp, ident i, def_id did, ann a)
-> @pat {
ret @respan(sp, ast.pat_bind(i, did, a));
@@ -1055,19 +1249,6 @@ fn identity_fold_stmt_decl[ENV](&ENV env, &span sp, @decl d) -> @stmt {
ret @respan(sp, ast.stmt_decl(d));
}
-fn identity_fold_stmt_ret[ENV](&ENV env, &span sp,
- &option.t[@expr] rv) -> @stmt {
- ret @respan(sp, ast.stmt_ret(rv));
-}
-
-fn identity_fold_stmt_log[ENV](&ENV e, &span sp, @expr x) -> @stmt {
- ret @respan(sp, ast.stmt_log(x));
-}
-
-fn identity_fold_stmt_check_expr[ENV](&ENV e, &span sp, @expr x) -> @stmt {
- ret @respan(sp, ast.stmt_check_expr(x));
-}
-
fn identity_fold_stmt_expr[ENV](&ENV e, &span sp, @expr x) -> @stmt {
ret @respan(sp, ast.stmt_expr(x));
}
@@ -1087,17 +1268,34 @@ fn identity_fold_item_fn[ENV](&ENV e, &span sp, ident i,
ret @respan(sp, ast.item_fn(i, f, ty_params, id, a));
}
+fn identity_fold_native_item_fn[ENV](&ENV e, &span sp, ident i,
+ &ast.fn_decl decl,
+ vec[ast.ty_param] ty_params,
+ def_id id, ann a) -> @native_item {
+ ret @respan(sp, ast.native_item_fn(i, decl, ty_params, id, a));
+}
+
fn identity_fold_item_mod[ENV](&ENV e, &span sp, ident i,
&ast._mod m, def_id id) -> @item {
ret @respan(sp, ast.item_mod(i, m, id));
}
+fn identity_fold_item_native_mod[ENV](&ENV e, &span sp, ident i,
+ &ast.native_mod m, def_id id) -> @item {
+ ret @respan(sp, ast.item_native_mod(i, m, id));
+}
+
fn identity_fold_item_ty[ENV](&ENV e, &span sp, ident i,
@ty t, vec[ast.ty_param] ty_params,
def_id id, ann a) -> @item {
ret @respan(sp, ast.item_ty(i, t, ty_params, id, a));
}
+fn identity_fold_native_item_ty[ENV](&ENV e, &span sp, ident i,
+ def_id id) -> @native_item {
+ ret @respan(sp, ast.native_item_ty(i, id));
+}
+
fn identity_fold_item_tag[ENV](&ENV e, &span sp, ident i,
vec[ast.variant] variants,
vec[ast.ty_param] ty_params,
@@ -1132,28 +1330,38 @@ fn identity_fold_block[ENV](&ENV e, &span sp, &ast.block_ blk) -> block {
ret respan(sp, blk);
}
+fn identity_fold_fn_decl[ENV](&ENV e,
+ ast.effect effect,
+ vec[arg] inputs,
+ @ty output) -> ast.fn_decl {
+ ret rec(effect=effect, inputs=inputs, output=output);
+}
+
fn identity_fold_fn[ENV](&ENV e,
- ast.effect effect,
- bool is_iter,
- vec[arg] inputs,
- @ast.ty output,
+ &fn_decl decl,
+ ast.proto proto,
&block body) -> ast._fn {
- ret rec(effect=effect, is_iter=is_iter, inputs=inputs,
- output=output, body=body);
+ ret rec(decl=decl, proto=proto, body=body);
}
fn identity_fold_mod[ENV](&ENV e, &ast._mod m) -> ast._mod {
ret m;
}
+fn identity_fold_native_mod[ENV](&ENV e,
+ &ast.native_mod m) -> ast.native_mod {
+ ret m;
+}
+
fn identity_fold_crate[ENV](&ENV e, &span sp, &ast._mod m) -> @ast.crate {
ret @respan(sp, rec(module=m));
}
fn identity_fold_obj[ENV](&ENV e,
vec[ast.obj_field] fields,
- vec[@ast.method] methods) -> ast._obj {
- ret rec(fields=fields, methods=methods);
+ vec[@ast.method] methods,
+ option.t[block] dtor) -> ast._obj {
+ ret rec(fields=fields, methods=methods, dtor=dtor);
}
@@ -1167,6 +1375,10 @@ fn identity_update_env_for_item[ENV](&ENV e, @item i) -> ENV {
ret e;
}
+fn identity_update_env_for_native_item[ENV](&ENV e, @native_item i) -> ENV {
+ ret e;
+}
+
fn identity_update_env_for_view_item[ENV](&ENV e, @view_item i) -> ENV {
ret e;
}
@@ -1224,13 +1436,13 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_ty_tup = bind identity_fold_ty_tup[ENV](_,_,_),
fold_ty_rec = bind identity_fold_ty_rec[ENV](_,_,_),
fold_ty_obj = bind identity_fold_ty_obj[ENV](_,_,_),
- fold_ty_fn = bind identity_fold_ty_fn[ENV](_,_,_,_),
+ fold_ty_fn = bind identity_fold_ty_fn[ENV](_,_,_,_,_),
fold_ty_path = bind identity_fold_ty_path[ENV](_,_,_,_),
fold_ty_mutable = bind identity_fold_ty_mutable[ENV](_,_,_),
fold_expr_vec = bind identity_fold_expr_vec[ENV](_,_,_,_),
fold_expr_tup = bind identity_fold_expr_tup[ENV](_,_,_,_),
- fold_expr_rec = bind identity_fold_expr_rec[ENV](_,_,_,_),
+ fold_expr_rec = bind identity_fold_expr_rec[ENV](_,_,_,_,_),
fold_expr_call = bind identity_fold_expr_call[ENV](_,_,_,_,_),
fold_expr_bind = bind identity_fold_expr_bind[ENV](_,_,_,_,_),
fold_expr_binary = bind identity_fold_expr_binary[ENV](_,_,_,_,_,_),
@@ -1239,6 +1451,8 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_expr_cast = bind identity_fold_expr_cast[ENV](_,_,_,_,_),
fold_expr_if = bind identity_fold_expr_if[ENV](_,_,_,_,_,_),
fold_expr_for = bind identity_fold_expr_for[ENV](_,_,_,_,_,_),
+ fold_expr_for_each
+ = bind identity_fold_expr_for_each[ENV](_,_,_,_,_,_),
fold_expr_while = bind identity_fold_expr_while[ENV](_,_,_,_,_),
fold_expr_do_while
= bind identity_fold_expr_do_while[ENV](_,_,_,_,_),
@@ -1250,25 +1464,36 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_expr_field = bind identity_fold_expr_field[ENV](_,_,_,_,_),
fold_expr_index = bind identity_fold_expr_index[ENV](_,_,_,_,_),
fold_expr_path = bind identity_fold_expr_path[ENV](_,_,_,_,_),
+ fold_expr_ext = bind identity_fold_expr_ext[ENV](_,_,_,_,_,_,_),
+ fold_expr_fail = bind identity_fold_expr_fail[ENV](_,_),
+ fold_expr_ret = bind identity_fold_expr_ret[ENV](_,_,_),
+ fold_expr_put = bind identity_fold_expr_put[ENV](_,_,_),
+ fold_expr_be = bind identity_fold_expr_be[ENV](_,_,_),
+ fold_expr_log = bind identity_fold_expr_log[ENV](_,_,_),
+ fold_expr_check_expr
+ = bind identity_fold_expr_check_expr[ENV](_,_,_),
fold_decl_local = bind identity_fold_decl_local[ENV](_,_,_),
fold_decl_item = bind identity_fold_decl_item[ENV](_,_,_),
fold_pat_wild = bind identity_fold_pat_wild[ENV](_,_,_),
+ fold_pat_lit = bind identity_fold_pat_lit[ENV](_,_,_,_),
fold_pat_bind = bind identity_fold_pat_bind[ENV](_,_,_,_,_),
fold_pat_tag = bind identity_fold_pat_tag[ENV](_,_,_,_,_,_),
fold_stmt_decl = bind identity_fold_stmt_decl[ENV](_,_,_),
- fold_stmt_ret = bind identity_fold_stmt_ret[ENV](_,_,_),
- fold_stmt_log = bind identity_fold_stmt_log[ENV](_,_,_),
- fold_stmt_check_expr
- = bind identity_fold_stmt_check_expr[ENV](_,_,_),
fold_stmt_expr = bind identity_fold_stmt_expr[ENV](_,_,_),
fold_item_const= bind identity_fold_item_const[ENV](_,_,_,_,_,_,_),
fold_item_fn = bind identity_fold_item_fn[ENV](_,_,_,_,_,_,_),
+ fold_native_item_fn =
+ bind identity_fold_native_item_fn[ENV](_,_,_,_,_,_,_),
fold_item_mod = bind identity_fold_item_mod[ENV](_,_,_,_,_),
+ fold_item_native_mod =
+ bind identity_fold_item_native_mod[ENV](_,_,_,_,_),
fold_item_ty = bind identity_fold_item_ty[ENV](_,_,_,_,_,_,_),
+ fold_native_item_ty =
+ bind identity_fold_native_item_ty[ENV](_,_,_,_),
fold_item_tag = bind identity_fold_item_tag[ENV](_,_,_,_,_,_),
fold_item_obj = bind identity_fold_item_obj[ENV](_,_,_,_,_,_,_),
@@ -1278,13 +1503,17 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
bind identity_fold_view_item_import[ENV](_,_,_,_,_,_),
fold_block = bind identity_fold_block[ENV](_,_,_),
- fold_fn = bind identity_fold_fn[ENV](_,_,_,_,_,_),
+ fold_fn = bind identity_fold_fn[ENV](_,_,_,_),
+ fold_fn_decl = bind identity_fold_fn_decl[ENV](_,_,_,_),
fold_mod = bind identity_fold_mod[ENV](_,_),
+ fold_native_mod = bind identity_fold_native_mod[ENV](_,_),
fold_crate = bind identity_fold_crate[ENV](_,_,_),
- fold_obj = bind identity_fold_obj[ENV](_,_,_),
+ fold_obj = bind identity_fold_obj[ENV](_,_,_,_),
update_env_for_crate = bind identity_update_env_for_crate[ENV](_,_),
update_env_for_item = bind identity_update_env_for_item[ENV](_,_),
+ update_env_for_native_item =
+ bind identity_update_env_for_native_item[ENV](_,_),
update_env_for_view_item =
bind identity_update_env_for_view_item[ENV](_,_),
update_env_for_block = bind identity_update_env_for_block[ENV](_,_),
diff --git a/src/comp/middle/resolve.rs b/src/comp/middle/resolve.rs
index 1af3b205..5b6db631 100644
--- a/src/comp/middle/resolve.rs
+++ b/src/comp/middle/resolve.rs
@@ -18,6 +18,7 @@ import std._vec;
tag scope {
scope_crate(@ast.crate);
scope_item(@ast.item);
+ scope_native_item(@ast.native_item);
scope_loop(@ast.decl); // there's only 1 decl per loop.
scope_block(ast.block);
scope_arm(ast.arm);
@@ -34,6 +35,7 @@ tag def_wrap {
def_wrap_use(@ast.view_item);
def_wrap_import(@ast.view_item);
def_wrap_mod(@ast.item);
+ def_wrap_native_mod(@ast.item);
def_wrap_other(def);
def_wrap_expr_field(uint, def);
def_wrap_resolving;
@@ -103,6 +105,29 @@ fn find_final_def(&env e, import_map index,
// should return what a.b.c.d points to in the end.
fn found_something(&env e, import_map index,
&span sp, vec[ident] idents, def_wrap d) -> def_wrap {
+
+ fn found_mod(&env e, &import_map index, &span sp,
+ vec[ident] idents, @ast.item i) -> def_wrap {
+ auto len = _vec.len[ident](idents);
+ auto rest_idents = _vec.slice[ident](idents, 1u, len);
+ auto empty_e = rec(scopes = nil[scope],
+ sess = e.sess);
+ auto tmp_e = update_env_for_item(empty_e, i);
+ auto next_i = rest_idents.(0);
+ auto next_ = lookup_name_wrapped(tmp_e, next_i);
+ alt (next_) {
+ case (none[tup(@env, def_wrap)]) {
+ e.sess.span_err(sp, "unresolved name: " + next_i);
+ fail;
+ }
+ case (some[tup(@env, def_wrap)](?next)) {
+ auto combined_e = update_env_for_item(e, i);
+ ret found_something(combined_e, index, sp,
+ rest_idents, next._1);
+ }
+ }
+ }
+
alt (d) {
case (def_wrap_import(?imp)) {
alt (imp.node) {
@@ -122,23 +147,10 @@ fn find_final_def(&env e, import_map index,
}
alt (d) {
case (def_wrap_mod(?i)) {
- auto rest_idents = _vec.slice[ident](idents, 1u, len);
- auto empty_e = rec(scopes = nil[scope],
- sess = e.sess);
- auto tmp_e = update_env_for_item(empty_e, i);
- auto next_i = rest_idents.(0);
- auto next_ = lookup_name_wrapped(tmp_e, next_i);
- alt (next_) {
- case (none[tup(@env, def_wrap)]) {
- e.sess.span_err(sp, "unresolved name: " + next_i);
- fail;
- }
- case (some[tup(@env, def_wrap)](?next)) {
- auto combined_e = update_env_for_item(e, i);
- ret found_something(combined_e, index, sp,
- rest_idents, next._1);
- }
- }
+ ret found_mod(e, index, sp, idents, i);
+ }
+ case (def_wrap_native_mod(?i)) {
+ ret found_mod(e, index, sp, idents, i);
}
case (def_wrap_use(?c)) {
e.sess.span_err(sp, "Crate access is not implemented");
@@ -201,6 +213,9 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (ast.item_mod(_, _, ?id)) {
ret def_wrap_mod(i);
}
+ case (ast.item_native_mod(_, _, ?id)) {
+ ret def_wrap_native_mod(i);
+ }
case (ast.item_ty(_, _, _, ?id, _)) {
ret def_wrap_other(ast.def_ty(id));
}
@@ -213,6 +228,17 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
+ fn found_def_native_item(@ast.native_item i) -> def_wrap {
+ alt (i.node) {
+ case (ast.native_item_ty(_, ?id)) {
+ ret def_wrap_other(ast.def_native_ty(id));
+ }
+ case (ast.native_item_fn(_, _, _, ?id, _)) {
+ ret def_wrap_other(ast.def_native_fn(id));
+ }
+ }
+ }
+
fn found_decl_stmt(@ast.stmt s) -> def_wrap {
alt (s.node) {
case (ast.stmt_decl(?d)) {
@@ -267,11 +293,47 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
}
- case (none[ast.mod_index_entry]) { /* fall through */ }
+ case (none[ast.mod_index_entry]) {
+ ret none[def_wrap];
+ }
+ }
+ }
+
+ fn check_native_mod(ast.ident i, ast.native_mod m) -> option.t[def_wrap] {
+
+ alt (m.index.find(i)) {
+ case (some[ast.native_mod_index_entry](?ent)) {
+ alt (ent) {
+ case (ast.nmie_view_item(?view_item)) {
+ ret some(found_def_view(view_item));
+ }
+ case (ast.nmie_item(?item)) {
+ ret some(found_def_native_item(item));
+ }
+ }
+ }
+ case (none[ast.native_mod_index_entry]) {
+ ret none[def_wrap];
+ }
}
- ret none[def_wrap];
}
+ fn handle_fn_decl(ast.ident i, &ast.fn_decl decl,
+ &vec[ast.ty_param] ty_params) -> option.t[def_wrap] {
+ for (ast.arg a in decl.inputs) {
+ if (_str.eq(a.ident, i)) {
+ auto t = ast.def_arg(a.id);
+ ret some(def_wrap_other(t));
+ }
+ }
+ for (ast.ty_param tp in ty_params) {
+ if (_str.eq(tp.ident, i)) {
+ auto t = ast.def_ty_arg(tp.id);
+ ret some(def_wrap_other(t));
+ }
+ }
+ ret none[def_wrap];
+ }
fn in_scope(ast.ident i, &scope s) -> option.t[def_wrap] {
alt (s) {
@@ -283,9 +345,12 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (scope_item(?it)) {
alt (it.node) {
case (ast.item_fn(_, ?f, ?ty_params, _, _)) {
- for (ast.arg a in f.inputs) {
- if (_str.eq(a.ident, i)) {
- auto t = ast.def_arg(a.id);
+ ret handle_fn_decl(i, f.decl, ty_params);
+ }
+ case (ast.item_obj(_, ?ob, ?ty_params, _, _)) {
+ for (ast.obj_field f in ob.fields) {
+ if (_str.eq(f.ident, i)) {
+ auto t = ast.def_obj_field(f.id);
ret some(def_wrap_other(t));
}
}
@@ -296,13 +361,7 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
}
- case (ast.item_obj(_, ?ob, ?ty_params, _, _)) {
- for (ast.obj_field f in ob.fields) {
- if (_str.eq(f.ident, i)) {
- auto t = ast.def_obj_field(f.id);
- ret some(def_wrap_other(t));
- }
- }
+ case (ast.item_tag(_, _, ?ty_params, _)) {
for (ast.ty_param tp in ty_params) {
if (_str.eq(tp.ident, i)) {
auto t = ast.def_ty_arg(tp.id);
@@ -313,6 +372,9 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (ast.item_mod(_, ?m, _)) {
ret check_mod(i, m);
}
+ case (ast.item_native_mod(_, ?m, _)) {
+ ret check_native_mod(i, m);
+ }
case (ast.item_ty(_, _, ?ty_params, _, _)) {
for (ast.ty_param tp in ty_params) {
if (_str.eq(tp.ident, i)) {
@@ -325,6 +387,14 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
+ case (scope_native_item(?it)) {
+ alt (it.node) {
+ case (ast.native_item_fn(_, ?decl, ?ty_params, _, _)) {
+ ret handle_fn_decl(i, decl, ty_params);
+ }
+ }
+ }
+
case (scope_loop(?d)) {
alt (d.node) {
case (ast.decl_local(?local)) {
@@ -432,8 +502,7 @@ fn fold_expr_path(&env e, &span sp, &ast.path p, &option.t[def] d,
path_len = n_idents - remaining + 1u;
}
case (def_wrap_other(_)) {
- check (n_idents == 1u);
- path_len = 1u;
+ path_len = n_idents;
}
case (def_wrap_mod(?m)) {
e.sess.span_err(sp,
@@ -491,6 +560,10 @@ fn update_env_for_item(&env e, @ast.item i) -> env {
ret rec(scopes = cons[scope](scope_item(i), @e.scopes) with e);
}
+fn update_env_for_native_item(&env e, @ast.native_item i) -> env {
+ ret rec(scopes = cons[scope](scope_native_item(i), @e.scopes) with e);
+}
+
fn update_env_for_block(&env e, &ast.block b) -> env {
ret rec(scopes = cons[scope](scope_block(b), @e.scopes) with e);
}
@@ -500,6 +573,9 @@ fn update_env_for_expr(&env e, @ast.expr x) -> env {
case (ast.expr_for(?d, _, _, _)) {
ret rec(scopes = cons[scope](scope_loop(d), @e.scopes) with e);
}
+ case (ast.expr_for_each(?d, _, _, _)) {
+ ret rec(scopes = cons[scope](scope_loop(d), @e.scopes) with e);
+ }
case (_) { }
}
ret e;
@@ -517,6 +593,8 @@ fn resolve_imports(session.session sess, @ast.crate crate) -> @ast.crate {
= bind fold_view_item_import(_,_,import_index,_,_,_,_),
update_env_for_crate = bind update_env_for_crate(_,_),
update_env_for_item = bind update_env_for_item(_,_),
+ update_env_for_native_item =
+ bind update_env_for_native_item(_,_),
update_env_for_block = bind update_env_for_block(_,_),
update_env_for_arm = bind update_env_for_arm(_,_),
update_env_for_expr = bind update_env_for_expr(_,_)
@@ -539,6 +617,8 @@ fn resolve_crate(session.session sess, @ast.crate crate) -> @ast.crate {
fold_ty_path = bind fold_ty_path(_,_,_,_),
update_env_for_crate = bind update_env_for_crate(_,_),
update_env_for_item = bind update_env_for_item(_,_),
+ update_env_for_native_item =
+ bind update_env_for_native_item(_,_),
update_env_for_block = bind update_env_for_block(_,_),
update_env_for_arm = bind update_env_for_arm(_,_),
update_env_for_expr = bind update_env_for_expr(_,_)
diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs
index c2b0ae48..728f20dd 100644
--- a/src/comp/middle/trans.rs
+++ b/src/comp/middle/trans.rs
@@ -1,3 +1,4 @@
+import std._int;
import std._str;
import std._uint;
import std._vec;
@@ -16,6 +17,7 @@ import back.x86;
import back.abi;
import middle.ty.pat_ty;
+import middle.ty.plain_ty;
import util.common;
import util.common.append;
@@ -27,9 +29,11 @@ import lib.llvm.llvm;
import lib.llvm.builder;
import lib.llvm.target_data;
import lib.llvm.type_handle;
+import lib.llvm.type_names;
import lib.llvm.mk_pass_manager;
import lib.llvm.mk_target_data;
import lib.llvm.mk_type_handle;
+import lib.llvm.mk_type_names;
import lib.llvm.llvm.ModuleRef;
import lib.llvm.llvm.ValueRef;
import lib.llvm.llvm.TypeRef;
@@ -53,27 +57,33 @@ type glue_fns = rec(ValueRef activate_glue,
vec[ValueRef] upcall_glues,
ValueRef no_op_type_glue,
ValueRef memcpy_glue,
- ValueRef bzero_glue);
+ ValueRef bzero_glue,
+ ValueRef vec_append_glue);
-tag arity { nullary; n_ary; }
-type tag_info = rec(type_handle th,
- mutable vec[tup(ast.def_id,arity)] variants,
- mutable uint size);
+type tydesc_info = rec(ValueRef tydesc,
+ ValueRef take_glue,
+ ValueRef drop_glue);
state type crate_ctxt = rec(session.session sess,
ModuleRef llmod,
target_data td,
+ type_names tn,
ValueRef crate_ptr,
hashmap[str, ValueRef] upcalls,
hashmap[str, ValueRef] intrinsics,
hashmap[str, ValueRef] item_names,
hashmap[ast.def_id, ValueRef] item_ids,
hashmap[ast.def_id, @ast.item] items,
- hashmap[ast.def_id, @tag_info] tags,
+ hashmap[ast.def_id,
+ @ast.native_item] native_items,
+ // TODO: hashmap[tup(tag_id,subtys), @tag_info]
+ hashmap[@ty.t, uint] tag_sizes,
+ hashmap[ast.def_id, ValueRef] discrims,
hashmap[ast.def_id, ValueRef] fn_pairs,
hashmap[ast.def_id, ValueRef] consts,
hashmap[ast.def_id,()] obj_methods,
- hashmap[@ty.t, ValueRef] tydescs,
+ hashmap[@ty.t, @tydesc_info] tydescs,
+ vec[ast.ty_param] obj_typarams,
vec[ast.obj_field] obj_fields,
@glue_fns glues,
namegen names,
@@ -81,9 +91,10 @@ state type crate_ctxt = rec(session.session sess,
state type fn_ctxt = rec(ValueRef llfn,
ValueRef lltaskptr,
- ValueRef llclosure,
+ ValueRef llenv,
+ ValueRef llretptr,
mutable option.t[ValueRef] llself,
- mutable option.t[ValueRef] llretptr,
+ mutable option.t[ValueRef] lliterbody,
hashmap[ast.def_id, ValueRef] llargs,
hashmap[ast.def_id, ValueRef] llobjfields,
hashmap[ast.def_id, ValueRef] lllocals,
@@ -119,21 +130,25 @@ tag block_parent {
state type result = rec(mutable @block_ctxt bcx,
mutable ValueRef val);
+fn sep() -> str {
+ ret "_";
+}
+
fn res(@block_ctxt bcx, ValueRef val) -> result {
ret rec(mutable bcx = bcx,
mutable val = val);
}
-fn ty_str(TypeRef t) -> str {
- ret lib.llvm.type_to_str(t);
+fn ty_str(type_names tn, TypeRef t) -> str {
+ ret lib.llvm.type_to_str(tn, t);
}
fn val_ty(ValueRef v) -> TypeRef {
ret llvm.LLVMTypeOf(v);
}
-fn val_str(ValueRef v) -> str {
- ret ty_str(val_ty(v));
+fn val_str(type_names tn, ValueRef v) -> str {
+ ret ty_str(tn, val_ty(v));
}
@@ -206,9 +221,9 @@ fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
False);
}
-fn T_fn_pair(TypeRef tfn) -> TypeRef {
+fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
ret T_struct(vec(T_ptr(tfn),
- T_opaque_closure_ptr()));
+ T_opaque_closure_ptr(tn)));
}
fn T_ptr(TypeRef t) -> TypeRef {
@@ -225,25 +240,56 @@ fn T_opaque() -> TypeRef {
ret llvm.LLVMOpaqueType();
}
-fn T_task() -> TypeRef {
- ret T_struct(vec(T_int(), // Refcount
- T_int(), // Delegate pointer
- T_int(), // Stack segment pointer
- T_int(), // Runtime SP
- T_int(), // Rust SP
- T_int(), // GC chain
- T_int(), // Domain pointer
- T_int() // Crate cache pointer
- ));
+fn T_task(type_names tn) -> TypeRef {
+ auto s = "task";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_struct(vec(T_int(), // Refcount
+ T_int(), // Delegate pointer
+ T_int(), // Stack segment pointer
+ T_int(), // Runtime SP
+ T_int(), // Rust SP
+ T_int(), // GC chain
+ T_int(), // Domain pointer
+ T_int() // Crate cache pointer
+ ));
+ tn.associate(s, t);
+ ret t;
}
-fn T_tydesc() -> TypeRef {
+fn T_glue_fn(type_names tn) -> TypeRef {
+ auto s = "glue_fn";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ // Bit of a kludge: pick the fn typeref out of the tydesc..
+ let vec[TypeRef] tydesc_elts = _vec.init_elt[TypeRef](T_nil(), 10u);
+ llvm.LLVMGetStructElementTypes(T_tydesc(tn),
+ _vec.buf[TypeRef](tydesc_elts));
+ auto t =
+ llvm.LLVMGetElementType
+ (tydesc_elts.(abi.tydesc_field_drop_glue_off));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_tydesc(type_names tn) -> TypeRef {
+
+ auto s = "tydesc";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
auto th = mk_type_handle();
auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
auto tydescpp = T_ptr(T_ptr(abs_tydesc));
auto pvoid = T_ptr(T_i8());
- auto glue_fn_ty = T_ptr(T_fn(vec(T_taskptr(),
+ auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
+ T_taskptr(tn),
+ T_ptr(T_nil()),
tydescpp,
pvoid), T_void()));
auto tydesc = T_struct(vec(tydescpp, // first_param
@@ -258,7 +304,9 @@ fn T_tydesc() -> TypeRef {
glue_fn_ty)); // is_stateful
llvm.LLVMRefineType(abs_tydesc, tydesc);
- ret llvm.LLVMResolveTypeHandle(th.llth);
+ auto t = llvm.LLVMResolveTypeHandle(th.llth);
+ tn.associate(s, t);
+ ret t;
}
fn T_array(TypeRef t, uint n) -> TypeRef {
@@ -273,6 +321,10 @@ fn T_vec(TypeRef t) -> TypeRef {
));
}
+fn T_opaque_vec_ptr() -> TypeRef {
+ ret T_ptr(T_vec(T_int()));
+}
+
fn T_str() -> TypeRef {
ret T_vec(T_i8());
}
@@ -281,165 +333,307 @@ fn T_box(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int(), t));
}
-fn T_crate() -> TypeRef {
- ret T_struct(vec(T_int(), // ptrdiff_t image_base_off
- T_int(), // uintptr_t self_addr
- T_int(), // ptrdiff_t debug_abbrev_off
- T_int(), // size_t debug_abbrev_sz
- T_int(), // ptrdiff_t debug_info_off
- T_int(), // size_t debug_info_sz
- T_int(), // size_t activate_glue_off
- T_int(), // size_t yield_glue_off
- T_int(), // size_t unwind_glue_off
- T_int(), // size_t gc_glue_off
- T_int(), // size_t main_exit_task_glue_off
- T_int(), // int n_rust_syms
- T_int(), // int n_c_syms
- T_int() // int n_libs
- ));
+fn T_crate(type_names tn) -> TypeRef {
+ auto s = "crate";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_struct(vec(T_int(), // ptrdiff_t image_base_off
+ T_int(), // uintptr_t self_addr
+ T_int(), // ptrdiff_t debug_abbrev_off
+ T_int(), // size_t debug_abbrev_sz
+ T_int(), // ptrdiff_t debug_info_off
+ T_int(), // size_t debug_info_sz
+ T_int(), // size_t activate_glue_off
+ T_int(), // size_t yield_glue_off
+ T_int(), // size_t unwind_glue_off
+ T_int(), // size_t gc_glue_off
+ T_int(), // size_t main_exit_task_glue_off
+ T_int(), // int n_rust_syms
+ T_int(), // int n_c_syms
+ T_int(), // int n_libs
+ T_int() // uintptr_t abi_tag
+ ));
+ tn.associate(s, t);
+ ret t;
}
fn T_double() -> TypeRef {
ret llvm.LLVMDoubleType();
}
-fn T_taskptr() -> TypeRef {
- ret T_ptr(T_task());
+fn T_taskptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_task(tn));
}
-fn T_typaram_ptr() -> TypeRef {
- ret T_ptr(T_i8());
+// This type must never be used directly; it must always be cast away.
+fn T_typaram(type_names tn) -> TypeRef {
+ auto s = "typaram";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_i8();
+ tn.associate(s, t);
+ ret t;
}
-fn T_closure_ptr(TypeRef lltarget_ty,
- TypeRef llbindings_ty) -> TypeRef {
- ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
+fn T_typaram_ptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_typaram(tn));
+}
+
+fn T_closure_ptr(type_names tn,
+ TypeRef lltarget_ty,
+ TypeRef llbindings_ty,
+ uint n_ty_params) -> TypeRef {
+ ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
lltarget_ty,
- llbindings_ty)
- // FIXME: add captured typarams.
+ llbindings_ty,
+ T_captured_tydescs(tn, n_ty_params))
)));
}
-fn T_opaque_closure_ptr() -> TypeRef {
- ret T_closure_ptr(T_struct(vec(T_ptr(T_nil()),
- T_ptr(T_nil()))),
- T_nil());
+fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
+ auto s = "*closure";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
+ T_ptr(T_nil()))),
+ T_nil(),
+ 0u);
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_tag(type_names tn, uint size) -> TypeRef {
+ auto s = "tag_" + _uint.to_str(size, 10u);
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_struct(vec(T_int(), T_array(T_i8(), size)));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_opaque_tag(type_names tn) -> TypeRef {
+ auto s = "tag";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_struct(vec(T_int(), T_i8()));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_opaque_tag(tn));
+}
+
+fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
+ ret T_struct(_vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
+}
+
+fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
+ // This function is not publicly exposed because it returns an incomplete
+ // type. The dynamically-sized fields follow the captured tydescs.
+ fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
+ ret T_struct(vec(T_ptr(T_tydesc(tn)),
+ T_captured_tydescs(tn, n_captured_tydescs)));
+ }
+
+ ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
+}
+
+fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
+ ret T_obj_ptr(tn, 0u);
}
+// This function now fails if called on a type with dynamic size (as its
+// return value was always meaningless in that case anyhow). Beware!
+//
+// TODO: Enforce via a predicate.
fn type_of(@crate_ctxt cx, @ty.t t) -> TypeRef {
- let TypeRef llty = type_of_inner(cx, t);
- check (llty as int != 0);
- llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
- ret llty;
+ if (ty.type_has_dynamic_size(t)) {
+ log "type_of() called on a type with dynamic size: " +
+ ty.ty_to_str(t);
+ fail;
+ }
+
+ ret type_of_inner(cx, t, false);
+}
+
+fn type_of_explicit_args(@crate_ctxt cx,
+ vec[ty.arg] inputs) -> vec[TypeRef] {
+ let vec[TypeRef] atys = vec();
+ for (ty.arg arg in inputs) {
+ if (ty.type_has_dynamic_size(arg.ty)) {
+ check (arg.mode == ast.alias);
+ atys += T_typaram_ptr(cx.tn);
+ } else {
+ let TypeRef t;
+ alt (arg.mode) {
+ case (ast.alias) {
+ t = T_ptr(type_of_inner(cx, arg.ty, true));
+ }
+ case (_) {
+ t = type_of_inner(cx, arg.ty, false);
+ }
+ }
+ atys += t;
+ }
+ }
+ ret atys;
}
-// NB: this must match trans_args and create_llargs_for_fn_args.
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn type_of_fn_full(@crate_ctxt cx,
+ ast.proto proto,
option.t[TypeRef] obj_self,
vec[ty.arg] inputs,
@ty.t output) -> TypeRef {
- let vec[TypeRef] atys = vec(T_taskptr());
-
- auto fn_ty = ty.plain_ty(ty.ty_fn(inputs, output));
- auto ty_param_count = ty.count_ty_params(fn_ty);
- auto i = 0u;
- while (i < ty_param_count) {
- atys += T_ptr(T_tydesc());
- i += 1u;
- }
+ let vec[TypeRef] atys = vec();
+ // Arg 0: Output pointer.
if (ty.type_has_dynamic_size(output)) {
- atys += T_typaram_ptr();
+ atys += T_typaram_ptr(cx.tn);
+ } else {
+ atys += T_ptr(type_of_inner(cx, output, false));
}
+ // Arg 1: Task pointer.
+ atys += T_taskptr(cx.tn);
+
+ // Arg 2: Env (closure-bindings / self-obj)
alt (obj_self) {
case (some[TypeRef](?t)) {
check (t as int != 0);
atys += t;
}
case (_) {
- atys += T_opaque_closure_ptr();
+ atys += T_opaque_closure_ptr(cx.tn);
}
}
- for (ty.arg arg in inputs) {
- if (ty.type_has_dynamic_size(arg.ty)) {
- check (arg.mode == ast.alias);
- atys += T_typaram_ptr();
- } else {
- let TypeRef t = type_of(cx, arg.ty);
- alt (arg.mode) {
- case (ast.alias) {
- t = T_ptr(t);
- }
- case (_) { /* fall through */ }
- }
- atys += t;
+ // Args >=3: ty params, if not acquired via capture...
+ if (obj_self == none[TypeRef]) {
+ auto ty_param_count =
+ ty.count_ty_params(plain_ty(ty.ty_fn(proto,
+ inputs,
+ output)));
+ auto i = 0u;
+ while (i < ty_param_count) {
+ atys += T_ptr(T_tydesc(cx.tn));
+ i += 1u;
}
}
- auto ret_ty;
- if (ty.type_is_nil(output) || ty.type_has_dynamic_size(output)) {
- ret_ty = llvm.LLVMVoidType();
- } else {
- ret_ty = type_of(cx, output);
+ if (proto == ast.proto_iter) {
+ // If it's an iter, the 'output' type of the iter is actually the
+ // *input* type of the function we're given as our iter-block
+ // argument.
+ atys += T_fn_pair(cx.tn,
+ type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
+ vec(rec(mode=ast.val, ty=output)),
+ plain_ty(ty.ty_nil)));
}
- ret T_fn(atys, ret_ty);
+ // ... then explicit args.
+ atys += type_of_explicit_args(cx, inputs);
+
+ ret T_fn(atys, llvm.LLVMVoidType());
}
-fn type_of_fn(@crate_ctxt cx, vec[ty.arg] inputs, @ty.t output) -> TypeRef {
- ret type_of_fn_full(cx, none[TypeRef], inputs, output);
+fn type_of_fn(@crate_ctxt cx,
+ ast.proto proto,
+ vec[ty.arg] inputs, @ty.t output) -> TypeRef {
+ ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output);
+}
+
+fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
+ vec[ty.arg] inputs,
+ @ty.t output) -> TypeRef {
+ let vec[TypeRef] atys = vec();
+ if (abi == ast.native_abi_rust) {
+ atys += T_taskptr(cx.tn);
+ auto t = ty.ty_native_fn(abi, inputs, output);
+ auto ty_param_count = ty.count_ty_params(plain_ty(t));
+ auto i = 0u;
+ while (i < ty_param_count) {
+ atys += T_ptr(T_tydesc(cx.tn));
+ i += 1u;
+ }
+ }
+ atys += type_of_explicit_args(cx, inputs);
+ ret T_fn(atys, type_of_inner(cx, output, false));
}
-fn type_of_inner(@crate_ctxt cx, @ty.t t) -> TypeRef {
+fn type_of_inner(@crate_ctxt cx, @ty.t t, bool boxed) -> TypeRef {
+ let TypeRef llty = 0 as TypeRef;
+
alt (t.struct) {
- case (ty.ty_nil) { ret T_nil(); }
- case (ty.ty_bool) { ret T_bool(); }
- case (ty.ty_int) { ret T_int(); }
- case (ty.ty_uint) { ret T_int(); }
+ case (ty.ty_native) { llty = T_ptr(T_i8()); }
+ case (ty.ty_nil) { llty = T_nil(); }
+ case (ty.ty_bool) { llty = T_bool(); }
+ case (ty.ty_int) { llty = T_int(); }
+ case (ty.ty_uint) { llty = T_int(); }
case (ty.ty_machine(?tm)) {
alt (tm) {
- case (common.ty_i8) { ret T_i8(); }
- case (common.ty_u8) { ret T_i8(); }
- case (common.ty_i16) { ret T_i16(); }
- case (common.ty_u16) { ret T_i16(); }
- case (common.ty_i32) { ret T_i32(); }
- case (common.ty_u32) { ret T_i32(); }
- case (common.ty_i64) { ret T_i64(); }
- case (common.ty_u64) { ret T_i64(); }
- case (common.ty_f32) { ret T_f32(); }
- case (common.ty_f64) { ret T_f64(); }
+ case (common.ty_i8) { llty = T_i8(); }
+ case (common.ty_u8) { llty = T_i8(); }
+ case (common.ty_i16) { llty = T_i16(); }
+ case (common.ty_u16) { llty = T_i16(); }
+ case (common.ty_i32) { llty = T_i32(); }
+ case (common.ty_u32) { llty = T_i32(); }
+ case (common.ty_i64) { llty = T_i64(); }
+ case (common.ty_u64) { llty = T_i64(); }
+ case (common.ty_f32) { llty = T_f32(); }
+ case (common.ty_f64) { llty = T_f64(); }
}
}
- case (ty.ty_char) { ret T_char(); }
- case (ty.ty_str) { ret T_ptr(T_str()); }
- case (ty.ty_tag(?tag_id)) {
- ret llvm.LLVMResolveTypeHandle(cx.tags.get(tag_id).th.llth);
+ case (ty.ty_char) { llty = T_char(); }
+ case (ty.ty_str) { llty = T_ptr(T_str()); }
+ case (ty.ty_tag(_, _)) {
+ if (boxed) {
+ llty = T_opaque_tag(cx.tn);
+ } else {
+ auto size = static_size_of_tag(cx, t);
+ llty = T_tag(cx.tn, size);
+ }
}
case (ty.ty_box(?t)) {
- ret T_ptr(T_box(type_of(cx, t)));
+ llty = T_ptr(T_box(type_of_inner(cx, t, true)));
}
case (ty.ty_vec(?t)) {
- ret T_ptr(T_vec(type_of(cx, t)));
+ llty = T_ptr(T_vec(type_of_inner(cx, t, true)));
}
case (ty.ty_tup(?elts)) {
let vec[TypeRef] tys = vec();
for (@ty.t elt in elts) {
- tys += type_of(cx, elt);
+ tys += type_of_inner(cx, elt, boxed);
}
- ret T_struct(tys);
+ llty = T_struct(tys);
}
case (ty.ty_rec(?fields)) {
let vec[TypeRef] tys = vec();
for (ty.field f in fields) {
- tys += type_of(cx, f.ty);
+ tys += type_of_inner(cx, f.ty, boxed);
}
- ret T_struct(tys);
+ llty = T_struct(tys);
}
- case (ty.ty_fn(?args, ?out)) {
- ret T_fn_pair(type_of_fn(cx, args, out));
+ case (ty.ty_fn(?proto, ?args, ?out)) {
+ llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out));
+ }
+ case (ty.ty_native_fn(?abi, ?args, ?out)) {
+ llty = T_fn_pair(cx.tn, type_of_native_fn(cx, abi, args, out));
}
case (ty.ty_obj(?meths)) {
auto th = mk_type_handle();
@@ -448,39 +642,54 @@ fn type_of_inner(@crate_ctxt cx, @ty.t t) -> TypeRef {
let vec[TypeRef] mtys = vec();
for (ty.method m in meths) {
let TypeRef mty =
- type_of_fn_full(cx,
+ type_of_fn_full(cx, m.proto,
some[TypeRef](self_ty),
m.inputs, m.output);
mtys += T_ptr(mty);
}
let TypeRef vtbl = T_struct(mtys);
- let TypeRef body = T_struct(vec(T_ptr(T_tydesc()),
- T_nil()));
- let TypeRef pair =
- T_struct(vec(T_ptr(vtbl),
- T_ptr(T_box(body))));
+ let TypeRef pair = T_struct(vec(T_ptr(vtbl),
+ T_opaque_obj_ptr(cx.tn)));
+
auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
llvm.LLVMRefineType(abs_pair, pair);
abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
- ret abs_pair;
+ llty = abs_pair;
}
case (ty.ty_var(_)) {
log "ty_var in trans.type_of";
fail;
}
case (ty.ty_param(_)) {
- ret T_typaram_ptr();
+ llty = T_i8();
}
+ case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
}
- fail;
+
+ check (llty as int != 0);
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
+ ret llty;
}
fn type_of_arg(@crate_ctxt cx, &ty.arg arg) -> TypeRef {
- auto ty = type_of(cx, arg.ty);
+ alt (arg.ty.struct) {
+ case (ty.ty_param(_)) {
+ if (arg.mode == ast.alias) {
+ ret T_typaram_ptr(cx.tn);
+ }
+ }
+ case (_) {
+ // fall through
+ }
+ }
+
+ auto typ;
if (arg.mode == ast.alias) {
- ty = T_ptr(ty);
+ typ = T_ptr(type_of_inner(cx, arg.ty, true));
+ } else {
+ typ = type_of_inner(cx, arg.ty, false);
}
- ret ty;
+ ret typ;
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
@@ -606,11 +815,11 @@ fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
}
-fn decl_glue(ModuleRef llmod, str s) -> ValueRef {
- ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr()), T_void()));
+fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
+ ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
}
-fn decl_upcall(ModuleRef llmod, uint _n) -> ValueRef {
+fn decl_upcall_glue(ModuleRef llmod, type_names tn, uint _n) -> ValueRef {
// It doesn't actually matter what type we come up with here, at the
// moment, as we cast the upcall function pointers to int before passing
// them to the indirect upcall-invocation glue. But eventually we'd like
@@ -618,7 +827,7 @@ fn decl_upcall(ModuleRef llmod, uint _n) -> ValueRef {
let int n = _n as int;
let str s = abi.upcall_glue_name(n);
let vec[TypeRef] args =
- vec(T_taskptr(), // taskptr
+ vec(T_taskptr(tn), // taskptr
T_int()) // callee
+ _vec.init_elt[TypeRef](T_int(), n as uint);
@@ -629,7 +838,7 @@ fn get_upcall(@crate_ctxt cx, str name, int n_args) -> ValueRef {
if (cx.upcalls.contains_key(name)) {
ret cx.upcalls.get(name);
}
- auto inputs = vec(T_taskptr());
+ auto inputs = vec(T_taskptr(cx.tn));
inputs += _vec.init_elt[TypeRef](T_int(), n_args as uint);
auto output = T_int();
auto f = decl_cdecl_fn(cx.llmod, name, T_fn(inputs, output));
@@ -644,14 +853,16 @@ fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args) -> result {
let ValueRef llglue = cx.fcx.ccx.glues.upcall_glues.(n);
let vec[ValueRef] call_args = vec(cx.fcx.lltaskptr, llupcall);
+
for (ValueRef a in args) {
call_args += cx.build.ZExtOrBitCast(a, T_int());
}
+
ret res(cx, cx.build.FastCall(llglue, call_args));
}
fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
- ret trans_upcall(cx, "upcall_free", vec(cx.build.PtrToInt(v, T_int()),
+ ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v),
C_int(0)));
}
@@ -680,6 +891,11 @@ fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
ret cx.build.And(bumped, cx.build.Not(mask));
}
+// Returns the real size of the given type for the current target.
+fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
+ ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
+}
+
fn llsize_of(TypeRef t) -> ValueRef {
ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
}
@@ -702,51 +918,111 @@ fn align_of(@block_ctxt cx, @ty.t t) -> result {
ret dynamic_align_of(cx, t);
}
+// Computes the size of the data part of a non-dynamically-sized tag.
+fn static_size_of_tag(@crate_ctxt cx, @ty.t t) -> uint {
+ if (ty.type_has_dynamic_size(t)) {
+ log "dynamically sized type passed to static_size_of_tag()";
+ fail;
+ }
+
+ if (cx.tag_sizes.contains_key(t)) {
+ ret cx.tag_sizes.get(t);
+ }
+
+ auto tid;
+ let vec[@ty.t] subtys;
+ alt (t.struct) {
+ case (ty.ty_tag(?tid_, ?subtys_)) {
+ tid = tid_;
+ subtys = subtys_;
+ }
+ case (_) {
+ log "non-tag passed to static_size_of_tag()";
+ fail;
+ }
+ }
+
+ // Compute max(variant sizes).
+ auto max_size = 0u;
+ auto variants = tag_variants(cx, tid);
+ for (ast.variant variant in variants) {
+ let vec[@ty.t] tys = variant_types(cx, variant);
+ auto tup_ty = ty.plain_ty(ty.ty_tup(tys));
+
+ // Here we possibly do a recursive call.
+ auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));
+
+ if (max_size < this_size) {
+ max_size = this_size;
+ }
+ }
+
+ cx.tag_sizes.insert(t, max_size);
+ ret max_size;
+}
+
fn dynamic_size_of(@block_ctxt cx, @ty.t t) -> result {
+ fn align_elements(@block_ctxt cx, vec[@ty.t] elts) -> result {
+ //
+ // C padding rules:
+ //
+ //
+ // - Pad after each element so that next element is aligned.
+ // - Pad after final structure member so that whole structure
+ // is aligned to max alignment of interior.
+ //
+ auto off = C_int(0);
+ auto max_align = C_int(1);
+ auto bcx = cx;
+ for (@ty.t e in elts) {
+ auto elt_align = align_of(bcx, e);
+ bcx = elt_align.bcx;
+ auto elt_size = size_of(bcx, e);
+ bcx = elt_size.bcx;
+ auto aligned_off = align_to(bcx, off, elt_align.val);
+ off = cx.build.Add(aligned_off, elt_size.val);
+ max_align = umax(bcx, max_align, elt_align.val);
+ }
+ off = align_to(bcx, off, max_align);
+ ret res(bcx, off);
+ }
+
alt (t.struct) {
case (ty.ty_param(?p)) {
auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
}
case (ty.ty_tup(?elts)) {
- //
- // C padding rules:
- //
- //
- // - Pad after each element so that next element is aligned.
- // - Pad after final structure member so that whole structure
- // is aligned to max alignment of interior.
- //
- auto off = C_int(0);
- auto max_align = C_int(1);
- auto bcx = cx;
- for (@ty.t e in elts) {
- auto elt_align = align_of(bcx, e);
- bcx = elt_align.bcx;
- auto elt_size = size_of(bcx, e);
- bcx = elt_size.bcx;
- auto aligned_off = align_to(bcx, off, elt_align.val);
- off = cx.build.Add(aligned_off, elt_size.val);
- max_align = umax(bcx, max_align, elt_align.val);
- }
- off = align_to(bcx, off, max_align);
- ret res(bcx, off);
+ ret align_elements(cx, elts);
}
case (ty.ty_rec(?flds)) {
- auto off = C_int(0);
- auto max_align = C_int(1);
- auto bcx = cx;
+ let vec[@ty.t] tys = vec();
for (ty.field f in flds) {
- auto elt_align = align_of(bcx, f.ty);
- bcx = elt_align.bcx;
- auto elt_size = size_of(bcx, f.ty);
- bcx = elt_size.bcx;
- auto aligned_off = align_to(bcx, off, elt_align.val);
- off = cx.build.Add(aligned_off, elt_size.val);
- max_align = umax(bcx, max_align, elt_align.val);
+ tys += vec(f.ty);
}
- off = align_to(bcx, off, max_align);
- ret res(bcx, off);
+ ret align_elements(cx, tys);
+ }
+ case (ty.ty_tag(?tid, ?tps)) {
+ auto bcx = cx;
+
+ // Compute max(variant sizes).
+ let ValueRef max_size = bcx.build.Alloca(T_int());
+ bcx.build.Store(C_int(0), max_size);
+
+ auto variants = tag_variants(bcx.fcx.ccx, tid);
+ for (ast.variant variant in variants) {
+ let vec[@ty.t] tys = variant_types(bcx.fcx.ccx, variant);
+ auto rslt = align_elements(bcx, tys);
+ bcx = rslt.bcx;
+
+ auto this_size = rslt.val;
+ auto old_max_size = bcx.build.Load(max_size);
+ bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
+ }
+
+ auto max_size_val = bcx.build.Load(max_size);
+ auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
+ ret res(bcx, total_size);
}
}
}
@@ -781,10 +1057,10 @@ fn dynamic_align_of(@block_ctxt cx, @ty.t t) -> result {
}
// Replacement for the LLVM 'GEP' instruction when field-indexing into a
-// tuple-like structure (tup, rec, tag) with a static index. This one is
-// driven off ty.struct and knows what to do when it runs into a ty_param
-// stuck in the middle of the thing it's GEP'ing into. Much like size_of and
-// align_of, above.
+// tuple-like structure (tup, rec) with a static index. This one is driven off
+// ty.struct and knows what to do when it runs into a ty_param stuck in the
+// middle of the thing it's GEP'ing into. Much like size_of and align_of,
+// above.
fn GEP_tup_like(@block_ctxt cx, @ty.t t,
ValueRef base, vec[int] ixs) -> result {
@@ -868,38 +1144,86 @@ fn GEP_tup_like(@block_ctxt cx, @ty.t t,
// flattened the incoming structure.
auto s = split_type(t, ixs, 0u);
- auto prefix_ty = ty.plain_ty(ty.ty_tup(s.prefix));
+ auto prefix_ty = plain_ty(ty.ty_tup(s.prefix));
auto bcx = cx;
auto sz = size_of(bcx, prefix_ty);
bcx = sz.bcx;
auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
auto bumped = bcx.build.GEP(raw, vec(sz.val));
- alt (s.target.struct) {
- case (ty.ty_param(_)) { ret res(bcx, bumped); }
- case (_) {
- auto ty = T_ptr(type_of(bcx.fcx.ccx, s.target));
- ret res(bcx, bcx.build.PointerCast(bumped, ty));
+
+ if (ty.type_has_dynamic_size(s.target)) {
+ ret res(bcx, bumped);
+ }
+
+ auto typ = T_ptr(type_of(bcx.fcx.ccx, s.target));
+ ret res(bcx, bcx.build.PointerCast(bumped, typ));
+}
+
+// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
+// This function uses GEP_tup_like() above and automatically performs casts as
+// appropriate. @llblobptr is the data part of a tag value; its actual type is
+// meaningless, as it will be cast away.
+fn GEP_tag(@block_ctxt cx, ValueRef llblobptr, &ast.variant variant, int ix)
+ -> result {
+ // Synthesize a tuple type so that GEP_tup_like() can work its magic.
+ // Separately, store the type of the element we're interested in.
+ auto arg_tys = arg_tys_of_fn(variant.ann);
+ auto elem_ty = ty.plain_ty(ty.ty_nil); // typestate infelicity
+ auto i = 0;
+ let vec[@ty.t] true_arg_tys = vec();
+ for (ty.arg a in arg_tys) {
+ true_arg_tys += vec(a.ty);
+ if (i == ix) {
+ elem_ty = a.ty;
}
+
+ i += 1;
}
+ auto tup_ty = ty.plain_ty(ty.ty_tup(true_arg_tys));
+
+ // Cast the blob pointer to the appropriate type, if we need to (i.e. if
+ // the blob pointer isn't dynamically sized).
+ let ValueRef llunionptr;
+ if (!ty.type_has_dynamic_size(tup_ty)) {
+ auto llty = type_of(cx.fcx.ccx, tup_ty);
+ llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
+ } else {
+ llunionptr = llblobptr;
+ }
+
+ // Do the GEP_tup_like().
+ auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));
+
+ // Cast the result to the appropriate type, if necessary.
+ auto val;
+ if (!ty.type_has_dynamic_size(elem_ty)) {
+ auto llelemty = type_of(rslt.bcx.fcx.ccx, elem_ty);
+ val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
+ } else {
+ val = rslt.val;
+ }
+
+ ret res(rslt.bcx, val);
}
-fn trans_malloc_inner(@block_ctxt cx, TypeRef llptr_ty) -> result {
- auto llbody_ty = lib.llvm.llvm.LLVMGetElementType(llptr_ty);
+fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
+ -> result {
// FIXME: need a table to collect tydesc globals.
auto tydesc = C_int(0);
- auto sz = llsize_of(llbody_ty);
- auto sub = trans_upcall(cx, "upcall_malloc", vec(sz, tydesc));
- sub.val = sub.bcx.build.IntToPtr(sub.val, llptr_ty);
- ret sub;
+ auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc));
+ rslt = res(rslt.bcx, vi2p(cx, rslt.val, llptr_ty));
+ ret rslt;
}
-fn trans_malloc(@block_ctxt cx, @ty.t t) -> result {
- auto scope_cx = find_scope_cx(cx);
- auto llptr_ty = type_of(cx.fcx.ccx, t);
- auto sub = trans_malloc_inner(cx, llptr_ty);
- scope_cx.cleanups += clean(bind drop_ty(_, sub.val, t));
- ret sub;
+fn trans_malloc_boxed(@block_ctxt cx, @ty.t t) -> result {
+ // Synthesize a fake box type structurally so we have something
+ // to measure the size of.
+ auto boxed_body = plain_ty(ty.ty_tup(vec(plain_ty(ty.ty_int), t)));
+ auto box_ptr = plain_ty(ty.ty_box(t));
+ auto sz = size_of(cx, boxed_body);
+ auto llty = type_of(cx.fcx.ccx, box_ptr);
+ ret trans_raw_malloc(sz.bcx, llty, sz.val);
}
@@ -941,6 +1265,7 @@ fn linearize_ty_params(@block_ctxt cx, @ty.t t)
r.defs += pid;
}
}
+ case (_) { }
}
ret t;
}
@@ -960,6 +1285,7 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
// Is the supplied type a type param? If so, return the passed-in tydesc.
alt (ty.type_param(t)) {
case (some[ast.def_id](?id)) {
+ check (cx.fcx.lltydescs.contains_key(id));
ret res(cx, cx.fcx.lltydescs.get(id));
}
case (none[ast.def_id]) { /* fall through */ }
@@ -975,16 +1301,23 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
check (n_params == _vec.len[ValueRef](tys._1));
if (!cx.fcx.ccx.tydescs.contains_key(t)) {
- make_tydesc(cx.fcx.ccx, t, tys._0);
+ declare_tydesc(cx.fcx.ccx, t);
+ define_tydesc(cx.fcx.ccx, t, tys._0);
}
- auto root = cx.fcx.ccx.tydescs.get(t);
+ auto root = cx.fcx.ccx.tydescs.get(t).tydesc;
+
+ auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc(cx.fcx.ccx.tn)),
+ n_params));
- auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc()), n_params));
auto i = 0;
+ auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
+ cx.build.Store(root, tdp);
+ i += 1;
for (ValueRef td in tys._1) {
auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
cx.build.Store(td, tdp);
+ i += 1;
}
auto bcx = cx;
@@ -997,75 +1330,121 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
vec(p2i(bcx.fcx.ccx.crate_ptr),
sz.val,
align.val,
- C_int(n_params as int),
- bcx.build.PtrToInt(tydescs, T_int())));
+ C_int((1u + n_params) as int),
+ vp2i(bcx, tydescs)));
- ret res(v.bcx, v.bcx.build.IntToPtr(v.val, T_ptr(T_tydesc())));
+ ret res(v.bcx, vi2p(v.bcx, v.val,
+ T_ptr(T_tydesc(cx.fcx.ccx.tn))));
}
// Otherwise, generate a tydesc if necessary, and return it.
if (!cx.fcx.ccx.tydescs.contains_key(t)) {
let vec[ast.def_id] defs = vec();
- make_tydesc(cx.fcx.ccx, t, defs);
+ declare_tydesc(cx.fcx.ccx, t);
+ define_tydesc(cx.fcx.ccx, t, defs);
}
- ret res(cx, cx.fcx.ccx.tydescs.get(t));
+ ret res(cx, cx.fcx.ccx.tydescs.get(t).tydesc);
}
-fn make_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
- auto tg = make_take_glue;
- auto take_glue = make_generic_glue(cx, t, "take", tg, typaram_defs);
- auto dg = make_drop_glue;
- auto drop_glue = make_generic_glue(cx, t, "drop", dg, typaram_defs);
+// Generates the declaration for (but doesn't fill in) a type descriptor. This
+// needs to be separate from define_tydesc() below, because sometimes type glue
+// functions need to refer to their own type descriptors.
+fn declare_tydesc(@crate_ctxt cx, @ty.t t) {
+ auto take_glue = declare_generic_glue(cx, t, "take");
+ auto drop_glue = declare_generic_glue(cx, t, "drop");
- auto llty = type_of(cx, t);
- auto pvoid = T_ptr(T_i8());
- auto glue_fn_ty = T_ptr(T_fn(vec(T_taskptr(),
- T_ptr(T_ptr(T_tydesc())),
- pvoid), T_void()));
- auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc()))),
- llsize_of(llty),
- llalign_of(llty),
- take_glue, // take_glue_off
- drop_glue, // drop_glue_off
+ auto llsize;
+ auto llalign;
+ if (!ty.type_has_dynamic_size(t)) {
+ auto llty = type_of(cx, t);
+ llsize = llsize_of(llty);
+ llalign = llalign_of(llty);
+ } else {
+ // These will be overwritten as the derived tydesc is generated, so
+ // we create placeholder values.
+ llsize = C_int(0);
+ llalign = C_int(0);
+ }
+
+ auto glue_fn_ty = T_ptr(T_glue_fn(cx.tn));
+
+ // FIXME: this adjustment has to do with the ridiculous encoding of
+ // glue-pointer-constants in the tydesc records: They are tydesc-relative
+ // displacements. This is purely for compatibility with rustboot and
+ // should go when it is discarded.
+ fn off(ValueRef tydescp,
+ ValueRef gluefn) -> ValueRef {
+ ret i2p(llvm.LLVMConstSub(p2i(gluefn), p2i(tydescp)),
+ val_ty(gluefn));
+ }
+
+ auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
+ auto gvar = llvm.LLVMAddGlobal(cx.llmod, T_tydesc(cx.tn),
+ _str.buf(name));
+ auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(cx.tn)))),
+ llsize,
+ llalign,
+ off(gvar, take_glue), // take_glue_off
+ off(gvar, drop_glue), // drop_glue_off
C_null(glue_fn_ty), // free_glue_off
C_null(glue_fn_ty), // sever_glue_off
C_null(glue_fn_ty), // mark_glue_off
C_null(glue_fn_ty), // obj_drop_glue_off
C_null(glue_fn_ty))); // is_stateful
- auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
- auto gvar = llvm.LLVMAddGlobal(cx.llmod, val_ty(tydesc), _str.buf(name));
llvm.LLVMSetInitializer(gvar, tydesc);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
as llvm.Linkage);
- cx.tydescs.insert(t, gvar);
+
+ auto info = rec(
+ tydesc=gvar,
+ take_glue=take_glue,
+ drop_glue=drop_glue
+ );
+
+ cx.tydescs.insert(t, @info);
}
-fn make_generic_glue(@crate_ctxt cx, @ty.t t, str name,
- val_and_ty_fn helper,
- vec[ast.def_id] typaram_defs) -> ValueRef {
- auto llfnty = T_fn(vec(T_taskptr(),
- T_ptr(T_ptr(T_tydesc())),
- T_ptr(T_i8())), T_void());
+// declare_tydesc() above must have been called first.
+fn define_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
+ auto info = cx.tydescs.get(t);
+ auto gvar = info.tydesc;
+
+ auto tg = make_take_glue;
+ auto take_glue = make_generic_glue(cx, t, info.take_glue, tg,
+ typaram_defs);
+ auto dg = make_drop_glue;
+ auto drop_glue = make_generic_glue(cx, t, info.drop_glue, dg,
+ typaram_defs);
+}
- auto fn_name = cx.names.next("_rust_" + name) + "." + ty.ty_to_str(t);
+fn declare_generic_glue(@crate_ctxt cx, @ty.t t, str name) -> ValueRef {
+ auto llfnty = T_glue_fn(cx.tn);
+
+ auto fn_name = cx.names.next("_rust_" + name) + sep() + ty.ty_to_str(t);
fn_name = sanitize(fn_name);
- auto llfn = decl_fastcall_fn(cx.llmod, fn_name, llfnty);
+ ret decl_fastcall_fn(cx.llmod, fn_name, llfnty);
+}
- auto fcx = new_fn_ctxt(cx, fn_name, llfn);
+fn make_generic_glue(@crate_ctxt cx, @ty.t t, ValueRef llfn,
+ val_and_ty_fn helper,
+ vec[ast.def_id] typaram_defs) -> ValueRef {
+ auto fcx = new_fn_ctxt(cx, llfn);
auto bcx = new_top_block_ctxt(fcx);
auto re;
if (!ty.type_is_scalar(t)) {
auto llty;
- if (ty.type_is_structural(t)) {
+ if (ty.type_has_dynamic_size(t)) {
+ llty = T_ptr(T_i8());
+ } else if (ty.type_is_structural(t)) {
llty = T_ptr(type_of(cx, t));
} else {
llty = type_of(cx, t);
}
- auto lltyparams = llvm.LLVMGetParam(llfn, 1u);
+ auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
auto p = 0;
for (ast.def_id d in typaram_defs) {
auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p)));
@@ -1074,7 +1453,7 @@ fn make_generic_glue(@crate_ctxt cx, @ty.t t, str name,
p += 1;
}
- auto llrawptr = llvm.LLVMGetParam(llfn, 2u);
+ auto llrawptr = llvm.LLVMGetParam(llfn, 4u);
auto llval = bcx.build.BitCast(llrawptr, llty);
re = helper(bcx, llval, t);
@@ -1166,16 +1545,12 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
vec(C_int(0),
C_int(abi.box_rc_field_body)));
- auto fields =
- cx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_fields)));
auto tydescptr =
cx.build.GEP(body,
vec(C_int(0),
C_int(abi.obj_body_elt_tydesc)));
- call_tydesc_glue_full(cx, fields, cx.build.Load(tydescptr),
+ call_tydesc_glue_full(cx, body, cx.build.Load(tydescptr),
abi.tydesc_field_drop_glue_off);
// Then free the body.
@@ -1195,7 +1570,7 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
T_int(), C_int(0));
}
- case (ty.ty_fn(_,_)) {
+ case (ty.ty_fn(_,_,_)) {
fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
// Call through the closure's own fields-drop glue first.
@@ -1203,7 +1578,6 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
cx.build.GEP(v,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
-
auto bindings =
cx.build.GEP(body,
vec(C_int(0),
@@ -1241,6 +1615,7 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
bind drop_ty(_, _, _));
} else if (ty.type_is_scalar(t) ||
+ ty.type_is_native(t) ||
ty.type_is_nil(t)) {
ret res(cx, C_nil());
}
@@ -1294,42 +1669,105 @@ fn decr_refcnt_and_if_zero(@block_ctxt cx,
ret res(next_cx, phi);
}
-fn type_of_variant(@crate_ctxt cx, &ast.variant v) -> TypeRef {
- let vec[TypeRef] lltys = vec();
+// Tag information
+
+fn variant_types(@crate_ctxt cx, &ast.variant v) -> vec[@ty.t] {
+ let vec[@ty.t] tys = vec();
alt (ty.ann_to_type(v.ann).struct) {
- case (ty.ty_fn(?args, _)) {
+ case (ty.ty_fn(_, ?args, _)) {
for (ty.arg arg in args) {
- lltys += vec(type_of(cx, arg.ty));
+ tys += vec(arg.ty);
}
}
+ case (ty.ty_tag(_, _)) { /* nothing */ }
case (_) { fail; }
}
+ ret tys;
+}
+
+fn type_of_variant(@crate_ctxt cx, &ast.variant v) -> TypeRef {
+ let vec[TypeRef] lltys = vec();
+ auto tys = variant_types(cx, v);
+ for (@ty.t typ in tys) {
+ lltys += vec(type_of(cx, typ));
+ }
ret T_struct(lltys);
}
+// Returns the type parameters of a tag.
+fn tag_ty_params(@crate_ctxt cx, ast.def_id id) -> vec[ast.ty_param] {
+ check (cx.items.contains_key(id));
+ alt (cx.items.get(id).node) {
+ case (ast.item_tag(_, _, ?tps, _)) { ret tps; }
+ }
+ fail; // not reached
+}
+
+// Returns the variants in a tag.
+fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[ast.variant] {
+ check (cx.items.contains_key(id));
+ alt (cx.items.get(id).node) {
+ case (ast.item_tag(_, ?variants, _, _)) { ret variants; }
+ }
+ fail; // not reached
+}
+
+// Returns a new plain tag type of the given ID with no type parameters. Don't
+// use this function in new code; it's a hack to keep things working for now.
+fn mk_plain_tag(ast.def_id tid) -> @ty.t {
+ let vec[@ty.t] tps = vec();
+ ret ty.plain_ty(ty.ty_tag(tid, tps));
+}
+
+
+type val_fn = fn(@block_ctxt cx, ValueRef v) -> result;
+
type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, @ty.t t) -> result;
+type val_pair_and_ty_fn =
+ fn(@block_ctxt cx, ValueRef av, ValueRef bv, @ty.t t) -> result;
+
// Iterates through the elements of a structural type.
fn iter_structural_ty(@block_ctxt cx,
ValueRef v,
@ty.t t,
val_and_ty_fn f)
-> result {
+ fn adaptor_fn(val_and_ty_fn f,
+ @block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t) -> result {
+ ret f(cx, av, t);
+ }
+ be iter_structural_ty_full(cx, v, v, t,
+ bind adaptor_fn(f, _, _, _, _));
+}
+
+
+fn iter_structural_ty_full(@block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t,
+ val_pair_and_ty_fn f)
+ -> result {
let result r = res(cx, C_nil());
fn iter_boxpp(@block_ctxt cx,
- ValueRef box_cell,
- val_and_ty_fn f) -> result {
- auto box_ptr = cx.build.Load(box_cell);
- auto tnil = ty.plain_ty(ty.ty_nil);
- auto tbox = ty.plain_ty(ty.ty_box(tnil));
+ ValueRef box_a_cell,
+ ValueRef box_b_cell,
+ val_pair_and_ty_fn f) -> result {
+ auto box_a_ptr = cx.build.Load(box_a_cell);
+ auto box_b_ptr = cx.build.Load(box_b_cell);
+ auto tnil = plain_ty(ty.ty_nil);
+ auto tbox = plain_ty(ty.ty_box(tnil));
auto inner_cx = new_sub_block_ctxt(cx, "iter box");
auto next_cx = new_sub_block_ctxt(cx, "next");
- auto null_test = cx.build.IsNull(box_ptr);
+ auto null_test = cx.build.IsNull(box_a_ptr);
cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);
- auto r = f(inner_cx, box_ptr, tbox);
+ auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
r.bcx.build.Br(next_cx.llbb);
ret res(next_cx, r.val);
}
@@ -1338,9 +1776,13 @@ fn iter_structural_ty(@block_ctxt cx,
case (ty.ty_tup(?args)) {
let int i = 0;
for (@ty.t arg in args) {
- auto elt = r.bcx.build.GEP(v, vec(C_int(0), C_int(i)));
+ r = GEP_tup_like(r.bcx, t, av, vec(0, i));
+ auto elt_a = r.val;
+ r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
+ auto elt_b = r.val;
r = f(r.bcx,
- load_scalar_or_boxed(r.bcx, elt, arg),
+ load_scalar_or_boxed(r.bcx, elt_a, arg),
+ load_scalar_or_boxed(r.bcx, elt_b, arg),
arg);
i += 1;
}
@@ -1348,90 +1790,101 @@ fn iter_structural_ty(@block_ctxt cx,
case (ty.ty_rec(?fields)) {
let int i = 0;
for (ty.field fld in fields) {
- auto llfld = r.bcx.build.GEP(v, vec(C_int(0), C_int(i)));
+ r = GEP_tup_like(r.bcx, t, av, vec(0, i));
+ auto llfld_a = r.val;
+ r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
+ auto llfld_b = r.val;
r = f(r.bcx,
- load_scalar_or_boxed(r.bcx, llfld, fld.ty),
+ load_scalar_or_boxed(r.bcx, llfld_a, fld.ty),
+ load_scalar_or_boxed(r.bcx, llfld_b, fld.ty),
fld.ty);
i += 1;
}
}
- case (ty.ty_tag(?tid)) {
- check (cx.fcx.ccx.tags.contains_key(tid));
- auto info = cx.fcx.ccx.tags.get(tid);
- auto n_variants = _vec.len[tup(ast.def_id,arity)](info.variants);
-
- // Look up the tag in the typechecked AST.
- check (cx.fcx.ccx.items.contains_key(tid));
- auto tag_item = cx.fcx.ccx.items.get(tid);
- let vec[ast.variant] variants = vec(); // FIXME: typestate bug
- alt (tag_item.node) {
- case (ast.item_tag(_, ?vs, _, _)) {
- variants = vs;
- }
- case (_) {
- log "trans: ty_tag doesn't actually refer to a tag";
- fail;
- }
- }
+ case (ty.ty_tag(?tid, ?tps)) {
+ auto variants = tag_variants(cx.fcx.ccx, tid);
+ auto n_variants = _vec.len[ast.variant](variants);
+
+ auto lldiscrim_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(0)));
+ auto llunion_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(1)));
+ auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);
- auto lldiscrim_ptr = cx.build.GEP(v, vec(C_int(0), C_int(0)));
- auto llunion_ptr = cx.build.GEP(v, vec(C_int(0), C_int(1)));
- auto lldiscrim = cx.build.Load(lldiscrim_ptr);
+ auto lldiscrim_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(0)));
+ auto llunion_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(1)));
+ auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);
- auto unr_cx = new_sub_block_ctxt(cx, "tag-iter-unr");
+ // NB: we must hit the discriminant first so that structural
+            // comparisons know not to proceed when the discriminants differ.
+ auto bcx = cx;
+ bcx = f(bcx, lldiscrim_a, lldiscrim_b,
+ plain_ty(ty.ty_int)).bcx;
+
+ auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
unr_cx.build.Unreachable();
- auto llswitch = cx.build.Switch(lldiscrim, unr_cx.llbb,
- n_variants);
+ auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
+ n_variants);
- auto next_cx = new_sub_block_ctxt(cx, "tag-iter-next");
+ auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");
auto i = 0u;
- for (tup(ast.def_id,arity) variant in info.variants) {
- auto variant_cx = new_sub_block_ctxt(cx, "tag-iter-variant-" +
+ for (ast.variant variant in variants) {
+ auto variant_cx = new_sub_block_ctxt(bcx,
+ "tag-iter-variant-" +
_uint.to_str(i, 10u));
llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);
- alt (variant._1) {
- case (n_ary) {
- let vec[ValueRef] vals = vec(C_int(0), C_int(1),
- C_int(i as int));
- auto llvar = variant_cx.build.GEP(v, vals);
- auto llvarty = type_of_variant(cx.fcx.ccx,
- variants.(i));
-
- auto fn_ty = ty.ann_to_type(variants.(i).ann);
- alt (fn_ty.struct) {
- case (ty.ty_fn(?args, _)) {
- auto llvarp = variant_cx.build.
- TruncOrBitCast(llunion_ptr,
- T_ptr(llvarty));
-
- auto j = 0u;
- for (ty.arg a in args) {
- auto v = vec(C_int(0),
- C_int(j as int));
- auto llfldp =
- variant_cx.build.GEP(llvarp, v);
-
- auto llfld =
- load_scalar_or_boxed(variant_cx,
- llfldp, a.ty);
-
- auto res = f(variant_cx, llfld, a.ty);
- variant_cx = res.bcx;
- j += 1u;
- }
+ if (_vec.len[ast.variant_arg](variant.args) > 0u) {
+ // N-ary variant.
+ auto llvarty = type_of_variant(bcx.fcx.ccx, variants.(i));
+
+ auto fn_ty = ty.ann_to_type(variants.(i).ann);
+ alt (fn_ty.struct) {
+ case (ty.ty_fn(_, ?args, _)) {
+ auto llvarp_a = variant_cx.build.
+ TruncOrBitCast(llunion_a_ptr, T_ptr(llvarty));
+
+ auto llvarp_b = variant_cx.build.
+ TruncOrBitCast(llunion_b_ptr, T_ptr(llvarty));
+
+ auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);
+
+ auto j = 0u;
+ for (ty.arg a in args) {
+ auto v = vec(C_int(0), C_int(j as int));
+
+ auto llfldp_a =
+ variant_cx.build.GEP(llvarp_a, v);
+
+ auto llfldp_b =
+ variant_cx.build.GEP(llvarp_b, v);
+
+ auto ty_subst = ty.substitute_ty_params(
+ ty_params, tps, a.ty);
+
+ auto llfld_a =
+ load_scalar_or_boxed(variant_cx,
+ llfldp_a,
+ ty_subst);
+
+ auto llfld_b =
+ load_scalar_or_boxed(variant_cx,
+ llfldp_b,
+ ty_subst);
+
+ auto res = f(variant_cx,
+ llfld_a, llfld_b, ty_subst);
+ variant_cx = res.bcx;
+ j += 1u;
}
- case (_) { fail; }
}
-
- variant_cx.build.Br(next_cx.llbb);
- }
- case (nullary) {
- // Nothing to do.
- variant_cx.build.Br(next_cx.llbb);
+ case (_) { fail; }
}
+
+ variant_cx.build.Br(next_cx.llbb);
+ } else {
+ // Nullary variant; nothing to do.
+ variant_cx.build.Br(next_cx.llbb);
}
i += 1u;
@@ -1439,27 +1892,96 @@ fn iter_structural_ty(@block_ctxt cx,
ret res(next_cx, C_nil());
}
- case (ty.ty_fn(_,_)) {
- auto box_cell =
- cx.build.GEP(v,
+ case (ty.ty_fn(_,_,_)) {
+ auto box_cell_a =
+ cx.build.GEP(av,
+ vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ auto box_cell_b =
+ cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.fn_field_box)));
- ret iter_boxpp(cx, box_cell, f);
+ ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (ty.ty_obj(_)) {
- auto box_cell =
- cx.build.GEP(v,
+ auto box_cell_a =
+ cx.build.GEP(av,
+ vec(C_int(0),
+ C_int(abi.obj_field_box)));
+ auto box_cell_b =
+ cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.obj_field_box)));
- ret iter_boxpp(cx, box_cell, f);
+ ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (_) {
- cx.fcx.ccx.sess.unimpl("type in iter_structural_ty");
+ cx.fcx.ccx.sess.unimpl("type in iter_structural_ty_full");
}
}
ret r;
}
+// Iterates through a pointer range, until the src* hits the src_lim*.
+fn iter_sequence_raw(@block_ctxt cx,
+ ValueRef src, // elt*
+ ValueRef src_lim, // elt*
+ ValueRef elt_sz,
+ val_fn f) -> result {
+
+ auto bcx = cx;
+
+ let ValueRef src_int = vp2i(bcx, src);
+ let ValueRef src_lim_int = vp2i(bcx, src_lim);
+
+ auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
+ auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
+ auto next_cx = new_sub_block_ctxt(cx, "next");
+
+ bcx.build.Br(cond_cx.llbb);
+
+ let ValueRef src_curr = cond_cx.build.Phi(T_int(),
+ vec(src_int), vec(bcx.llbb));
+
+ auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
+ src_curr, src_lim_int);
+
+ cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
+
+ auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
+
+ auto body_res = f(body_cx, src_curr_ptr);
+ body_cx = body_res.bcx;
+
+ auto src_next = body_cx.build.Add(src_curr, elt_sz);
+ body_cx.build.Br(cond_cx.llbb);
+
+ cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
+ vec(body_cx.llbb));
+
+ ret res(next_cx, C_nil());
+}
+
+
+fn iter_sequence_inner(@block_ctxt cx,
+ ValueRef src, // elt*
+ ValueRef src_lim, // elt*
+ @ty.t elt_ty,
+ val_and_ty_fn f) -> result {
+ fn adaptor_fn(val_and_ty_fn f,
+ @ty.t elt_ty,
+ @block_ctxt cx,
+ ValueRef v) -> result {
+ auto llty = type_of(cx.fcx.ccx, elt_ty);
+ auto p = cx.build.PointerCast(v, T_ptr(llty));
+ ret f(cx, load_scalar_or_boxed(cx, p, elt_ty), elt_ty);
+ }
+
+ auto elt_sz = size_of(cx, elt_ty);
+ be iter_sequence_raw(elt_sz.bcx, src, src_lim, elt_sz.val,
+ bind adaptor_fn(f, elt_ty, _, _));
+}
+
+
// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx,
ValueRef v,
@@ -1479,43 +2001,18 @@ fn iter_sequence(@block_ctxt cx,
auto llunit_ty = type_of(cx.fcx.ccx, elt_ty);
auto bcx = cx;
- auto unit_sz = size_of(bcx, elt_ty);
- bcx = unit_sz.bcx;
auto len = bcx.build.Load(lenptr);
if (trailing_null) {
+ auto unit_sz = size_of(bcx, elt_ty);
+ bcx = unit_sz.bcx;
len = bcx.build.Sub(len, unit_sz.val);
}
- auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
- auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
- auto next_cx = new_sub_block_ctxt(cx, "next");
-
- bcx.build.Br(cond_cx.llbb);
-
- auto ix = cond_cx.build.Phi(T_int(), vec(C_int(0)), vec(cx.llbb));
- auto scaled_ix = cond_cx.build.Phi(T_int(),
- vec(C_int(0)), vec(cx.llbb));
+ auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
+ T_ptr(llunit_ty));
- auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntNE,
- scaled_ix, len);
- cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
-
- auto elt = body_cx.build.GEP(p0, vec(C_int(0), ix));
- auto body_res = f(body_cx,
- load_scalar_or_boxed(body_cx, elt, elt_ty),
- elt_ty);
- auto next_ix = body_res.bcx.build.Add(ix, C_int(1));
- auto next_scaled_ix = body_res.bcx.build.Add(scaled_ix, unit_sz.val);
-
- cond_cx.build.AddIncomingToPhi(ix, vec(next_ix),
- vec(body_res.bcx.llbb));
-
- cond_cx.build.AddIncomingToPhi(scaled_ix, vec(next_scaled_ix),
- vec(body_res.bcx.llbb));
-
- body_res.bcx.build.Br(cond_cx.llbb);
- ret res(next_cx, C_nil());
+ ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
}
alt (t.struct) {
@@ -1523,7 +2020,7 @@ fn iter_sequence(@block_ctxt cx,
ret iter_sequence_body(cx, v, et, f, false);
}
case (ty.ty_str) {
- auto et = ty.plain_ty(ty.ty_machine(common.ty_u8));
+ auto et = plain_ty(ty.ty_machine(common.ty_u8));
ret iter_sequence_body(cx, v, et, f, true);
}
case (_) { fail; }
@@ -1541,7 +2038,20 @@ fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
lltydescs = cx.build.Load(lltydescs);
auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
auto llfn = cx.build.Load(llfnptr);
- cx.build.FastCall(llfn, vec(cx.fcx.lltaskptr, lltydescs, llrawptr));
+
+ // FIXME: this adjustment has to do with the ridiculous encoding of
+ // glue-pointer-constants in the tydesc records: They are tydesc-relative
+ // displacements. This is purely for compatibility with rustboot and
+ // should go when it is discarded.
+ llfn = vi2p(cx, cx.build.Add(vp2i(cx, llfn),
+ vp2i(cx, tydesc)),
+ val_ty(llfn));
+
+ cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
+ cx.fcx.lltaskptr,
+ C_null(T_ptr(T_nil())),
+ lltydescs,
+ llrawptr));
}
fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
@@ -1552,7 +2062,6 @@ fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
fn incr_all_refcnts(@block_ctxt cx,
ValueRef v,
@ty.t t) -> result {
-
if (!ty.type_is_scalar(t)) {
call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue_off);
}
@@ -1625,7 +2134,7 @@ fn copy_ty(@block_ctxt cx,
ValueRef dst,
ValueRef src,
@ty.t t) -> result {
- if (ty.type_is_scalar(t)) {
+ if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
ret res(cx, cx.build.Store(src, dst));
} else if (ty.type_is_nil(t)) {
@@ -1744,7 +2253,11 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
case (ast.box) {
auto e_ty = ty.expr_ty(e);
auto e_val = sub.val;
- sub = trans_malloc(sub.bcx, node_ann_type(sub.bcx.fcx.ccx, a));
+ auto box_ty = node_ann_type(sub.bcx.fcx.ccx, a);
+ sub = trans_malloc_boxed(sub.bcx, e_ty);
+ find_scope_cx(cx).cleanups +=
+ clean(bind drop_ty(_, sub.val, box_ty));
+
auto box = sub.val;
auto rc = sub.bcx.build.GEP(box,
vec(C_int(0),
@@ -1753,6 +2266,15 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
sub.bcx.build.Store(C_int(1), rc);
+
+ // Cast the body type to the type of the value. This is needed to
+ // make tags work, since tags have a different LLVM type depending
+ // on whether they're boxed or not.
+ if (!ty.type_has_dynamic_size(e_ty)) {
+ auto llety = T_ptr(type_of(sub.bcx.fcx.ccx, e_ty));
+ body = sub.bcx.build.PointerCast(body, llety);
+ }
+
sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
ret res(sub.bcx, box);
}
@@ -1767,41 +2289,241 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
}
ret res(sub.bcx, val);
}
+ case (ast._mutable) {
+ ret trans_expr(cx, e);
+ }
}
fail;
}
-fn trans_eager_binop(@block_ctxt cx, ast.binop op,
- ValueRef lhs, ValueRef rhs) -> ValueRef {
+fn trans_compare(@block_ctxt cx, ast.binop op, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ if (ty.type_is_scalar(t)) {
+ ret res(cx, trans_scalar_compare(cx, op, t, lhs, rhs));
+
+ } else if (ty.type_is_structural(t)) {
+ auto scx = new_sub_block_ctxt(cx, "structural compare start");
+ auto next = new_sub_block_ctxt(cx, "structural compare end");
+ cx.build.Br(scx.llbb);
+
+ /*
+ * We're doing lexicographic comparison here. We start with the
+ * assumption that the two input elements are equal. Depending on
+ * operator, this means that the result is either true or false;
+ * equality produces 'true' for ==, <= and >=. It produces 'false' for
+ * !=, < and >.
+ *
+ * We then move one element at a time through the structure checking
+ * for pairwise element equality. If we have equality, our assumption
+ * about overall sequence equality is not modified, so we have to move
+ * to the next element.
+ *
+ * If we do not have pairwise element equality, we have reached an
+ * element that 'decides' the lexicographic comparison. So we exit the
+ * loop with a flag that indicates the true/false sense of that
+ * decision, by testing the element again with the operator we're
+ * interested in.
+ *
+ * When we're lucky, LLVM should be able to fold some of these two
+ * tests together (as they're applied to the same operands and in some
+ * cases are sometimes redundant). But we don't bother trying to
+ * optimize combinations like that, at this level.
+ */
+
+ auto flag = scx.build.Alloca(T_i1());
+
+ alt (op) {
+ // ==, <= and >= default to true if they find == all the way.
+ case (ast.eq) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (ast.le) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (ast.ge) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (_) {
+            // !=, < and > default to false if they find == all the way.
+ scx.build.Store(C_integral(0, T_i1()), flag);
+ }
+ }
+
+ fn inner(@block_ctxt last_cx,
+ ValueRef flag,
+ ast.binop op,
+ @block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t) -> result {
+
+ auto cnt_cx = new_sub_block_ctxt(cx, "continue comparison");
+ auto stop_cx = new_sub_block_ctxt(cx, "stop comparison");
+
+ // First 'eq' comparison: if so, continue to next elts.
+ auto eq_r = trans_compare(cx, ast.eq, t, av, bv);
+ eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);
+
+ // Second 'op' comparison: find out how this elt-pair decides.
+ auto stop_r = trans_compare(stop_cx, op, t, av, bv);
+ stop_r.bcx.build.Store(stop_r.val, flag);
+ stop_r.bcx.build.Br(last_cx.llbb);
+ ret res(cnt_cx, C_nil());
+ }
+
+ auto r = iter_structural_ty_full(scx, lhs, rhs, t,
+ bind inner(next, flag, op,
+ _, _, _, _));
+
+ r.bcx.build.Br(next.llbb);
+ auto v = next.build.Load(flag);
+ ret res(next, v);
+
+ } else {
+ // FIXME: compare vec, str, box?
+ cx.fcx.ccx.sess.unimpl("type in trans_compare");
+ ret res(cx, C_bool(false));
+ }
+}
+
+fn trans_scalar_compare(@block_ctxt cx, ast.binop op, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ if (ty.type_is_fp(t)) {
+ ret trans_fp_compare(cx, op, t, lhs, rhs);
+ } else {
+ ret trans_integral_compare(cx, op, t, lhs, rhs);
+ }
+}
+
+fn trans_fp_compare(@block_ctxt cx, ast.binop op, @ty.t fptype,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ auto cmp = lib.llvm.LLVMIntEQ;
alt (op) {
- case (ast.add) { ret cx.build.Add(lhs, rhs); }
- case (ast.sub) { ret cx.build.Sub(lhs, rhs); }
-
- // FIXME (issue #57): switch by signedness.
- case (ast.mul) { ret cx.build.Mul(lhs, rhs); }
- case (ast.div) { ret cx.build.SDiv(lhs, rhs); }
- case (ast.rem) { ret cx.build.SRem(lhs, rhs); }
-
- case (ast.bitor) { ret cx.build.Or(lhs, rhs); }
- case (ast.bitand) { ret cx.build.And(lhs, rhs); }
- case (ast.bitxor) { ret cx.build.Xor(lhs, rhs); }
- case (ast.lsl) { ret cx.build.Shl(lhs, rhs); }
- case (ast.lsr) { ret cx.build.LShr(lhs, rhs); }
- case (ast.asr) { ret cx.build.AShr(lhs, rhs); }
- case (_) {
- auto cmp = lib.llvm.LLVMIntEQ;
- alt (op) {
- case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
- case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }
-
- // FIXME (issue #57): switch by signedness.
- case (ast.lt) { cmp = lib.llvm.LLVMIntSLT; }
- case (ast.le) { cmp = lib.llvm.LLVMIntSLE; }
- case (ast.ge) { cmp = lib.llvm.LLVMIntSGE; }
- case (ast.gt) { cmp = lib.llvm.LLVMIntSGT; }
+ // FIXME: possibly use the unordered-or-< predicates here,
+ // for now we're only going with ordered-and-< style (no NaNs).
+ case (ast.eq) { cmp = lib.llvm.LLVMRealOEQ; }
+ case (ast.ne) { cmp = lib.llvm.LLVMRealONE; }
+ case (ast.lt) { cmp = lib.llvm.LLVMRealOLT; }
+ case (ast.gt) { cmp = lib.llvm.LLVMRealOGT; }
+ case (ast.le) { cmp = lib.llvm.LLVMRealOLE; }
+ case (ast.ge) { cmp = lib.llvm.LLVMRealOGE; }
+ }
+
+ ret cx.build.FCmp(cmp, lhs, rhs);
+}
+
+fn trans_integral_compare(@block_ctxt cx, ast.binop op, @ty.t intype,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ auto cmp = lib.llvm.LLVMIntEQ;
+ alt (op) {
+ case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
+ case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }
+
+ case (ast.lt) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSLT;
+ } else {
+ cmp = lib.llvm.LLVMIntULT;
+ }
+ }
+ case (ast.le) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSLE;
+ } else {
+ cmp = lib.llvm.LLVMIntULE;
+ }
+ }
+ case (ast.gt) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSGT;
+ } else {
+ cmp = lib.llvm.LLVMIntUGT;
+ }
+ }
+ case (ast.ge) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSGE;
+ } else {
+ cmp = lib.llvm.LLVMIntUGE;
}
- ret cx.build.ICmp(cmp, lhs, rhs);
+ }
+ }
+ ret cx.build.ICmp(cmp, lhs, rhs);
+}
+
+fn trans_vec_append(@block_ctxt cx, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ auto elt_ty = ty.sequence_element_type(t);
+
+ auto skip_null = C_bool(false);
+ alt (t.struct) {
+ case (ty.ty_str) { skip_null = C_bool(true); }
+ case (_) { }
+ }
+
+ auto bcx = cx;
+
+ auto llvec_tydesc = get_tydesc(bcx, t);
+ bcx = llvec_tydesc.bcx;
+
+ auto llelt_tydesc = get_tydesc(bcx, elt_ty);
+ bcx = llelt_tydesc.bcx;
+
+ auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
+ auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());
+
+ ret res(bcx, bcx.build.FastCall(cx.fcx.ccx.glues.vec_append_glue,
+ vec(cx.fcx.lltaskptr,
+ llvec_tydesc.val,
+ llelt_tydesc.val,
+ dst, src, skip_null)));
+}
+
+fn trans_vec_add(@block_ctxt cx, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+ auto r = alloc_ty(cx, t);
+ auto tmp = r.val;
+ r = copy_ty(r.bcx, INIT, tmp, lhs, t);
+ auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
+ tmp = load_scalar_or_boxed(bcx, tmp, t);
+ find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tmp, t));
+ ret res(bcx, tmp);
+}
+
+
+fn trans_eager_binop(@block_ctxt cx, ast.binop op, @ty.t intype,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ alt (op) {
+ case (ast.add) {
+ if (ty.type_is_sequence(intype)) {
+ ret trans_vec_add(cx, intype, lhs, rhs);
+ }
+ ret res(cx, cx.build.Add(lhs, rhs));
+ }
+ case (ast.sub) { ret res(cx, cx.build.Sub(lhs, rhs)); }
+
+ case (ast.mul) { ret res(cx, cx.build.Mul(lhs, rhs)); }
+ case (ast.div) {
+ if (ty.type_is_signed(intype)) {
+ ret res(cx, cx.build.SDiv(lhs, rhs));
+ } else {
+ ret res(cx, cx.build.UDiv(lhs, rhs));
+ }
+ }
+ case (ast.rem) {
+ if (ty.type_is_signed(intype)) {
+ ret res(cx, cx.build.SRem(lhs, rhs));
+ } else {
+ ret res(cx, cx.build.URem(lhs, rhs));
+ }
+ }
+
+ case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
+ case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
+ case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
+ case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
+ case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
+ case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
+ case (_) {
+ ret trans_compare(cx, op, intype, lhs, rhs);
}
}
fail;
@@ -1827,6 +2549,21 @@ fn autoderef(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
}
}
+fn autoderefed_ty(@ty.t t) -> @ty.t {
+ let @ty.t t1 = t;
+
+ while (true) {
+ alt (t1.struct) {
+ case (ty.ty_box(?inner)) {
+ t1 = inner;
+ }
+ case (_) {
+ ret t1;
+ }
+ }
+ }
+}
+
fn trans_binary(@block_ctxt cx, ast.binop op,
@ast.expr a, @ast.expr b) -> result {
@@ -1876,11 +2613,14 @@ fn trans_binary(@block_ctxt cx, ast.binop op,
case (_) {
// Remaining cases are eager:
auto lhs = trans_expr(cx, a);
- lhs = autoderef(lhs.bcx, lhs.val, ty.expr_ty(a));
+ auto lhty = ty.expr_ty(a);
+ lhs = autoderef(lhs.bcx, lhs.val, lhty);
auto rhs = trans_expr(lhs.bcx, b);
- rhs = autoderef(rhs.bcx, rhs.val, ty.expr_ty(b));
- ret res(rhs.bcx, trans_eager_binop(rhs.bcx, op,
- lhs.val, rhs.val));
+ auto rhty = ty.expr_ty(b);
+ rhs = autoderef(rhs.bcx, rhs.val, rhty);
+ ret trans_eager_binop(rhs.bcx, op,
+ autoderefed_ty(lhty),
+ lhs.val, rhs.val);
}
}
fail;
@@ -1983,6 +2723,7 @@ fn trans_for(@block_ctxt cx,
cx.build.Br(scope_cx.llbb);
auto local_res = alloc_local(scope_cx, local);
auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
+ scope_cx.cleanups += clean(bind drop_slot(_, local_res.val, t));
bcx = trans_block(bcx, body).bcx;
bcx.build.Br(next_cx.llbb);
ret res(next_cx, C_nil());
@@ -2002,6 +2743,103 @@ fn trans_for(@block_ctxt cx,
bind inner(_, local, _, _, body));
}
+fn trans_for_each(@block_ctxt cx,
+ @ast.decl decl,
+ @ast.expr seq,
+ &ast.block body) -> result {
+
+ /*
+ * The translation is a little .. complex here. Code like:
+ *
+ * let ty1 p = ...;
+ *
+     * let ty2 q = ...;
+ *
+ * foreach (ty v in foo(a,b)) { body(p,q,v) }
+ *
+ *
+ * Turns into a something like so (C/Rust mishmash):
+ *
+ * type env = { *ty1 p, *ty2 q, ... };
+ *
+ * let env e = { &p, &q, ... };
+ *
+ * fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
+ *
+ * foo([foreach123_body, env*], a, b);
+ *
+ */
+
+ // Step 1: walk body and figure out which references it makes
+ // escape. This could be determined upstream, and probably ought
+    // to be so, eventually. For first cut, skip this. Null env.
+
+ auto env_ty = T_opaque_closure_ptr(cx.fcx.ccx.tn);
+
+
+ // Step 2: Declare foreach body function.
+
+ // FIXME: possibly support alias-mode here?
+ auto decl_ty = plain_ty(ty.ty_nil);
+ alt (decl.node) {
+ case (ast.decl_local(?local)) {
+ decl_ty = node_ann_type(cx.fcx.ccx, local.ann);
+ }
+ }
+
+ let str s =
+ cx.fcx.ccx.names.next("_rust_foreach")
+ + sep() + cx.fcx.ccx.path;
+
+ // The 'env' arg entering the body function is a fake env member (as in
+ // the env-part of the normal rust calling convention) that actually
+ // points to a stack allocated env in this frame. We bundle that env
+ // pointer along with the foreach-body-fn pointer into a 'normal' fn pair
+ // and pass it in as a first class fn-arg to the iterator.
+
+ auto iter_body_llty = type_of_fn_full(cx.fcx.ccx, ast.proto_fn,
+ none[TypeRef],
+ vec(rec(mode=ast.val, ty=decl_ty)),
+ plain_ty(ty.ty_nil));
+
+ let ValueRef lliterbody = decl_fastcall_fn(cx.fcx.ccx.llmod,
+ s, iter_body_llty);
+
+ // FIXME: handle ty params properly.
+ let vec[ast.ty_param] ty_params = vec();
+
+ auto fcx = new_fn_ctxt(cx.fcx.ccx, lliterbody);
+ auto bcx = new_top_block_ctxt(fcx);
+
+ // FIXME: populate lllocals from llenv here.
+ auto res = trans_block(bcx, body);
+ res.bcx.build.RetVoid();
+
+
+ // Step 3: Call iter passing [lliterbody, llenv], plus other args.
+
+ alt (seq.node) {
+
+ case (ast.expr_call(?f, ?args, ?ann)) {
+
+ auto pair = cx.build.Alloca(T_fn_pair(cx.fcx.ccx.tn,
+ iter_body_llty));
+ auto code_cell = cx.build.GEP(pair,
+ vec(C_int(0),
+ C_int(abi.fn_field_code)));
+ cx.build.Store(lliterbody, code_cell);
+
+ // log "lliterbody: " + val_str(cx.fcx.ccx.tn, lliterbody);
+ ret trans_call(cx, f,
+ some[ValueRef](cx.build.Load(pair)),
+ args,
+ ann);
+ }
+ }
+ fail;
+}
+
+
fn trans_while(@block_ctxt cx, @ast.expr cond,
&ast.block body) -> result {
@@ -2061,17 +2899,29 @@ fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }
+
+ case (ast.pat_lit(?lt, ?ann)) {
+ auto lllit = trans_lit(cx.fcx.ccx, *lt, ann);
+ auto lltype = ty.ann_to_type(ann);
+ auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);
+
+ auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
+ lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
+ ret res(matched_cx, llval);
+ }
+
case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
auto lltagptr = cx.build.GEP(llval, vec(C_int(0), C_int(0)));
auto lltag = cx.build.Load(lltagptr);
auto vdef = option.get[ast.variant_def](vdef_opt);
auto variant_id = vdef._1;
- auto tinfo = cx.fcx.ccx.tags.get(vdef._0);
auto variant_tag = 0;
+
+ auto variants = tag_variants(cx.fcx.ccx, vdef._0);
auto i = 0;
- for (tup(ast.def_id,arity) vinfo in tinfo.variants) {
- auto this_variant_id = vinfo._0;
+ for (ast.variant v in variants) {
+ auto this_variant_id = v.id;
if (variant_id._0 == this_variant_id._0 &&
variant_id._1 == this_variant_id._1) {
variant_tag = i;
@@ -2113,6 +2963,7 @@ fn trans_pat_binding(@block_ctxt cx, @ast.pat pat, ValueRef llval)
-> result {
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
+ case (ast.pat_lit(_, _)) { ret res(cx, llval); }
case (ast.pat_bind(?id, ?def_id, ?ann)) {
auto ty = node_ann_type(cx.fcx.ccx, ann);
auto llty = type_of(cx.fcx.ccx, ty);
@@ -2204,6 +3055,34 @@ fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
llobj=none[ValueRef]);
}
+fn lval_generic_fn(@block_ctxt cx,
+ ty.ty_params_and_ty tpt,
+ ast.def_id fn_id,
+ &ast.ann ann)
+ -> lval_result {
+
+ check (cx.fcx.ccx.fn_pairs.contains_key(fn_id));
+ auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(fn_id));
+ auto monoty = node_ann_type(cx.fcx.ccx, ann);
+ auto tys = ty.resolve_ty_params(tpt, monoty);
+
+ if (_vec.len[@ty.t](tys) != 0u) {
+ auto bcx = cx;
+ let vec[ValueRef] tydescs = vec();
+ for (@ty.t t in tys) {
+ auto td = get_tydesc(bcx, t);
+ bcx = td.bcx;
+ append[ValueRef](tydescs, td.val);
+ }
+ auto gen = rec( item_type = tpt._1,
+ tydescs = tydescs );
+ lv = rec(res = res(bcx, lv.res.val),
+ generic = some[generic_info](gen)
+ with lv);
+ }
+ ret lv;
+}
+
fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
&ast.ann ann) -> lval_result {
alt (dopt) {
@@ -2226,49 +3105,60 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
ret lval_mem(cx, cx.fcx.llobjfields.get(did));
}
case (ast.def_fn(?did)) {
- check (cx.fcx.ccx.fn_pairs.contains_key(did));
- check (cx.fcx.ccx.item_ids.contains_key(did));
-
+ check (cx.fcx.ccx.items.contains_key(did));
auto fn_item = cx.fcx.ccx.items.get(did);
- auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(did));
- auto monoty = node_ann_type(cx.fcx.ccx, ann);
- auto tys = ty.resolve_ty_params(fn_item, monoty);
-
- if (_vec.len[@ty.t](tys) != 0u) {
- auto bcx = cx;
- let vec[ValueRef] tydescs = vec();
- for (@ty.t t in tys) {
- auto td = get_tydesc(bcx, t);
- bcx = td.bcx;
- append[ValueRef](tydescs, td.val);
- }
- auto gen = rec( item_type = ty.item_ty(fn_item)._1,
- tydescs = tydescs );
- lv = rec(res = res(bcx, lv.res.val),
- generic = some[generic_info](gen)
- with lv);
- }
-
- ret lv;
+ ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
}
case (ast.def_obj(?did)) {
- check (cx.fcx.ccx.fn_pairs.contains_key(did));
- ret lval_val(cx, cx.fcx.ccx.fn_pairs.get(did));
+ check (cx.fcx.ccx.items.contains_key(did));
+ auto fn_item = cx.fcx.ccx.items.get(did);
+ ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
}
case (ast.def_variant(?tid, ?vid)) {
- check (cx.fcx.ccx.tags.contains_key(tid));
if (cx.fcx.ccx.fn_pairs.contains_key(vid)) {
- ret lval_val(cx, cx.fcx.ccx.fn_pairs.get(vid));
+ check (cx.fcx.ccx.items.contains_key(tid));
+ auto tag_item = cx.fcx.ccx.items.get(tid);
+ auto params = ty.item_ty(tag_item)._0;
+ auto fty = plain_ty(ty.ty_nil);
+ alt (tag_item.node) {
+ case (ast.item_tag(_, ?variants, _, _)) {
+ for (ast.variant v in variants) {
+ if (v.id == vid) {
+ fty = node_ann_type(cx.fcx.ccx,
+ v.ann);
+ }
+ }
+ }
+ }
+ ret lval_generic_fn(cx, tup(params, fty), vid, ann);
} else {
- // Nullary variants are just scalar constants.
- check (cx.fcx.ccx.item_ids.contains_key(vid));
- ret lval_val(cx, cx.fcx.ccx.item_ids.get(vid));
+ // Nullary variant.
+ auto tag_ty = node_ann_type(cx.fcx.ccx, ann);
+ auto lldiscrim_gv = cx.fcx.ccx.discrims.get(vid);
+ auto lldiscrim = cx.build.Load(lldiscrim_gv);
+
+ auto alloc_result = alloc_ty(cx, tag_ty);
+ auto lltagblob = alloc_result.val;
+ auto lltagptr = alloc_result.bcx.build.PointerCast(
+ lltagblob, T_ptr(type_of(cx.fcx.ccx, tag_ty)));
+
+ auto lldiscrimptr = alloc_result.bcx.build.GEP(
+ lltagptr, vec(C_int(0), C_int(0)));
+ alloc_result.bcx.build.Store(lldiscrim, lldiscrimptr);
+
+ ret lval_val(alloc_result.bcx, lltagptr);
}
}
case (ast.def_const(?did)) {
check (cx.fcx.ccx.consts.contains_key(did));
ret lval_mem(cx, cx.fcx.ccx.consts.get(did));
}
+ case (ast.def_native_fn(?did)) {
+ check (cx.fcx.ccx.native_items.contains_key(did));
+ auto fn_item = cx.fcx.ccx.native_items.get(did);
+ ret lval_generic_fn(cx, ty.native_item_ty(fn_item),
+ did, ann);
+ }
case (_) {
cx.fcx.ccx.sess.unimpl("def variant in trans");
}
@@ -2283,11 +3173,10 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
fn trans_field(@block_ctxt cx, &ast.span sp, @ast.expr base,
&ast.ident field, &ast.ann ann) -> lval_result {
- auto lv = trans_lval(cx, base);
- auto r = lv.res;
- r = autoderef(r.bcx, r.val, ty.expr_ty(base));
- check (lv.is_mem);
+ auto r = trans_expr(cx, base);
auto t = ty.expr_ty(base);
+ r = autoderef(r.bcx, r.val, t);
+ t = autoderefed_ty(t);
alt (t.struct) {
case (ty.ty_tup(?fields)) {
let uint ix = ty.field_num(cx.fcx.ccx.sess, sp, field);
@@ -2325,10 +3214,23 @@ fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
auto v = lv.val;
auto bcx = ix.bcx;
+ // Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
+ auto ix_val;
+ auto ix_size = llsize_of_real(cx.fcx.ccx, val_ty(ix.val));
+ auto int_size = llsize_of_real(cx.fcx.ccx, T_int());
+ if (ix_size < int_size) {
+ ix_val = bcx.build.ZExt(ix.val, T_int());
+ } else if (ix_size > int_size) {
+ ix_val = bcx.build.Trunc(ix.val, T_int());
+ } else {
+ ix_val = ix.val;
+ }
+
auto llunit_ty = node_type(cx.fcx.ccx, ann);
auto unit_sz = size_of(bcx, node_ann_type(cx.fcx.ccx, ann));
bcx = unit_sz.bcx;
- auto scaled_ix = bcx.build.Mul(ix.val, unit_sz.val);
+
+ auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
lim = bcx.build.Load(lim);
@@ -2345,7 +3247,7 @@ fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
fail_res.bcx.build.Br(next_cx.llbb);
auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
- auto elt = next_cx.build.GEP(body, vec(C_int(0), ix.val));
+ auto elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
ret lval_mem(next_cx, elt);
}
@@ -2400,125 +3302,24 @@ fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
ret e_res;
}
-
-// NB: this must match type_of_fn_full and create_llargs_for_fn_args.
-fn trans_args(@block_ctxt cx,
- ValueRef llclosure,
- option.t[ValueRef] llobj,
- option.t[generic_info] gen,
- &vec[@ast.expr] es,
- @ty.t fn_ty)
- -> tup(@block_ctxt, vec[ValueRef], option.t[ValueRef]) {
- let vec[ValueRef] vs = vec(cx.fcx.lltaskptr);
- let @block_ctxt bcx = cx;
-
- let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
-
- let option.t[ValueRef] llretslot_opt = none[ValueRef];
-
- alt (gen) {
- case (some[generic_info](?g)) {
- for (ValueRef t in g.tydescs) {
- vs += t;
- }
- args = ty.ty_fn_args(g.item_type);
- if (ty.type_has_dynamic_size(ty.ty_fn_ret(g.item_type))) {
- auto retty = ty.ty_fn_ret(fn_ty);
- auto llretty = type_of(cx.fcx.ccx, retty);
- auto llretslot = cx.build.Alloca(llretty);
- vs += cx.build.PointerCast(llretslot, T_ptr(T_i8()));
- llretslot_opt = some[ValueRef](llretslot);
- }
- }
- case (_) { }
- }
-
- alt (llobj) {
- case (some[ValueRef](?ob)) {
- // Every object is always found in memory,
- // and not-yet-loaded (as part of an lval x.y
- // doted method-call).
- vs += cx.build.Load(ob);
- }
- case (_) {
- vs += llclosure;
- }
- }
-
- auto i = 0u;
- for (@ast.expr e in es) {
- auto mode = args.(i).mode;
-
- auto val;
- if (ty.type_is_structural(ty.expr_ty(e))) {
- auto re = trans_expr(bcx, e);
- val = re.val;
- bcx = re.bcx;
- if (mode == ast.val) {
- // Until here we've been treating structures by pointer;
- // we are now passing it as an arg, so need to load it.
- val = bcx.build.Load(val);
- }
- } else if (mode == ast.alias) {
- let lval_result lv;
- if (ty.is_lval(e)) {
- lv = trans_lval(bcx, e);
- } else {
- auto r = trans_expr(bcx, e);
- lv = lval_val(r.bcx, r.val);
- }
- bcx = lv.res.bcx;
-
- if (lv.is_mem) {
- val = lv.res.val;
- } else {
- // Non-mem but we're trying to alias; synthesize an
- // alloca, spill to it and pass its address.
- auto llty = val_ty(lv.res.val);
- auto llptr = lv.res.bcx.build.Alloca(llty);
- lv.res.bcx.build.Store(lv.res.val, llptr);
- val = llptr;
- }
-
- } else {
- auto re = trans_expr(bcx, e);
- val = re.val;
- bcx = re.bcx;
- }
-
- if (ty.type_has_dynamic_size(args.(i).ty)) {
- val = bcx.build.PointerCast(val, T_typaram_ptr());
- }
-
- vs += val;
- i += 1u;
- }
-
- ret tup(bcx, vs, llretslot_opt);
-}
-
fn trans_bind_thunk(@crate_ctxt cx,
@ty.t incoming_fty,
@ty.t outgoing_fty,
vec[option.t[@ast.expr]] args,
TypeRef llclosure_ty,
- vec[@ty.t] bound_tys) -> ValueRef {
+ vec[@ty.t] bound_tys,
+ uint ty_param_count) -> ValueRef {
// Construct a thunk-call with signature incoming_fty, and that copies
// args forward into a call to outgoing_fty.
- let str s = cx.names.next("_rust_thunk") + "." + cx.path;
+ let str s = cx.names.next("_rust_thunk") + sep() + cx.path;
let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx, incoming_fty));
let ValueRef llthunk = decl_fastcall_fn(cx.llmod, s, llthunk_ty);
- let @ty.t rty = ret_ty_of_fn_ty(incoming_fty);
-
- // FIXME: handle ty params properly.
- let vec[ast.ty_param] ty_params = vec();
-
- auto fcx = new_fn_ctxt(cx, s, llthunk);
+ auto fcx = new_fn_ctxt(cx, llthunk);
auto bcx = new_top_block_ctxt(fcx);
- auto llclosure = bcx.build.PointerCast(fcx.llclosure, llclosure_ty);
+ auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ty);
auto llbody = bcx.build.GEP(llclosure,
vec(C_int(0),
@@ -2536,10 +3337,33 @@ fn trans_bind_thunk(@crate_ctxt cx,
vec(C_int(0),
C_int(abi.fn_field_box)));
lltargetclosure = bcx.build.Load(lltargetclosure);
- let vec[ValueRef] llargs = vec(fcx.lltaskptr,
+
+ auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
+ auto outgoing_arg_tys = ty.ty_fn_args(outgoing_fty);
+
+ auto llretptr = fcx.llretptr;
+ if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
+ llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.tn));
+ }
+
+ let vec[ValueRef] llargs = vec(llretptr,
+ fcx.lltaskptr,
lltargetclosure);
- let uint a = 0u;
+
+ // Copy in the type parameters.
+ let uint i = 0u;
+ while (i < ty_param_count) {
+ auto lltyparam_ptr =
+ bcx.build.GEP(llbody, vec(C_int(0),
+ C_int(abi.closure_elt_ty_params),
+ C_int(i as int)));
+ llargs += vec(bcx.build.Load(lltyparam_ptr));
+ i += 1u;
+ }
+
+ let uint a = 2u + i; // retptr, task ptr, env come first
let int b = 0;
+ let uint outgoing_arg_index = 0u;
for (option.t[@ast.expr] arg in args) {
alt (arg) {
@@ -2556,10 +3380,19 @@ fn trans_bind_thunk(@crate_ctxt cx,
// Arg will be provided when the thunk is invoked.
case (none[@ast.expr]) {
let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);
+ if (ty.type_has_dynamic_size(outgoing_arg_tys.
+ (outgoing_arg_index).ty)) {
+ // Cast to a generic typaram pointer in order to make a
+ // type-compatible call.
+ passed_arg = bcx.build.PointerCast(passed_arg,
+ T_typaram_ptr(cx.tn));
+ }
llargs += passed_arg;
a += 1u;
}
}
+
+        outgoing_arg_index += 1u;
}
// FIXME: turn this call + ret into a tail call.
@@ -2567,21 +3400,9 @@ fn trans_bind_thunk(@crate_ctxt cx,
vec(C_int(0),
C_int(abi.fn_field_code)));
lltargetfn = bcx.build.Load(lltargetfn);
- auto r = bcx.build.FastCall(lltargetfn, llargs);
- alt (fcx.llretptr) {
- case (some[ValueRef](?llptr)) {
- bcx.build.Store(bcx.build.Load(r), llptr);
- bcx.build.RetVoid();
- }
- case (none[ValueRef]) {
- if (ty.type_is_nil(rty)) {
- bcx.build.RetVoid();
- } else {
- bcx.build.Ret(r);
- }
- }
- }
+ auto r = bcx.build.FastCall(lltargetfn, llargs);
+ bcx.build.RetVoid();
ret llthunk;
}
@@ -2604,7 +3425,23 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
}
}
}
- if (_vec.len[@ast.expr](bound) == 0u) {
+
+ // Figure out which tydescs we need to pass, if any.
+ let @ty.t outgoing_fty;
+ let vec[ValueRef] lltydescs;
+ alt (f_res.generic) {
+ case (none[generic_info]) {
+ outgoing_fty = ty.expr_ty(f);
+ lltydescs = vec();
+ }
+ case (some[generic_info](?ginfo)) {
+ outgoing_fty = ginfo.item_type;
+ lltydescs = ginfo.tydescs;
+ }
+ }
+ auto ty_param_count = _vec.len[ValueRef](lltydescs);
+
+ if (_vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
// Trivial 'binding': just return the static pair-ptr.
ret f_res.res;
} else {
@@ -2615,22 +3452,32 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
// Translate the bound expressions.
let vec[@ty.t] bound_tys = vec();
let vec[ValueRef] bound_vals = vec();
+ auto i = 0u;
for (@ast.expr e in bound) {
auto arg = trans_expr(bcx, e);
bcx = arg.bcx;
+
append[ValueRef](bound_vals, arg.val);
append[@ty.t](bound_tys, ty.expr_ty(e));
+
+ i += 1u;
}
+ // Get the type of the bound function.
+ let TypeRef lltarget_ty = type_of(bcx.fcx.ccx, outgoing_fty);
+
// Synthesize a closure type.
- let @ty.t bindings_ty = ty.plain_ty(ty.ty_tup(bound_tys));
- let TypeRef lltarget_ty = type_of(bcx.fcx.ccx, ty.expr_ty(f));
+ let @ty.t bindings_ty = plain_ty(ty.ty_tup(bound_tys));
let TypeRef llbindings_ty = type_of(bcx.fcx.ccx, bindings_ty);
- let TypeRef llclosure_ty = T_closure_ptr(lltarget_ty,
- llbindings_ty);
+ let TypeRef llclosure_ty = T_closure_ptr(cx.fcx.ccx.tn,
+ lltarget_ty,
+ llbindings_ty,
+ ty_param_count);
// Malloc a box for the body.
- auto r = trans_malloc_inner(bcx, llclosure_ty);
+ // FIXME: this isn't generic-safe
+ auto r = trans_raw_malloc(bcx, llclosure_ty,
+ llsize_of(llvm.LLVMGetElementType(llclosure_ty)));
auto box = r.val;
bcx = r.bcx;
auto rc = bcx.build.GEP(box,
@@ -2656,19 +3503,40 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_target)));
- bcx.build.Store(bcx.build.Load(f_res.res.val), bound_target);
+ auto src = bcx.build.Load(f_res.res.val);
+ bcx.build.Store(src, bound_target);
// Copy expr values into boxed bindings.
- let int i = 0;
+ i = 0u;
auto bindings =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_bindings)));
for (ValueRef v in bound_vals) {
auto bound = bcx.build.GEP(bindings,
- vec(C_int(0),C_int(i)));
+ vec(C_int(0), C_int(i as int)));
bcx = copy_ty(r.bcx, INIT, bound, v, bound_tys.(i)).bcx;
- i += 1;
+ i += 1u;
+ }
+
+ // If necessary, copy tydescs describing type parameters into the
+ // appropriate slot in the closure.
+ alt (f_res.generic) {
+ case (none[generic_info]) { /* nothing to do */ }
+ case (some[generic_info](?ginfo)) {
+ auto ty_params_slot =
+ bcx.build.GEP(closure,
+ vec(C_int(0),
+ C_int(abi.closure_elt_ty_params)));
+ auto i = 0;
+ for (ValueRef td in ginfo.tydescs) {
+ auto ty_param_slot = bcx.build.GEP(ty_params_slot,
+ vec(C_int(0),
+ C_int(i)));
+ bcx.build.Store(td, ty_param_slot);
+ i += 1;
+ }
+ }
}
// Make thunk and store thunk-ptr in outer pair's code slot.
@@ -2678,8 +3546,9 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
let @ty.t pair_ty = node_ann_type(cx.fcx.ccx, ann);
let ValueRef llthunk =
- trans_bind_thunk(cx.fcx.ccx, pair_ty, ty.expr_ty(f),
- args, llclosure_ty, bound_tys);
+ trans_bind_thunk(cx.fcx.ccx, pair_ty, outgoing_fty,
+ args, llclosure_ty, bound_tys,
+ ty_param_count);
bcx.build.Store(llthunk, pair_code);
@@ -2687,9 +3556,11 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
auto pair_box = bcx.build.GEP(pair_v,
vec(C_int(0),
C_int(abi.fn_field_box)));
- bcx.build.Store(bcx.build.PointerCast(box,
- T_opaque_closure_ptr()),
- pair_box);
+ bcx.build.Store
+ (bcx.build.PointerCast
+ (box,
+ T_opaque_closure_ptr(bcx.fcx.ccx.tn)),
+ pair_box);
find_scope_cx(cx).cleanups +=
clean(bind drop_slot(_, pair_v, pair_ty));
@@ -2699,11 +3570,153 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
}
}
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
+fn trans_args(@block_ctxt cx,
+ ValueRef llenv,
+ option.t[ValueRef] llobj,
+ option.t[generic_info] gen,
+ option.t[ValueRef] lliterbody,
+ &vec[@ast.expr] es,
+ @ty.t fn_ty)
+ -> tup(@block_ctxt, vec[ValueRef], ValueRef) {
+
+ let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
+ let vec[ValueRef] llargs = vec();
+ let vec[ValueRef] lltydescs = vec();
+ let @block_ctxt bcx = cx;
+
+
+ // Arg 0: Output pointer.
+ auto retty = ty.ty_fn_ret(fn_ty);
+ auto llretslot_res = alloc_ty(bcx, retty);
+ bcx = llretslot_res.bcx;
+ auto llretslot = llretslot_res.val;
+
+ alt (gen) {
+ case (some[generic_info](?g)) {
+ lltydescs = g.tydescs;
+ args = ty.ty_fn_args(g.item_type);
+ retty = ty.ty_fn_ret(g.item_type);
+ }
+ case (_) {
+ }
+ }
+ if (ty.type_has_dynamic_size(retty)) {
+ llargs += bcx.build.PointerCast(llretslot,
+ T_typaram_ptr(cx.fcx.ccx.tn));
+ } else if (ty.count_ty_params(retty) != 0u) {
+ // It's possible that the callee has some generic-ness somewhere in
+ // its return value -- say a method signature within an obj or a fn
+ // type deep in a structure -- which the caller has a concrete view
+    // of. If so, cast the caller's view of the retslot to the callee's
+ // view, for the sake of making a type-compatible call.
+ llargs += cx.build.PointerCast(llretslot,
+ T_ptr(type_of(bcx.fcx.ccx, retty)));
+ } else {
+ llargs += llretslot;
+ }
+
+
+ // Arg 1: Task pointer.
+ llargs += bcx.fcx.lltaskptr;
+
+ // Arg 2: Env (closure-bindings / self-obj)
+ alt (llobj) {
+ case (some[ValueRef](?ob)) {
+ // Every object is always found in memory,
+ // and not-yet-loaded (as part of an lval x.y
+            // dotted method-call).
+ llargs += bcx.build.Load(ob);
+ }
+ case (_) {
+ llargs += llenv;
+ }
+ }
+
+ // Args >3: ty_params ...
+ llargs += lltydescs;
+
+ // ... then possibly an lliterbody argument.
+ alt (lliterbody) {
+ case (none[ValueRef]) {}
+ case (some[ValueRef](?lli)) {
+ llargs += lli;
+ }
+ }
+
+ // ... then explicit args.
+
+ // First we figure out the caller's view of the types of the arguments.
+ // This will be needed if this is a generic call, because the callee has
+ // to cast her view of the arguments to the caller's view.
+ auto arg_tys = type_of_explicit_args(cx.fcx.ccx, args);
+
+ auto i = 0u;
+ for (@ast.expr e in es) {
+ auto mode = args.(i).mode;
+
+ auto val;
+ if (ty.type_is_structural(ty.expr_ty(e))) {
+ auto re = trans_expr(bcx, e);
+ val = re.val;
+ bcx = re.bcx;
+ if (mode == ast.val) {
+ // Until here we've been treating structures by pointer;
+ // we are now passing it as an arg, so need to load it.
+ val = bcx.build.Load(val);
+ }
+ } else if (mode == ast.alias) {
+ let lval_result lv;
+ if (ty.is_lval(e)) {
+ lv = trans_lval(bcx, e);
+ } else {
+ auto r = trans_expr(bcx, e);
+ lv = lval_val(r.bcx, r.val);
+ }
+ bcx = lv.res.bcx;
+
+ if (lv.is_mem) {
+ val = lv.res.val;
+ } else {
+ // Non-mem but we're trying to alias; synthesize an
+ // alloca, spill to it and pass its address.
+ auto llty = val_ty(lv.res.val);
+ auto llptr = lv.res.bcx.build.Alloca(llty);
+ lv.res.bcx.build.Store(lv.res.val, llptr);
+ val = llptr;
+ }
+
+ } else {
+ auto re = trans_expr(bcx, e);
+ val = re.val;
+ bcx = re.bcx;
+ }
+
+ if (ty.count_ty_params(args.(i).ty) > 0u) {
+ auto lldestty = arg_tys.(i);
+ val = bcx.build.PointerCast(val, lldestty);
+ }
+
+ llargs += val;
+ i += 1u;
+ }
+
+ ret tup(bcx, llargs, llretslot);
+}
+
fn trans_call(@block_ctxt cx, @ast.expr f,
- vec[@ast.expr] args, &ast.ann ann) -> result {
+ option.t[ValueRef] lliterbody,
+ vec[@ast.expr] args,
+ &ast.ann ann) -> result {
auto f_res = trans_lval(cx, f);
auto faddr = f_res.res.val;
- auto llclosure = C_null(T_opaque_closure_ptr());
+ auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.ccx.tn));
alt (f_res.llobj) {
case (some[ValueRef](_)) {
@@ -2718,71 +3731,67 @@ fn trans_call(@block_ctxt cx, @ast.expr f,
C_int(abi.fn_field_code)));
faddr = bcx.build.Load(faddr);
- llclosure = bcx.build.GEP(pair, vec(C_int(0),
- C_int(abi.fn_field_box)));
- llclosure = bcx.build.Load(llclosure);
+ auto llclosure = bcx.build.GEP(pair,
+ vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ llenv = bcx.build.Load(llclosure);
}
}
auto fn_ty = ty.expr_ty(f);
auto ret_ty = ty.ann_to_type(ann);
auto args_res = trans_args(f_res.res.bcx,
- llclosure, f_res.llobj,
+ llenv, f_res.llobj,
f_res.generic,
+ lliterbody,
args, fn_ty);
auto bcx = args_res._0;
- auto real_retval = bcx.build.FastCall(faddr, args_res._1);
- auto retval = real_retval;
+ auto llargs = args_res._1;
+ auto llretslot = args_res._2;
- if (ty.type_is_nil(ret_ty)) {
- retval = C_nil();
- }
+ /*
+ log "calling: " + val_str(cx.fcx.ccx.tn, faddr);
- // Check for a generic retslot.
- alt (args_res._2) {
+ for (ValueRef arg in llargs) {
+ log "arg: " + val_str(cx.fcx.ccx.tn, arg);
+ }
+ */
- case (some[ValueRef](?llretslot)) {
- retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
- }
+ bcx.build.FastCall(faddr, llargs);
+ auto retval = C_nil();
- case (none[ValueRef]) {
- if (! (ty.type_is_scalar(ret_ty) ||
- ty.type_is_boxed(ret_ty))) {
- // Structured returns come back as first-class values. This is
- // nice for LLVM but wrong for us; we treat structured values
- // by pointer in most of our code here. So spill it to an
- // alloca.
- auto local = bcx.build.Alloca(type_of(cx.fcx.ccx, ret_ty));
- bcx.build.Store(retval, local);
- retval = local;
- }
- }
+ if (!ty.type_is_nil(ret_ty)) {
+ retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
+ // Retval doesn't correspond to anything really tangible in the frame,
+ // but it's a ref all the same, so we put a note here to drop it when
+ // we're done in this scope.
+ find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
}
- // Retval doesn't correspond to anything really tangible in the frame, but
- // it's a ref all the same, so we put a note here to drop it when we're
- // done in this scope.
- find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
-
ret res(bcx, retval);
}
fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
&ast.ann ann) -> result {
- auto t = node_ann_type(cx.fcx.ccx, ann);
- auto llty = type_of(cx.fcx.ccx, t);
- auto tup_val = cx.build.Alloca(llty);
+ auto bcx = cx;
+ auto t = node_ann_type(bcx.fcx.ccx, ann);
+ auto tup_res = alloc_ty(bcx, t);
+ auto tup_val = tup_res.val;
+ bcx = tup_res.bcx;
+
find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tup_val, t));
let int i = 0;
- auto r = res(cx, C_nil());
+
for (ast.elt e in elts) {
- auto t = ty.expr_ty(e.expr);
- auto src_res = trans_expr(r.bcx, e.expr);
- auto dst_elt = r.bcx.build.GEP(tup_val, vec(C_int(0), C_int(i)));
- r = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, t);
+ auto e_ty = ty.expr_ty(e.expr);
+ auto src_res = trans_expr(bcx, e.expr);
+ bcx = src_res.bcx;
+ auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
+ bcx = dst_res.bcx;
+ bcx = copy_ty(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
- ret res(r.bcx, tup_val);
+ ret res(bcx, tup_val);
}
fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
@@ -2807,44 +3816,89 @@ fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
// FIXME: pass tydesc properly.
auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)));
+ bcx = sub.bcx;
auto llty = type_of(bcx.fcx.ccx, t);
- auto vec_val = sub.bcx.build.IntToPtr(sub.val, llty);
+ auto vec_val = vi2p(bcx, sub.val, llty);
find_scope_cx(bcx).cleanups += clean(bind drop_ty(_, vec_val, t));
- auto body = sub.bcx.build.GEP(vec_val, vec(C_int(0),
- C_int(abi.vec_elt_data)));
+ auto body = bcx.build.GEP(vec_val, vec(C_int(0),
+ C_int(abi.vec_elt_data)));
+
+ auto pseudo_tup_ty =
+ plain_ty(ty.ty_tup(_vec.init_elt[@ty.t](unit_ty,
+ _vec.len[@ast.expr](args))));
let int i = 0;
+
for (@ast.expr e in args) {
- auto src_res = trans_expr(sub.bcx, e);
- auto dst_elt = sub.bcx.build.GEP(body, vec(C_int(0), C_int(i)));
- sub = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, unit_ty);
+ auto src_res = trans_expr(bcx, e);
+ bcx = src_res.bcx;
+ auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
+ bcx = dst_res.bcx;
+ bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, unit_ty).bcx;
i += 1;
}
- auto fill = sub.bcx.build.GEP(vec_val,
- vec(C_int(0), C_int(abi.vec_elt_fill)));
- sub.bcx.build.Store(data_sz, fill);
+ auto fill = bcx.build.GEP(vec_val,
+ vec(C_int(0), C_int(abi.vec_elt_fill)));
+ bcx.build.Store(data_sz, fill);
- ret res(sub.bcx, vec_val);
+ ret res(bcx, vec_val);
}
fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
- &ast.ann ann) -> result {
- auto t = node_ann_type(cx.fcx.ccx, ann);
- auto llty = type_of(cx.fcx.ccx, t);
- auto rec_val = cx.build.Alloca(llty);
+ option.t[@ast.expr] base, &ast.ann ann) -> result {
+
+ auto bcx = cx;
+ auto t = node_ann_type(bcx.fcx.ccx, ann);
+ auto llty = type_of(bcx.fcx.ccx, t);
+ auto rec_res = alloc_ty(bcx, t);
+ auto rec_val = rec_res.val;
+ bcx = rec_res.bcx;
+
find_scope_cx(cx).cleanups += clean(bind drop_ty(_, rec_val, t));
let int i = 0;
- auto r = res(cx, C_nil());
- for (ast.field f in fields) {
- auto t = ty.expr_ty(f.expr);
- auto src_res = trans_expr(r.bcx, f.expr);
- auto dst_elt = r.bcx.build.GEP(rec_val, vec(C_int(0), C_int(i)));
- // FIXME: calculate copy init-ness in typestate.
- r = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, t);
+
+ auto base_val = C_nil();
+
+ alt (base) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?bexp)) {
+ auto base_res = trans_expr(bcx, bexp);
+ bcx = base_res.bcx;
+ base_val = base_res.val;
+ }
+ }
+
+ let vec[ty.field] ty_fields = vec();
+ alt (t.struct) {
+ case (ty.ty_rec(?flds)) { ty_fields = flds; }
+ }
+
+ for (ty.field tf in ty_fields) {
+ auto e_ty = tf.ty;
+ auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
+ bcx = dst_res.bcx;
+
+ auto expr_provided = false;
+ auto src_res = res(bcx, C_nil());
+
+ for (ast.field f in fields) {
+ if (_str.eq(f.ident, tf.ident)) {
+ expr_provided = true;
+ src_res = trans_expr(bcx, f.expr);
+ }
+ }
+ if (!expr_provided) {
+ src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
+ src_res = res(src_res.bcx,
+ load_scalar_or_boxed(bcx, src_res.val, e_ty));
+ }
+
+ bcx = src_res.bcx;
+ bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
- ret res(r.bcx, rec_val);
+ ret res(bcx, rec_val);
}
@@ -2871,6 +3925,10 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
ret trans_for(cx, decl, seq, body);
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
+ ret trans_for_each(cx, decl, seq, body);
+ }
+
case (ast.expr_while(?cond, ?body, _)) {
ret trans_while(cx, cond, body);
}
@@ -2911,10 +3969,11 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
auto lhs_val = load_scalar_or_boxed(lhs_res.res.bcx,
lhs_res.res.val, t);
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
- auto v = trans_eager_binop(rhs_res.bcx, op, lhs_val, rhs_res.val);
+ auto v = trans_eager_binop(rhs_res.bcx, op, t,
+ lhs_val, rhs_res.val);
// FIXME: calculate copy init-ness in typestate.
- ret copy_ty(rhs_res.bcx, DROP_EXISTING,
- lhs_res.res.val, v, t);
+ ret copy_ty(v.bcx, DROP_EXISTING,
+ lhs_res.res.val, v.val, t);
}
case (ast.expr_bind(?f, ?args, ?ann)) {
@@ -2922,7 +3981,7 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
}
case (ast.expr_call(?f, ?args, ?ann)) {
- ret trans_call(cx, f, args, ann);
+ ret trans_call(cx, f, none[ValueRef], args, ann);
}
case (ast.expr_cast(?e, _, ?ann)) {
@@ -2937,8 +3996,36 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
ret trans_tup(cx, args, ann);
}
- case (ast.expr_rec(?args, ?ann)) {
- ret trans_rec(cx, args, ann);
+ case (ast.expr_rec(?args, ?base, ?ann)) {
+ ret trans_rec(cx, args, base, ann);
+ }
+
+ case (ast.expr_ext(_, _, _, ?expanded, _)) {
+ ret trans_expr(cx, expanded);
+ }
+
+ case (ast.expr_fail) {
+ ret trans_fail(cx, e.span, "explicit failure");
+ }
+
+ case (ast.expr_log(?a)) {
+ ret trans_log(cx, a);
+ }
+
+ case (ast.expr_check_expr(?a)) {
+ ret trans_check_expr(cx, a);
+ }
+
+ case (ast.expr_ret(?e)) {
+ ret trans_ret(cx, e);
+ }
+
+ case (ast.expr_put(?e)) {
+ ret trans_put(cx, e);
+ }
+
+ case (ast.expr_be(?e)) {
+ ret trans_be(cx, e);
}
// lval cases fall through to trans_lval and then
@@ -2962,7 +4049,7 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
fn load_scalar_or_boxed(@block_ctxt cx,
ValueRef v,
@ty.t t) -> ValueRef {
- if (ty.type_is_scalar(t) || ty.type_is_boxed(t)) {
+ if (ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t)) {
ret cx.build.Load(v);
} else {
ret v;
@@ -2975,7 +4062,7 @@ fn trans_log(@block_ctxt cx, @ast.expr e) -> result {
auto e_ty = ty.expr_ty(e);
alt (e_ty.struct) {
case (ty.ty_str) {
- auto v = sub.bcx.build.PtrToInt(sub.val, T_int());
+ auto v = vp2i(sub.bcx, sub.val);
ret trans_upcall(sub.bcx,
"upcall_log_str",
vec(v));
@@ -3014,6 +4101,38 @@ fn trans_fail(@block_ctxt cx, common.span sp, str fail_str) -> result {
ret trans_upcall(cx, "upcall_fail", args);
}
+fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
+ auto llcallee = C_nil();
+ auto llenv = C_nil();
+
+ alt (cx.fcx.lliterbody) {
+ case (some[ValueRef](?lli)) {
+ auto slot = cx.build.Alloca(val_ty(lli));
+ cx.build.Store(lli, slot);
+
+ llcallee = cx.build.GEP(slot, vec(C_int(0),
+ C_int(abi.fn_field_code)));
+ llcallee = cx.build.Load(llcallee);
+
+ llenv = cx.build.GEP(slot, vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ llenv = cx.build.Load(llenv);
+ }
+ }
+ auto bcx = cx;
+ auto dummy_retslot = bcx.build.Alloca(T_nil());
+ let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
+ alt (e) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?x)) {
+ auto r = trans_expr(bcx, x);
+ llargs += r.val;
+ bcx = r.bcx;
+ }
+ }
+ ret res(bcx, bcx.build.FastCall(llcallee, llargs));
+}
+
fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
auto bcx = cx;
auto val = C_nil();
@@ -3024,18 +4143,7 @@ fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
auto r = trans_expr(cx, x);
bcx = r.bcx;
val = r.val;
-
- // A return is an implicit copy into a newborn anonymous
- // 'return value' in the caller frame.
- bcx = incr_all_refcnts(bcx, val, t).bcx;
-
- if (ty.type_is_structural(t)) {
- // We usually treat structurals by-pointer; in particular,
- // trans_expr will have given us a structure pointer. But in
- // this case we're about to return. LLVM wants a first-class
- // value here (which makes sense; the frame is going away!)
- val = r.bcx.build.Load(val);
- }
+ bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
}
case (_) { /* fall through */ }
}
@@ -3055,38 +4163,18 @@ fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
}
}
- alt (e) {
- case (some[@ast.expr](?ex)) {
- auto t = ty.expr_ty(ex);
-
- if (ty.type_is_nil(t)) {
- bcx.build.RetVoid();
- val = C_nil();
- ret res(bcx, val); // FIXME: early return needed due to
- // typestate bug
- }
-
- alt (cx.fcx.llretptr) {
- case (some[ValueRef](?llptr)) {
- // Generic return via tydesc + retptr.
- bcx = copy_ty(bcx, INIT, llptr, val, t).bcx;
- bcx.build.RetVoid();
- }
- case (none[ValueRef]) {
- val = bcx.build.Ret(val);
- }
- }
- ret res(bcx, val);
- }
- case (_) { /* fall through */ }
- }
-
- // FIXME: until LLVM has a unit type, we are moving around
- // C_nil values rather than their void type.
bcx.build.RetVoid();
ret res(bcx, C_nil());
}
+fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
+ // FIXME: This should be a typestate precondition
+ check (ast.is_call_expr(e));
+ // FIXME: Turn this into a real tail call once
+ // calling convention issues are settled
+ ret trans_ret(cx, some(e));
+}
+
fn init_local(@block_ctxt cx, @ast.local local) -> result {
// Make a note to drop this slot on the way out.
@@ -3121,22 +4209,6 @@ fn init_local(@block_ctxt cx, @ast.local local) -> result {
fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
auto bcx = cx;
alt (s.node) {
- case (ast.stmt_log(?a)) {
- bcx = trans_log(cx, a).bcx;
- }
-
- case (ast.stmt_check_expr(?a)) {
- bcx = trans_check_expr(cx, a).bcx;
- }
-
- case (ast.stmt_fail) {
- bcx = trans_fail(cx, s.span, "explicit failure").bcx;
- }
-
- case (ast.stmt_ret(?e)) {
- bcx = trans_ret(cx, e).bcx;
- }
-
case (ast.stmt_expr(?e)) {
bcx = trans_expr(cx, e).bcx;
}
@@ -3184,7 +4256,13 @@ fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
- ret new_block_ctxt(fcx, parent_none, SCOPE_BLOCK, "function top level");
+ auto cx = new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
+ "function top level");
+
+ // FIXME: hack to give us some spill room to make up for an LLVM
+ // bug where it destroys its own callee-saves.
+ cx.build.Alloca(T_array(T_int(), 10u));
+ ret cx;
}
// Use this when you're at a curly-brace or similar lexical scope.
@@ -3237,8 +4315,7 @@ iter block_locals(&ast.block b) -> @ast.local {
}
}
-fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
- auto t = node_ann_type(cx.fcx.ccx, local.ann);
+fn alloc_ty(@block_ctxt cx, @ty.t t) -> result {
auto val = C_int(0);
auto bcx = cx;
if (ty.type_has_dynamic_size(t)) {
@@ -3248,10 +4325,16 @@ fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
} else {
val = bcx.build.Alloca(type_of(cx.fcx.ccx, t));
}
- bcx.fcx.lllocals.insert(local.id, val);
ret res(bcx, val);
}
+fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
+ auto t = node_ann_type(cx.fcx.ccx, local.ann);
+ auto r = alloc_ty(cx, t);
+ r.bcx.fcx.lllocals.insert(local.id, r.val);
+ ret r;
+}
+
fn trans_block(@block_ctxt cx, &ast.block b) -> result {
auto bcx = cx;
@@ -3287,12 +4370,19 @@ fn trans_block(@block_ctxt cx, &ast.block b) -> result {
ret res(bcx, r.val);
}
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn new_fn_ctxt(@crate_ctxt cx,
- str name,
ValueRef llfndecl) -> @fn_ctxt {
- let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 0u);
- let ValueRef llclosure = llvm.LLVMGetParam(llfndecl, 1u);
+ let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
+ let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);
let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
@@ -3301,9 +4391,10 @@ fn new_fn_ctxt(@crate_ctxt cx,
ret @rec(llfn=llfndecl,
lltaskptr=lltaskptr,
- llclosure=llclosure,
+ llenv=llenv,
+ llretptr=llretptr,
mutable llself=none[ValueRef],
- mutable llretptr=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
llargs=llargs,
llobjfields=llobjfields,
lllocals=lllocals,
@@ -3311,39 +4402,46 @@ fn new_fn_ctxt(@crate_ctxt cx,
ccx=cx);
}
-// NB: this must match trans_args and type_of_fn_full.
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn create_llargs_for_fn_args(&@fn_ctxt cx,
+ ast.proto proto,
option.t[TypeRef] ty_self,
@ty.t ret_ty,
&vec[ast.arg] args,
&vec[ast.ty_param] ty_params) {
- let uint arg_n = 1u;
-
- for (ast.ty_param tp in ty_params) {
- auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
- check (llarg as int != 0);
- cx.lltydescs.insert(tp.id, llarg);
- arg_n += 1u;
- }
-
- if (ty.type_has_dynamic_size(ret_ty)) {
- cx.llretptr = some[ValueRef](llvm.LLVMGetParam(cx.llfn, arg_n));
- arg_n += 1u;
- }
alt (ty_self) {
case (some[TypeRef](_)) {
- auto llself = llvm.LLVMGetParam(cx.llfn, arg_n);
- check (llself as int != 0);
- cx.llself = some[ValueRef](llself);
- arg_n += 1u;
+ cx.llself = some[ValueRef](cx.llenv);
}
case (_) {
- // llclosure, we don't know what it is.
+ }
+ }
+
+ auto arg_n = 3u;
+
+ if (ty_self == none[TypeRef]) {
+ for (ast.ty_param tp in ty_params) {
+ auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
+ check (llarg as int != 0);
+ cx.lltydescs.insert(tp.id, llarg);
arg_n += 1u;
}
}
+ if (proto == ast.proto_iter) {
+ auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
+ check (llarg as int != 0);
+ cx.lliterbody = some[ValueRef](llarg);
+ arg_n += 1u;
+ }
+
for (ast.arg arg in args) {
auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
check (llarg as int != 0);
@@ -3398,7 +4496,7 @@ fn is_terminated(@block_ctxt cx) -> bool {
fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
alt (ty.ann_to_type(ann).struct) {
- case (ty.ty_fn(?arg_tys, _)) {
+ case (ty.ty_fn(_, ?arg_tys, _)) {
ret arg_tys;
}
}
@@ -3407,7 +4505,7 @@ fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
fn ret_ty_of_fn_ty(@ty.t t) -> @ty.t {
alt (t.struct) {
- case (ty.ty_fn(_, ?ret_ty)) {
+ case (ty.ty_fn(_, _, ?ret_ty)) {
ret ret_ty;
}
}
@@ -3419,42 +4517,73 @@ fn ret_ty_of_fn(ast.ann ann) -> @ty.t {
ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
}
-fn create_llobjfields_for_fields(@block_ctxt cx, ValueRef llself) {
+fn populate_fn_ctxt_from_llself(@block_ctxt cx, ValueRef llself) -> result {
+ auto bcx = cx;
- let vec[TypeRef] llfield_tys = vec();
+ let vec[@ty.t] field_tys = vec();
- for (ast.obj_field f in cx.fcx.ccx.obj_fields) {
- llfield_tys += node_type(cx.fcx.ccx, f.ann);
+ for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
+ field_tys += vec(node_ann_type(bcx.fcx.ccx, f.ann));
}
- let TypeRef llfields_ty = T_struct(llfield_tys);
- let TypeRef lltydesc_ty = T_ptr(T_tydesc());
- let TypeRef llobj_body_ty = T_struct(vec(lltydesc_ty,
- llfields_ty));
- let TypeRef llobj_box_ty = T_ptr(T_box(llobj_body_ty));
+ // Synthesize a tuple type for the fields so that GEP_tup_like() can work
+ // its magic.
+ auto fields_tup_ty = ty.plain_ty(ty.ty_tup(field_tys));
+
+ auto n_typarams = _vec.len[ast.ty_param](bcx.fcx.ccx.obj_typarams);
+ let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.ccx.tn, n_typarams);
auto box_cell =
- cx.build.GEP(llself,
- vec(C_int(0),
- C_int(abi.obj_field_box)));
+ bcx.build.GEP(llself,
+ vec(C_int(0),
+ C_int(abi.obj_field_box)));
+
+ auto box_ptr = bcx.build.Load(box_cell);
+
+ box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);
+
+ auto obj_typarams = bcx.build.GEP(box_ptr,
+ vec(C_int(0),
+ C_int(abi.box_rc_field_body),
+ C_int(abi.obj_body_elt_typarams)));
+
+ // The object fields immediately follow the type parameters, so we skip
+ // over them to get the pointer.
+ auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
+ llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));
+
+ // If we can (i.e. the type is statically sized), then cast the resulting
+ // fields pointer to the appropriate LLVM type. If not, just leave it as
+ // i8 *.
+ if (!ty.type_has_dynamic_size(fields_tup_ty)) {
+ auto llfields_ty = type_of(bcx.fcx.ccx, fields_tup_ty);
+ obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
+ } else {
+ obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
+ }
- auto box_ptr = cx.build.Load(box_cell);
- box_ptr = cx.build.PointerCast(box_ptr, llobj_box_ty);
+ let int i = 0;
- auto obj_fields = cx.build.GEP(box_ptr,
- vec(C_int(0),
- C_int(abi.box_rc_field_body),
- C_int(abi.obj_body_elt_fields)));
+ for (ast.ty_param p in bcx.fcx.ccx.obj_typarams) {
+ let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
+ vec(C_int(0),
+ C_int(i)));
+ lltyparam = bcx.build.Load(lltyparam);
+ bcx.fcx.lltydescs.insert(p.id, lltyparam);
+ i += 1;
+ }
- let int i = 0;
- for (ast.obj_field f in cx.fcx.ccx.obj_fields) {
- let ValueRef llfield = cx.build.GEP(obj_fields,
- vec(C_int(0),
- C_int(i)));
+ i = 0;
+ for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
+ auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
+ bcx = rslt.bcx;
+ auto llfield = rslt.val;
cx.fcx.llobjfields.insert(f.id, llfield);
i += 1;
}
+
+ ret res(bcx, C_nil());
}
fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
@@ -3464,17 +4593,18 @@ fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
auto llfndecl = cx.item_ids.get(fid);
cx.item_names.insert(cx.path, llfndecl);
- auto fcx = new_fn_ctxt(cx, cx.path, llfndecl);
- create_llargs_for_fn_args(fcx, ty_self, ret_ty_of_fn(ann),
- f.inputs, ty_params);
+ auto fcx = new_fn_ctxt(cx, llfndecl);
+ create_llargs_for_fn_args(fcx, f.proto,
+ ty_self, ret_ty_of_fn(ann),
+ f.decl.inputs, ty_params);
auto bcx = new_top_block_ctxt(fcx);
- copy_args_to_allocas(bcx, ty_self, f.inputs,
+ copy_args_to_allocas(bcx, ty_self, f.decl.inputs,
arg_tys_of_fn(ann));
alt (fcx.llself) {
case (some[ValueRef](?llself)) {
- create_llobjfields_for_fields(bcx, llself);
+ bcx = populate_fn_ctxt_from_llself(bcx, llself).bcx;
}
case (_) {
}
@@ -3504,17 +4634,17 @@ fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
auto llfnty = T_nil();
alt (node_ann_type(cx, m.node.ann).struct) {
- case (ty.ty_fn(?inputs, ?output)) {
- llfnty = type_of_fn_full(cx,
+ case (ty.ty_fn(?proto, ?inputs, ?output)) {
+ llfnty = type_of_fn_full(cx, proto,
some[TypeRef](self_ty),
inputs, output);
}
}
- let @crate_ctxt mcx = @rec(path=cx.path + "." + m.node.ident
+ let @crate_ctxt mcx = @rec(path=cx.path + sep() + m.node.ident
with *cx);
- let str s = cx.names.next("_rust_method") + "." + mcx.path;
+ let str s = cx.names.next("_rust_method") + sep() + mcx.path;
let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfnty);
cx.item_ids.insert(m.node.id, llfn);
@@ -3525,7 +4655,7 @@ fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
auto vtbl = C_struct(methods);
auto gvar = llvm.LLVMAddGlobal(cx.llmod,
val_ty(vtbl),
- _str.buf("_rust_vtbl" + "." + cx.path));
+ _str.buf("_rust_vtbl" + sep() + cx.path));
llvm.LLVMSetInitializer(gvar, vtbl);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
@@ -3548,8 +4678,9 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
id=f.id));
}
- auto fcx = new_fn_ctxt(cx, cx.path, llctor_decl);
- create_llargs_for_fn_args(fcx, none[TypeRef], ret_ty_of_fn(ann),
+ auto fcx = new_fn_ctxt(cx, llctor_decl);
+ create_llargs_for_fn_args(fcx, ast.proto_fn,
+ none[TypeRef], ret_ty_of_fn(ann),
fn_args, ty_params);
auto bcx = new_top_block_ctxt(fcx);
@@ -3558,7 +4689,7 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
auto llself_ty = type_of(cx, ret_ty_of_fn(ann));
- auto pair = bcx.build.Alloca(llself_ty);
+ auto pair = bcx.fcx.llretptr;
auto vtbl = trans_vtbl(cx, llself_ty, ob, ty_params);
auto pair_vtbl = bcx.build.GEP(pair,
vec(C_int(0),
@@ -3568,10 +4699,11 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
C_int(abi.obj_field_box)));
bcx.build.Store(vtbl, pair_vtbl);
- let TypeRef llbox_ty = T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
- T_nil()))));
- if (_vec.len[ty.arg](arg_tys) == 0u) {
- // Store null into pair, if no args.
+ let TypeRef llbox_ty = T_opaque_obj_ptr(cx.tn);
+
+ if (_vec.len[ast.ty_param](ty_params) == 0u &&
+ _vec.len[ty.arg](arg_tys) == 0u) {
+ // Store null into pair, if no args or typarams.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
// Malloc a box for the body and copy args in.
@@ -3581,55 +4713,76 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
}
// Synthesize an obj body type.
- let @ty.t fields_ty = ty.plain_ty(ty.ty_tup(obj_fields));
- let TypeRef llfields_ty = type_of(bcx.fcx.ccx, fields_ty);
- let TypeRef llobj_body_ty =
- T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
- llfields_ty))));
+ auto tydesc_ty = plain_ty(ty.ty_type);
+ let vec[@ty.t] tps = vec();
+ for (ast.ty_param tp in ty_params) {
+ append[@ty.t](tps, tydesc_ty);
+ }
+
+ let @ty.t typarams_ty = plain_ty(ty.ty_tup(tps));
+ let @ty.t fields_ty = plain_ty(ty.ty_tup(obj_fields));
+ let @ty.t body_ty = plain_ty(ty.ty_tup(vec(tydesc_ty,
+ typarams_ty,
+ fields_ty)));
+ let @ty.t boxed_body_ty = plain_ty(ty.ty_box(body_ty));
// Malloc a box for the body.
- auto r = trans_malloc_inner(bcx, llobj_body_ty);
- bcx = r.bcx;
- auto box = r.val;
- auto rc = bcx.build.GEP(box,
- vec(C_int(0),
- C_int(abi.box_rc_field_refcnt)));
- auto body = bcx.build.GEP(box,
- vec(C_int(0),
- C_int(abi.box_rc_field_body)));
- bcx.build.Store(C_int(1), rc);
+ auto box = trans_malloc_boxed(bcx, body_ty);
+ bcx = box.bcx;
+ auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
+ vec(0, abi.box_rc_field_refcnt));
+ bcx = rc.bcx;
+ auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
+ vec(0, abi.box_rc_field_body));
+ bcx = body.bcx;
+ bcx.build.Store(C_int(1), rc.val);
// Store body tydesc.
auto body_tydesc =
- bcx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_tydesc)));
-
- auto fields_tydesc = get_tydesc(r.bcx, fields_ty);
- bcx = fields_tydesc.bcx;
- bcx.build.Store(fields_tydesc.val, body_tydesc);
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_tydesc));
+ bcx = body_tydesc.bcx;
+
+ auto body_td = get_tydesc(bcx, body_ty);
+ bcx = body_td.bcx;
+ bcx.build.Store(body_td.val, body_tydesc.val);
+
+ // Copy typarams into captured typarams.
+ auto body_typarams =
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_typarams));
+ bcx = body_typarams.bcx;
+ let int i = 0;
+ for (ast.ty_param tp in ty_params) {
+ auto typaram = bcx.fcx.lltydescs.get(tp.id);
+ auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
+ vec(0, i));
+ bcx = capture.bcx;
+ bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
+ i += 1;
+ }
// Copy args into body fields.
auto body_fields =
- bcx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_fields)));
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_fields));
+ bcx = body_fields.bcx;
- let int i = 0;
+ i = 0;
for (ast.obj_field f in ob.fields) {
auto arg = bcx.fcx.llargs.get(f.id);
arg = load_scalar_or_boxed(bcx, arg, arg_tys.(i).ty);
- auto field = bcx.build.GEP(body_fields,
- vec(C_int(0),C_int(i)));
- bcx = copy_ty(bcx, INIT, field, arg, arg_tys.(i).ty).bcx;
+ auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
+ vec(0, i));
+ bcx = field.bcx;
+ bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
i += 1;
}
-
// Store box ptr in outer pair.
- auto p = bcx.build.PointerCast(box, llbox_ty);
+ auto p = bcx.build.PointerCast(box.val, llbox_ty);
bcx.build.Store(p, pair_box);
}
- bcx.build.Ret(bcx.build.Load(pair));
+ bcx.build.RetVoid();
}
fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
@@ -3652,8 +4805,9 @@ fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
check (cx.item_ids.contains_key(variant.id));
let ValueRef llfndecl = cx.item_ids.get(variant.id);
- auto fcx = new_fn_ctxt(cx, cx.path, llfndecl);
- create_llargs_for_fn_args(fcx, none[TypeRef], ret_ty_of_fn(variant.ann),
+ auto fcx = new_fn_ctxt(cx, llfndecl);
+ create_llargs_for_fn_args(fcx, ast.proto_fn,
+ none[TypeRef], ret_ty_of_fn(variant.ann),
fn_args, ty_params);
auto bcx = new_top_block_ctxt(fcx);
@@ -3661,41 +4815,45 @@ fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
auto arg_tys = arg_tys_of_fn(variant.ann);
copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
- auto info = cx.tags.get(tag_id);
+ // Cast the tag to a type we can GEP into.
+ auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
+ T_opaque_tag_ptr(fcx.ccx.tn));
- auto lltagty = T_struct(vec(T_int(), T_array(T_i8(), info.size)));
-
- // FIXME: better name.
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("tag"), lltagty);
-
- auto lltagptr = bcx.build.Alloca(lltagty);
- auto lldiscrimptr = bcx.build.GEP(lltagptr, vec(C_int(0), C_int(0)));
+ auto lldiscrimptr = bcx.build.GEP(lltagptr,
+ vec(C_int(0), C_int(0)));
bcx.build.Store(C_int(index), lldiscrimptr);
- auto llblobptr = bcx.build.GEP(lltagptr, vec(C_int(0), C_int(1)));
-
- // First, generate the union type.
- let vec[TypeRef] llargtys = vec();
- for (ty.arg arg in arg_tys) {
- llargtys += vec(type_of(cx, arg.ty));
- }
-
- auto llunionty = T_struct(llargtys);
- auto llunionptr = bcx.build.TruncOrBitCast(llblobptr, T_ptr(llunionty));
+ auto llblobptr = bcx.build.GEP(lltagptr,
+ vec(C_int(0), C_int(1)));
i = 0u;
for (ast.variant_arg va in variant.args) {
- auto llargval = bcx.build.Load(fcx.llargs.get(va.id));
- auto lldestptr = bcx.build.GEP(llunionptr,
- vec(C_int(0), C_int(i as int)));
+ auto rslt = GEP_tag(bcx, llblobptr, variant, i as int);
+ bcx = rslt.bcx;
+ auto lldestptr = rslt.val;
+
+ // If this argument to this function is a tag, it'll have come in to
+ // this function as an opaque blob due to the way that type_of()
+ // works. So we have to cast to the destination's view of the type.
+ auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
+ val_ty(lldestptr));
+
+ auto arg_ty = arg_tys.(i).ty;
+ auto llargval;
+ if (ty.type_is_structural(arg_ty)) {
+ llargval = llargptr;
+ } else {
+ llargval = bcx.build.Load(llargptr);
+ }
+
+ rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
+ bcx = rslt.bcx;
- bcx.build.Store(llargval, lldestptr);
i += 1u;
}
- auto lltagval = bcx.build.Load(lltagptr);
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
- bcx.build.Ret(lltagval);
+ bcx.build.RetVoid();
}
// FIXME: this should do some structural hash-consing to avoid
@@ -3732,20 +4890,21 @@ fn trans_const(@crate_ctxt cx, @ast.expr e,
fn trans_item(@crate_ctxt cx, &ast.item item) {
alt (item.node) {
case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_fn(sub_cx, f, fid, none[TypeRef], tps, ann);
}
case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name,
+ auto sub_cx = @rec(path=cx.path + sep() + name,
+ obj_typarams=tps,
obj_fields=ob.fields with *cx);
trans_obj(sub_cx, ob, oid, tps, ann);
}
case (ast.item_mod(?name, ?m, _)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_mod(sub_cx, m);
}
case (ast.item_tag(?name, ?variants, ?tps, ?tag_id)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
auto i = 0;
for (ast.variant variant in variants) {
trans_tag_variant(sub_cx, tag_id, variant, i, tps);
@@ -3753,7 +4912,7 @@ fn trans_item(@crate_ctxt cx, &ast.item item) {
}
}
case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_const(sub_cx, expr, cid, ann);
}
case (_) { /* fall through */ }
@@ -3784,15 +4943,21 @@ fn decl_fn_and_pair(@crate_ctxt cx,
auto llfty = get_pair_fn_ty(llpairty);
// Declare the function itself.
- let str s = cx.names.next("_rust_" + kind) + "." + name;
+ let str s = cx.names.next("_rust_" + kind) + sep() + name;
let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfty);
// Declare the global constant pair that points to it.
- let str ps = cx.names.next("_rust_" + kind + "_pair") + "." + name;
+ let str ps = cx.names.next("_rust_" + kind + "_pair") + sep() + name;
+
+ register_fn_pair(cx, ps, llpairty, llfn, id);
+}
+
+fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
+ ast.def_id id) {
let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
_str.buf(ps));
auto pair = C_struct(vec(llfn,
- C_null(T_opaque_closure_ptr())));
+ C_null(T_opaque_closure_ptr(cx.tn))));
llvm.LLVMSetInitializer(gvar, pair);
llvm.LLVMSetGlobalConstant(gvar, True);
@@ -3804,12 +4969,86 @@ fn decl_fn_and_pair(@crate_ctxt cx,
cx.fn_pairs.insert(id, gvar);
}
+fn native_fn_wrapper_type(@crate_ctxt cx, &ast.ann ann) -> TypeRef {
+ auto x = node_ann_type(cx, ann);
+ alt (x.struct) {
+ case (ty.ty_native_fn(?abi, ?args, ?out)) {
+ ret type_of_fn(cx, ast.proto_fn, args, out);
+ }
+ }
+ fail;
+}
+
+fn decl_native_fn_and_pair(@crate_ctxt cx,
+ str name,
+ &ast.ann ann,
+ ast.def_id id) {
+ // Declare the wrapper.
+ auto wrapper_type = native_fn_wrapper_type(cx, ann);
+ let str s = cx.names.next("_rust_wrapper") + sep() + name;
+ let ValueRef wrapper_fn = decl_fastcall_fn(cx.llmod, s, wrapper_type);
+
+ // Declare the global constant pair that points to it.
+ auto wrapper_pair_type = T_fn_pair(cx.tn, wrapper_type);
+ let str ps = cx.names.next("_rust_wrapper_pair") + sep() + name;
+
+ register_fn_pair(cx, ps, wrapper_pair_type, wrapper_fn, id);
+
+ // Declare the function itself.
+ auto llfty = get_pair_fn_ty(node_type(cx, ann));
+ auto function = decl_cdecl_fn(cx.llmod, name, llfty);
+
+ // Build the wrapper.
+ auto fcx = new_fn_ctxt(cx, wrapper_fn);
+ auto bcx = new_top_block_ctxt(fcx);
+ auto fn_type = node_ann_type(cx, ann);
+
+ let vec[ValueRef] call_args = vec();
+ auto abi = ty.ty_fn_abi(fn_type);
+ auto arg_n = 3u;
+ alt (abi) {
+ case (ast.native_abi_rust) {
+ call_args += vec(fcx.lltaskptr);
+ auto num_ty_param = ty.count_ty_params(plain_ty(fn_type.struct));
+ for each (uint i in _uint.range(0u, num_ty_param)) {
+ auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
+ check (llarg as int != 0);
+ call_args += vec(llarg);
+ arg_n += 1u;
+ }
+ }
+ case (ast.native_abi_cdecl) {
+ }
+ }
+ auto args = ty.ty_fn_args(fn_type);
+ for (ty.arg arg in args) {
+ auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
+ check (llarg as int != 0);
+ call_args += vec(llarg);
+ arg_n += 1u;
+ }
+ auto r = bcx.build.Call(function, call_args);
+ bcx.build.Store(r, fcx.llretptr);
+ bcx.build.RetVoid();
+}
+
+fn collect_native_item(&@crate_ctxt cx, @ast.native_item i) -> @crate_ctxt {
+ alt (i.node) {
+ case (ast.native_item_fn(?name, _, _, ?fid, ?ann)) {
+ cx.native_items.insert(fid, i);
+ if (! cx.obj_methods.contains_key(fid)) {
+ decl_native_fn_and_pair(cx, name, ann, fid);
+ }
+ }
+ case (_) { /* fall through */ }
+ }
+ ret cx;
+}
fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
alt (i.node) {
case (ast.item_fn(?name, ?f, _, ?fid, ?ann)) {
- // TODO: type-params
cx.items.insert(fid, i);
if (! cx.obj_methods.contains_key(fid)) {
decl_fn_and_pair(cx, "fn", name, ann, fid);
@@ -3817,7 +5056,6 @@ fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
}
case (ast.item_obj(?name, ?ob, _, ?oid, ?ann)) {
- // TODO: type-params
cx.items.insert(oid, i);
decl_fn_and_pair(cx, "obj_ctor", name, ann, oid);
for (@ast.method m in ob.methods) {
@@ -3833,13 +5071,7 @@ fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
cx.items.insert(mid, i);
}
- case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto vi = new_def_hash[uint]();
- auto navi = new_def_hash[uint]();
- let vec[tup(ast.def_id,arity)] variant_info = vec();
- cx.tags.insert(tag_id, @rec(th=mk_type_handle(),
- mutable variants=variant_info,
- mutable size=0u));
+ case (ast.item_tag(_, ?variants, ?tps, ?tag_id)) {
cx.items.insert(tag_id, i);
}
@@ -3854,7 +5086,8 @@ fn collect_items(@crate_ctxt cx, @ast.crate crate) {
let fold.ast_fold[@crate_ctxt] fld =
fold.new_identity_fold[@crate_ctxt]();
- fld = @rec( update_env_for_item = bind collect_item(_,_)
+ fld = @rec( update_env_for_item = bind collect_item(_,_),
+ update_env_for_native_item = bind collect_native_item(_,_)
with *fld );
fold.fold_crate[@crate_ctxt](cx, fld, crate);
@@ -3890,103 +5123,30 @@ fn collect_tag_ctors(@crate_ctxt cx, @ast.crate crate) {
}
-// The tag type resolution pass, which determines all the LLVM types that
-// correspond to each tag type in the crate.
-
-fn resolve_tag_types_for_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
- alt (i.node) {
- case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto max_align = 0u;
- auto max_size = 0u;
-
- auto info = cx.tags.get(tag_id);
- let vec[tup(ast.def_id,arity)] variant_info = vec();
-
- for (ast.variant variant in variants) {
- auto arity_info;
- if (_vec.len[ast.variant_arg](variant.args) > 0u) {
- auto llvariantty = type_of_variant(cx, variant);
- auto align =
- llvm.LLVMPreferredAlignmentOfType(cx.td.lltd,
- llvariantty);
- auto size =
- llvm.LLVMStoreSizeOfType(cx.td.lltd,
- llvariantty) as uint;
- if (max_align < align) { max_align = align; }
- if (max_size < size) { max_size = size; }
-
- arity_info = n_ary;
- } else {
- arity_info = nullary;
- }
-
- variant_info += vec(tup(variant.id, arity_info));
- }
-
- info.variants = variant_info;
- info.size = max_size;
-
- // FIXME: alignment is wrong here, manually insert padding I
- // guess :(
- auto tag_ty = T_struct(vec(T_int(), T_array(T_i8(), max_size)));
- auto th = cx.tags.get(tag_id).th.llth;
- llvm.LLVMRefineType(llvm.LLVMResolveTypeHandle(th), tag_ty);
- }
- case (_) {
- // fall through
- }
- }
-
- ret cx;
-}
-
-fn resolve_tag_types(@crate_ctxt cx, @ast.crate crate) {
- let fold.ast_fold[@crate_ctxt] fld =
- fold.new_identity_fold[@crate_ctxt]();
-
- fld = @rec( update_env_for_item = bind resolve_tag_types_for_item(_,_)
- with *fld );
-
- fold.fold_crate[@crate_ctxt](cx, fld, crate);
-}
-
// The constant translation pass.
fn trans_constant(&@crate_ctxt cx, @ast.item it) -> @crate_ctxt {
alt (it.node) {
case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto info = cx.tags.get(tag_id);
+ auto i = 0u;
+ auto n_variants = _vec.len[ast.variant](variants);
+ while (i < n_variants) {
+ auto variant = variants.(i);
- auto tag_ty = llvm.LLVMResolveTypeHandle(info.th.llth);
- check (llvm.LLVMCountStructElementTypes(tag_ty) == 2u);
- auto elts = vec(0 as TypeRef, 0 as TypeRef);
- llvm.LLVMGetStructElementTypes(tag_ty, _vec.buf[TypeRef](elts));
- auto union_ty = elts.(1);
+ auto discrim_val = C_int(i as int);
- auto i = 0u;
- while (i < _vec.len[tup(ast.def_id,arity)](info.variants)) {
- auto variant_info = info.variants.(i);
- alt (variant_info._1) {
- case (nullary) {
- // Nullary tags become constants.
- auto union_val = C_zero_byte_arr(info.size as uint);
- auto val = C_struct(vec(C_int(i as int), union_val));
-
- // FIXME: better name
- auto gvar = llvm.LLVMAddGlobal(cx.llmod, val_ty(val),
- _str.buf("tag"));
- llvm.LLVMSetInitializer(gvar, val);
- llvm.LLVMSetGlobalConstant(gvar, True);
- llvm.LLVMSetLinkage(gvar,
- lib.llvm.LLVMPrivateLinkage
- as llvm.Linkage);
- cx.item_ids.insert(variant_info._0, gvar);
- }
- case (n_ary) {
- // N-ary tags are treated as functions and generated
- // later.
- }
- }
+ // FIXME: better name.
+ auto discrim_gvar = llvm.LLVMAddGlobal(cx.llmod, T_int(),
+ _str.buf("tag_discrim"));
+
+ // FIXME: Eventually we do want to export these, but we need
+ // to figure out what name they get first!
+ llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
+ llvm.LLVMSetGlobalConstant(discrim_gvar, True);
+ llvm.LLVMSetLinkage(discrim_gvar, lib.llvm.LLVMPrivateLinkage
+ as llvm.Linkage);
+
+ cx.discrims.insert(variant.id, discrim_gvar);
i += 1u;
}
@@ -4016,21 +5176,36 @@ fn trans_constants(@crate_ctxt cx, @ast.crate crate) {
fold.fold_crate[@crate_ctxt](cx, fld, crate);
}
+
+fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
+ ret cx.build.PtrToInt(v, T_int());
+}
+
+
+fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
+ ret cx.build.IntToPtr(v, t);
+}
+
fn p2i(ValueRef v) -> ValueRef {
ret llvm.LLVMConstPtrToInt(v, T_int());
}
+fn i2p(ValueRef v, TypeRef t) -> ValueRef {
+ ret llvm.LLVMConstIntToPtr(v, t);
+}
+
fn trans_exit_task_glue(@crate_ctxt cx) {
let vec[TypeRef] T_args = vec();
let vec[ValueRef] V_args = vec();
auto llfn = cx.glues.exit_task_glue;
- let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 3u);
auto fcx = @rec(llfn=llfn,
lltaskptr=lltaskptr,
- llclosure=C_null(T_opaque_closure_ptr()),
+ llenv=C_null(T_opaque_closure_ptr(cx.tn)),
+ llretptr=C_null(T_ptr(T_nil())),
mutable llself=none[ValueRef],
- mutable llretptr=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
llargs=new_def_hash[ValueRef](),
llobjfields=new_def_hash[ValueRef](),
lllocals=new_def_hash[ValueRef](),
@@ -4043,9 +5218,9 @@ fn trans_exit_task_glue(@crate_ctxt cx) {
}
fn create_typedefs(@crate_ctxt cx) {
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_crate"), T_crate());
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_task"), T_task());
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_tydesc"), T_tydesc());
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("crate"), T_crate(cx.tn));
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("task"), T_task(cx.tn));
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("tydesc"), T_tydesc(cx.tn));
}
fn create_crate_constant(@crate_ctxt cx) {
@@ -4075,12 +5250,37 @@ fn create_crate_constant(@crate_ctxt cx) {
exit_task_glue_off, // size_t main_exit_task_glue_off
C_null(T_int()), // int n_rust_syms
C_null(T_int()), // int n_c_syms
- C_null(T_int()) // int n_libs
+ C_null(T_int()), // int n_libs
+ C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
));
llvm.LLVMSetInitializer(cx.crate_ptr, crate_val);
}
+fn find_main_fn(@crate_ctxt cx) -> ValueRef {
+ auto e = sep() + "main";
+ let ValueRef v = C_nil();
+ let uint n = 0u;
+ for each (tup(str,ValueRef) i in cx.item_names.items()) {
+ if (_str.ends_with(i._0, e)) {
+ n += 1u;
+ v = i._1;
+ }
+ }
+ alt (n) {
+ case (0u) {
+ cx.sess.err("main fn not found");
+ }
+ case (1u) {
+ ret v;
+ }
+ case (_) {
+ cx.sess.err("multiple main fns found");
+ }
+ }
+ fail;
+}
+
fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
auto T_main_args = vec(T_int(), T_int());
auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int());
@@ -4100,8 +5300,7 @@ fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
auto llargc = llvm.LLVMGetParam(llmain, 0u);
auto llargv = llvm.LLVMGetParam(llmain, 1u);
- check (cx.item_names.contains_key("_rust.main"));
- auto llrust_main = cx.item_names.get("_rust.main");
+ auto llrust_main = find_main_fn(cx);
//
// Emit the moral equivalent of:
@@ -4131,6 +5330,24 @@ fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {
ret intrinsics;
}
+
+fn trace_str(@block_ctxt cx, str s) {
+ trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.ccx, s))));
+}
+
+fn trace_word(@block_ctxt cx, ValueRef v) {
+ trans_upcall(cx, "upcall_trace_word", vec(v));
+}
+
+fn trace_ptr(@block_ctxt cx, ValueRef v) {
+ trace_word(cx, cx.build.PtrToInt(v, T_int()));
+}
+
+fn trap(@block_ctxt bcx) {
+ let vec[ValueRef] v = vec();
+ bcx.build.Call(bcx.fcx.ccx.intrinsics.get("llvm.trap"), v);
+}
+
fn check_module(ModuleRef llmod) {
auto pm = mk_pass_manager();
llvm.LLVMAddVerifierPass(pm.llpm);
@@ -4139,8 +5356,8 @@ fn check_module(ModuleRef llmod) {
// TODO: run the linter here also, once there are llvm-c bindings for it.
}
-fn make_no_op_type_glue(ModuleRef llmod) -> ValueRef {
- auto ty = T_fn(vec(T_taskptr(), T_ptr(T_i8())), T_void());
+fn make_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
+ auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
auto fun = decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
auto bb_name = _str.buf("_rust_no_op_type_glue_bb");
auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
@@ -4234,9 +5451,197 @@ fn make_bzero_glue(ModuleRef llmod) -> ValueRef {
ret fun;
}
-fn make_glues(ModuleRef llmod) -> @glue_fns {
- ret @rec(activate_glue = decl_glue(llmod, abi.activate_glue_name()),
- yield_glue = decl_glue(llmod, abi.yield_glue_name()),
+fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
+ /*
+ * Args to vec_append_glue:
+ *
+ * 0. (Implicit) task ptr
+ *
+ * 1. Pointer to the tydesc of the vec, so that we can tell if it's gc
+ * mem, and have a tydesc to pass to malloc if we're allocating anew.
+ *
+ * 2. Pointer to the tydesc of the vec's stored element type, so that
+ * elements can be copied to a newly alloc'ed vec if one must be
+ * created.
+ *
+ * 3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
+ *
+ * 4. Src vec (i.e. ptr to rust_vec).
+ *
+ * 5. Flag indicating whether to skip trailing null on dst.
+ *
+ */
+
+ auto ty = T_fn(vec(T_taskptr(tn),
+ T_ptr(T_tydesc(tn)),
+ T_ptr(T_tydesc(tn)),
+ T_ptr(T_opaque_vec_ptr()),
+ T_opaque_vec_ptr(), T_bool()),
+ T_void());
+
+ auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
+ ret llfn;
+}
+
+fn trans_vec_append_glue(@crate_ctxt cx) {
+
+ auto llfn = cx.glues.vec_append_glue;
+
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
+ let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
+ let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
+ let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
+ let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
+ let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);
+
+ auto fcx = @rec(llfn=llfn,
+ lltaskptr=lltaskptr,
+ llenv=C_null(T_ptr(T_nil())),
+ llretptr=C_null(T_ptr(T_nil())),
+ mutable llself=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
+ llargs=new_def_hash[ValueRef](),
+ llobjfields=new_def_hash[ValueRef](),
+ lllocals=new_def_hash[ValueRef](),
+ lltydescs=new_def_hash[ValueRef](),
+ ccx=cx);
+
+ auto bcx = new_top_block_ctxt(fcx);
+
+ auto lldst_vec = bcx.build.Load(lldst_vec_ptr);
+
+ // First the dst vec needs to grow to accommodate the src vec.
+ // To do this we have to figure out how many bytes to add.
+
+ fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ }
+
+ fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
+ ret bcx.build.Store(fill,
+ bcx.build.GEP(v,
+ vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ }
+
+ fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
+ ValueRef skipnull) -> ValueRef {
+ auto f = bcx.build.Load(bcx.build.GEP(v,
+ vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
+ }
+
+ fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ auto p = bcx.build.GEP(v, vec(C_int(0),
+ C_int(abi.vec_elt_data)));
+ ret bcx.build.PointerCast(p, T_ptr(T_i8()));
+ }
+
+
+ fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ auto len = vec_fill(bcx, v);
+ ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
+ }
+
+ fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
+ ValueRef skipnull) -> ValueRef {
+ auto len = vec_fill_adjusted(bcx, v, skipnull);
+ ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
+ }
+
+
+ auto llcopy_dst_ptr = bcx.build.Alloca(T_int());
+ auto llnew_vec_res =
+ trans_upcall(bcx, "upcall_vec_grow",
+ vec(vp2i(bcx, lldst_vec),
+ vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
+ vp2i(bcx, llcopy_dst_ptr),
+ vp2i(bcx, llvec_tydesc)));
+
+ bcx = llnew_vec_res.bcx;
+ auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
+ T_opaque_vec_ptr());
+
+ put_vec_fill(bcx, llnew_vec, C_int(0));
+
+ auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
+ auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");
+
+ auto pp0 = bcx.build.Alloca(T_ptr(T_i8()));
+ bcx.build.Store(vec_p0(bcx, llnew_vec), pp0);
+
+ bcx.build.CondBr(bcx.build.TruncOrBitCast
+ (bcx.build.Load(llcopy_dst_ptr),
+ T_i1()),
+ copy_dst_cx.llbb,
+ copy_src_cx.llbb);
+
+
+ fn copy_elts(@block_ctxt cx,
+ ValueRef elt_tydesc,
+ ValueRef dst,
+ ValueRef src,
+ ValueRef n_bytes) -> result {
+
+ auto src_lim = cx.build.GEP(src, vec(n_bytes));
+
+ auto elt_llsz =
+ cx.build.Load(cx.build.GEP(elt_tydesc,
+ vec(C_int(0),
+ C_int(abi.tydesc_field_size))));
+
+ fn take_one(ValueRef elt_tydesc,
+ @block_ctxt cx, ValueRef v) -> result {
+ call_tydesc_glue_full(cx, v,
+ elt_tydesc,
+ abi.tydesc_field_take_glue_off);
+ ret res(cx, v);
+ }
+
+ auto bcx = iter_sequence_raw(cx, src, src_lim,
+ elt_llsz, bind take_one(elt_tydesc,
+ _, _)).bcx;
+
+ ret call_memcpy(bcx, dst, src, n_bytes);
+ }
+
+ // Copy any dst elements in, omitting null if doing str.
+ auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
+ copy_dst_cx = copy_elts(copy_dst_cx,
+ llelt_tydesc,
+ copy_dst_cx.build.Load(pp0),
+ vec_p0(copy_dst_cx, lldst_vec),
+ n_bytes).bcx;
+
+ put_vec_fill(copy_dst_cx, llnew_vec, n_bytes);
+ copy_dst_cx.build.Store(vec_p1(copy_dst_cx, llnew_vec), pp0);
+ copy_dst_cx.build.Br(copy_src_cx.llbb);
+
+
+ // Copy any src elements in, carrying along null if doing str.
+ n_bytes = vec_fill(copy_src_cx, llsrc_vec);
+ copy_src_cx = copy_elts(copy_src_cx,
+ llelt_tydesc,
+ copy_src_cx.build.Load(pp0),
+ vec_p0(copy_src_cx, llsrc_vec),
+ n_bytes).bcx;
+
+ put_vec_fill(copy_src_cx, llnew_vec,
+ copy_src_cx.build.Add(vec_fill(copy_src_cx,
+ llnew_vec),
+ n_bytes));
+
+ // Write new_vec back through the alias we were given.
+ copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
+ copy_src_cx.build.RetVoid();
+}
+
+
+fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
+ ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
+ yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
/*
* Note: the signature passed to decl_cdecl_fn here looks unusual
* because it is. It corresponds neither to an upcall signature
@@ -4248,14 +5653,19 @@ fn make_glues(ModuleRef llmod) -> @glue_fns {
* this is the signature required to retrieve it.
*/
exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
- T_fn(vec(T_taskptr()), T_void())),
+ T_fn(vec(T_int(),
+ T_int(),
+ T_int(),
+ T_taskptr(tn)),
+ T_void())),
upcall_glues =
- _vec.init_fn[ValueRef](bind decl_upcall(llmod, _),
+ _vec.init_fn[ValueRef](bind decl_upcall_glue(llmod, tn, _),
abi.n_upcall_glues as uint),
- no_op_type_glue = make_no_op_type_glue(llmod),
+ no_op_type_glue = make_no_op_type_glue(llmod, tn),
memcpy_glue = make_memcpy_glue(llmod),
- bzero_glue = make_bzero_glue(llmod));
+ bzero_glue = make_bzero_glue(llmod),
+ vec_append_glue = make_vec_append_glue(llmod, tn));
}
fn trans_crate(session.session sess, @ast.crate crate, str output,
@@ -4267,33 +5677,40 @@ fn trans_crate(session.session sess, @ast.crate crate, str output,
llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
auto td = mk_target_data(x86.get_data_layout());
+ auto tn = mk_type_names();
let ValueRef crate_ptr =
- llvm.LLVMAddGlobal(llmod, T_crate(), _str.buf("rust_crate"));
+ llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));
llvm.LLVMSetModuleInlineAsm(llmod, _str.buf(x86.get_module_asm()));
auto intrinsics = declare_intrinsics(llmod);
- auto glues = make_glues(llmod);
+ auto glues = make_glues(llmod, tn);
auto hasher = ty.hash_ty;
auto eqer = ty.eq_ty;
- auto tydescs = map.mk_hashmap[@ty.t,ValueRef](hasher, eqer);
+ auto tag_sizes = map.mk_hashmap[@ty.t,uint](hasher, eqer);
+ auto tydescs = map.mk_hashmap[@ty.t,@tydesc_info](hasher, eqer);
+ let vec[ast.ty_param] obj_typarams = vec();
let vec[ast.obj_field] obj_fields = vec();
auto cx = @rec(sess = sess,
llmod = llmod,
td = td,
+ tn = tn,
crate_ptr = crate_ptr,
upcalls = new_str_hash[ValueRef](),
intrinsics = intrinsics,
item_names = new_str_hash[ValueRef](),
item_ids = new_def_hash[ValueRef](),
items = new_def_hash[@ast.item](),
- tags = new_def_hash[@tag_info](),
+ native_items = new_def_hash[@ast.native_item](),
+ tag_sizes = tag_sizes,
+ discrims = new_def_hash[ValueRef](),
fn_pairs = new_def_hash[ValueRef](),
consts = new_def_hash[ValueRef](),
obj_methods = new_def_hash[()](),
tydescs = tydescs,
+ obj_typarams = obj_typarams,
obj_fields = obj_fields,
glues = glues,
names = namegen(0),
@@ -4302,12 +5719,12 @@ fn trans_crate(session.session sess, @ast.crate crate, str output,
create_typedefs(cx);
collect_items(cx, crate);
- resolve_tag_types(cx, crate);
collect_tag_ctors(cx, crate);
trans_constants(cx, crate);
trans_mod(cx, crate.node.module);
trans_exit_task_glue(cx);
+ trans_vec_append_glue(cx);
create_crate_constant(cx);
if (!shared) {
trans_main_fn(cx, cx.crate_ptr);
diff --git a/src/comp/middle/ty.rs b/src/comp/middle/ty.rs
index f27595a1..5a595db6 100644
--- a/src/comp/middle/ty.rs
+++ b/src/comp/middle/ty.rs
@@ -19,7 +19,10 @@ import util.common.span;
type arg = rec(ast.mode mode, @t ty);
type field = rec(ast.ident ident, @t ty);
-type method = rec(ast.ident ident, vec[arg] inputs, @t output);
+type method = rec(ast.proto proto,
+ ast.ident ident,
+ vec[arg] inputs,
+ @t output);
// NB: If you change this, you'll probably want to change the corresponding
// AST structure in front/ast.rs as well.
@@ -32,16 +35,19 @@ tag sty {
ty_machine(util.common.ty_mach);
ty_char;
ty_str;
- ty_tag(ast.def_id);
+ ty_tag(ast.def_id, vec[@t]);
ty_box(@t);
ty_vec(@t);
ty_tup(vec[@t]);
ty_rec(vec[field]);
- ty_fn(vec[arg], @t); // TODO: effect
+ ty_fn(ast.proto, vec[arg], @t); // TODO: effect
+ ty_native_fn(ast.native_abi, vec[arg], @t); // TODO: effect
ty_obj(vec[method]);
ty_var(int); // ephemeral type var
ty_local(ast.def_id); // type of a local var
- ty_param(ast.def_id); // fn type param
+ ty_param(ast.def_id); // fn/tag type param
+ ty_type;
+ ty_native;
// TODO: ty_fn_arg(@t), for a possibly-aliased function argument
}
@@ -103,6 +109,7 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
case (ast.ty_str) { s = "str"; }
case (ast.ty_box(?t)) { s = "@" + ast_ty_to_str(t); }
case (ast.ty_vec(?t)) { s = "vec[" + ast_ty_to_str(t) + "]"; }
+ case (ast.ty_type) { s = "type"; }
case (ast.ty_tup(?elts)) {
auto f = ast_ty_to_str;
@@ -118,9 +125,13 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
s += ")";
}
- case (ast.ty_fn(?inputs, ?output)) {
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
auto f = ast_fn_input_to_str;
- s = "fn(";
+ if (proto == ast.proto_fn) {
+ s = "fn(";
+ } else {
+ s = "iter(";
+ }
auto is = _vec.map[rec(ast.mode mode, @ast.ty ty),str](f, inputs);
s += _str.connect(is, ", ");
s += ")";
@@ -138,6 +149,7 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
s = "mutable " + ast_ty_to_str(t);
}
+
case (_) {
fail; // FIXME: typestate bug
}
@@ -157,6 +169,8 @@ fn path_to_str(&ast.path pth) -> str {
ret result;
}
+// FIXME use the pretty-printer for this once it has a concept of an
+// abstract stream
fn ty_to_str(&@t typ) -> str {
fn fn_input_to_str(&rec(ast.mode mode, @t ty) input) -> str {
@@ -170,10 +184,14 @@ fn ty_to_str(&@t typ) -> str {
ret s + ty_to_str(input.ty);
}
- fn fn_to_str(option.t[ast.ident] ident,
+ fn fn_to_str(ast.proto proto,
+ option.t[ast.ident] ident,
vec[arg] inputs, @t output) -> str {
auto f = fn_input_to_str;
auto s = "fn";
+ if (proto == ast.proto_iter) {
+ s = "iter";
+ }
alt (ident) {
case (some[ast.ident](?i)) {
s += " ";
@@ -193,7 +211,8 @@ fn ty_to_str(&@t typ) -> str {
}
fn method_to_str(&method m) -> str {
- ret fn_to_str(some[ast.ident](m.ident), m.inputs, m.output) + ";";
+ ret fn_to_str(m.proto, some[ast.ident](m.ident),
+ m.inputs, m.output) + ";";
}
fn field_to_str(&field f) -> str {
@@ -206,6 +225,7 @@ fn ty_to_str(&@t typ) -> str {
}
alt (typ.struct) {
+ case (ty_native) { s = "native"; }
case (ty_nil) { s = "()"; }
case (ty_bool) { s = "bool"; }
case (ty_int) { s = "int"; }
@@ -215,6 +235,7 @@ fn ty_to_str(&@t typ) -> str {
case (ty_str) { s = "str"; }
case (ty_box(?t)) { s = "@" + ty_to_str(t); }
case (ty_vec(?t)) { s = "vec[" + ty_to_str(t) + "]"; }
+ case (ty_type) { s = "type"; }
case (ty_tup(?elems)) {
auto f = ty_to_str;
@@ -228,13 +249,23 @@ fn ty_to_str(&@t typ) -> str {
s = "rec(" + _str.connect(strs, ",") + ")";
}
- case (ty_tag(_)) {
+ case (ty_tag(?id, ?tps)) {
// The user should never see this if the cname is set properly!
- s = "<tag>";
+ s = "<tag#" + util.common.istr(id._0) + ":" +
+ util.common.istr(id._1) + ">";
+ if (_vec.len[@t](tps) > 0u) {
+ auto f = ty_to_str;
+ auto strs = _vec.map[@t,str](f, tps);
+ s += "[" + _str.connect(strs, ",") + "]";
+ }
+ }
+
+ case (ty_fn(?proto, ?inputs, ?output)) {
+ s = fn_to_str(proto, none[ast.ident], inputs, output);
}
- case (ty_fn(?inputs, ?output)) {
- s = fn_to_str(none[ast.ident], inputs, output);
+ case (ty_native_fn(_, ?inputs, ?output)) {
+ s = fn_to_str(ast.proto_fn, none[ast.ident], inputs, output);
}
case (ty_obj(?meths)) {
@@ -280,13 +311,21 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
case (ty_machine(_)) { ret fld.fold_simple_ty(ty); }
case (ty_char) { ret fld.fold_simple_ty(ty); }
case (ty_str) { ret fld.fold_simple_ty(ty); }
- case (ty_tag(_)) { ret fld.fold_simple_ty(ty); }
+ case (ty_type) { ret fld.fold_simple_ty(ty); }
+ case (ty_native) { ret fld.fold_simple_ty(ty); }
case (ty_box(?subty)) {
ret rewrap(ty, ty_box(fold_ty(fld, subty)));
}
case (ty_vec(?subty)) {
ret rewrap(ty, ty_vec(fold_ty(fld, subty)));
}
+ case (ty_tag(?tid, ?subtys)) {
+ let vec[@t] new_subtys = vec();
+ for (@t subty in subtys) {
+ new_subtys += vec(fold_ty(fld, subty));
+ }
+ ret rewrap(ty, ty_tag(tid, new_subtys));
+ }
case (ty_tup(?subtys)) {
let vec[@t] new_subtys = vec();
for (@t subty in subtys) {
@@ -302,13 +341,21 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
}
ret rewrap(ty, ty_rec(new_fields));
}
- case (ty_fn(?args, ?ret_ty)) {
+ case (ty_fn(?proto, ?args, ?ret_ty)) {
let vec[arg] new_args = vec();
for (arg a in args) {
auto new_ty = fold_ty(fld, a.ty);
new_args += vec(rec(mode=a.mode, ty=new_ty));
}
- ret rewrap(ty, ty_fn(new_args, fold_ty(fld, ret_ty)));
+ ret rewrap(ty, ty_fn(proto, new_args, fold_ty(fld, ret_ty)));
+ }
+ case (ty_native_fn(?abi, ?args, ?ret_ty)) {
+ let vec[arg] new_args = vec();
+ for (arg a in args) {
+ auto new_ty = fold_ty(fld, a.ty);
+ new_args += vec(rec(mode=a.mode, ty=new_ty));
+ }
+ ret rewrap(ty, ty_native_fn(abi, new_args, fold_ty(fld, ret_ty)));
}
case (ty_obj(?methods)) {
let vec[method] new_methods = vec();
@@ -317,7 +364,8 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
for (arg a in m.inputs) {
new_args += vec(rec(mode=a.mode, ty=fold_ty(fld, a.ty)));
}
- new_methods += vec(rec(ident=m.ident, inputs=new_args,
+ new_methods += vec(rec(proto=m.proto, ident=m.ident,
+ inputs=new_args,
output=fold_ty(fld, m.output)));
}
ret rewrap(ty, ty_obj(new_methods));
@@ -327,7 +375,7 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
case (ty_param(_)) { ret fld.fold_simple_ty(ty); }
}
- ret ty;
+ fail;
}
// Type utilities
@@ -349,24 +397,44 @@ fn type_is_nil(@t ty) -> bool {
fail;
}
+
fn type_is_structural(@t ty) -> bool {
alt (ty.struct) {
- case (ty_tup(_)) { ret true; }
- case (ty_rec(_)) { ret true; }
- case (ty_tag(_)) { ret true; }
- case (ty_fn(_,_)) { ret true; }
- case (ty_obj(_)) { ret true; }
- case (_) { ret false; }
+ case (ty_tup(_)) { ret true; }
+ case (ty_rec(_)) { ret true; }
+ case (ty_tag(_,_)) { ret true; }
+ case (ty_fn(_,_,_)) { ret true; }
+ case (ty_obj(_)) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+fn type_is_sequence(@t ty) -> bool {
+ alt (ty.struct) {
+ case (ty_str) { ret true; }
+ case (ty_vec(_)) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+fn sequence_element_type(@t ty) -> @t {
+ alt (ty.struct) {
+ case (ty_str) { ret plain_ty(ty_machine(common.ty_u8)); }
+ case (ty_vec(?e)) { ret e; }
}
fail;
}
+
fn type_is_tup_like(@t ty) -> bool {
alt (ty.struct) {
- case (ty_tup(_)) { ret true; }
- case (ty_rec(_)) { ret true; }
- case (ty_tag(_)) { ret true; }
- case (_) { ret false; }
+ case (ty_box(_)) { ret true; }
+ case (ty_tup(_)) { ret true; }
+ case (ty_rec(_)) { ret true; }
+ case (ty_tag(_,_)) { ret true; }
+ case (_) { ret false; }
}
fail;
}
@@ -402,6 +470,17 @@ fn type_is_scalar(@t ty) -> bool {
case (ty_uint) { ret true; }
case (ty_machine(_)) { ret true; }
case (ty_char) { ret true; }
+ case (ty_type) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+// FIXME: should we just return true for native types in
+// type_is_scalar?
+fn type_is_native(@t ty) -> bool {
+ alt (ty.struct) {
+ case (ty_native) { ret true; }
case (_) { ret false; }
}
fail;
@@ -423,6 +502,13 @@ fn type_has_dynamic_size(@t ty) -> bool {
i += 1u;
}
}
+ case (ty_tag(_, ?subtys)) {
+ auto i = 0u;
+ while (i < _vec.len[@t](subtys)) {
+ if (type_has_dynamic_size(subtys.(i))) { ret true; }
+ i += 1u;
+ }
+ }
case (ty_param(_)) { ret true; }
case (_) { /* fall through */ }
}
@@ -547,23 +633,42 @@ fn count_ty_params(@t ty) -> uint {
// Type accessors for substructures of types
fn ty_fn_args(@t fty) -> vec[arg] {
- alt (fty.struct) {
- case (ty.ty_fn(?a, _)) { ret a; }
- }
+ alt (fty.struct) {
+ case (ty.ty_fn(_, ?a, _)) { ret a; }
+ case (ty.ty_native_fn(_, ?a, _)) { ret a; }
+ }
+ fail;
+}
+
+fn ty_fn_proto(@t fty) -> ast.proto {
+ alt (fty.struct) {
+ case (ty.ty_fn(?p, _, _)) { ret p; }
+ }
+ fail;
+}
+
+fn ty_fn_abi(@t fty) -> ast.native_abi {
+ alt (fty.struct) {
+ case (ty.ty_native_fn(?a, _, _)) { ret a; }
+ }
+ fail;
}
fn ty_fn_ret(@t fty) -> @t {
- alt (fty.struct) {
- case (ty.ty_fn(_, ?r)) { ret r; }
- }
+ alt (fty.struct) {
+ case (ty.ty_fn(_, _, ?r)) { ret r; }
+ case (ty.ty_native_fn(_, _, ?r)) { ret r; }
+ }
+ fail;
}
fn is_fn_ty(@t fty) -> bool {
- alt (fty.struct) {
- case (ty.ty_fn(_, _)) { ret true; }
- case (_) { ret false; }
- }
- ret false;
+ alt (fty.struct) {
+ case (ty.ty_fn(_, _, _)) { ret true; }
+ case (ty.ty_native_fn(_, _, _)) { ret true; }
+ case (_) { ret false; }
+ }
+ ret false;
}
@@ -571,7 +676,24 @@ fn is_fn_ty(@t fty) -> bool {
// Given an item, returns the associated type as well as a list of the IDs of
// its type parameters.
-fn item_ty(@ast.item it) -> tup(vec[ast.def_id], @t) {
+type ty_params_and_ty = tup(vec[ast.def_id], @t);
+fn native_item_ty(@ast.native_item it) -> ty_params_and_ty {
+ auto ty_params;
+ auto result_ty;
+ alt (it.node) {
+ case (ast.native_item_fn(_, _, ?tps, _, ?ann)) {
+ ty_params = tps;
+ result_ty = ann_to_type(ann);
+ }
+ }
+ let vec[ast.def_id] ty_param_ids = vec();
+ for (ast.ty_param tp in ty_params) {
+ ty_param_ids += vec(tp.id);
+ }
+ ret tup(ty_param_ids, result_ty);
+}
+
+fn item_ty(@ast.item it) -> ty_params_and_ty {
let vec[ast.ty_param] ty_params;
auto result_ty;
alt (it.node) {
@@ -591,8 +713,13 @@ fn item_ty(@ast.item it) -> tup(vec[ast.def_id], @t) {
result_ty = ann_to_type(ann);
}
case (ast.item_tag(_, _, ?tps, ?did)) {
+ // Create a new generic polytype.
ty_params = tps;
- result_ty = plain_ty(ty_tag(did));
+ let vec[@t] subtys = vec();
+ for (ast.ty_param tp in tps) {
+ subtys += vec(plain_ty(ty_param(tp.id)));
+ }
+ result_ty = plain_ty(ty_tag(did, subtys));
}
case (ast.item_obj(_, _, ?tps, _, ?ann)) {
ty_params = tps;
@@ -628,6 +755,7 @@ fn block_ty(&ast.block b) -> @t {
fn pat_ty(@ast.pat pat) -> @t {
alt (pat.node) {
case (ast.pat_wild(?ann)) { ret ann_to_type(ann); }
+ case (ast.pat_lit(_, ?ann)) { ret ann_to_type(ann); }
case (ast.pat_bind(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.pat_tag(_, _, _, ?ann)) { ret ann_to_type(ann); }
}
@@ -638,7 +766,7 @@ fn expr_ty(@ast.expr expr) -> @t {
alt (expr.node) {
case (ast.expr_vec(_, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_tup(_, ?ann)) { ret ann_to_type(ann); }
- case (ast.expr_rec(_, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_rec(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_bind(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_call(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_binary(_, _, _, ?ann)) { ret ann_to_type(ann); }
@@ -647,6 +775,8 @@ fn expr_ty(@ast.expr expr) -> @t {
case (ast.expr_cast(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_if(_, _, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_for(_, _, _, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_for_each(_, _, _, ?ann))
+ { ret ann_to_type(ann); }
case (ast.expr_while(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_do_while(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_alt(_, _, ?ann)) { ret ann_to_type(ann); }
@@ -657,6 +787,14 @@ fn expr_ty(@ast.expr expr) -> @t {
case (ast.expr_field(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_index(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_path(_, _, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_ext(_, _, _, _, ?ann)) { ret ann_to_type(ann); }
+
+ case (ast.expr_fail) { ret plain_ty(ty_nil); }
+ case (ast.expr_log(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_check_expr(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_ret(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_put(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_be(_)) { ret plain_ty(ty_nil); }
}
fail;
}
@@ -726,7 +864,10 @@ fn is_lval(@ast.expr expr) -> bool {
}
}
-// Type unification
+// Type unification via Robinson's algorithm (Robinson 1965). Implemented as
+// described in Hoder and Voronkov:
+//
+// http://www.cs.man.ac.uk/~hoderk/ubench/unification_full.pdf
fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
-> unify_result {
@@ -746,81 +887,137 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_mismatch, expected, actual);
}
- fn unify_fn(&hashmap[int,@ty.t] bindings,
- @ty.t expected,
- @ty.t actual,
- &unify_handler handler,
- vec[arg] expected_inputs, @t expected_output,
- vec[arg] actual_inputs, @t actual_output)
- -> unify_result {
- auto expected_len = _vec.len[arg](expected_inputs);
- auto actual_len = _vec.len[arg](actual_inputs);
- if (expected_len != actual_len) {
- ret ures_err(terr_arg_count, expected, actual);
- }
+ tag fn_common_res {
+ fn_common_res_err(unify_result);
+ fn_common_res_ok(vec[arg], @t);
+ }
- // TODO: as above, we should have an iter2 iterator.
- let vec[arg] result_ins = vec();
- auto i = 0u;
- while (i < expected_len) {
- auto expected_input = expected_inputs.(i);
- auto actual_input = actual_inputs.(i);
-
- // This should be safe, I think?
- auto result_mode;
- if (mode_is_alias(expected_input.mode) ||
- mode_is_alias(actual_input.mode)) {
- result_mode = ast.alias;
- } else {
- result_mode = ast.val;
+ fn unify_fn_common(@hashmap[int,@ty.t] bindings,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> fn_common_res {
+ auto expected_len = _vec.len[arg](expected_inputs);
+ auto actual_len = _vec.len[arg](actual_inputs);
+ if (expected_len != actual_len) {
+ ret fn_common_res_err(ures_err(terr_arg_count,
+ expected, actual));
+ }
+
+ // TODO: as above, we should have an iter2 iterator.
+ let vec[arg] result_ins = vec();
+ auto i = 0u;
+ while (i < expected_len) {
+ auto expected_input = expected_inputs.(i);
+ auto actual_input = actual_inputs.(i);
+
+ // This should be safe, I think?
+ auto result_mode;
+ if (mode_is_alias(expected_input.mode) ||
+ mode_is_alias(actual_input.mode)) {
+ result_mode = ast.alias;
+ } else {
+ result_mode = ast.val;
+ }
+
+ auto result = unify_step(bindings,
+ actual_input.ty,
+ expected_input.ty,
+ handler);
+
+ alt (result) {
+ case (ures_ok(?rty)) {
+ result_ins += vec(rec(mode=result_mode,
+ ty=rty));
+ }
+
+ case (_) {
+ ret fn_common_res_err(result);
+ }
+ }
+
+ i += 1u;
}
+ // Check the output.
auto result = unify_step(bindings,
- actual_input.ty,
- expected_input.ty,
+ expected_output,
+ actual_output,
handler);
-
alt (result) {
- case (ures_ok(?rty)) {
- result_ins += vec(rec(mode=result_mode,
- ty=rty));
- }
+ case (ures_ok(?rty)) {
+ ret fn_common_res_ok(result_ins, rty);
+ }
- case (_) {
- ret result;
- }
+ case (_) {
+ ret fn_common_res_err(result);
+ }
}
+ }
- i += 1u;
- }
+ fn unify_fn(@hashmap[int,@ty.t] bindings,
+ ast.proto e_proto,
+ ast.proto a_proto,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> unify_result {
- // Check the output.
- auto result_out;
- auto result = unify_step(bindings,
- expected_output,
- actual_output,
- handler);
- alt (result) {
- case (ures_ok(?rty)) {
- result_out = rty;
+ if (e_proto != a_proto) {
+ ret ures_err(terr_mismatch, expected, actual);
}
-
- case (_) {
- ret result;
+ auto t = unify_fn_common(bindings, expected, actual,
+ handler, expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ alt (t) {
+ case (fn_common_res_err(?r)) {
+ ret r;
+ }
+ case (fn_common_res_ok(?result_ins, ?result_out)) {
+ auto t2 = plain_ty(ty.ty_fn(e_proto, result_ins, result_out));
+ ret ures_ok(t2);
+ }
}
- }
+ }
- auto t = plain_ty(ty.ty_fn(result_ins, result_out));
- ret ures_ok(t);
+ fn unify_native_fn(@hashmap[int,@ty.t] bindings,
+ ast.native_abi e_abi,
+ ast.native_abi a_abi,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> unify_result {
+ if (e_abi != a_abi) {
+ ret ures_err(terr_mismatch, expected, actual);
+ }
+ auto t = unify_fn_common(bindings, expected, actual,
+ handler, expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ alt (t) {
+ case (fn_common_res_err(?r)) {
+ ret r;
+ }
+ case (fn_common_res_ok(?result_ins, ?result_out)) {
+ auto t2 = plain_ty(ty.ty_native_fn(e_abi, result_ins,
+ result_out));
+ ret ures_ok(t2);
+ }
+ }
}
- fn unify_obj(&hashmap[int,@ty.t] bindings,
- @ty.t expected,
- @ty.t actual,
- &unify_handler handler,
- vec[method] expected_meths,
- vec[method] actual_meths) -> unify_result {
+ fn unify_obj(@hashmap[int,@ty.t] bindings,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[method] expected_meths,
+ vec[method] actual_meths) -> unify_result {
let vec[method] result_meths = vec();
let uint i = 0u;
let uint expected_len = _vec.len[method](expected_meths);
@@ -830,32 +1027,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_meth_count, expected, actual);
}
- // FIXME: work around buggy typestate logic for 'alt', sigh.
- fn is_ok(&unify_result r) -> bool {
- alt (r) {
- case (ures_ok(?tfn)) {
- ret true;
- }
- case (_) {}
- }
- ret false;
- }
-
- fn append_if_ok(&method e_meth,
- &unify_result r, &mutable vec[method] result_meths) {
- alt (r) {
- case (ures_ok(?tfn)) {
- alt (tfn.struct) {
- case (ty_fn(?ins, ?out)) {
- result_meths += vec(rec(inputs = ins,
- output = out
- with e_meth));
- }
- }
- }
- }
- }
-
while (i < expected_len) {
auto e_meth = expected_meths.(i);
auto a_meth = actual_meths.(i);
@@ -863,40 +1034,69 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_obj_meths(e_meth.ident, a_meth.ident),
expected, actual);
}
- auto r = unify_fn(bindings, expected, actual, handler,
+ auto r = unify_fn(bindings,
+ e_meth.proto, a_meth.proto,
+ expected, actual, handler,
e_meth.inputs, e_meth.output,
a_meth.inputs, a_meth.output);
- if (!is_ok(r)) {
- ret r;
+ alt (r) {
+ case (ures_ok(?tfn)) {
+ alt (tfn.struct) {
+ case (ty_fn(?proto, ?ins, ?out)) {
+ result_meths += vec(rec(inputs = ins,
+ output = out
+ with e_meth));
+ }
+ }
+ }
+ case (_) {
+ ret r;
+ }
}
- append_if_ok(e_meth, r, result_meths);
i += 1u;
}
auto t = plain_ty(ty_obj(result_meths));
ret ures_ok(t);
}
- fn unify_step(&hashmap[int,@ty.t] bindings, @ty.t expected, @ty.t actual,
- &unify_handler handler) -> unify_result {
+ fn resolve(@hashmap[int,@t] bindings, @t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_var(?id)) {
+ alt (bindings.find(id)) {
+ case (some[@t](?typ2)) {
+ ret resolve(bindings, typ2);
+ }
+ case (none[@t]) {
+ // fall through
+ }
+ }
+ }
+ case (_) {
+ // fall through
+ }
+ }
+ ret typ;
+ }
+
+ fn unify_step(@hashmap[int,@ty.t] bindings, @ty.t in_expected,
+ @ty.t in_actual, &unify_handler handler) -> unify_result {
+
+ // Resolve any bindings.
+ auto expected = resolve(bindings, in_expected);
+ auto actual = resolve(bindings, in_actual);
+
// TODO: rewrite this using tuple pattern matching when available, to
// avoid all this rightward drift and spikiness.
+ // TODO: occurs check, to make sure we don't loop forever when
+ // unifying e.g. 'a and option['a]
+
alt (actual.struct) {
// If the RHS is a variable type, then just do the appropriate
// binding.
case (ty.ty_var(?actual_id)) {
- alt (bindings.find(actual_id)) {
- case (some[@ty.t](?actual_ty)) {
- // FIXME: change the binding here?
- // FIXME: "be"
- ret unify_step(bindings, expected, actual_ty,
- handler);
- }
- case (none[@ty.t]) {
- bindings.insert(actual_id, expected);
- ret ures_ok(expected);
- }
- }
+ bindings.insert(actual_id, expected);
+ ret ures_ok(expected);
}
case (ty.ty_local(?actual_id)) {
auto actual_ty = handler.resolve_local(actual_id);
@@ -938,14 +1138,45 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
case (ty.ty_machine(_)) { ret struct_cmp(expected, actual); }
case (ty.ty_char) { ret struct_cmp(expected, actual); }
case (ty.ty_str) { ret struct_cmp(expected, actual); }
+ case (ty.ty_type) { ret struct_cmp(expected, actual); }
+ case (ty.ty_native) { ret struct_cmp(expected, actual); }
- case (ty.ty_tag(?expected_id)) {
+ case (ty.ty_tag(?expected_id, ?expected_tps)) {
alt (actual.struct) {
- case (ty.ty_tag(?actual_id)) {
- if (expected_id._0 == actual_id._0 &&
- expected_id._1 == actual_id._1) {
- ret ures_ok(expected);
+ case (ty.ty_tag(?actual_id, ?actual_tps)) {
+ if (expected_id._0 != actual_id._0 ||
+ expected_id._1 != actual_id._1) {
+ ret ures_err(terr_mismatch, expected, actual);
}
+
+ // TODO: factor this cruft out, see the TODO in the
+ // ty.ty_tup case
+ let vec[@ty.t] result_tps = vec();
+ auto i = 0u;
+ auto expected_len = _vec.len[@ty.t](expected_tps);
+ while (i < expected_len) {
+ auto expected_tp = expected_tps.(i);
+ auto actual_tp = actual_tps.(i);
+
+ auto result = unify_step(bindings,
+ expected_tp,
+ actual_tp,
+ handler);
+
+ alt (result) {
+ case (ures_ok(?rty)) {
+ append[@ty.t](result_tps, rty);
+ }
+ case (_) {
+ ret result;
+ }
+ }
+
+ i += 1u;
+ }
+
+ ret ures_ok(plain_ty(ty.ty_tag(expected_id,
+ result_tps)));
}
case (_) { /* fall through */ }
}
@@ -970,8 +1201,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -995,8 +1224,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -1045,8 +1272,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_ok(plain_ty(ty.ty_tup(result_elems)));
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -1106,20 +1331,19 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_ok(plain_ty(ty.ty_rec(result_fields)));
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
}
}
- case (ty.ty_fn(?expected_inputs, ?expected_output)) {
+ case (ty.ty_fn(?ep, ?expected_inputs, ?expected_output)) {
alt (actual.struct) {
- case (ty.ty_fn(?actual_inputs, ?actual_output)) {
- ret unify_fn(bindings, expected, actual, handler,
- expected_inputs, expected_output,
- actual_inputs, actual_output);
+ case (ty.ty_fn(?ap, ?actual_inputs, ?actual_output)) {
+ ret unify_fn(bindings, ep, ap,
+ expected, actual, handler,
+ expected_inputs, expected_output,
+ actual_inputs, actual_output);
}
case (_) {
@@ -1128,35 +1352,40 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- case (ty.ty_obj(?expected_meths)) {
- alt (actual.struct) {
- case (ty.ty_obj(?actual_meths)) {
- ret unify_obj(bindings, expected, actual, handler,
- expected_meths, actual_meths);
- }
- case (_) {
- ret ures_err(terr_mismatch, expected, actual);
+ case (ty.ty_native_fn(?e_abi, ?expected_inputs,
+ ?expected_output)) {
+ alt (actual.struct) {
+ case (ty.ty_native_fn(?a_abi, ?actual_inputs,
+ ?actual_output)) {
+ ret unify_native_fn(bindings, e_abi, a_abi,
+ expected, actual, handler,
+ expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ }
+ case (_) {
+ ret ures_err(terr_mismatch, expected, actual);
+ }
}
- }
}
- case (ty.ty_var(?expected_id)) {
- alt (bindings.find(expected_id)) {
- case (some[@ty.t](?expected_ty)) {
- // FIXME: change the binding here?
- // FIXME: "be"
- ret unify_step(bindings,
- expected_ty,
- actual,
- handler);
+ case (ty.ty_obj(?expected_meths)) {
+ alt (actual.struct) {
+ case (ty.ty_obj(?actual_meths)) {
+ ret unify_obj(bindings, expected, actual, handler,
+ expected_meths, actual_meths);
}
- case (none[@ty.t]) {
- bindings.insert(expected_id, actual);
- ret ures_ok(actual);
+ case (_) {
+ ret ures_err(terr_mismatch, expected, actual);
}
}
}
+ case (ty.ty_var(?expected_id)) {
+ // Add a binding.
+ bindings.insert(expected_id, actual);
+ ret ures_ok(actual);
+ }
+
case (ty.ty_local(?expected_id)) {
auto expected_ty = handler.resolve_local(expected_id);
auto result = unify_step(bindings,
@@ -1182,13 +1411,43 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
fail;
}
+ // Performs type binding substitution.
+ fn substitute(@hashmap[int,@t] bindings, @t typ) -> @t {
+ state obj folder(@hashmap[int,@t] bindings) {
+ fn fold_simple_ty(@t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_var(?id)) {
+ alt (bindings.find(id)) {
+ case (some[@t](?typ2)) {
+ ret substitute(bindings, typ2);
+ }
+ case (none[@t]) {
+ ret typ;
+ }
+ }
+ }
+ case (_) {
+ ret typ;
+ }
+ }
+ }
+ }
+
+ ret ty.fold_ty(folder(bindings), typ);
+ }
+
fn hash_int(&int x) -> uint { ret x as uint; }
fn eq_int(&int a, &int b) -> bool { ret a == b; }
auto hasher = hash_int;
auto eqer = eq_int;
- auto bindings = map.mk_hashmap[int,@ty.t](hasher, eqer);
+ auto bindings = @map.mk_hashmap[int,@ty.t](hasher, eqer);
- ret unify_step(bindings, expected, actual, handler);
+ auto ures = unify_step(bindings, expected, actual, handler);
+ alt (ures) {
+ case (ures_ok(?t)) { ret ures_ok(substitute(bindings, t)); }
+ case (_) { ret ures; }
+ }
+ fail; // not reached
}
fn type_err_to_str(&ty.type_err err) -> str {
@@ -1231,9 +1490,10 @@ fn type_err_to_str(&ty.type_err err) -> str {
}
}
-// Type parameter resolution, used in translation
+// Type parameter resolution, used in translation and typechecking
-fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
+fn resolve_ty_params(ty_params_and_ty ty_params_and_polyty,
+ @t monoty) -> vec[@t] {
obj resolve_ty_params_handler(@hashmap[ast.def_id,@t] bindings) {
fn resolve_local(ast.def_id id) -> @t { log "resolve local"; fail; }
fn record_local(ast.def_id id, @t ty) { log "record local"; fail; }
@@ -1249,8 +1509,6 @@ fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
}
}
- auto ty_params_and_polyty = item_ty(item);
-
auto bindings = @new_def_hash[@t]();
auto handler = resolve_ty_params_handler(bindings);
@@ -1274,6 +1532,47 @@ fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
ret result_tys;
}
+// Performs type parameter replacement using the supplied mapping from
+// parameter IDs to types.
+fn replace_type_params(@t typ, hashmap[ast.def_id,@t] param_map) -> @t {
+ state obj param_replacer(hashmap[ast.def_id,@t] param_map) {
+ fn fold_simple_ty(@t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_param(?param_def)) {
+ if (param_map.contains_key(param_def)) {
+ ret param_map.get(param_def);
+ } else {
+ ret typ;
+ }
+ }
+ case (_) {
+ ret typ;
+ }
+ }
+ }
+ }
+ auto replacer = param_replacer(param_map);
+ ret fold_ty(replacer, typ);
+}
+
+// Substitutes the type parameters specified by @ty_params with the
+// corresponding types in @bound in the given type. The two vectors must have
+// the same length.
+fn substitute_ty_params(vec[ast.ty_param] ty_params, vec[@t] bound, @t ty)
+ -> @t {
+ auto ty_param_len = _vec.len[ast.ty_param](ty_params);
+ check (ty_param_len == _vec.len[@t](bound));
+
+ auto bindings = common.new_def_hash[@t]();
+ auto i = 0u;
+ while (i < ty_param_len) {
+ bindings.insert(ty_params.(i).id, bound.(i));
+ i += 1u;
+ }
+
+ ret replace_type_params(ty, bindings);
+}
+
// Local Variables:
// mode: rust
// fill-column: 78;
diff --git a/src/comp/middle/typeck.rs b/src/comp/middle/typeck.rs
index d778ffa9..5c7f963c 100644
--- a/src/comp/middle/typeck.rs
+++ b/src/comp/middle/typeck.rs
@@ -25,13 +25,20 @@ import middle.ty.type_is_scalar;
import std._str;
import std._uint;
import std._vec;
+import std.map;
import std.map.hashmap;
import std.option;
import std.option.none;
import std.option.some;
type ty_table = hashmap[ast.def_id, @ty.t];
-type ty_item_table = hashmap[ast.def_id,@ast.item];
+
+tag any_item {
+ any_item_rust(@ast.item);
+ any_item_native(@ast.native_item, ast.native_abi);
+}
+
+type ty_item_table = hashmap[ast.def_id,any_item];
type crate_ctxt = rec(session.session sess,
@ty_table item_types,
@@ -72,6 +79,65 @@ fn generalize_ty(@crate_ctxt cx, @ty.t t) -> @ty.t {
ret ty.fold_ty(generalizer, t);
}
+// Substitutes the user's explicit types for the parameters in a path
+// expression.
+fn substitute_ty_params(&@crate_ctxt ccx,
+ @ty.t typ,
+ vec[@ast.ty] supplied,
+ &span sp) -> @ty.t {
+ state obj ty_substituter(@crate_ctxt ccx,
+ @mutable uint i,
+ vec[@ast.ty] supplied,
+ @hashmap[int,@ty.t] substs) {
+ fn fold_simple_ty(@ty.t typ) -> @ty.t {
+ alt (typ.struct) {
+ case (ty.ty_var(?vid)) {
+ alt (substs.find(vid)) {
+ case (some[@ty.t](?resolved_ty)) {
+ ret resolved_ty;
+ }
+ case (none[@ty.t]) {
+ if (i >= _vec.len[@ast.ty](supplied)) {
+ // Just leave it as an unresolved parameter
+ // for now. (We will error out later.)
+ ret typ;
+ }
+
+ auto result = ast_ty_to_ty_crate(ccx,
+ supplied.(*i));
+ *i += 1u;
+ substs.insert(vid, result);
+ ret result;
+ }
+ }
+ }
+ case (_) { ret typ; }
+ }
+ }
+ }
+
+ fn hash_int(&int x) -> uint { ret x as uint; }
+ fn eq_int(&int a, &int b) -> bool { ret a == b; }
+ auto hasher = hash_int;
+ auto eqer = eq_int;
+ auto substs = @map.mk_hashmap[int,@ty.t](hasher, eqer);
+
+ auto subst_count = @mutable 0u;
+ auto substituter = ty_substituter(ccx, subst_count, supplied, substs);
+
+ auto result = ty.fold_ty(substituter, typ);
+
+ auto supplied_len = _vec.len[@ast.ty](supplied);
+ if ((*subst_count) != supplied_len) {
+ ccx.sess.span_err(sp, "expected " + _uint.to_str(*subst_count, 10u) +
+ " type parameter(s) but found " +
+ _uint.to_str(supplied_len, 10u) + " parameter(s)");
+ fail;
+ }
+
+ ret result;
+}
+
// Parses the programmer's textual representation of a type into our internal
// notion of a type. `getter` is a function that returns the type
// corresponding to a definition ID.
@@ -81,23 +147,6 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
ret rec(mode=arg.mode, ty=ast_ty_to_ty(getter, arg.ty));
}
- fn replace_type_params(@ty.t t, ty_table param_map) -> @ty.t {
- state obj param_replacer(ty_table param_map) {
- fn fold_simple_ty(@ty.t t) -> @ty.t {
- alt (t.struct) {
- case (ty.ty_param(?param_def)) {
- ret param_map.get(param_def);
- }
- case (_) {
- ret t;
- }
- }
- }
- }
- auto replacer = param_replacer(param_map);
- ret ty.fold_ty(replacer, t);
- }
-
fn instantiate(ty_getter getter, ast.def_id id,
vec[@ast.ty] args) -> @ty.t {
// TODO: maybe record cname chains so we can do
@@ -113,7 +162,7 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
auto param = params.(i);
param_map.insert(param.id, ast_ty_to_ty(getter, arg));
}
- ret replace_type_params(ty_and_params.ty, param_map);
+ ret ty.replace_type_params(ty_and_params.ty, param_map);
}
auto mut = ast.imm;
@@ -145,10 +194,10 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
sty = ty.ty_rec(flds);
}
- case (ast.ty_fn(?inputs, ?output)) {
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
auto f = bind ast_arg_to_arg(getter, _);
auto i = _vec.map[ast.ty_arg, arg](f, inputs);
- sty = ty.ty_fn(i, ast_ty_to_ty(getter, output));
+ sty = ty.ty_fn(proto, i, ast_ty_to_ty(getter, output));
}
case (ast.ty_path(?path, ?def)) {
@@ -157,6 +206,9 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
case (ast.def_ty(?id)) {
sty = instantiate(getter, id, path.node.types).struct;
}
+ case (ast.def_native_ty(?id)) {
+ sty = instantiate(getter, id, path.node.types).struct;
+ }
case (ast.def_obj(?id)) {
sty = instantiate(getter, id, path.node.types).struct;
}
@@ -181,7 +233,8 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
auto ins = _vec.map[ast.ty_arg, arg](f, m.inputs);
auto out = ast_ty_to_ty(getter, m.output);
append[ty.method](tmeths,
- rec(ident=m.ident,
+ rec(proto=m.proto,
+ ident=m.ident,
inputs=ins,
output=out));
}
@@ -192,23 +245,36 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
ret @rec(struct=sty, mut=mut, cname=cname);
}
+fn actual_type(@ty.t t, @ast.item item) -> @ty.t {
+ alt (item.node) {
+ case (ast.item_obj(_,_,_,_,_)) {
+ // An obj used as a type name refers to the output type of the
+ // item (constructor).
+ ret middle.ty.ty_fn_ret(t);
+ }
+ case (_) { }
+ }
+
+ ret t;
+}
+
// A convenience function to use a crate_ctxt to resolve names for
// ast_ty_to_ty.
fn ast_ty_to_ty_crate(@crate_ctxt ccx, &@ast.ty ast_ty) -> @ty.t {
fn getter(@crate_ctxt ccx, ast.def_id id) -> ty_and_params {
check (ccx.item_items.contains_key(id));
check (ccx.item_types.contains_key(id));
- auto item = ccx.item_items.get(id);
+ auto it = ccx.item_items.get(id);
auto ty = ccx.item_types.get(id);
- auto params = ty_params_of_item(item);
-
- alt (item.node) {
- case (ast.item_obj(_,_,_,_,_)) {
- // An obj used as a type name refers to the output type of the
- // item (constructor).
- ty = middle.ty.ty_fn_ret(ty);
+ auto params;
+ alt (it) {
+ case (any_item_rust(?item)) {
+ ty = actual_type(ty, item);
+ params = ty_params_of_item(item);
}
- case (_) { }
+ case (any_item_native(?native_item, _)) {
+ params = ty_params_of_native_item(native_item);
+ }
}
ret rec(params = params, ty = ty);
@@ -238,6 +304,18 @@ fn ty_params_of_item(@ast.item item) -> vec[ast.ty_param] {
}
}
+fn ty_params_of_native_item(@ast.native_item item) -> vec[ast.ty_param] {
+ alt (item.node) {
+ case (ast.native_item_fn(_, _, ?p, _, _)) {
+ ret p;
+ }
+ case (_) {
+ let vec[ast.ty_param] r = vec();
+ ret r;
+ }
+ }
+}
+
// Item collection - a pair of bootstrap passes:
//
// 1. Collect the IDs of all type items (typedefs) and store them in a table.
@@ -249,6 +327,34 @@ fn ty_params_of_item(@ast.item item) -> vec[ast.ty_param] {
// We then annotate the AST with the resulting types and return the annotated
// AST, along with a table mapping item IDs to their types.
+fn ty_of_fn_decl(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ fn(&@ast.ty ast_ty) -> @ty.t convert,
+ fn(&ast.arg a) -> arg ty_of_arg,
+ &ast.fn_decl decl,
+ ast.proto proto,
+ ast.def_id def_id) -> @ty.t {
+ auto input_tys = _vec.map[ast.arg,arg](ty_of_arg, decl.inputs);
+ auto output_ty = convert(decl.output);
+ auto t_fn = plain_ty(ty.ty_fn(proto, input_tys, output_ty));
+ item_to_ty.insert(def_id, t_fn);
+ ret t_fn;
+}
+
+fn ty_of_native_fn_decl(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ fn(&@ast.ty ast_ty) -> @ty.t convert,
+ fn(&ast.arg a) -> arg ty_of_arg,
+ &ast.fn_decl decl,
+ ast.native_abi abi,
+ ast.def_id def_id) -> @ty.t {
+ auto input_tys = _vec.map[ast.arg,arg](ty_of_arg, decl.inputs);
+ auto output_ty = convert(decl.output);
+ auto t_fn = plain_ty(ty.ty_native_fn(abi, input_tys, output_ty));
+ item_to_ty.insert(def_id, t_fn);
+ ret t_fn;
+}
+
fn collect_item_types(session.session sess, @ast.crate crate)
-> tup(@ast.crate, @ty_table, @ty_item_table) {
@@ -256,17 +362,20 @@ fn collect_item_types(session.session sess, @ast.crate crate)
@ty_table item_to_ty,
ast.def_id id) -> ty_and_params {
check (id_to_ty_item.contains_key(id));
- auto item = id_to_ty_item.get(id);
- auto ty = ty_of_item(id_to_ty_item, item_to_ty, item);
- auto params = ty_params_of_item(item);
-
- alt (item.node) {
- case (ast.item_obj(_,_,_,_,_)) {
- // An obj used as a type name refers to the output type of the
- // item (constructor).
- ty = middle.ty.ty_fn_ret(ty);
+ auto it = id_to_ty_item.get(id);
+ auto ty;
+ auto params;
+ alt (it) {
+ case (any_item_rust(?item)) {
+ ty = ty_of_item(id_to_ty_item, item_to_ty, item);
+ ty = actual_type(ty, item);
+ params = ty_params_of_item(item);
+ }
+ case (any_item_native(?native_item, ?abi)) {
+ ty = ty_of_native_item(id_to_ty_item, item_to_ty,
+ native_item, abi);
+ params = ty_params_of_native_item(native_item);
}
- case (_) { }
}
ret rec(params = params, ty = ty);
@@ -285,9 +394,10 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto get = bind getter(id_to_ty_item, item_to_ty, _);
auto convert = bind ast_ty_to_ty(get, _);
auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
- auto inputs = _vec.map[ast.arg,arg](f, m.node.meth.inputs);
- auto output = convert(m.node.meth.output);
- ret rec(ident=m.node.ident, inputs=inputs, output=output);
+ auto inputs = _vec.map[ast.arg,arg](f, m.node.meth.decl.inputs);
+ auto output = convert(m.node.meth.decl.output);
+ ret rec(proto=m.node.meth.proto, ident=m.node.ident,
+ inputs=inputs, output=output);
}
fn ty_of_obj(@ty_item_table id_to_ty_item,
@@ -318,7 +428,7 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto t_field = ast_ty_to_ty(g, f.ty);
append[arg](t_inputs, rec(mode=ast.alias, ty=t_field));
}
- auto t_fn = plain_ty(ty.ty_fn(t_inputs, t_obj));
+ auto t_fn = plain_ty(ty.ty_fn(ast.proto_fn, t_inputs, t_obj));
ret t_fn;
}
@@ -336,15 +446,9 @@ fn collect_item_types(session.session sess, @ast.crate crate)
}
case (ast.item_fn(?ident, ?fn_info, _, ?def_id, _)) {
- // TODO: handle ty-params
-
auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
- auto input_tys = _vec.map[ast.arg,arg](f, fn_info.inputs);
- auto output_ty = convert(fn_info.output);
-
- auto t_fn = plain_ty(ty.ty_fn(input_tys, output_ty));
- item_to_ty.insert(def_id, t_fn);
- ret t_fn;
+ ret ty_of_fn_decl(id_to_ty_item, item_to_ty, convert, f,
+ fn_info.decl, fn_info.proto, def_id);
}
case (ast.item_obj(?ident, ?obj_info, _, ?def_id, _)) {
@@ -369,28 +473,67 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ret ty_;
}
- case (ast.item_tag(_, _, _, ?def_id)) {
- auto t = plain_ty(ty.ty_tag(def_id));
+ case (ast.item_tag(_, _, ?tps, ?def_id)) {
+ // Create a new generic polytype.
+ let vec[@ty.t] subtys = vec();
+ for (ast.ty_param tp in tps) {
+ subtys += vec(plain_ty(ty.ty_param(tp.id)));
+ }
+ auto t = plain_ty(ty.ty_tag(def_id, subtys));
item_to_ty.insert(def_id, t);
ret t;
}
case (ast.item_mod(_, _, _)) { fail; }
+ case (ast.item_native_mod(_, _, _)) { fail; }
+ }
+ }
+
+ fn ty_of_native_item(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ @ast.native_item it,
+ ast.native_abi abi) -> @ty.t {
+ alt (it.node) {
+ case (ast.native_item_fn(?ident, ?fn_decl, ?params, ?def_id, _)) {
+ auto get = bind getter(id_to_ty_item, item_to_ty, _);
+ auto convert = bind ast_ty_to_ty(get, _);
+ auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
+ ret ty_of_native_fn_decl(id_to_ty_item, item_to_ty, convert,
+ f, fn_decl, abi, def_id);
+ }
+ case (ast.native_item_ty(_, ?def_id)) {
+ if (item_to_ty.contains_key(def_id)) {
+ // Avoid repeating work.
+ ret item_to_ty.get(def_id);
+ }
+ auto x =
+ @rec(struct=ty.ty_native, mut=ast.imm, cname=none[str]);
+ item_to_ty.insert(def_id, x);
+ ret x;
+ }
}
}
fn get_tag_variant_types(@ty_item_table id_to_ty_item,
@ty_table item_to_ty,
&ast.def_id tag_id,
- &vec[ast.variant] variants) -> vec[ast.variant] {
+ &vec[ast.variant] variants,
+ &vec[ast.ty_param] ty_params)
+ -> vec[ast.variant] {
let vec[ast.variant] result = vec();
+ // Create a set of parameter types shared among all the variants.
+ let vec[@ty.t] ty_param_tys = vec();
+ for (ast.ty_param tp in ty_params) {
+ ty_param_tys += vec(plain_ty(ty.ty_param(tp.id)));
+ }
+
for (ast.variant variant in variants) {
- // Nullary tag constructors get truned into constants; n-ary tag
+ // Nullary tag constructors get turned into constants; n-ary tag
// constructors get turned into functions.
auto result_ty;
if (_vec.len[ast.variant_arg](variant.args) == 0u) {
- result_ty = plain_ty(ty.ty_tag(tag_id));
+ result_ty = plain_ty(ty.ty_tag(tag_id, ty_param_tys));
} else {
// As above, tell ast_ty_to_ty() that trans_ty_item_to_ty()
// should be called to resolve named types.
@@ -401,8 +544,8 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto arg_ty = ast_ty_to_ty(f, va.ty);
args += vec(rec(mode=ast.alias, ty=arg_ty));
}
- auto tag_t = plain_ty(ty.ty_tag(tag_id));
- result_ty = plain_ty(ty.ty_fn(args, tag_t));
+ auto tag_t = plain_ty(ty.ty_tag(tag_id, ty_param_tys));
+ result_ty = plain_ty(ty.ty_fn(ast.proto_fn, args, tag_t));
}
item_to_ty.insert(variant.id, result_ty);
@@ -416,25 +559,40 @@ fn collect_item_types(session.session sess, @ast.crate crate)
// First pass: collect all type item IDs.
auto module = crate.node.module;
- auto id_to_ty_item = @common.new_def_hash[@ast.item]();
+ auto id_to_ty_item = @common.new_def_hash[any_item]();
fn collect(&@ty_item_table id_to_ty_item, @ast.item i)
-> @ty_item_table {
alt (i.node) {
case (ast.item_ty(_, _, _, ?def_id, _)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (ast.item_tag(_, _, _, ?def_id)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (ast.item_obj(_, _, _, ?def_id, _)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (_) { /* empty */ }
}
ret id_to_ty_item;
}
+ fn collect_native(&@ty_item_table id_to_ty_item, @ast.native_item i)
+ -> @ty_item_table {
+ alt (i.node) {
+ case (ast.native_item_ty(_, ?def_id)) {
+ // The abi of types is not used.
+ id_to_ty_item.insert(def_id,
+ any_item_native(i,
+ ast.native_abi_cdecl));
+ }
+ case (_) {
+ }
+ }
+ ret id_to_ty_item;
+ }
auto fld_1 = fold.new_identity_fold[@ty_item_table]();
- fld_1 = @rec(update_env_for_item = bind collect(_, _)
+ fld_1 = @rec(update_env_for_item = bind collect(_, _),
+ update_env_for_native_item = bind collect_native(_, _)
with *fld_1);
fold.fold_crate[@ty_item_table](id_to_ty_item, fld_1, crate);
@@ -445,22 +603,34 @@ fn collect_item_types(session.session sess, @ast.crate crate)
type env = rec(session.session sess,
@ty_item_table id_to_ty_item,
- @ty_table item_to_ty);
+ @ty_table item_to_ty,
+ ast.native_abi abi);
let @env e = @rec(sess=sess,
id_to_ty_item=id_to_ty_item,
- item_to_ty=item_to_ty);
+ item_to_ty=item_to_ty,
+ abi=ast.native_abi_cdecl);
fn convert(&@env e, @ast.item i) -> @env {
+ auto abi = e.abi;
alt (i.node) {
case (ast.item_mod(_, _, _)) {
// ignore item_mod, it has no type.
}
+ case (ast.item_native_mod(_, ?native_mod, _)) {
+ // ignore item_native_mod, it has no type.
+ abi = native_mod.abi;
+ }
case (_) {
// This call populates the ty_table with the converted type of
// the item in passing; we don't need to do anything else.
ty_of_item(e.id_to_ty_item, e.item_to_ty, i);
}
}
+ ret @rec(abi=abi with *e);
+ }
+
+ fn convert_native(&@env e, @ast.native_item i) -> @env {
+ ty_of_native_item(e.id_to_ty_item, e.item_to_ty, i, e.abi);
ret e;
}
@@ -484,9 +654,19 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ret @fold.respan[ast.item_](sp, item);
}
+ fn fold_native_item_fn(&@env e, &span sp, ast.ident i,
+ &ast.fn_decl d, vec[ast.ty_param] ty_params,
+ ast.def_id id, ast.ann a) -> @ast.native_item {
+ check (e.item_to_ty.contains_key(id));
+ auto ty = e.item_to_ty.get(id);
+ auto item = ast.native_item_fn(i, d, ty_params, id,
+ ast.ann_type(ty));
+ ret @fold.respan[ast.native_item_](sp, item);
+ }
+
fn get_ctor_obj_methods(@ty.t t) -> vec[method] {
alt (t.struct) {
- case (ty.ty_fn(_,?tobj)) {
+ case (ty.ty_fn(_,_,?tobj)) {
alt (tobj.struct) {
case (ty.ty_obj(?tm)) {
ret tm;
@@ -521,7 +701,8 @@ fn collect_item_types(session.session sess, @ast.crate crate)
let method meth_ty = meth_tys.(ix);
let ast.method_ m_;
let @ast.method m;
- auto meth_tfn = plain_ty(ty.ty_fn(meth_ty.inputs,
+ auto meth_tfn = plain_ty(ty.ty_fn(meth_ty.proto,
+ meth_ty.inputs,
meth_ty.output));
m_ = rec(ann=ast.ann_type(meth_tfn) with meth.node);
m = @rec(node=m_ with *meth);
@@ -558,7 +739,9 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ast.def_id id) -> @ast.item {
auto variants_t = get_tag_variant_types(e.id_to_ty_item,
e.item_to_ty,
- id, variants);
+ id,
+ variants,
+ ty_params);
auto item = ast.item_tag(i, variants_t, ty_params, id);
ret @fold.respan[ast.item_](sp, item);
}
@@ -566,8 +749,10 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto fld_2 = fold.new_identity_fold[@env]();
fld_2 =
@rec(update_env_for_item = bind convert(_,_),
+ update_env_for_native_item = bind convert_native(_,_),
fold_item_const = bind fold_item_const(_,_,_,_,_,_,_),
fold_item_fn = bind fold_item_fn(_,_,_,_,_,_,_),
+ fold_native_item_fn = bind fold_native_item_fn(_,_,_,_,_,_,_),
fold_item_obj = bind fold_item_obj(_,_,_,_,_,_,_),
fold_item_ty = bind fold_item_ty(_,_,_,_,_,_,_),
fold_item_tag = bind fold_item_tag(_,_,_,_,_,_)
@@ -705,13 +890,17 @@ fn are_compatible(&@fn_ctxt fcx, @ty.t expected, @ty.t actual) -> bool {
// TODO: enforce this via a predicate.
fn demand_pat(&@fn_ctxt fcx, @ty.t expected, @ast.pat pat) -> @ast.pat {
- auto p_1 = ast.pat_wild(ast.ann_none); // FIXME: typestate botch
+ auto p_1;
alt (pat.node) {
case (ast.pat_wild(?ann)) {
auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
p_1 = ast.pat_wild(ast.ann_type(t));
}
+ case (ast.pat_lit(?lit, ?ann)) {
+ auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
+ p_1 = ast.pat_lit(lit, ast.ann_type(t));
+ }
case (ast.pat_bind(?id, ?did, ?ann)) {
auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
fcx.locals.insert(did, t);
@@ -735,12 +924,12 @@ fn demand_pat(&@fn_ctxt fcx, @ty.t expected, @ast.pat pat) -> @ast.pat {
auto subpats_len = _vec.len[@ast.pat](subpats);
alt (variant_ty.struct) {
- case (ty.ty_tag(_)) {
+ case (ty.ty_tag(_, _)) {
// Nullary tag variant.
check (subpats_len == 0u);
p_1 = ast.pat_tag(id, subpats, vdef_opt, ast.ann_type(t));
}
- case (ty.ty_fn(?args, ?tag_ty)) {
+ case (ty.ty_fn(_, ?args, ?tag_ty)) {
let vec[@ast.pat] new_subpats = vec();
auto i = 0u;
for (arg a in args) {
@@ -771,9 +960,7 @@ fn demand_expr(&@fn_ctxt fcx, @ty.t expected, @ast.expr e) -> @ast.expr {
fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
autoderef_kind adk) -> @ast.expr {
- // FIXME: botch to work around typestate bug in rustboot
- let vec[@ast.expr] v = vec();
- auto e_1 = ast.expr_vec(v, ast.ann_none);
+ auto e_1;
alt (e.node) {
case (ast.expr_vec(?es_0, ?ann)) {
@@ -811,20 +998,50 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
}
e_1 = ast.expr_tup(elts_1, ast.ann_type(t));
}
- case (ast.expr_rec(?fields_0, ?ann)) {
+ case (ast.expr_rec(?fields_0, ?base_0, ?ann)) {
+
+ auto base_1 = base_0;
+
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
let vec[ast.field] fields_1 = vec();
alt (t.struct) {
case (ty.ty_rec(?field_tys)) {
- auto i = 0u;
- for (ast.field field_0 in fields_0) {
- check (_str.eq(field_0.ident, field_tys.(i).ident));
- auto e_1 = demand_expr(fcx, field_tys.(i).ty,
- field_0.expr);
- fields_1 += vec(rec(mut=field_0.mut,
- ident=field_0.ident,
- expr=e_1));
- i += 1u;
+ alt (base_0) {
+ case (none[@ast.expr]) {
+ auto i = 0u;
+ for (ast.field field_0 in fields_0) {
+ check (_str.eq(field_0.ident,
+ field_tys.(i).ident));
+ auto e_1 = demand_expr(fcx,
+ field_tys.(i).ty,
+ field_0.expr);
+ fields_1 += vec(rec(mut=field_0.mut,
+ ident=field_0.ident,
+ expr=e_1));
+ i += 1u;
+ }
+ }
+ case (some[@ast.expr](?bx)) {
+
+ base_1 =
+ some[@ast.expr](demand_expr(fcx, t, bx));
+
+ let vec[field] base_fields = vec();
+
+ for (ast.field field_0 in fields_0) {
+
+ for (ty.field ft in field_tys) {
+ if (_str.eq(field_0.ident, ft.ident)) {
+ auto e_1 = demand_expr(fcx, ft.ty,
+ field_0.expr);
+ fields_1 +=
+ vec(rec(mut=field_0.mut,
+ ident=field_0.ident,
+ expr=e_1));
+ }
+ }
+ }
+ }
}
}
case (_) {
@@ -832,7 +1049,7 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
fail;
}
}
- e_1 = ast.expr_rec(fields_1, ast.ann_type(t));
+ e_1 = ast.expr_rec(fields_1, base_1, ast.ann_type(t));
}
case (ast.expr_bind(?sube, ?es, ?ann)) {
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
@@ -868,6 +1085,7 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
auto t = demand_full(fcx, e.span, expected,
ann_to_type(ann), adk);
auto then_1 = demand_block(fcx, expected, then_0);
+
auto else_1;
alt (else_0) {
case (none[@ast.expr]) { else_1 = none[@ast.expr]; }
@@ -882,6 +1100,10 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
e_1 = ast.expr_for(decl, seq, bloc, ast.ann_type(t));
}
+ case (ast.expr_for_each(?decl, ?seq, ?bloc, ?ann)) {
+ auto t = demand(fcx, e.span, expected, ann_to_type(ann));
+ e_1 = ast.expr_for_each(decl, seq, bloc, ast.ann_type(t));
+ }
case (ast.expr_while(?cond, ?bloc, ?ann)) {
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
e_1 = ast.expr_while(cond, bloc, ast.ann_type(t));
@@ -924,6 +1146,21 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
ann_to_type(ann), adk);
e_1 = ast.expr_path(pth, d, ast.ann_type(t));
}
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, ?ann)) {
+ auto t = demand_full(fcx, e.span, expected,
+ ann_to_type(ann), adk);
+ e_1 = ast.expr_ext(p, args, body, expanded, ast.ann_type(t));
+ }
+ case (ast.expr_fail) { e_1 = e.node; }
+ case (ast.expr_log(_)) { e_1 = e.node; }
+ case (ast.expr_ret(_)) { e_1 = e.node; }
+ case (ast.expr_put(_)) { e_1 = e.node; }
+ case (ast.expr_be(_)) { e_1 = e.node; }
+ case (ast.expr_check_expr(_)) { e_1 = e.node; }
+ case (_) {
+ fcx.ccx.sess.unimpl("type unification for expression variant");
+ fail;
+ }
}
ret @fold.respan[ast.expr_](e.span, e_1);
@@ -989,6 +1226,9 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
case (ast.pat_wild(_)) {
new_pat = ast.pat_wild(ast.ann_type(next_ty_var(fcx.ccx)));
}
+ case (ast.pat_lit(?lt, _)) {
+ new_pat = ast.pat_lit(lt, ast.ann_type(check_lit(lt)));
+ }
case (ast.pat_bind(?id, ?def_id, _)) {
auto ann = ast.ann_type(next_ty_var(fcx.ccx));
new_pat = ast.pat_bind(id, def_id, ann);
@@ -1000,7 +1240,7 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
auto last_id = p.node.idents.(len - 1u);
alt (t.struct) {
// N-ary variants have function types.
- case (ty.ty_fn(?args, ?tag_ty)) {
+ case (ty.ty_fn(_, ?args, ?tag_ty)) {
auto arg_len = _vec.len[arg](args);
auto subpats_len = _vec.len[@ast.pat](subpats);
if (arg_len != subpats_len) {
@@ -1024,7 +1264,9 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
}
// Nullary variants have tag types.
- case (ty.ty_tag(?tid)) {
+ case (ty.ty_tag(?tid, _)) {
+ // TODO: ty params
+
auto subpats_len = _vec.len[@ast.pat](subpats);
if (subpats_len > 0u) {
// TODO: pluralize properly
@@ -1038,7 +1280,8 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
fail; // TODO: recover
}
- auto ann = ast.ann_type(plain_ty(ty.ty_tag(tid)));
+ let vec[@ty.t] tys = vec(); // FIXME
+ auto ann = ast.ann_type(plain_ty(ty.ty_tag(tid, tys)));
new_pat = ast.pat_tag(p, subpats, vdef_opt, ann);
}
}
@@ -1049,6 +1292,90 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
}
fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
+ // A generic function to factor out common logic from call and bind
+ // expressions.
+ fn check_call_or_bind(&@fn_ctxt fcx, &@ast.expr f,
+ &vec[option.t[@ast.expr]] args)
+ -> tup(@ast.expr, vec[option.t[@ast.expr]]) {
+
+ // Check the function.
+ auto f_0 = check_expr(fcx, f);
+
+ // Check the arguments and generate the argument signature.
+ let vec[option.t[@ast.expr]] args_0 = vec();
+ let vec[arg] arg_tys_0 = vec();
+ for (option.t[@ast.expr] a_opt in args) {
+ alt (a_opt) {
+ case (some[@ast.expr](?a)) {
+ auto a_0 = check_expr(fcx, a);
+ args_0 += vec(some[@ast.expr](a_0));
+
+ // FIXME: this breaks aliases. We need a ty_fn_arg.
+ auto arg_ty = rec(mode=ast.val, ty=expr_ty(a_0));
+ append[arg](arg_tys_0, arg_ty);
+ }
+ case (none[@ast.expr]) {
+ args_0 += vec(none[@ast.expr]);
+
+ // FIXME: breaks aliases too?
+ auto typ = next_ty_var(fcx.ccx);
+ append[arg](arg_tys_0, rec(mode=ast.val, ty=typ));
+ }
+ }
+ }
+
+ auto rt_0 = next_ty_var(fcx.ccx);
+ auto t_0;
+ alt (expr_ty(f_0).struct) {
+ case (ty.ty_fn(?proto, _, _)) {
+ t_0 = plain_ty(ty.ty_fn(proto, arg_tys_0, rt_0));
+ }
+ case (ty.ty_native_fn(?abi, _, _)) {
+ t_0 = plain_ty(ty.ty_native_fn(abi, arg_tys_0, rt_0));
+ }
+ case (_) {
+ log "check_call_or_bind(): fn expr doesn't have fn type";
+ fail;
+ }
+ }
+
+ // Unify and write back to the function.
+ auto f_1 = demand_expr(fcx, t_0, f_0);
+
+ // Take the argument types out of the resulting function type.
+ auto t_1 = expr_ty(f_1);
+
+ if (!ty.is_fn_ty(t_1)) {
+ fcx.ccx.sess.span_err(f_1.span,
+ "mismatched types: callee has " +
+ "non-function type: " +
+ ty_to_str(t_1));
+ }
+
+ let vec[arg] arg_tys_1 = ty.ty_fn_args(t_1);
+ let @ty.t rt_1 = ty.ty_fn_ret(t_1);
+
+ // Unify and write back to the arguments.
+ auto i = 0u;
+ let vec[option.t[@ast.expr]] args_1 = vec();
+ while (i < _vec.len[option.t[@ast.expr]](args_0)) {
+ alt (args_0.(i)) {
+ case (some[@ast.expr](?e_0)) {
+ auto arg_ty_1 = arg_tys_1.(i);
+ auto e_1 = demand_expr(fcx, arg_ty_1.ty, e_0);
+ append[option.t[@ast.expr]](args_1, some[@ast.expr](e_1));
+ }
+ case (none[@ast.expr]) {
+ append[option.t[@ast.expr]](args_1, none[@ast.expr]);
+ }
+ }
+
+ i += 1u;
+ }
+
+ ret tup(f_1, args_1);
+ }
+
alt (expr.node) {
case (ast.expr_lit(?lit, _)) {
auto ty = check_lit(lit);
@@ -1103,6 +1430,9 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
}
}
+ case (ast._mutable) {
+ oper_t = @rec(mut=ast.mut with *oper_t);
+ }
case (_) { oper_t = strip_boxes(oper_t); }
}
ret @fold.respan[ast.expr_](expr.span,
@@ -1132,13 +1462,18 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
check (fcx.ccx.item_types.contains_key(id));
t = generalize_ty(fcx.ccx, fcx.ccx.item_types.get(id));
}
+ case (ast.def_native_fn(?id)) {
+ check (fcx.ccx.item_types.contains_key(id));
+ t = generalize_ty(fcx.ccx, fcx.ccx.item_types.get(id));
+ }
case (ast.def_const(?id)) {
check (fcx.ccx.item_types.contains_key(id));
t = fcx.ccx.item_types.get(id);
}
case (ast.def_variant(_, ?variant_id)) {
check (fcx.ccx.item_types.contains_key(variant_id));
- t = fcx.ccx.item_types.get(variant_id);
+ t = generalize_ty(fcx.ccx,
+ fcx.ccx.item_types.get(variant_id));
}
case (ast.def_binding(?id)) {
check (fcx.locals.contains_key(id));
@@ -1161,11 +1496,92 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
}
+ // Substitute type parameters if the user provided some.
+ if (_vec.len[@ast.ty](pth.node.types) > 0u) {
+ t = substitute_ty_params(fcx.ccx, t, pth.node.types,
+ expr.span);
+ }
+
ret @fold.respan[ast.expr_](expr.span,
ast.expr_path(pth, defopt,
ast.ann_type(t)));
}
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, _)) {
+ auto exp_ = check_expr(fcx, expanded);
+ auto t = expr_ty(exp_);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_ext(p, args, body, exp_,
+ ast.ann_type(t)));
+ }
+
+ case (ast.expr_fail) {
+ ret expr;
+ }
+
+ case (ast.expr_ret(?expr_opt)) {
+ alt (expr_opt) {
+ case (none[@ast.expr]) {
+ auto nil = plain_ty(ty.ty_nil);
+ if (!are_compatible(fcx, fcx.ret_ty, nil)) {
+ fcx.ccx.sess.err("ret; in function "
+ + "returning non-nil");
+ }
+
+ ret expr;
+ }
+
+ case (some[@ast.expr](?e)) {
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_ret(some(expr_1)));
+ }
+ }
+ }
+
+ case (ast.expr_put(?expr_opt)) {
+ alt (expr_opt) {
+ case (none[@ast.expr]) {
+ auto nil = plain_ty(ty.ty_nil);
+ if (!are_compatible(fcx, fcx.ret_ty, nil)) {
+ fcx.ccx.sess.err("put; in function "
+ + "putting non-nil");
+ }
+
+ ret expr;
+ }
+
+ case (some[@ast.expr](?e)) {
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_put(some(expr_1)));
+ }
+ }
+ }
+
+ case (ast.expr_be(?e)) {
+ /* FIXME: prove instead of check */
+ check (ast.is_call_expr(e));
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_be(expr_1));
+ }
+
+ case (ast.expr_log(?e)) {
+ auto expr_t = check_expr(fcx, e);
+ ret @fold.respan[ast.expr_](expr.span, ast.expr_log(expr_t));
+ }
+
+ case (ast.expr_check_expr(?e)) {
+ auto expr_t = check_expr(fcx, e);
+ demand(fcx, expr.span, plain_ty(ty.ty_bool), expr_ty(expr_t));
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_check_expr(expr_t));
+ }
+
case (ast.expr_assign(?lhs, ?rhs, _)) {
auto lhs_0 = check_expr(fcx, lhs);
auto rhs_0 = check_expr(fcx, rhs);
@@ -1238,6 +1654,17 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
body_1, ann));
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
+ auto decl_1 = check_decl_local(fcx, decl);
+ auto seq_1 = check_expr(fcx, seq);
+ auto body_1 = check_block(fcx, body);
+
+ auto ann = ast.ann_type(plain_ty(ty.ty_nil));
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_for_each(decl_1, seq_1,
+ body_1, ann));
+ }
+
case (ast.expr_while(?cond, ?body, _)) {
auto cond_0 = check_expr(fcx, cond);
auto cond_1 = demand_expr(fcx, plain_ty(ty.ty_bool), cond_0);
@@ -1324,96 +1751,71 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
case (ast.expr_bind(?f, ?args, _)) {
- auto f_0 = check_expr(fcx, f);
- auto t_0 = expr_ty(f_0);
-
- if (!ty.is_fn_ty(t_0)) {
- fcx.ccx.sess.span_err(f_0.span,
- "mismatched types: bind callee has " +
- "non-function type: " +
- ty_to_str(t_0));
- }
-
- let vec[arg] arg_tys_0 = ty.ty_fn_args(t_0);
- let @ty.t rt_0 = ty.ty_fn_ret(t_0);
- let vec[option.t[@ast.expr]] args_1 = vec();
-
- let uint i = 0u;
-
- let vec[arg] residual_args = vec();
- for (option.t[@ast.expr] a in args) {
- alt (a) {
- case (none[@ast.expr]) {
- append[arg](residual_args,
- arg_tys_0.(i));
- append[option.t[@ast.expr]](args_1,
- none[@ast.expr]);
- }
- case (some[@ast.expr](?sa)) {
- auto arg_1 = check_expr(fcx, sa);
- auto arg_t = expr_ty(arg_1);
- demand_expr(fcx, arg_tys_0.(i).ty, arg_1);
- append[option.t[@ast.expr]](args_1,
- some[@ast.expr](arg_1));
+ // Call the generic checker.
+ auto result = check_call_or_bind(fcx, f, args);
+
+ // Pull the argument and return types out.
+ auto proto_1;
+ let vec[ty.arg] arg_tys_1 = vec();
+ auto rt_1;
+ alt (expr_ty(result._0).struct) {
+ case (ty.ty_fn(?proto, ?arg_tys, ?rt)) {
+ proto_1 = proto;
+ rt_1 = rt;
+
+ // For each blank argument, add the type of that argument
+ // to the resulting function type.
+ auto i = 0u;
+ while (i < _vec.len[option.t[@ast.expr]](args)) {
+ alt (args.(i)) {
+ case (some[@ast.expr](_)) { /* no-op */ }
+ case (none[@ast.expr]) {
+ arg_tys_1 += vec(arg_tys.(i));
+ }
+ }
+ i += 1u;
}
}
- i += 1u;
+ case (_) {
+ log "LHS of bind expr didn't have a function type?!";
+ fail;
+ }
}
- let @ty.t t_1 = plain_ty(ty.ty_fn(residual_args, rt_0));
+ auto t_1 = plain_ty(ty.ty_fn(proto_1, arg_tys_1, rt_1));
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_bind(f_0, args_1,
+ ast.expr_bind(result._0, result._1,
ast.ann_type(t_1)));
-
}
case (ast.expr_call(?f, ?args, _)) {
-
- // Check the function.
- auto f_0 = check_expr(fcx, f);
-
- // Check the arguments and generate the argument signature.
- let vec[@ast.expr] args_0 = vec();
- let vec[arg] arg_tys_0 = vec();
- for (@ast.expr a in args) {
- auto a_0 = check_expr(fcx, a);
- append[@ast.expr](args_0, a_0);
-
- // FIXME: this breaks aliases. We need a ty_fn_arg.
- append[arg](arg_tys_0, rec(mode=ast.val, ty=expr_ty(a_0)));
+ let vec[option.t[@ast.expr]] args_opt_0 = vec();
+ for (@ast.expr arg in args) {
+ args_opt_0 += vec(some[@ast.expr](arg));
}
- auto rt_0 = next_ty_var(fcx.ccx);
- auto t_0 = plain_ty(ty.ty_fn(arg_tys_0, rt_0));
-
- // Unify and write back to the function.
- auto f_1 = demand_expr(fcx, t_0, f_0);
- // Take the argument types out of the resulting function type.
- auto t_1 = expr_ty(f_1);
+ // Call the generic checker.
+ auto result = check_call_or_bind(fcx, f, args_opt_0);
- if (!ty.is_fn_ty(t_1)) {
- fcx.ccx.sess.span_err(f_1.span,
- "mismatched types: callee has " +
- "non-function type: " +
- ty_to_str(t_1));
- }
-
- let vec[arg] arg_tys_1 = ty.ty_fn_args(t_1);
- let @ty.t rt_1 = ty.ty_fn_ret(t_1);
-
- // Unify and write back to the arguments.
- auto i = 0u;
+ // Pull out the arguments.
let vec[@ast.expr] args_1 = vec();
- while (i < _vec.len[@ast.expr](args_0)) {
- auto arg_ty_1 = arg_tys_1.(i);
- auto e = demand_expr(fcx, arg_ty_1.ty, args_0.(i));
- append[@ast.expr](args_1, e);
+ for (option.t[@ast.expr] arg in result._1) {
+ args_1 += vec(option.get[@ast.expr](arg));
+ }
- i += 1u;
+ // Pull the return type out of the type of the function.
+ auto rt_1 = plain_ty(ty.ty_nil); // FIXME: typestate botch
+ alt (expr_ty(result._0).struct) {
+ case (ty.ty_fn(_,_,?rt)) { rt_1 = rt; }
+ case (ty.ty_native_fn(_, _, ?rt)) { rt_1 = rt; }
+ case (_) {
+ log "LHS of call expr didn't have a function type?!";
+ fail;
+ }
}
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_call(f_1, args_1,
+ ast.expr_call(result._0, args_1,
ast.ann_type(rt_1)));
}
@@ -1478,7 +1880,10 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
ast.expr_tup(elts_1, ann));
}
- case (ast.expr_rec(?fields, _)) {
+ case (ast.expr_rec(?fields, ?base, _)) {
+
+ auto base_1 = base;
+
let vec[ast.field] fields_1 = vec();
let vec[field] fields_t = vec();
@@ -1492,9 +1897,52 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
append[field](fields_t, rec(ident=f.ident, ty=expr_t));
}
- auto ann = ast.ann_type(plain_ty(ty.ty_rec(fields_t)));
+ auto ann = ast.ann_none;
+
+ alt (base) {
+ case (none[@ast.expr]) {
+ ann = ast.ann_type(plain_ty(ty.ty_rec(fields_t)));
+ }
+
+ case (some[@ast.expr](?bexpr)) {
+ auto bexpr_1 = check_expr(fcx, bexpr);
+ auto bexpr_t = expr_ty(bexpr_1);
+
+ let vec[field] base_fields = vec();
+
+ alt (bexpr_t.struct) {
+ case (ty.ty_rec(?flds)) {
+ base_fields = flds;
+ }
+ case (_) {
+ fcx.ccx.sess.span_err
+ (expr.span,
+ "record update non-record base");
+ }
+ }
+
+ ann = ast.ann_type(bexpr_t);
+
+ for (ty.field f in fields_t) {
+ auto found = false;
+ for (ty.field bf in base_fields) {
+ if (_str.eq(f.ident, bf.ident)) {
+ demand(fcx, expr.span, f.ty, bf.ty);
+ found = true;
+ }
+ }
+ if (!found) {
+ fcx.ccx.sess.span_err
+ (expr.span,
+ "unknown field in record update: "
+ + f.ident);
+ }
+ }
+ }
+ }
+
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_rec(fields_1, ann));
+ ast.expr_rec(fields_1, base_1, ann));
}
case (ast.expr_field(?base, ?field, _)) {
@@ -1537,7 +1985,8 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
"bad index on obj");
}
auto meth = methods.(ix);
- auto t = plain_ty(ty.ty_fn(meth.inputs, meth.output));
+ auto t = plain_ty(ty.ty_fn(meth.proto,
+ meth.inputs, meth.output));
auto ann = ast.ann_type(t);
ret @fold.respan[ast.expr_](expr.span,
ast.expr_field(base_1,
@@ -1664,43 +2113,6 @@ fn check_stmt(&@fn_ctxt fcx, &@ast.stmt stmt) -> @ast.stmt {
ret stmt;
}
- case (ast.stmt_ret(?expr_opt)) {
- alt (expr_opt) {
- case (none[@ast.expr]) {
- auto nil = plain_ty(ty.ty_nil);
- if (!are_compatible(fcx, fcx.ret_ty, nil)) {
- fcx.ccx.sess.err("ret; in function "
- + "returning non-nil");
- }
-
- ret stmt;
- }
-
- case (some[@ast.expr](?expr)) {
- auto expr_0 = check_expr(fcx, expr);
- auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
- ret @fold.respan[ast.stmt_](stmt.span,
- ast.stmt_ret(some(expr_1)));
- }
- }
- }
-
- case (ast.stmt_log(?expr)) {
- auto expr_t = check_expr(fcx, expr);
- ret @fold.respan[ast.stmt_](stmt.span, ast.stmt_log(expr_t));
- }
-
- case (ast.stmt_check_expr(?expr)) {
- auto expr_t = check_expr(fcx, expr);
- demand(fcx, expr.span, plain_ty(ty.ty_bool), expr_ty(expr_t));
- ret @fold.respan[ast.stmt_](stmt.span,
- ast.stmt_check_expr(expr_t));
- }
-
- case (ast.stmt_fail) {
- ret stmt;
- }
-
case (ast.stmt_expr(?expr)) {
auto expr_t = check_expr(fcx, expr);
ret @fold.respan[ast.stmt_](stmt.span, ast.stmt_expr(expr_t));
@@ -1744,9 +2156,8 @@ fn check_const(&@crate_ctxt ccx, &span sp, ast.ident ident, @ast.ty t,
ret @fold.respan[ast.item_](sp, item);
}
-fn check_fn(&@crate_ctxt ccx, ast.effect effect,
- bool is_iter, vec[ast.arg] inputs,
- @ast.ty output, &ast.block body) -> ast._fn {
+fn check_fn(&@crate_ctxt ccx, &ast.fn_decl decl, ast.proto proto,
+ &ast.block body) -> ast._fn {
auto local_ty_table = @common.new_def_hash[@ty.t]();
// FIXME: duplicate work: the item annotation already has the arg types
@@ -1760,12 +2171,12 @@ fn check_fn(&@crate_ctxt ccx, ast.effect effect,
}
// Store the type of each argument in the table.
- for (ast.arg arg in inputs) {
+ for (ast.arg arg in decl.inputs) {
auto input_ty = ast_ty_to_ty_crate(ccx, arg.ty);
local_ty_table.insert(arg.id, input_ty);
}
- let @fn_ctxt fcx = @rec(ret_ty = ast_ty_to_ty_crate(ccx, output),
+ let @fn_ctxt fcx = @rec(ret_ty = ast_ty_to_ty_crate(ccx, decl.output),
locals = local_ty_table,
ccx = ccx);
@@ -1773,8 +2184,9 @@ fn check_fn(&@crate_ctxt ccx, ast.effect effect,
auto block_t = check_block(fcx, body);
auto block_wb = writeback(fcx, block_t);
- auto fn_t = rec(effect=effect, is_iter=is_iter,
- inputs=inputs, output=output, body=block_wb);
+ auto fn_t = rec(decl=decl,
+ proto=proto,
+ body=block_wb);
ret fn_t;
}
@@ -1787,13 +2199,13 @@ fn check_item_fn(&@crate_ctxt ccx, &span sp, ast.ident ident, &ast._fn f,
// again here, we can extract them.
let vec[arg] inputs = vec();
- for (ast.arg arg in f.inputs) {
+ for (ast.arg arg in f.decl.inputs) {
auto input_ty = ast_ty_to_ty_crate(ccx, arg.ty);
inputs += vec(rec(mode=arg.mode, ty=input_ty));
}
- auto output_ty = ast_ty_to_ty_crate(ccx, f.output);
- auto fn_sty = ty.ty_fn(inputs, output_ty);
+ auto output_ty = ast_ty_to_ty_crate(ccx, f.decl.output);
+ auto fn_sty = ty.ty_fn(f.proto, inputs, output_ty);
auto fn_ann = ast.ann_type(plain_ty(fn_sty));
auto item = ast.item_fn(ident, f, ty_params, id, fn_ann);
@@ -1825,7 +2237,7 @@ fn check_crate(session.session sess, @ast.crate crate) -> @ast.crate {
auto fld = fold.new_identity_fold[@crate_ctxt]();
fld = @rec(update_env_for_item = bind update_obj_fields(_, _),
- fold_fn = bind check_fn(_,_,_,_,_,_),
+ fold_fn = bind check_fn(_,_,_,_),
fold_item_fn = bind check_item_fn(_,_,_,_,_,_,_)
with *fld);
ret fold.fold_crate[@crate_ctxt](ccx, fld, result._0);