aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrian Anderson <[email protected]>2011-03-07 21:21:01 -0500
committerBrian Anderson <[email protected]>2011-03-07 21:21:01 -0500
commit9fc4db6b89213afdf45c02fc2bd2be62b0ddc40c (patch)
tree6c84574116273f91cbe89abd256b9f809adf97de
parentAllow the else part of an expr_if to be either expr_if or expr_block (diff)
parentrustc: Cast the LLVM representations of tag types when constructing boxes. Un... (diff)
downloadrust-9fc4db6b89213afdf45c02fc2bd2be62b0ddc40c.tar.xz
rust-9fc4db6b89213afdf45c02fc2bd2be62b0ddc40c.zip
Merge branch 'master' into recursive-elseif
Conflicts: src/Makefile src/comp/front/ast.rs src/comp/front/parser.rs src/comp/middle/fold.rs src/comp/middle/trans.rs
-rw-r--r--AUTHORS.txt1
-rw-r--r--doc/rust.texi48
-rw-r--r--src/Makefile229
-rw-r--r--src/README1
-rw-r--r--src/boot/be/abi.ml34
-rw-r--r--src/boot/be/elf.ml100
-rw-r--r--src/boot/be/x86.ml2
-rw-r--r--src/boot/driver/lib.ml4
-rw-r--r--src/boot/driver/main.ml29
-rw-r--r--src/boot/fe/cexp.ml1
-rw-r--r--src/boot/me/trans.ml4
-rw-r--r--src/boot/me/typestate.ml357
-rw-r--r--src/boot/util/common.ml1
-rw-r--r--src/comp/back/abi.rs10
-rw-r--r--src/comp/back/x86.rs101
-rw-r--r--src/comp/driver/rustc.rs102
-rw-r--r--src/comp/front/ast.rs179
-rw-r--r--src/comp/front/eval.rs436
-rw-r--r--src/comp/front/extfmt.rs553
-rw-r--r--src/comp/front/lexer.rs24
-rw-r--r--src/comp/front/parser.rs1052
-rw-r--r--src/comp/front/pretty.rs87
-rw-r--r--src/comp/lib/llvm.rs94
-rw-r--r--src/comp/middle/fold.rs407
-rw-r--r--src/comp/middle/resolve.rs142
-rw-r--r--src/comp/middle/trans.rs3331
-rw-r--r--src/comp/middle/ty.rs679
-rw-r--r--src/comp/middle/typeck.rs844
-rw-r--r--src/comp/pretty/pp.rs207
-rw-r--r--src/comp/pretty/pprust.rs708
-rw-r--r--src/comp/rustc.rc9
-rw-r--r--src/comp/util/common.rs4
-rw-r--r--src/lib/_str.rs21
-rw-r--r--src/lib/io.rs (renamed from src/lib/_io.rs)57
-rw-r--r--src/lib/sha1.rs284
-rw-r--r--src/lib/std.rc5
-rw-r--r--src/rt/memory_region.cpp5
-rw-r--r--src/rt/rust.cpp8
-rw-r--r--src/rt/rust_crate_cache.cpp5
-rw-r--r--src/rt/rust_internal.h6
-rw-r--r--src/rt/rust_task.cpp56
-rw-r--r--src/rt/rust_task.h1
-rw-r--r--src/rt/rust_upcall.cpp15
-rw-r--r--src/rt/test/rust_test_runtime.cpp1
-rw-r--r--src/test/compile-fail/reserved-dec.rs5
-rw-r--r--src/test/compile-fail/reserved-f128.rs5
-rw-r--r--src/test/compile-fail/reserved-f16.rs5
-rw-r--r--src/test/compile-fail/reserved-f80.rs5
-rw-r--r--src/test/compile-fail/reserved-m128.rs5
-rw-r--r--src/test/compile-fail/reserved-m32.rs5
-rw-r--r--src/test/compile-fail/reserved-m64.rs5
-rw-r--r--src/test/compile-fail/tail-non-call.rs10
-rw-r--r--src/test/compile-fail/tail-typeck.rs13
-rw-r--r--src/test/run-pass/alt-pattern-lit.rs17
-rw-r--r--src/test/run-pass/arith-unsigned.rs24
-rw-r--r--src/test/run-pass/generic-box.rs8
-rw-r--r--src/test/run-pass/generic-fn-box.rs9
-rw-r--r--src/test/run-pass/generic-recursive-tag.rs7
-rw-r--r--src/test/run-pass/generic-tag.rs2
-rw-r--r--src/test/run-pass/generic-type-synonym.rs2
-rw-r--r--src/test/run-pass/lib-io.rs6
-rw-r--r--src/test/run-pass/lib-sha1.rs115
-rw-r--r--src/test/run-pass/native2.rs20
-rw-r--r--src/test/run-pass/path.rs8
-rw-r--r--src/test/run-pass/syntax-extension-fmt.rs15
-rw-r--r--src/test/run-pass/typestate-cfg-nesting.rs26
66 files changed, 8198 insertions, 2363 deletions
diff --git a/AUTHORS.txt b/AUTHORS.txt
index ceaa0f82..a6e9935d 100644
--- a/AUTHORS.txt
+++ b/AUTHORS.txt
@@ -15,6 +15,7 @@ Jason Orendorff <[email protected]>
Jeff Balogh <[email protected]>
Jeff Mulzelaar <[email protected]>
Jeffrey Yasskin <[email protected]>
+Marijn Haverbeke <[email protected]>
Matt Brubeck <[email protected]>
Michael Bebenita <[email protected]>
Or Brostovski <[email protected]>
diff --git a/doc/rust.texi b/doc/rust.texi
index adf84a12..48a639dd 100644
--- a/doc/rust.texi
+++ b/doc/rust.texi
@@ -592,10 +592,12 @@ or interrupted by ignored characters.
Most tokens in Rust follow rules similar to the C family.
-Most tokens (including identifiers, whitespace, keywords, operators and
-structural symbols) are drawn from the ASCII-compatible range of
-Unicode. String and character literals, however, may include the full range of
-Unicode characters.
+Most tokens (including whitespace, keywords, operators and structural symbols)
+are drawn from the ASCII-compatible range of Unicode. Identifiers are drawn
+from Unicode characters specified by the @code{XID_start} and
+@code{XID_continue} rules given by UAX #31@footnote{Unicode Standard Annex
+#31: Unicode Identifier and Pattern Syntax}. String and character literals may
+include the full range of Unicode characters.
@emph{TODO: formalize this section much more}.
@@ -638,18 +640,22 @@ token or a syntactic extension token. Multi-line comments may be nested.
@c * Ref.Lex.Ident:: Identifier tokens.
@cindex Identifier token
-Identifiers follow the pattern of C identifiers: they begin with a
-@emph{letter} or @emph{underscore}, and continue with any combination of
-@emph{letters}, @emph{decimal digits} and underscores, and must not be equal
-to any keyword or reserved token. @xref{Ref.Lex.Key}. @xref{Ref.Lex.Res}.
+Identifiers follow the rules given by Unicode Standard Annex #31, in the form
+closed under NFKC normalization, @emph{excluding} those tokens that are
+otherwise defined as keywords or reserved
+tokens. @xref{Ref.Lex.Key}. @xref{Ref.Lex.Res}.
-A @emph{letter} is a Unicode character in the ranges U+0061-U+007A and
-U+0041-U+005A (@code{'a'}-@code{'z'} and @code{'A'}-@code{'Z'}).
+That is: an identifier starts with any character having derived property
+@code{XID_Start} and continues with zero or more characters having derived
+property @code{XID_Continue}; and such an identifier is NFKC-normalized during
+lexing, such that all subsequent comparison of identifiers is performed on the
+NFKC-normalized forms.
-An @dfn{underscore} is the character U+005F ('_').
+@emph{TODO: define relationship between Unicode and Rust versions}.
-A @dfn{decimal digit} is a character in the range U+0030-U+0039
-(@code{'0'}-@code{'9'}).
+@footnote{This identifier syntax is a superset of the identifier syntaxes of C
+and Java, and is modeled on Python PEP #3131, which formed the definition of
+identifiers in Python 3.0 and later.}
@node Ref.Lex.Key
@subsection Ref.Lex.Key
@@ -1984,22 +1990,22 @@ module system).
An example of a @code{tag} item and its use:
@example
tag animal @{
- dog();
- cat();
+ dog;
+ cat;
@}
-let animal a = dog();
-a = cat();
+let animal a = dog;
+a = cat;
@end example
An example of a @emph{recursive} @code{tag} item and its use:
@example
tag list[T] @{
- nil();
+ nil;
cons(T, @@list[T]);
@}
-let list[int] a = cons(7, cons(13, nil()));
+let list[int] a = cons(7, cons(13, nil));
@end example
@@ -3395,9 +3401,9 @@ control enters the block.
An example of a pattern @code{alt} statement:
@example
-type list[X] = tag(nil(), cons(X, @@list[X]));
+type list[X] = tag(nil, cons(X, @@list[X]));
-let list[int] x = cons(10, cons(11, nil()));
+let list[int] x = cons(10, cons(11, nil));
alt (x) @{
case (cons(a, cons(b, _))) @{
diff --git a/src/Makefile b/src/Makefile
index ac7dfcbb..8855a2d1 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -32,6 +32,19 @@ CFG_RUSTC_FLAGS := -nowarn
# embedded into the executable, so use a no-op command.
DSYMUTIL := true
+ifeq ($(CFG_OSTYPE), FreeBSD)
+ CFG_RUNTIME := librustrt.so
+ CFG_STDLIB := libstd.so
+ CFG_GCC_CFLAGS += -fPIC -march=i686 -I/usr/local/include
+ CFG_GCC_LINK_FLAGS += -shared -fPIC -lpthread -lrt
+ ifeq ($(CFG_CPUTYPE), x86_64)
+ CFG_GCC_CFLAGS += -m32
+ CFG_GCC_LINK_FLAGS += -m32
+ endif
+ CFG_NATIVE := 1
+ CFG_UNIXY := 1
+endif
+
ifeq ($(CFG_OSTYPE), Linux)
CFG_RUNTIME := librustrt.so
CFG_STDLIB := libstd.so
@@ -43,13 +56,6 @@ ifeq ($(CFG_OSTYPE), Linux)
endif
CFG_NATIVE := 1
CFG_UNIXY := 1
- CFG_VALGRIND := $(shell which valgrind)
- ifdef CFG_VALGRIND
- CFG_VALGRIND += --leak-check=full \
- --error-exitcode=1 \
- --quiet --vex-iropt-level=0 \
- --suppressions=etc/x86.supp
- endif
endif
ifeq ($(CFG_OSTYPE), Darwin)
@@ -117,6 +123,13 @@ ifdef CFG_UNIXY
CFG_GCC_LINK_FLAGS += -m32
endif
endif
+ CFG_VALGRIND := $(shell which valgrind)
+ ifdef CFG_VALGRIND
+ CFG_VALGRIND += --leak-check=full \
+ --error-exitcode=1 \
+ --quiet --vex-iropt-level=0 \
+ --suppressions=etc/x86.supp
+ endif
endif
ifdef CFG_GCC
@@ -388,6 +401,7 @@ TASK_XFAILS := test/run-pass/task-comm-8.rs \
TEST_XFAILS_BOOT := $(TASK_XFAILS) \
$(NOMINAL_TAG_XFAILS) \
$(CONST_TAG_XFAILS) \
+ test/run-pass/arith-unsigned.rs \
test/run-pass/child-outlives-parent.rs \
test/run-pass/clone-with-exterior.rs \
test/run-pass/constrained-type.rs \
@@ -395,7 +409,7 @@ TEST_XFAILS_BOOT := $(TASK_XFAILS) \
test/run-pass/obj-as.rs \
test/run-pass/vec-slice.rs \
test/run-pass/fn-lval.rs \
- test/run-pass/generic-recursive-tag.rs \
+ test/run-pass/generic-fn-box.rs \
test/run-pass/generic-tup.rs \
test/run-pass/iter-ret.rs \
test/run-pass/lib-io.rs \
@@ -414,101 +428,104 @@ TEST_XFAILS_BOOT := $(TASK_XFAILS) \
test/compile-fail/bad-recv.rs \
test/compile-fail/bad-send.rs \
test/compile-fail/infinite-vec-type-recursion.rs \
+ test/compile-fail/tail-non-call.rs \
test/compile-fail/writing-through-read-alias.rs
-# Same strategy here for the time being: just list the ones that
-# work and assume the others don't. Invert this when we're closer
-# to actually bootstrapping.
-
-TEST_XFAILS_RUSTC := $(filter-out \
- $(addprefix test/run-pass/, \
- alt-path.rs \
- alt-pattern-simple.rs \
- alt-tag.rs \
- arith-0.rs \
- arith-1.rs \
- arith-2.rs \
- autoderef-full-lval.rs \
- bind-exterior.rs \
- bind-interior.rs \
- bind-thunk.rs \
- bind-trivial.rs \
- bitwise.rs \
- bool-not.rs \
- box.rs \
- box-in-tup.rs \
- cast.rs \
- char.rs \
- complex.rs \
- const.rs \
- dead-code-one-arm-if.rs \
- deep.rs \
- deref.rs \
- div-mod.rs \
- drop-bind-thunk-args.rs \
- drop-on-ret.rs \
- else-if.rs \
- fact.rs \
- fn-lval.rs \
- fun-call-variants.rs \
- fun-indirect-call.rs \
- generic-fn.rs \
- generic-fn-infer.rs \
- generic-drop-glue.rs \
- generic-tup.rs \
- generic-type.rs \
- hello.rs \
- int.rs \
- i32-sub.rs \
- i8-incr.rs \
- import2.rs \
- import3.rs \
- import4.rs \
- import5.rs \
- import6.rs \
- import7.rs \
- import8.rs \
- item-name-overload.rs \
- large-records.rs \
- lazy-init.rs \
- lazy-and-or.rs \
- leak-box-as-tydesc.rs \
- linear-for-loop.rs \
- multiline-comment.rs \
- mutual-recursion-group.rs \
- obj-drop.rs \
- obj-recursion.rs \
- obj-with-vec.rs \
- operator-associativity.rs \
- opeq.rs \
- output-slot-variants.rs \
- over-constrained-vregs.rs \
- readalias.rs \
- rec.rs \
- rec-auto.rs \
- rec-tup.rs \
- return-nil.rs \
- simple-obj.rs \
- stateful-obj.rs \
- str-idx.rs \
- type-in-nested-module.rs \
- type-param.rs \
- tup.rs \
- u32-decr.rs \
- u8-incr.rs \
- u8-incr-decr.rs \
- uint.rs \
- unit.rs \
- use.rs \
- tag.rs \
- vec.rs \
- vec-drop.rs \
- vec-in-tup.rs \
- vec-late-init.rs \
- while-and-do-while.rs \
- while-flow-graph.rs \
- writealias.rs \
+TEST_XFAILS_RUSTC := $(addprefix test/run-pass/, \
+ acyclic-unwind.rs \
+ alt-pattern-drop.rs \
+ alt-type-simple.rs \
+ append-units.rs \
+ basic-1.rs \
+ basic-2.rs \
+ basic.rs \
+ bind-obj-ctor.rs \
+ child-outlives-parent.rs \
+ clone-with-exterior.rs \
+ comm.rs \
+ constrained-type.rs \
+ destructor-ordering.rs \
+ drop-parametric-closure-with-bound-box.rs \
+ export-non-interference.rs \
+ foreach-nested-2.rs \
+ foreach-nested.rs \
+ foreach-put-structured.rs \
+ foreach-simple-outer-slot.rs \
+ generic-fn-twice.rs \
+ generic-iter-frame.rs \
+ generic-recursive-tag.rs \
+ generic-tag-alt.rs \
+ generic-tag-values.rs \
+ iter-range.rs \
+ iter-ret.rs \
+ lazychan.rs \
+ lib-bitv.rs \
+ lib-deque.rs \
+ lib-int.rs \
+ lib-io.rs \
+ lib-map.rs \
+ lib-rand.rs \
+ lib-sha1.rs \
+ lib-sort.rs \
+ lib-str.rs \
+ lib-task.rs \
+ lib-uint.rs \
+ lib-vec-str-conversions.rs \
+ lib-vec.rs \
+ many.rs \
+ mlist-cycle.rs \
+ mlist.rs \
+ mutable-alias-vec.rs \
+ obj-as.rs \
+ obj-dtor.rs \
+ obj-return-polytypes.rs \
+ pred.rs \
+ preempt.rs \
+ rt-circular-buffer.rs \
+ size-and-align.rs \
+ spawn-fn.rs \
+ spawn-module-qualified.rs \
+ spawn.rs \
+ str-append.rs \
+ syntax-extension-fmt.rs \
+ syntax-extension-shell.rs \
+ task-comm-0.rs \
+ task-comm-1.rs \
+ task-comm-10.rs \
+ task-comm-11.rs \
+ task-comm-12.rs \
+ task-comm-13-thread.rs \
+ task-comm-13.rs \
+ task-comm-15.rs \
+ task-comm-2.rs \
+ task-comm-3.rs \
+ task-comm-4.rs \
+ task-comm-5.rs \
+ task-comm-6.rs \
+ task-comm-7.rs \
+ task-comm-8.rs \
+ task-comm-9.rs \
+ task-comm.rs \
+ task-killjoin.rs \
+ task-life-0.rs \
+ threads.rs \
+ type-sizes.rs \
+ typestate-cfg-nesting.rs \
+ use-import-export.rs \
+ user.rs \
+ utf8.rs \
+ vec-alloc-append.rs \
+ vec-append.rs \
+ vec-slice.rs \
+ while-prelude-drop.rs \
+ while-with-break.rs \
+ yield.rs \
+ yield2.rs \
+ multi.rc \
+ native-mod.rc \
+ native.rc \
) \
+ $(filter-out \
$(addprefix test/compile-fail/, \
alt-tag-nullary.rs \
alt-tag-unary.rs \
@@ -517,6 +534,7 @@ TEST_XFAILS_RUSTC := $(filter-out \
bad-expr-path.rs \
bad-expr-path2.rs \
bogus-tag.rs \
+ fru-extra-field.rs \
import.rs \
import2.rs \
import3.rs \
@@ -526,11 +544,20 @@ TEST_XFAILS_RUSTC := $(filter-out \
multiline-comment-line-tracking.rs \
output-type-mismatch.rs \
rec-missing-fields.rs \
+ reserved-dec.rs \
+ reserved-f128.rs \
+ reserved-f16.rs \
+ reserved-f80.rs \
+ reserved-m128.rs \
+ reserved-m32.rs \
+ reserved-m64.rs \
+ tail-non-call.rs \
+ tail-typeck.rs \
type-shadow.rs \
while-type-error.rs \
wrong-ret-type.rs \
), \
- $(wildcard test/*/*.rs test/*/*.rc))
+ $(wildcard test/*fail/*.rs test/*fail/*.rc))
ifdef MINGW_CROSS
diff --git a/src/README b/src/README
index 3618ee18..05d701bd 100644
--- a/src/README
+++ b/src/README
@@ -8,7 +8,6 @@ boot/fe - Front end (lexer, parser, AST)
boot/me - Middle end (resolve, check, layout, trans)
boot/be - Back end (IL, RA, insns, asm, objfiles)
boot/util - Ubiquitous helpers
-boot/llvm - LLVM-based alternative back end
boot/driver - Compiler driver
comp/ The self-hosted compiler ("rustc": incomplete)
diff --git a/src/boot/be/abi.ml b/src/boot/be/abi.ml
index 035d1f05..89e308bf 100644
--- a/src/boot/be/abi.ml
+++ b/src/boot/be/abi.ml
@@ -110,23 +110,33 @@ let indirect_args_elt_closure = 0;;
(* Current worst case is by vec grow glue *)
let worst_case_glue_call_args = 8;;
+(*
+ * ABI tags used to inform the runtime which sort of frame to set up for new
+ * spawned functions. FIXME: There is almost certainly a better abstraction to
+ * use.
+ *)
+let abi_x86_rustboot_cdecl = 1;;
+let abi_x86_rustc_fastcall = 2;;
+
type abi =
- {
- abi_word_sz: int64;
- abi_word_bits: Il.bits;
- abi_word_ty: Common.ty_mach;
+ {
+ abi_word_sz: int64;
+ abi_word_bits: Il.bits;
+ abi_word_ty: Common.ty_mach;
+
+ abi_tag: int;
- abi_has_pcrel_data: bool;
- abi_has_pcrel_code: bool;
+ abi_has_pcrel_data: bool;
+ abi_has_pcrel_code: bool;
- abi_n_hardregs: int;
- abi_str_of_hardreg: (int -> string);
+ abi_n_hardregs: int;
+ abi_str_of_hardreg: (int -> string);
- abi_emit_target_specific: (Il.emitter -> Il.quad -> unit);
- abi_constrain_vregs: (Il.quad -> (Il.vreg,Bits.t) Hashtbl.t -> unit);
+ abi_emit_target_specific: (Il.emitter -> Il.quad -> unit);
+ abi_constrain_vregs: (Il.quad -> (Il.vreg,Bits.t) Hashtbl.t -> unit);
- abi_emit_fn_prologue: (Il.emitter
- -> Common.size (* framesz *)
+ abi_emit_fn_prologue: (Il.emitter
+ -> Common.size (* framesz *)
-> Common.size (* callsz *)
-> Common.nabi
-> Common.fixup (* grow_task *)
diff --git a/src/boot/be/elf.ml b/src/boot/be/elf.ml
index 406508e4..99b68042 100644
--- a/src/boot/be/elf.ml
+++ b/src/boot/be/elf.ml
@@ -44,7 +44,7 @@ type ei_data =
;;
-let elf_identification ei_class ei_data =
+let elf_identification sess ei_class ei_data =
SEQ
[|
STRING "\x7fELF";
@@ -58,9 +58,16 @@ let elf_identification ei_class ei_data =
ELFDATANONE -> 0
| ELFDATA2LSB -> 1
| ELFDATA2MSB -> 2);
+
1; (* EI_VERSION = EV_CURRENT *)
- 0; (* EI_PAD #7 *)
- 0; (* EI_PAD #8 *)
+
+ (* EI_OSABI *)
+ (match sess.Session.sess_targ with
+ FreeBSD_x86_elf -> 9
+ | _ -> 0);
+
+ 0; (* EI_ABIVERSION *)
+
0; (* EI_PAD #9 *)
0; (* EI_PAD #A *)
0; (* EI_PAD #B *)
@@ -117,7 +124,7 @@ let elf32_header
in
DEF
(elf_header_fixup,
- SEQ [| elf_identification ELFCLASS32 ei_data;
+ SEQ [| elf_identification sess ELFCLASS32 ei_data;
WORD (TY_u16, (IMM (match e_type with
ET_NONE -> 0L
| ET_REL -> 1L
@@ -480,6 +487,7 @@ let elf32_linux_x86_file
~(entry_name:string)
~(text_frags:(string option, frag) Hashtbl.t)
~(data_frags:(string option, frag) Hashtbl.t)
+ ~(bss_frags:(string option, frag) Hashtbl.t)
~(rodata_frags:(string option, frag) Hashtbl.t)
~(required_fixups:(string, fixup) Hashtbl.t)
~(dwarf:Dwarf.debug_records)
@@ -644,7 +652,7 @@ let elf32_linux_x86_file
(* let gotpltndx = 8L in *) (* Section index of .got.plt *)
(* let relapltndx = 9L in *) (* Section index of .rela.plt *)
let datandx = 10L in (* Section index of .data *)
- (* let bssndx = 11L in *) (* Section index of .bss *)
+ let bssndx = 11L in (* Section index of .bss *)
(* let dynamicndx = 12L in *) (* Section index of .dynamic *)
let shstrtabndx = 13L in (* Section index of .shstrtab *)
@@ -991,6 +999,22 @@ let elf32_linux_x86_file
(strtab_entry, symtab_entry)
in
+ let bss_sym name st_bind fixup =
+ let name_fixup = new_fixup ("bss symbol name fixup: '" ^ name ^ "'") in
+ let strtab_entry = DEF (name_fixup, ZSTRING name) in
+ let symtab_entry =
+ symbol
+ ~string_table_fixup: dynstr_section_fixup
+ ~name_string_fixup: name_fixup
+ ~sym_target_fixup: (Some fixup)
+ ~st_bind
+ ~st_type: STT_OBJECT
+ ~st_shndx: bssndx
+ in
+ incr n_syms;
+ (strtab_entry, symtab_entry)
+ in
+
let rodata_sym name st_bind fixup =
let name_fixup = new_fixup ("rodata symbol name fixup: '" ^ name ^ "'") in
let strtab_entry = DEF (name_fixup, ZSTRING name) in
@@ -1212,6 +1236,12 @@ let elf32_linux_x86_file
Hashtbl.fold (frags_of_symbol data_sym STB_GLOBAL) data_frags ([],[],[])
in
+ let (bss_strtab_frags,
+ bss_symtab_frags,
+ bss_body_frags) =
+ Hashtbl.fold (frags_of_symbol bss_sym STB_GLOBAL) bss_frags ([],[],[])
+ in
+
let (_,
require_strtab_frags,
require_symtab_frags,
@@ -1277,7 +1307,8 @@ let elf32_linux_x86_file
global_text_symtab_frags @
local_text_symtab_frags @
rodata_symtab_frags @
- data_symtab_frags))
+ data_symtab_frags @
+ bss_symtab_frags))
in
let dynstr_frags = (null_strtab_frag ::
@@ -1286,11 +1317,16 @@ let elf32_linux_x86_file
local_text_strtab_frags @
rodata_strtab_frags @
data_strtab_frags @
+ bss_strtab_frags @
(Array.to_list dynamic_needed_strtab_frags)))
in
let interp_section =
- DEF (interp_section_fixup, ZSTRING "/lib/ld-linux.so.2")
+
+ DEF (interp_section_fixup, ZSTRING
+ (if sess.Session.sess_targ = FreeBSD_x86_elf
+ then "/libexec/ld-elf.so.1"
+ else "/lib/ld-linux.so.2"))
in
let text_section =
@@ -1307,7 +1343,7 @@ let elf32_linux_x86_file
in
let bss_section =
DEF (bss_section_fixup,
- SEQ [| |])
+ SEQ (Array.of_list bss_body_frags))
in
let dynsym_section =
DEF (dynsym_section_fixup,
@@ -1486,6 +1522,7 @@ let emit_file
let text_frags = Hashtbl.create 4 in
let rodata_frags = Hashtbl.create 4 in
let data_frags = Hashtbl.create 4 in
+ let bss_frags = Hashtbl.create 4 in
let required_fixups = Hashtbl.create 4 in
(*
@@ -1584,7 +1621,9 @@ let emit_file
let needed_libs =
[|
- "libc.so.6";
+ if sess.Session.sess_targ = FreeBSD_x86_elf
+ then "libc.so.7"
+ else "libc.so.6";
"librustrt.so"
|]
in
@@ -1604,6 +1643,27 @@ let emit_file
htab_put text_frags None code;
htab_put rodata_frags None data;
+ if sess.Session.sess_targ = FreeBSD_x86_elf
+ then
+ (*
+ * FreeBSD wants some extra symbols in .bss so its libc can fill
+ * them in, I think.
+ *)
+ List.iter
+ (fun x -> htab_put bss_frags (Some x) (WORD (TY_u32, (IMM 0L))))
+ [
+ "environ";
+ "optind";
+ "optarg";
+ "_CurrentRuneLocale";
+ "__stack_chk_guard";
+ "__mb_sb_limit";
+ "__isthreaded";
+ "__stdinp";
+ "__stderrp";
+ "__stdoutp";
+ ];
+
Hashtbl.iter
begin
fun _ tab ->
@@ -1616,6 +1676,7 @@ let emit_file
end
sem.Semant.ctxt_native_required
in
+
let all_frags =
elf32_linux_x86_file
~sess
@@ -1623,6 +1684,7 @@ let emit_file
~entry_name: "_start"
~text_frags
~data_frags
+ ~bss_frags
~dwarf
~sem
~rodata_frags
@@ -1640,16 +1702,16 @@ let sniff
: asm_reader option =
try
let stat = Unix.stat filename in
- if (stat.Unix.st_kind = Unix.S_REG) &&
- (stat.Unix.st_size > 4)
- then
- let ar = new_asm_reader sess filename in
- let _ = log sess "sniffing ELF file" in
- if (ar.asm_get_zstr_padded 4) = elf_magic
- then (ar.asm_seek 0; Some ar)
- else None
- else
- None
+ if (stat.Unix.st_kind = Unix.S_REG) &&
+ (stat.Unix.st_size > 4)
+ then
+ let ar = new_asm_reader sess filename in
+ let _ = log sess "sniffing ELF file" in
+ if (ar.asm_get_zstr_padded 4) = elf_magic
+ then (ar.asm_seek 0; Some ar)
+ else None
+ else
+ None
with
_ -> None
;;
diff --git a/src/boot/be/x86.ml b/src/boot/be/x86.ml
index 30b49ed1..49b660be 100644
--- a/src/boot/be/x86.ml
+++ b/src/boot/be/x86.ml
@@ -1851,6 +1851,8 @@ let (abi:Abi.abi) =
Abi.abi_word_bits = word_bits;
Abi.abi_word_ty = word_ty;
+ Abi.abi_tag = Abi.abi_x86_rustboot_cdecl;
+
Abi.abi_has_pcrel_data = false;
Abi.abi_has_pcrel_code = true;
diff --git a/src/boot/driver/lib.ml b/src/boot/driver/lib.ml
index a4769e83..00b1b834 100644
--- a/src/boot/driver/lib.ml
+++ b/src/boot/driver/lib.ml
@@ -249,6 +249,7 @@ let get_ar
Win32_x86_pe -> Pe.sniff
| MacOS_x86_macho -> Macho.sniff
| Linux_x86_elf -> Elf.sniff
+ | FreeBSD_x86_elf -> Elf.sniff
in
sniff sess filename
end
@@ -270,6 +271,7 @@ let get_sects
Win32_x86_pe -> Pe.get_sections
| MacOS_x86_macho -> Macho.get_sections
| Linux_x86_elf -> Elf.get_sections
+ | FreeBSD_x86_elf -> Elf.get_sections
in
Some (ar, (get_sections sess ar))
end
@@ -350,6 +352,7 @@ let get_mod
Win32_x86_pe -> ".dll"
| MacOS_x86_macho -> ".dylib"
| Linux_x86_elf -> ".so"
+ | FreeBSD_x86_elf -> ".so"
in
let rec meta_matches i f_meta =
if i >= (Array.length meta)
@@ -447,6 +450,7 @@ let infer_lib_name
Win32_x86_pe -> ident ^ ".dll"
| MacOS_x86_macho -> "lib" ^ ident ^ ".dylib"
| Linux_x86_elf -> "lib" ^ ident ^ ".so"
+ | FreeBSD_x86_elf -> "lib" ^ ident ^ ".so"
;;
diff --git a/src/boot/driver/main.ml b/src/boot/driver/main.ml
index 2bbc832b..9705f1ee 100644
--- a/src/boot/driver/main.ml
+++ b/src/boot/driver/main.ml
@@ -8,12 +8,21 @@ let _ =
let (targ:Common.target) =
match Sys.os_type with
- "Unix" when Unix.system "test `uname -s` = 'Darwin'" = Unix.WEXITED 0 ->
- MacOS_x86_macho
- | "Unix" -> Linux_x86_elf
- | "Win32" -> Win32_x86_pe
+
+ | "Win32"
| "Cygwin" -> Win32_x86_pe
- | _ -> Linux_x86_elf
+
+ | "Unix"
+ when Unix.system "test `uname -s` = 'Linux'" = Unix.WEXITED 0 ->
+ Linux_x86_elf
+ | "Unix"
+ when Unix.system "test `uname -s` = 'Darwin'" = Unix.WEXITED 0 ->
+ MacOS_x86_macho
+ | "Unix"
+ when Unix.system "test `uname -s` = 'FreeBSD'" = Unix.WEXITED 0 ->
+ FreeBSD_x86_elf
+ | _ ->
+ Linux_x86_elf
;;
let (abi:Abi.abi) = X86.abi;;
@@ -96,6 +105,7 @@ let default_output_filename (sess:Session.sess) : filename option =
else
base ^ (match sess.Session.sess_targ with
Linux_x86_elf -> ""
+ | FreeBSD_x86_elf -> ""
| MacOS_x86_macho -> ""
| Win32_x86_pe -> ".exe")
in
@@ -144,16 +154,21 @@ let flag f opt desc =
let argspecs =
[
- ("-t", Arg.Symbol (["linux-x86-elf"; "win32-x86-pe"; "macos-x86-macho"],
+ ("-t", Arg.Symbol (["linux-x86-elf";
+ "win32-x86-pe";
+ "macos-x86-macho";
+ "freebsd-x86-elf"],
fun s -> (sess.Session.sess_targ <-
(match s with
"win32-x86-pe" -> Win32_x86_pe
| "macos-x86-macho" -> MacOS_x86_macho
+ | "freebsd-x86-elf" -> FreeBSD_x86_elf
| _ -> Linux_x86_elf))),
(" target (default: " ^ (match sess.Session.sess_targ with
Win32_x86_pe -> "win32-x86-pe"
| Linux_x86_elf -> "linux-x86-elf"
| MacOS_x86_macho -> "macos-x86-macho"
+ | FreeBSD_x86_elf -> "freebsd-x86-elf"
) ^ ")"));
("-o", Arg.String (fun s -> sess.Session.sess_out <- Some s),
"file to output (default: "
@@ -320,6 +335,7 @@ let parse_input_crate
let depfile =
match sess.Session.sess_targ with
Linux_x86_elf
+ | FreeBSD_x86_elf
| MacOS_x86_macho -> outfile ^ ".d"
| Win32_x86_pe -> (Filename.chop_extension outfile) ^ ".d"
in
@@ -473,6 +489,7 @@ let main_pipeline _ =
Win32_x86_pe -> Pe.emit_file
| MacOS_x86_macho -> Macho.emit_file
| Linux_x86_elf -> Elf.emit_file
+ | FreeBSD_x86_elf -> Elf.emit_file
in
Session.time_inner "emit" sess
(fun _ -> emitter sess crate code data sem_cx dwarf);
diff --git a/src/boot/fe/cexp.ml b/src/boot/fe/cexp.ml
index 56f3e878..0f216fc2 100644
--- a/src/boot/fe/cexp.ml
+++ b/src/boot/fe/cexp.ml
@@ -628,6 +628,7 @@ let parse_crate_file
let (os, arch, libc) =
match sess.Session.sess_targ with
Linux_x86_elf -> ("linux", "x86", "libc.so.6")
+ | FreeBSD_x86_elf -> ("freebsd", "x86", "libc.so.7")
| Win32_x86_pe -> ("win32", "x86", "msvcrt.dll")
| MacOS_x86_macho -> ("macos", "x86", "libc.dylib")
in
diff --git a/src/boot/me/trans.ml b/src/boot/me/trans.ml
index 4f717219..bbf49e83 100644
--- a/src/boot/me/trans.ml
+++ b/src/boot/me/trans.ml
@@ -2727,6 +2727,7 @@ let trans_visitor
[|
Il.Cell new_task;
exit_task_glue_fptr;
+ (imm (Int64.of_int abi.Abi.abi_tag));
fptr_operand;
callsz
|];
@@ -2739,6 +2740,7 @@ let trans_visitor
[|
Il.Cell new_task;
exit_task_glue_fptr;
+ (imm (Int64.of_int abi.Abi.abi_tag));
fptr_operand;
callsz
|];
@@ -6183,6 +6185,8 @@ let trans_visitor
tab_sz cx.ctxt_required_rust_sym_num;
tab_sz cx.ctxt_required_c_sym_num;
tab_sz cx.ctxt_required_lib_num;
+
+ Asm.WORD (word_ty_mach, Asm.IMM (Int64.of_int abi.Abi.abi_tag));
|]))
in
diff --git a/src/boot/me/typestate.ml b/src/boot/me/typestate.ml
index ea0204f3..8b7840a2 100644
--- a/src/boot/me/typestate.ml
+++ b/src/boot/me/typestate.ml
@@ -24,7 +24,6 @@ type typestate_tables =
ts_prestates: (node_id,Bits.t) Hashtbl.t;
ts_poststates: (node_id,Bits.t) Hashtbl.t;
ts_graph: node_graph;
- ts_siblings: sibling_map;
ts_stmts: Ast.stmt Stack.t;
ts_maxid: int ref;
}
@@ -38,7 +37,6 @@ let new_tables _ =
ts_poststates = Hashtbl.create 0;
ts_prestates = Hashtbl.create 0;
ts_graph = Hashtbl.create 0;
- ts_siblings = Hashtbl.create 0;
ts_stmts = Stack.create ();
ts_maxid = ref 0 }
;;
@@ -790,279 +788,148 @@ let show_node cx graph s i =
s (int_of_node i) (lset_fmt (Hashtbl.find graph i)))
;;
-let graph_sequence_building_visitor
- (cx:ctxt)
- (tables_stack:typestate_tables Stack.t)
- (inner:Walk.visitor)
- : Walk.visitor =
+let add_flow_edges (graph:node_graph) (n:node_id) (dsts:node_id list) : unit =
+ if Hashtbl.mem graph n
+ then
+ let existing = Hashtbl.find graph n in
+ Hashtbl.replace graph n (lset_union existing dsts)
+ else
+ Hashtbl.add graph n dsts
+;;
- let tables _ = Stack.top tables_stack in
+let rec build_flow_graph_for_stmt
+ (graph:node_graph)
+ (predecessors:node_id list)
+ (s:Ast.stmt)
+ : node_id list =
- (* Flow each stmt to its sequence-successor. *)
- let visit_stmts stmts =
- let ts = tables () in
- let graph = ts.ts_graph in
- let sibs = ts.ts_siblings in
- let len = Array.length stmts in
- for i = 0 to len - 2
- do
- let stmt = stmts.(i) in
- let next = stmts.(i+1) in
- log cx "sequential stmt edge %d -> %d"
- (int_of_node stmt.id) (int_of_node next.id);
- htab_put graph stmt.id [next.id];
- htab_put sibs stmt.id next.id;
- done;
- (* Flow last node to nowhere. *)
- if len > 0
- then htab_put graph stmts.(len-1).id []
+ let connect ps qs =
+ List.iter
+ (fun pred -> add_flow_edges graph pred qs)
+ ps
in
- let visit_stmt_pre s =
- (* Sequence the prelude nodes on special stmts. *)
- begin
- match s.node with
- Ast.STMT_while sw ->
- let (stmts, _) = sw.Ast.while_lval in
- visit_stmts stmts
- | _ -> ()
- end;
- inner.Walk.visit_stmt_pre s
+ let seq ps (ss:Ast.stmt array) =
+ build_flow_graph_for_stmts graph ps ss
in
- let visit_block_pre b =
- visit_stmts b.node;
- inner.Walk.visit_block_pre b
+ let blk ps b =
+ connect ps [b.id];
+ seq [b.id] b.node
in
- { inner with
- Walk.visit_stmt_pre = visit_stmt_pre;
- Walk.visit_block_pre = visit_block_pre }
-;;
-
-let add_flow_edges (graph:node_graph) (n:node_id) (dsts:node_id list) : unit =
- let existing = Hashtbl.find graph n in
- Hashtbl.replace graph n (lset_union existing dsts)
-;;
+ let first ss =
+ if Array.length ss = 0
+ then []
+ else [ss.(0).id]
+ in
-let remove_flow_edges
- (graph:node_graph)
- (n:node_id)
- (dsts:node_id list)
- : unit =
- let existing = Hashtbl.find graph n in
- Hashtbl.replace graph n (lset_diff existing dsts)
-;;
+ connect [s.id] [];
+ let outs =
+ match s.node with
+ | Ast.STMT_while sw ->
+ let (pre_loop_stmts, _) = sw.Ast.while_lval in
+ let body = sw.Ast.while_body in
+ let preloop_end = seq [s.id] pre_loop_stmts in
+ connect predecessors [s.id];
+ connect (blk preloop_end body) (first pre_loop_stmts);
+ preloop_end
+
+ | Ast.STMT_for sf ->
+ let body_end = blk [s.id] sf.Ast.for_body in
+ connect predecessors [s.id];
+ connect body_end (first sf.Ast.for_body.node);
+ body_end
+
+ | Ast.STMT_for_each sfe ->
+ let head_end = blk [s.id] sfe.Ast.for_each_head in
+ let body_end = blk head_end sfe.Ast.for_each_body in
+ connect predecessors [s.id];
+ connect body_end (first sfe.Ast.for_each_head.node);
+ body_end
+
+ | Ast.STMT_if sif ->
+ connect predecessors [s.id];
+ (blk [s.id] sif.Ast.if_then) @
+ (match sif.Ast.if_else with
+ None -> [s.id]
+ | Some els -> blk [s.id] els)
+
+ | Ast.STMT_alt_tag sat ->
+ connect predecessors [s.id];
+ Array.fold_left
+ (fun ends {node=(_, b); id=_} -> (blk [s.id] b) @ ends)
+ [] sat.Ast.alt_tag_arms
+
+ | Ast.STMT_block b ->
+ blk predecessors b
+
+ | Ast.STMT_fail
+ | Ast.STMT_ret _ ->
+ connect predecessors [s.id];
+ []
+
+ | _ ->
+ connect predecessors [s.id];
+ [s.id]
+ in
+ connect outs [];
+ outs
-let last_id (nodes:('a identified) array) : node_id =
- let len = Array.length nodes in
- nodes.(len-1).id
+and build_flow_graph_for_stmts
+ (graph:node_graph)
+ (predecessors:node_id list)
+ (ss:Ast.stmt array)
+ : node_id list =
+ Array.fold_left (build_flow_graph_for_stmt graph) predecessors ss
;;
-let last_id_or_block_id (block:Ast.block) : node_id =
- let len = Array.length block.node in
- if len = 0
- then block.id
- else last_id block.node
-;;
-let graph_general_block_structure_building_visitor
+let graph_building_visitor
(cx:ctxt)
(tables_stack:typestate_tables Stack.t)
(inner:Walk.visitor)
: Walk.visitor =
let tables _ = Stack.top tables_stack in
+ let graph _ = (tables()).ts_graph in
+ let blk b =
+ add_flow_edges (graph()) b.id [];
+ ignore (build_flow_graph_for_stmts (graph()) [b.id] b.node)
+ in
- let visit_stmt_pre s =
- let ts = tables () in
- let stmts = ts.ts_stmts in
- Stack.push s stmts;
- inner.Walk.visit_stmt_pre s
+ let visit_mod_item_pre n p i =
+ begin
+ match i.node.Ast.decl_item with
+ Ast.MOD_ITEM_fn fn -> blk fn.Ast.fn_body
+ | _ -> ()
+ end;
+ inner.Walk.visit_mod_item_pre n p i
in
- let visit_stmt_post s =
- let ts = tables () in
- let stmts = ts.ts_stmts in
- inner.Walk.visit_stmt_post s;
- ignore (Stack.pop stmts)
+ let visit_obj_fn_pre obj ident fn =
+ blk fn.node.Ast.fn_body;
+ inner.Walk.visit_obj_fn_pre obj ident fn
in
- let show_node =
- fun n id -> show_node cx (tables()).ts_graph n id
+ let visit_obj_drop_pre obj b =
+ blk b;
+ inner.Walk.visit_obj_drop_pre obj b
in
let visit_block_pre b =
- begin
- let ts = tables () in
- let graph = ts.ts_graph in
- let sibs = ts.ts_siblings in
- let stmts = ts.ts_stmts in
- let len = Array.length b.node in
- let _ = htab_put graph b.id
- (if len > 0 then [b.node.(0).id] else [])
- in
-
- (*
- * If block has len,
- * then flow block to block.node.(0) and block.node.(len-1) to dsts
- * else flow block to dsts
- *
- * so AST:
- *
- * block#n{ stmt#0 ... stmt#k };
- * stmt#j;
- *
- * turns into graph:
- *
- * block#n -> stmt#0 -> ... -> stmt#k -> stmt#j
- *
- *)
- if Stack.is_empty stmts
- then ()
- else
- let s = Stack.top stmts in
- add_flow_edges graph s.id [b.id];
- match htab_search sibs s.id with
- None -> ()
- | Some sib_id ->
- if len > 0
- then
- add_flow_edges graph (last_id b.node) [sib_id]
- else
- add_flow_edges graph b.id [sib_id]
- end;
- show_node "block" b.id;
+ if Hashtbl.mem cx.ctxt_block_is_loop_body b.id
+ then blk b;
inner.Walk.visit_block_pre b
in
{ inner with
- Walk.visit_stmt_pre = visit_stmt_pre;
- Walk.visit_stmt_post = visit_stmt_post;
+ Walk.visit_mod_item_pre = visit_mod_item_pre;
+ Walk.visit_obj_fn_pre = visit_obj_fn_pre;
+ Walk.visit_obj_drop_pre = visit_obj_drop_pre;
Walk.visit_block_pre = visit_block_pre }
-;;
-
-
-let graph_special_block_structure_building_visitor
- (cx:ctxt)
- (tables_stack:typestate_tables Stack.t)
- (inner:Walk.visitor)
- : Walk.visitor =
- let tables _ = Stack.top tables_stack in
-
- let visit_stmt_pre s =
- begin
- match s.node with
-
- Ast.STMT_if sif ->
- let ts = tables () in
- let graph = ts.ts_graph in
- let cond_id = s.id in
- let succ = Hashtbl.find graph cond_id in
- let then_id = sif.Ast.if_then.id in
- let then_end_id = last_id_or_block_id sif.Ast.if_then in
- let show_node = show_node cx graph in
- let succ = List.filter (fun x -> not (x = then_id)) succ in
- show_node "initial cond" cond_id;
- show_node "initial then" then_id;
- show_node "initial then_end" then_end_id;
- begin
- match sif.Ast.if_else with
- None ->
- Hashtbl.replace graph cond_id (then_id :: succ);
- (* Kill residual messed-up block wiring.*)
- remove_flow_edges graph then_end_id [then_id];
- show_node "cond" cond_id;
- show_node "then" then_id;
- show_node "then_end" then_end_id;
-
- | Some e ->
- let else_id = e.id in
- let succ =
- List.filter (fun x -> not (x = else_id)) succ
- in
- let else_end_id = last_id_or_block_id e in
- show_node "initial else" else_id;
- show_node "initial else_end" else_end_id;
- Hashtbl.replace graph cond_id [then_id; else_id];
- Hashtbl.replace graph then_end_id succ;
- Hashtbl.replace graph else_end_id succ;
-
- (* Kill residual messed-up block wiring.*)
- remove_flow_edges graph then_end_id [then_id];
- remove_flow_edges graph else_id [then_id];
- remove_flow_edges graph else_end_id [then_id];
- show_node "cond" cond_id;
- show_node "then" then_id;
- show_node "then_end" then_end_id;
- show_node "else" else_id;
- show_node "else_end" else_end_id;
- end;
-
- | Ast.STMT_while sw ->
- (* There are a bunch of rewirings to do on 'while' nodes. *)
-
- begin
- let ts = tables () in
- let graph = ts.ts_graph in
- let dsts = Hashtbl.find graph s.id in
- let body = sw.Ast.while_body in
- let succ_stmts =
- List.filter (fun x -> not (x = body.id)) dsts
- in
-
- let (pre_loop_stmts, _) = sw.Ast.while_lval in
- let loop_head_id =
- (* Splice loop prelude into flow graph, save loop-head
- * node.
- *)
- let slen = Array.length pre_loop_stmts in
- if slen > 0
- then
- begin
- let pre_loop_begin = pre_loop_stmts.(0).id in
- let pre_loop_end = last_id pre_loop_stmts in
- remove_flow_edges graph s.id [body.id];
- add_flow_edges graph s.id [pre_loop_begin];
- add_flow_edges graph pre_loop_end [body.id];
- pre_loop_end
- end
- else
- body.id
- in
-
- (* Always flow s into the loop prelude; prelude may end
- * loop.
- *)
- remove_flow_edges graph s.id succ_stmts;
- add_flow_edges graph loop_head_id succ_stmts;
-
- (* Flow loop-end to loop-head. *)
- let loop_end = last_id_or_block_id body in
- add_flow_edges graph loop_end [loop_head_id]
- end
-
- | Ast.STMT_alt_tag at ->
- let ts = tables () in
- let graph = ts.ts_graph in
- let dsts = Hashtbl.find graph s.id in
- let arm_blocks =
- let arm_block_id { node = (_, block); id = _ } = block.id in
- Array.to_list (Array.map arm_block_id at.Ast.alt_tag_arms)
- in
- let succ_stmts =
- List.filter (fun x -> not (List.mem x arm_blocks)) dsts
- in
- remove_flow_edges graph s.id succ_stmts
-
- | _ -> ()
- end;
- inner.Walk.visit_stmt_post s
- in
- { inner with
- Walk.visit_stmt_pre = visit_stmt_pre }
;;
let find_roots
@@ -1631,13 +1498,7 @@ let process_crate
(condition_assigning_visitor cx tables_stack scopes
Walk.empty_visitor)));
(table_managed
- (graph_sequence_building_visitor cx tables_stack
- Walk.empty_visitor));
- (table_managed
- (graph_general_block_structure_building_visitor cx tables_stack
- Walk.empty_visitor));
- (table_managed
- (graph_special_block_structure_building_visitor cx tables_stack
+ (graph_building_visitor cx tables_stack
Walk.empty_visitor));
|]
in
diff --git a/src/boot/util/common.ml b/src/boot/util/common.ml
index f9b18246..c76da0de 100644
--- a/src/boot/util/common.ml
+++ b/src/boot/util/common.ml
@@ -56,6 +56,7 @@ type target =
Linux_x86_elf
| Win32_x86_pe
| MacOS_x86_macho
+ | FreeBSD_x86_elf
;;
type ty_mach =
diff --git a/src/comp/back/abi.rs b/src/comp/back/abi.rs
index db17b942..dd058590 100644
--- a/src/comp/back/abi.rs
+++ b/src/comp/back/abi.rs
@@ -44,7 +44,8 @@ const int obj_field_vtbl = 0;
const int obj_field_box = 1;
const int obj_body_elt_tydesc = 0;
-const int obj_body_elt_fields = 1;
+const int obj_body_elt_typarams = 1;
+const int obj_body_elt_fields = 2;
const int fn_field_code = 0;
const int fn_field_box = 1;
@@ -59,6 +60,9 @@ const int worst_case_glue_call_args = 7;
const int n_upcall_glues = 7;
+const int abi_x86_rustboot_cdecl = 1;
+const int abi_x86_rustc_fastcall = 2;
+
fn memcpy_glue_name() -> str {
ret "rust_memcpy_glue";
}
@@ -67,6 +71,10 @@ fn bzero_glue_name() -> str {
ret "rust_bzero_glue";
}
+fn vec_append_glue_name() -> str {
+ ret "rust_vec_append_glue";
+}
+
fn upcall_glue_name(int n) -> str {
ret "rust_upcall_" + util.common.istr(n);
}
diff --git a/src/comp/back/x86.rs b/src/comp/back/x86.rs
index ac52fca5..10227df7 100644
--- a/src/comp/back/x86.rs
+++ b/src/comp/back/x86.rs
@@ -41,20 +41,117 @@ fn store_esp_to_runtime_sp() -> vec[str] {
ret vec("movl %esp, " + wstr(abi.task_field_runtime_sp) + "(%ecx)");
}
+/*
+ * This is a bit of glue-code. It should be emitted once per
+ * compilation unit.
+ *
+ * - save regs on C stack
+ * - align sp on a 16-byte boundary
+ * - save sp to task.runtime_sp (runtime_sp is thus always aligned)
+ * - load saved task sp (switch stack)
+ * - restore saved task regs
+ * - return to saved task pc
+ *
+ * Our incoming stack looks like this:
+ *
+ * *esp+4 = [arg1 ] = task ptr
+ * *esp = [retpc ]
+ */
+
fn rust_activate_glue() -> vec[str] {
ret vec("movl 4(%esp), %ecx # ecx = rust_task")
+ save_callee_saves()
+ store_esp_to_runtime_sp()
+ load_esp_from_rust_sp()
- // This 'add' instruction is a bit surprising.
- // See lengthy comment in boot/be/x86.ml activate_glue.
+ /*
+ * There are two paths we can arrive at this code from:
+ *
+ *
+ * 1. We are activating a task for the first time. When we switch
+ * into the task stack and 'ret' to its first instruction, we'll
+ * start doing whatever the first instruction says. Probably
+ * saving registers and starting to establish a frame. Harmless
+ * stuff, doesn't look at task->rust_sp again except when it
+ * clobbers it during a later upcall.
+ *
+ *
+ * 2. We are resuming a task that was descheduled by the yield glue
+ * below. When we switch into the task stack and 'ret', we'll be
+ * ret'ing to a very particular instruction:
+ *
+ * "esp <- task->rust_sp"
+ *
+ * this is the first instruction we 'ret' to after this glue,
+ * because it is the first instruction following *any* upcall,
+ * and the task we are activating was descheduled mid-upcall.
+ *
+ * Unfortunately for us, we have already restored esp from
+ * task->rust_sp and are about to eat the 5 words off the top of
+ * it.
+ *
+ *
+ * | ... | <-- where esp will be once we restore + ret, below,
+ * | retpc | and where we'd *like* task->rust_sp to wind up.
+ * | ebp |
+ * | edi |
+ * | esi |
+ * | ebx | <-- current task->rust_sp == current esp
+ *
+ *
+ * This is a problem. If we return to "esp <- task->rust_sp" it
+ * will push esp back down by 5 words. This manifests as a rust
+ * stack that grows by 5 words on each yield/reactivate. Not
+ * good.
+ *
+ * So what we do here is just adjust task->rust_sp up 5 words as
+ * well, to mirror the movement in esp we're about to
+ * perform. That way the "esp <- task->rust_sp" we 'ret' to below
+ * will be a no-op. Esp won't move, and the task's stack won't
+ * grow.
+ */
+ vec("addl $20, " + wstr(abi.task_field_rust_sp) + "(%ecx)")
+
+ /*
+ * In most cases, the function we're returning to (activating)
+ * will have saved any caller-saves before it yielded via upcalling,
+ * so no work to do here. With one exception: when we're initially
+ * activating, the task needs to be in the fastcall 2nd parameter
+ * expected by the rust main function. That's edx.
+ */
+ + vec("mov %ecx, %edx")
+
+ restore_callee_saves()
+ vec("ret");
}
+/* More glue code, this time the 'bottom half' of yielding.
+ *
+ * We arrived here because an upcall decided to deschedule the
+ * running task. So the upcall's return address got patched to the
+ * first instruction of this glue code.
+ *
+ * When the upcall does 'ret' it will come here, and its esp will be
+ * pointing to the last argument pushed on the C stack before making
+ * the upcall: the 0th argument to the upcall, which is always the
+ * task ptr performing the upcall. That's where we take over.
+ *
+ * Our goal is to complete the descheduling:
+ *
+ * - Switch over to the task stack temporarily.
+ *
+ * - Save the task's callee-saves onto the task stack.
+ * (the task is now 'descheduled', safe to set aside)
+ *
+ * - Switch *back* to the C stack.
+ *
+ * - Restore the C-stack callee-saves.
+ *
+ * - Return to the caller on the C stack that activated the task.
+ *
+ */
+
fn rust_yield_glue() -> vec[str] {
ret vec("movl 0(%esp), %ecx # ecx = rust_task")
+ load_esp_from_rust_sp()
diff --git a/src/comp/driver/rustc.rs b/src/comp/driver/rustc.rs
index 004c9d4c..7ad0cdc7 100644
--- a/src/comp/driver/rustc.rs
+++ b/src/comp/driver/rustc.rs
@@ -2,6 +2,7 @@
import front.parser;
import front.token;
+import front.eval;
import middle.trans;
import middle.resolve;
import middle.typeck;
@@ -13,6 +14,30 @@ import std.option.none;
import std._str;
import std._vec;
+fn default_environment(session.session sess,
+ str argv0,
+ str input) -> eval.env {
+
+ auto libc = "libc.so";
+ alt (sess.get_targ_cfg().os) {
+ case (session.os_win32) { libc = "msvcrt.dll"; }
+ case (session.os_macos) { libc = "libc.dylib"; }
+ case (session.os_linux) { libc = "libc.so.6"; }
+ }
+
+ ret
+ vec(
+ // Target bindings.
+ tup("target_os", eval.val_str(std.os.target_os())),
+ tup("target_arch", eval.val_str("x86")),
+ tup("target_libc", eval.val_str(libc)),
+
+ // Build bindings.
+ tup("build_compiler", eval.val_str(argv0)),
+ tup("build_input", eval.val_str(input))
+ );
+}
+
impure fn parse_input(session.session sess,
parser.parser p,
str input) -> @front.ast.crate {
@@ -25,20 +50,30 @@ impure fn parse_input(session.session sess,
fail;
}
-impure fn compile_input(session.session sess, str input, str output,
+impure fn compile_input(session.session sess,
+ eval.env env,
+ str input, str output,
bool shared) {
- auto p = parser.new_parser(sess, 0, input);
+ auto p = parser.new_parser(sess, env, 0, input);
auto crate = parse_input(sess, p, input);
crate = resolve.resolve_crate(sess, crate);
crate = typeck.check_crate(sess, crate);
trans.trans_crate(sess, crate, output, shared);
}
+impure fn pretty_print_input(session.session sess,
+ eval.env env,
+ str input) {
+ auto p = front.parser.new_parser(sess, env, 0, input);
+ auto crate = front.parser.parse_crate_from_source_file(p);
+ pretty.pprust.print_ast(crate.node.module);
+}
+
fn warn_wrong_compiler() {
log "This is the rust 'self-hosted' compiler.";
log "The one written in rust.";
log "It is currently incomplete.";
- log "You may want rustboot insteaad, the compiler next door.";
+ log "You may want rustboot instead, the compiler next door.";
}
fn usage(session.session sess, str argv0) {
@@ -48,6 +83,7 @@ fn usage(session.session sess, str argv0) {
log " -o <filename> write output to <filename>";
log " -nowarn suppress wrong-compiler warning";
log " -shared compile a shared-library crate";
+ log " -pp pretty-print the input instead of compiling";
log " -h display this message";
log "";
log "";
@@ -74,6 +110,7 @@ impure fn main(vec[str] args) {
let option.t[str] output_file = none[str];
let bool do_warn = true;
let bool shared = false;
+ let bool pretty = false;
auto i = 1u;
auto len = _vec.len[str](args);
@@ -86,24 +123,21 @@ impure fn main(vec[str] args) {
do_warn = false;
} else if (_str.eq(arg, "-shared")) {
shared = true;
- } else {
- // FIXME: rust could use an elif construct.
- if (_str.eq(arg, "-o")) {
- if (i+1u < len) {
- output_file = some(args.(i+1u));
- i += 1u;
- } else {
- usage(sess, args.(0));
- sess.err("-o requires an argument");
- }
+ } else if (_str.eq(arg, "-pp")) {
+ pretty = true;
+ } else if (_str.eq(arg, "-o")) {
+ if (i+1u < len) {
+ output_file = some(args.(i+1u));
+ i += 1u;
} else {
- if (_str.eq(arg, "-h")) {
- usage(sess, args.(0));
- } else {
- usage(sess, args.(0));
- sess.err("unrecognized option: " + arg);
- }
+ usage(sess, args.(0));
+ sess.err("-o requires an argument");
}
+ } else if (_str.eq(arg, "-h")) {
+ usage(sess, args.(0));
+ } else {
+ usage(sess, args.(0));
+ sess.err("unrecognized option: " + arg);
}
} else {
alt (input_file) {
@@ -115,8 +149,6 @@ impure fn main(vec[str] args) {
input_file = some[str](arg);
}
}
- // FIXME: dummy node to work around typestate mis-wiring bug.
- i = i;
}
i += 1u;
}
@@ -131,23 +163,29 @@ impure fn main(vec[str] args) {
sess.err("no input filename");
}
case (some[str](?ifile)) {
- alt (output_file) {
- case (none[str]) {
- let vec[str] parts = _str.split(ifile, '.' as u8);
- parts = _vec.pop[str](parts);
- parts += ".bc";
- auto ofile = _str.concat(parts);
- compile_input(sess, ifile, ofile, shared);
- }
- case (some[str](?ofile)) {
- compile_input(sess, ifile, ofile, shared);
+
+ auto env = default_environment(sess, args.(0), ifile);
+ if (pretty) {
+ pretty_print_input(sess, env, ifile);
+ }
+ else {
+ alt (output_file) {
+ case (none[str]) {
+ let vec[str] parts = _str.split(ifile, '.' as u8);
+ parts = _vec.pop[str](parts);
+ parts += ".bc";
+ auto ofile = _str.concat(parts);
+ compile_input(sess, env, ifile, ofile, shared);
+ }
+ case (some[str](?ofile)) {
+ compile_input(sess, env, ifile, ofile, shared);
+ }
}
}
}
}
}
-
// Local Variables:
// mode: rust
// fill-column: 78;
diff --git a/src/comp/front/ast.rs b/src/comp/front/ast.rs
index fb068dba..f9d609d1 100644
--- a/src/comp/front/ast.rs
+++ b/src/comp/front/ast.rs
@@ -5,6 +5,7 @@ import std._vec;
import util.common.span;
import util.common.spanned;
import util.common.ty_mach;
+import util.common.filename;
type ident = str;
@@ -36,11 +37,29 @@ tag def {
def_ty_arg(def_id);
def_binding(def_id);
def_use(def_id);
+ def_native_ty(def_id);
+ def_native_fn(def_id);
}
type crate = spanned[crate_];
type crate_ = rec(_mod module);
+tag crate_directive_ {
+ cdir_expr(@expr);
+ // FIXME: cdir_let should be eliminated
+ // and redirected to the use of const stmt_decls inside
+ // crate directive blocks.
+ cdir_let(ident, @expr, vec[@crate_directive]);
+ cdir_src_mod(ident, option.t[filename]);
+ cdir_dir_mod(ident, option.t[filename], vec[@crate_directive]);
+ cdir_view_item(@view_item);
+ cdir_meta(vec[@meta_item]);
+ cdir_syntax(path);
+ cdir_auth(path, effect);
+}
+type crate_directive = spanned[crate_directive_];
+
+
type meta_item = spanned[meta_item_];
type meta_item_ = rec(ident name, str value);
@@ -55,6 +74,7 @@ type pat = spanned[pat_];
tag pat_ {
pat_wild(ann);
pat_bind(ident, def_id, ann);
+ pat_lit(@lit, ann);
pat_tag(path, vec[@pat], option.t[variant_def], ann);
}
@@ -63,6 +83,11 @@ tag mutability {
imm;
}
+tag opacity {
+ op_abstract;
+ op_transparent;
+}
+
tag layer {
layer_value;
layer_state;
@@ -75,6 +100,11 @@ tag effect {
eff_unsafe;
}
+tag proto {
+ proto_iter;
+ proto_fn;
+}
+
tag binop {
add;
sub;
@@ -97,12 +127,49 @@ tag binop {
gt;
}
+fn binop_to_str(binop op) -> str {
+ alt (op) {
+ case (add) {ret "+";}
+ case (sub) {ret "-";}
+ case (mul) {ret "*";}
+ case (div) {ret "/";}
+ case (rem) {ret "%";}
+ case (and) {ret "&&";}
+ case (or) {ret "||";}
+ case (bitxor) {ret "^";}
+ case (bitand) {ret "&";}
+ case (bitor) {ret "|";}
+ case (lsl) {ret "<<";}
+ case (lsr) {ret ">>";}
+ case (asr) {ret ">>>";}
+ case (eq) {ret "==";}
+ case (lt) {ret "<";}
+ case (le) {ret "<=";}
+ case (ne) {ret "!=";}
+ case (ge) {ret ">=";}
+ case (gt) {ret ">";}
+ }
+}
+
+
tag unop {
box;
deref;
bitnot;
not;
neg;
+ _mutable;
+}
+
+fn unop_to_str(unop op) -> str {
+ alt (op) {
+ case (box) {ret "@";}
+ case (deref) {ret "*";}
+ case (bitnot) {ret "~";}
+ case (not) {ret "!";}
+ case (neg) {ret "-";}
+ case (_mutable) {ret "mutable";}
+ }
}
tag mode {
@@ -113,11 +180,9 @@ tag mode {
type stmt = spanned[stmt_];
tag stmt_ {
stmt_decl(@decl);
- stmt_ret(option.t[@expr]);
- stmt_log(@expr);
- stmt_check_expr(@expr);
- stmt_fail;
stmt_expr(@expr);
+ // These only exist in crate-level blocks.
+ stmt_crate_directive(@crate_directive);
}
type local = rec(option.t[@ty] ty,
@@ -142,7 +207,7 @@ type expr = spanned[expr_];
tag expr_ {
expr_vec(vec[@expr], ann);
expr_tup(vec[elt], ann);
- expr_rec(vec[field], ann);
+ expr_rec(vec[field], option.t[@expr], ann);
expr_call(@expr, vec[@expr], ann);
expr_bind(@expr, vec[option.t[@expr]], ann);
expr_binary(binop, @expr, @expr, ann);
@@ -152,6 +217,7 @@ tag expr_ {
expr_if(@expr, block, option.t[@expr], ann);
expr_while(@expr, block, ann);
expr_for(@decl, @expr, block, ann);
+ expr_for_each(@decl, @expr, block, ann);
expr_do_while(block, @expr, ann);
expr_alt(@expr, vec[arm], ann);
expr_block(block, ann);
@@ -160,6 +226,13 @@ tag expr_ {
expr_field(@expr, ident, ann);
expr_index(@expr, @expr, ann);
expr_path(path, option.t[def], ann);
+ expr_ext(path, vec[@expr], option.t[@expr], @expr, ann);
+ expr_fail;
+ expr_ret(option.t[@expr]);
+ expr_put(option.t[@expr]);
+ expr_be(@expr);
+ expr_log(@expr);
+ expr_check_expr(@expr);
}
type lit = spanned[lit_];
@@ -179,7 +252,8 @@ tag lit_ {
type ty_field = rec(ident ident, @ty ty);
type ty_arg = rec(mode mode, @ty ty);
// TODO: effect
-type ty_method = rec(ident ident, vec[ty_arg] inputs, @ty output);
+type ty_method = rec(proto proto, ident ident,
+ vec[ty_arg] inputs, @ty output);
type ty = spanned[ty_];
tag ty_ {
ty_nil;
@@ -193,17 +267,28 @@ tag ty_ {
ty_vec(@ty);
ty_tup(vec[@ty]);
ty_rec(vec[ty_field]);
- ty_fn(vec[ty_arg], @ty); // TODO: effect
+ ty_fn(proto, vec[ty_arg], @ty); // TODO: effect
ty_obj(vec[ty_method]);
ty_path(path, option.t[def]);
ty_mutable(@ty);
+ ty_type;
+ ty_constr(@ty, vec[@constr]);
+}
+
+tag constr_arg_ {
+ carg_base;
+ carg_ident(ident);
}
+type constr_arg = spanned[constr_arg_];
+type constr_ = rec(path path, vec[@constr_arg] args);
+type constr = spanned[constr_];
type arg = rec(mode mode, @ty ty, ident ident, def_id id);
-type _fn = rec(effect effect,
- bool is_iter,
- vec[arg] inputs,
- @ty output,
+type fn_decl = rec(effect effect,
+ vec[arg] inputs,
+ @ty output);
+type _fn = rec(fn_decl decl,
+ proto proto,
block body);
@@ -212,8 +297,8 @@ type method = spanned[method_];
type obj_field = rec(@ty ty, ident ident, def_id id, ann ann);
type _obj = rec(vec[obj_field] fields,
- vec[@method] methods);
-
+ vec[@method] methods,
+ option.t[block] dtor);
tag mod_index_entry {
mie_view_item(@view_item);
@@ -221,11 +306,28 @@ tag mod_index_entry {
mie_tag_variant(@item /* tag item */, uint /* variant index */);
}
+tag native_mod_index_entry {
+ nmie_view_item(@view_item);
+ nmie_item(@native_item);
+}
+
type mod_index = hashmap[ident,mod_index_entry];
type _mod = rec(vec[@view_item] view_items,
vec[@item] items,
mod_index index);
+tag native_abi {
+ native_abi_rust;
+ native_abi_cdecl;
+}
+
+type native_mod = rec(str native_name,
+ native_abi abi,
+ vec[@view_item] view_items,
+ vec[@native_item] items,
+ native_mod_index index);
+type native_mod_index = hashmap[ident,native_mod_index_entry];
+
type variant_arg = rec(@ty ty, def_id id);
type variant = rec(str name, vec[variant_arg] args, def_id id, ann ann);
@@ -233,6 +335,7 @@ type view_item = spanned[view_item_];
tag view_item_ {
view_item_use(ident, vec[@meta_item], def_id);
view_item_import(ident, vec[ident], def_id, option.t[def]);
+ view_item_export(ident);
}
type item = spanned[item_];
@@ -240,11 +343,18 @@ tag item_ {
item_const(ident, @ty, @expr, def_id, ann);
item_fn(ident, _fn, vec[ty_param], def_id, ann);
item_mod(ident, _mod, def_id);
+ item_native_mod(ident, native_mod, def_id);
item_ty(ident, @ty, vec[ty_param], def_id, ann);
item_tag(ident, vec[variant], vec[ty_param], def_id);
item_obj(ident, _obj, vec[ty_param], def_id, ann);
}
+type native_item = spanned[native_item_];
+tag native_item_ {
+ native_item_ty(ident, def_id);
+ native_item_fn(ident, fn_decl, vec[ty_param], def_id, ann);
+}
+
fn index_view_item(mod_index index, @view_item it) {
alt (it.node) {
case(ast.view_item_use(?id, _, _)) {
@@ -253,6 +363,11 @@ fn index_view_item(mod_index index, @view_item it) {
case(ast.view_item_import(?def_ident,_,_,_)) {
index.insert(def_ident, ast.mie_view_item(it));
}
+ case(ast.view_item_export(_)) {
+ // NB: don't index these, they might collide with
+ // the import or use that they're exporting. Have
+ // to do linear search for exports.
+ }
}
}
@@ -267,6 +382,9 @@ fn index_item(mod_index index, @item it) {
case (ast.item_mod(?id, _, _)) {
index.insert(id, ast.mie_item(it));
}
+ case (ast.item_native_mod(?id, _, _)) {
+ index.insert(id, ast.mie_item(it));
+ }
case (ast.item_ty(?id, _, _, _, _)) {
index.insert(id, ast.mie_item(it));
}
@@ -285,6 +403,41 @@ fn index_item(mod_index index, @item it) {
}
}
+fn index_native_item(native_mod_index index, @native_item it) {
+ alt (it.node) {
+ case (ast.native_item_ty(?id, _)) {
+ index.insert(id, ast.nmie_item(it));
+ }
+ case (ast.native_item_fn(?id, _, _, _, _)) {
+ index.insert(id, ast.nmie_item(it));
+ }
+ }
+}
+
+fn index_native_view_item(native_mod_index index, @view_item it) {
+ alt (it.node) {
+ case(ast.view_item_import(?def_ident,_,_,_)) {
+ index.insert(def_ident, ast.nmie_view_item(it));
+ }
+ case(ast.view_item_export(_)) {
+ // NB: don't index these, they might collide with
+ // the import or use that they're exporting. Have
+ // to do linear search for exports.
+ }
+ }
+}
+
+fn is_call_expr(@expr e) -> bool {
+ alt (e.node) {
+ case (expr_call(_, _, _)) {
+ ret true;
+ }
+ case (_) {
+ ret false;
+ }
+ }
+}
+
//
// Local Variables:
// mode: rust
diff --git a/src/comp/front/eval.rs b/src/comp/front/eval.rs
new file mode 100644
index 00000000..881797c9
--- /dev/null
+++ b/src/comp/front/eval.rs
@@ -0,0 +1,436 @@
+import std._vec;
+import std._str;
+import std.option;
+import std.option.some;
+import std.option.none;
+import std.map.hashmap;
+
+import driver.session;
+import ast.ident;
+import front.parser.parser;
+import front.parser.spanned;
+import front.parser.new_parser;
+import front.parser.parse_mod_items;
+import util.common;
+import util.common.filename;
+import util.common.append;
+import util.common.span;
+import util.common.new_str_hash;
+
+
+// Simple dynamic-typed value type for eval_expr.
+tag val {
+ val_bool(bool);
+ val_int(int);
+ val_str(str);
+}
+
+type env = vec[tup(ident, val)];
+
+fn mk_env() -> env {
+ let env e = vec();
+ ret e;
+}
+
+fn val_is_bool(val v) -> bool {
+ alt (v) {
+ case (val_bool(_)) { ret true; }
+ case (_) { }
+ }
+ ret false;
+}
+
+fn val_is_int(val v) -> bool {
+ alt (v) {
+ case (val_int(?i)) { ret true; } // fixed: matched val_bool(_) before (copy-paste from val_is_bool)
+ case (_) { }
+ }
+ ret false;
+}
+
+fn val_is_str(val v) -> bool {
+ alt (v) {
+ case (val_str(_)) { ret true; }
+ case (_) { }
+ }
+ ret false;
+}
+
+fn val_as_bool(val v) -> bool {
+ alt (v) {
+ case (val_bool(?b)) { ret b; }
+ case (_) { }
+ }
+ fail;
+}
+
+fn val_as_int(val v) -> int {
+ alt (v) {
+ case (val_int(?i)) { ret i; }
+ case (_) { }
+ }
+ fail;
+}
+
+fn val_as_str(val v) -> str {
+ alt (v) {
+ case (val_str(?s)) { ret s; }
+ case (_) { }
+ }
+ fail;
+}
+
+fn lookup(session.session sess, env e, span sp, ident i) -> val {
+ for (tup(ident, val) pair in e) {
+ if (_str.eq(i, pair._0)) {
+ ret pair._1;
+ }
+ }
+ sess.span_err(sp, "unknown variable: " + i);
+ fail;
+}
+
+fn eval_lit(session.session sess, env e, span sp, @ast.lit lit) -> val {
+ alt (lit.node) {
+ case (ast.lit_bool(?b)) { ret val_bool(b); }
+ case (ast.lit_int(?i)) { ret val_int(i); }
+ case (ast.lit_str(?s)) { ret val_str(s); }
+ case (_) {
+ sess.span_err(sp, "evaluating unsupported literal");
+ }
+ }
+ fail;
+}
+
+fn eval_expr(session.session sess, env e, @ast.expr x) -> val {
+ alt (x.node) {
+ case (ast.expr_path(?pth, _, _)) {
+ if (_vec.len[ident](pth.node.idents) == 1u &&
+ _vec.len[@ast.ty](pth.node.types) == 0u) {
+ ret lookup(sess, e, x.span, pth.node.idents.(0));
+ }
+ sess.span_err(x.span, "evaluating structured path-name");
+ }
+
+ case (ast.expr_lit(?lit, _)) {
+ ret eval_lit(sess, e, x.span, lit);
+ }
+
+ case (ast.expr_unary(?op, ?a, _)) {
+ auto av = eval_expr(sess, e, a);
+ alt (op) {
+ case (ast.not) {
+ if (val_is_bool(av)) {
+ ret val_bool(!val_as_bool(av));
+ }
+ sess.span_err(x.span, "bad types in '!' expression");
+ }
+ case (_) {
+ sess.span_err(x.span, "evaluating unsupported unop");
+ }
+ }
+ }
+
+ case (ast.expr_binary(?op, ?a, ?b, _)) {
+ auto av = eval_expr(sess, e, a);
+ auto bv = eval_expr(sess, e, b);
+ alt (op) {
+ case (ast.add) {
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_int(val_as_int(av) + val_as_int(bv));
+ }
+ if (val_is_str(av) && val_is_str(bv)) {
+ ret val_str(val_as_str(av) + val_as_str(bv));
+ }
+ sess.span_err(x.span, "bad types in '+' expression");
+ }
+
+ case (ast.sub) {
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_int(val_as_int(av) - val_as_int(bv));
+ }
+ sess.span_err(x.span, "bad types in '-' expression");
+ }
+
+ case (ast.mul) {
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_int(val_as_int(av) * val_as_int(bv));
+ }
+ sess.span_err(x.span, "bad types in '*' expression");
+ }
+
+ case (ast.div) {
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_int(val_as_int(av) / val_as_int(bv));
+ }
+ sess.span_err(x.span, "bad types in '/' expression");
+ }
+
+ case (ast.rem) {
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_int(val_as_int(av) % val_as_int(bv));
+ }
+ sess.span_err(x.span, "bad types in '%' expression");
+ }
+
+ case (ast.and) {
+ if (val_is_bool(av) && val_is_bool(bv)) {
+ ret val_bool(val_as_bool(av) && val_as_bool(bv));
+ }
+ sess.span_err(x.span, "bad types in '&&' expression");
+ }
+
+ case (ast.or) {
+ if (val_is_bool(av) && val_is_bool(bv)) {
+ ret val_bool(val_as_bool(av) || val_as_bool(bv));
+ }
+ sess.span_err(x.span, "bad types in '||' expression");
+ }
+
+ case (ast.eq) {
+ ret val_bool(val_eq(sess, x.span, av, bv));
+ }
+
+ case (ast.ne) {
+ ret val_bool(! val_eq(sess, x.span, av, bv));
+ }
+
+ case (_) {
+ sess.span_err(x.span, "evaluating unsupported binop");
+ }
+ }
+ }
+ case (_) {
+ sess.span_err(x.span, "evaluating unsupported expression");
+ }
+ }
+ fail;
+}
+
+fn val_eq(session.session sess, span sp, val av, val bv) -> bool {
+ if (val_is_bool(av) && val_is_bool(bv)) {
+ ret val_as_bool(av) == val_as_bool(bv);
+ }
+ if (val_is_int(av) && val_is_int(bv)) {
+ ret val_as_int(av) == val_as_int(bv);
+ }
+ if (val_is_str(av) && val_is_str(bv)) {
+ ret _str.eq(val_as_str(av),
+ val_as_str(bv));
+ }
+ sess.span_err(sp, "bad types in comparison");
+ fail;
+}
+
+impure fn eval_crate_directives(parser p,
+ env e,
+ vec[@ast.crate_directive] cdirs,
+ str prefix,
+ &mutable vec[@ast.view_item] view_items,
+ &mutable vec[@ast.item] items,
+ hashmap[ast.ident,
+ ast.mod_index_entry] index) {
+
+ for (@ast.crate_directive sub_cdir in cdirs) {
+ eval_crate_directive(p, e, sub_cdir, prefix,
+ view_items, items, index);
+ }
+}
+
+
+impure fn eval_crate_directives_to_mod(parser p,
+ env e,
+ vec[@ast.crate_directive] cdirs,
+ str prefix) -> ast._mod {
+ let vec[@ast.view_item] view_items = vec();
+ let vec[@ast.item] items = vec();
+ auto index = new_str_hash[ast.mod_index_entry]();
+
+ eval_crate_directives(p, e, cdirs, prefix,
+ view_items, items, index);
+
+ ret rec(view_items=view_items, items=items, index=index);
+}
+
+
+impure fn eval_crate_directive_block(parser p,
+ env e,
+ &ast.block blk,
+ str prefix,
+ &mutable vec[@ast.view_item] view_items,
+ &mutable vec[@ast.item] items,
+ hashmap[ast.ident,
+ ast.mod_index_entry] index) {
+
+ for (@ast.stmt s in blk.node.stmts) {
+ alt (s.node) {
+ case (ast.stmt_crate_directive(?cdir)) {
+ eval_crate_directive(p, e, cdir, prefix,
+ view_items, items, index);
+ }
+ case (_) {
+ auto sess = p.get_session();
+ sess.span_err(s.span,
+ "unsupported stmt in crate-directive block");
+ }
+ }
+ }
+}
+
+impure fn eval_crate_directive_expr(parser p,
+ env e,
+ @ast.expr x,
+ str prefix,
+ &mutable vec[@ast.view_item] view_items,
+ &mutable vec[@ast.item] items,
+ hashmap[ast.ident,
+ ast.mod_index_entry] index) {
+ auto sess = p.get_session();
+
+ alt (x.node) {
+
+ case (ast.expr_if(?cond, ?thn, ?elopt, _)) {
+ auto cv = eval_expr(sess, e, cond);
+ if (!val_is_bool(cv)) {
+ sess.span_err(x.span, "bad cond type in 'if'");
+ }
+
+ if (val_as_bool(cv)) {
+ ret eval_crate_directive_block(p, e, thn, prefix,
+ view_items, items,
+ index);
+ }
+
+ alt (elopt) {
+ case (some[@ast.expr](?els)) {
+ ret eval_crate_directive_expr(p, e, els, prefix,
+ view_items, items,
+ index);
+ }
+ case (_) {
+ // Absent-else is ok.
+ }
+ }
+ }
+
+ case (ast.expr_alt(?v, ?arms, _)) {
+ auto vv = eval_expr(sess, e, v);
+ for (ast.arm arm in arms) {
+ alt (arm.pat.node) {
+ case (ast.pat_lit(?lit, _)) {
+ auto pv = eval_lit(sess, e,
+ arm.pat.span, lit);
+ if (val_eq(sess, arm.pat.span, vv, pv)) {
+ ret eval_crate_directive_block
+ (p, e, arm.block, prefix,
+ view_items, items, index);
+ }
+ }
+ case (ast.pat_wild(_)) {
+ ret eval_crate_directive_block
+ (p, e, arm.block, prefix,
+ view_items, items, index);
+ }
+ case (_) {
+ sess.span_err(arm.pat.span,
+ "bad pattern type in 'alt'");
+ }
+ }
+ }
+ sess.span_err(x.span, "no cases matched in 'alt'");
+ }
+
+ case (ast.expr_block(?block, _)) {
+ ret eval_crate_directive_block(p, e, block, prefix,
+ view_items, items,
+ index);
+ }
+
+ case (_) {
+ sess.span_err(x.span, "unsupported expr type");
+ }
+ }
+}
+
+impure fn eval_crate_directive(parser p,
+ env e,
+ @ast.crate_directive cdir,
+ str prefix,
+ &mutable vec[@ast.view_item] view_items,
+ &mutable vec[@ast.item] items,
+ hashmap[ast.ident,
+ ast.mod_index_entry] index) {
+ alt (cdir.node) {
+
+ case (ast.cdir_let(?id, ?x, ?cdirs)) {
+ auto v = eval_expr(p.get_session(), e, x);
+ auto e0 = vec(tup(id, v)) + e;
+ eval_crate_directives(p, e0, cdirs, prefix,
+ view_items, items, index);
+ }
+
+ case (ast.cdir_expr(?x)) {
+ eval_crate_directive_expr(p, e, x, prefix,
+ view_items, items, index);
+ }
+
+ case (ast.cdir_src_mod(?id, ?file_opt)) {
+
+ auto file_path = id + ".rs";
+ alt (file_opt) {
+ case (some[filename](?f)) {
+ file_path = f;
+ }
+ case (none[filename]) {}
+ }
+
+ auto full_path = prefix + std.os.path_sep() + file_path;
+
+ auto p0 = new_parser(p.get_session(), e, 0, full_path);
+ auto m0 = parse_mod_items(p0, token.EOF);
+ auto im = ast.item_mod(id, m0, p.next_def_id());
+ auto i = @spanned(cdir.span, cdir.span, im);
+ ast.index_item(index, i);
+ append[@ast.item](items, i);
+ }
+
+ case (ast.cdir_dir_mod(?id, ?dir_opt, ?cdirs)) {
+
+ auto path = id;
+ alt (dir_opt) {
+ case (some[filename](?d)) {
+ path = d;
+ }
+ case (none[filename]) {}
+ }
+
+ auto full_path = prefix + std.os.path_sep() + path;
+ auto m0 = eval_crate_directives_to_mod(p, e, cdirs, full_path);
+ auto im = ast.item_mod(id, m0, p.next_def_id());
+ auto i = @spanned(cdir.span, cdir.span, im);
+ ast.index_item(index, i);
+ append[@ast.item](items, i);
+ }
+
+ case (ast.cdir_view_item(?vi)) {
+ append[@ast.view_item](view_items, vi);
+ ast.index_view_item(index, vi);
+ }
+
+ case (ast.cdir_meta(?mi)) {}
+ case (ast.cdir_syntax(?pth)) {}
+ case (ast.cdir_auth(?pth, ?eff)) {}
+ }
+}
+
+
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C ../.. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/comp/front/extfmt.rs b/src/comp/front/extfmt.rs
new file mode 100644
index 00000000..255614d0
--- /dev/null
+++ b/src/comp/front/extfmt.rs
@@ -0,0 +1,553 @@
+/* The 'fmt' extension is modeled on the posix printf system.
+ *
+ * A posix conversion ostensibly looks like this:
+ *
+ * %[parameter][flags][width][.precision][length]type
+ *
+ * Given the different numeric type bestiary we have, we omit the 'length'
+ * parameter and support slightly different conversions for 'type':
+ *
+ * %[parameter][flags][width][.precision]type
+ *
+ * we also only support translating-to-rust a tiny subset of the possible
+ * combinations at the moment.
+ */
+
+import util.common;
+
+import std._str;
+import std._vec;
+import std.option;
+import std.option.none;
+import std.option.some;
+
+export expand_syntax_ext;
+
+tag signedness {
+ signed;
+ unsigned;
+}
+
+tag caseness {
+ case_upper;
+ case_lower;
+}
+
+tag ty {
+ ty_bool;
+ ty_str;
+ ty_char;
+ ty_int(signedness);
+ ty_bits;
+ ty_hex(caseness);
+ // FIXME: More types
+}
+
+tag flag {
+ flag_left_justify;
+ flag_left_zero_pad;
+ flag_left_space_pad;
+ flag_plus_if_positive;
+ flag_alternate;
+}
+
+tag count {
+ count_is(int);
+ count_is_param(int);
+ count_is_next_param;
+ count_implied;
+}
+
+// A formatted conversion from an expression to a string
+type conv = rec(option.t[int] param,
+ vec[flag] flags,
+ count width,
+ count precision,
+ ty ty);
+
+// A fragment of the output sequence
+tag piece {
+ piece_string(str);
+ piece_conv(conv);
+}
+
+// TODO: Need to thread parser through here to handle errors correctly
+fn expand_syntax_ext(vec[@ast.expr] args,
+ option.t[@ast.expr] body) -> @ast.expr {
+
+ if (_vec.len[@ast.expr](args) == 0u) {
+ log "malformed #fmt call";
+ fail;
+ }
+
+ auto fmt = expr_to_str(args.(0));
+
+ // log "Format string:";
+ // log fmt;
+
+ auto pieces = parse_fmt_string(fmt);
+ auto args_len = _vec.len[@ast.expr](args);
+ auto fmt_args = _vec.slice[@ast.expr](args, 1u, args_len - 1u);
+ ret pieces_to_expr(pieces, args);
+}
+
+fn expr_to_str(@ast.expr expr) -> str {
+ alt (expr.node) {
+ case (ast.expr_lit(?l, _)) {
+ alt (l.node) {
+ case (ast.lit_str(?s)) {
+ ret s;
+ }
+ }
+ }
+ }
+ log "malformed #fmt call";
+ fail;
+}
+
+fn parse_fmt_string(str s) -> vec[piece] {
+ let vec[piece] pieces = vec();
+ auto lim = _str.byte_len(s);
+ auto buf = "";
+
+ fn flush_buf(str buf, &vec[piece] pieces) -> str {
+ if (_str.byte_len(buf) > 0u) {
+ auto piece = piece_string(buf);
+ pieces += piece;
+ }
+ ret "";
+ }
+
+ auto i = 0u;
+ while (i < lim) {
+ auto curr = _str.substr(s, i, 1u);
+ if (_str.eq(curr, "%")) {
+ i += 1u;
+ if (i >= lim) {
+ log "unterminated conversion at end of string";
+ fail;
+ }
+ auto curr2 = _str.substr(s, i, 1u);
+ if (_str.eq(curr2, "%")) {
+ i += 1u;
+ } else {
+ buf = flush_buf(buf, pieces);
+ auto res = parse_conversion(s, i, lim);
+ pieces += res._0;
+ i = res._1;
+ }
+ } else {
+ buf += curr;
+ i += 1u;
+ }
+ }
+ buf = flush_buf(buf, pieces);
+ ret pieces;
+}
+
+fn peek_num(str s, uint i, uint lim) -> option.t[tup(uint, uint)] {
+ if (i >= lim) {
+ ret none[tup(uint, uint)];
+ }
+
+ auto c = s.(i);
+ if (!('0' as u8 <= c && c <= '9' as u8)) {
+ ret option.none[tup(uint, uint)];
+ }
+
+ auto n = (c - ('0' as u8)) as uint;
+ alt (peek_num(s, i + 1u, lim)) {
+ case (none[tup(uint, uint)]) {
+ ret some[tup(uint, uint)](tup(n, i + 1u));
+ }
+ case (some[tup(uint, uint)](?next)) {
+ auto m = next._0;
+ auto j = next._1;
+ ret some[tup(uint, uint)](tup(n * 10u + m, j));
+ }
+ }
+
+}
+
+fn parse_conversion(str s, uint i, uint lim) -> tup(piece, uint) {
+ auto parm = parse_parameter(s, i, lim);
+ auto flags = parse_flags(s, parm._1, lim);
+ auto width = parse_count(s, flags._1, lim);
+ auto prec = parse_precision(s, width._1, lim);
+ auto ty = parse_type(s, prec._1, lim);
+ ret tup(piece_conv(rec(param = parm._0,
+ flags = flags._0,
+ width = width._0,
+ precision = prec._0,
+ ty = ty._0)),
+ ty._1);
+}
+
+fn parse_parameter(str s, uint i, uint lim) -> tup(option.t[int], uint) {
+ if (i >= lim) {
+ ret tup(none[int], i);
+ }
+
+ auto num = peek_num(s, i, lim);
+ alt (num) {
+ case (none[tup(uint, uint)]) {
+ ret tup(none[int], i);
+ }
+ case (some[tup(uint, uint)](?t)) {
+ auto n = t._0;
+ auto j = t._1;
+ if (j < lim && s.(j) == '$' as u8) {
+ ret tup(some[int](n as int), j + 1u);
+ }
+ else {
+ ret tup(none[int], i);
+ }
+ }
+ }
+}
+
+fn parse_flags(str s, uint i, uint lim) -> tup(vec[flag], uint) {
+ let vec[flag] noflags = vec();
+
+ if (i >= lim) {
+ ret tup(noflags, i);
+ }
+
+ fn more_(flag f, str s, uint i, uint lim) -> tup(vec[flag], uint) {
+ auto next = parse_flags(s, i + 1u, lim);
+ auto rest = next._0;
+ auto j = next._1;
+ let vec[flag] curr = vec(f);
+ ret tup(curr + rest, j);
+ }
+
+ auto more = bind more_(_, s, i, lim);
+
+ auto f = s.(i);
+ if (f == ('-' as u8)) {
+ ret more(flag_left_justify);
+ } else if (f == ('0' as u8)) {
+ ret more(flag_left_zero_pad);
+ } else if (f == (' ' as u8)) {
+ ret more(flag_left_space_pad);
+ } else if (f == ('+' as u8)) {
+ ret more(flag_plus_if_positive);
+ } else if (f == ('#' as u8)) {
+ ret more(flag_alternate);
+ } else {
+ ret tup(noflags, i);
+ }
+}
+
+fn parse_count(str s, uint i, uint lim) -> tup(count, uint) {
+ if (i >= lim) {
+ ret tup(count_implied, i);
+ }
+
+ if (s.(i) == ('*' as u8)) {
+ auto param = parse_parameter(s, i + 1u, lim);
+ auto j = param._1;
+ alt (param._0) {
+ case (none[int]) {
+ ret tup(count_is_next_param, j);
+ }
+ case (some[int](?n)) {
+ ret tup(count_is_param(n), j);
+ }
+ }
+ } else {
+ auto num = peek_num(s, i, lim);
+ alt (num) {
+ case (none[tup(uint, uint)]) {
+ ret tup(count_implied, i);
+ }
+ case (some[tup(uint, uint)](?num)) {
+ ret tup(count_is(num._0 as int), num._1);
+ }
+ }
+ }
+}
+
+fn parse_precision(str s, uint i, uint lim) -> tup(count, uint) {
+ if (i >= lim) {
+ ret tup(count_implied, i);
+ }
+
+ if (s.(i) == '.' as u8) {
+ ret parse_count(s, i + 1u, lim);
+ } else {
+ ret tup(count_implied, i);
+ }
+}
+
+fn parse_type(str s, uint i, uint lim) -> tup(ty, uint) {
+ if (i >= lim) {
+ log "missing type in conversion";
+ fail;
+ }
+
+ auto t;
+ auto tstr = _str.substr(s, i, 1u);
+ if (_str.eq(tstr, "b")) {
+ t = ty_bool;
+ } else if (_str.eq(tstr, "s")) {
+ t = ty_str;
+ } else if (_str.eq(tstr, "c")) {
+ t = ty_char;
+ } else if (_str.eq(tstr, "d")
+ || _str.eq(tstr, "i")) {
+ // TODO: Do we really want two signed types here?
+ // How important is it to be printf compatible?
+ t = ty_int(signed);
+ } else if (_str.eq(tstr, "u")) {
+ t = ty_int(unsigned);
+ } else if (_str.eq(tstr, "x")) {
+ t = ty_hex(case_lower);
+ } else if (_str.eq(tstr, "X")) {
+ t = ty_hex(case_upper);
+ } else if (_str.eq(tstr, "t")) {
+ t = ty_bits;
+ } else {
+ log "unknown type in conversion";
+ fail;
+ }
+
+ ret tup(t, i + 1u);
+}
+
+fn pieces_to_expr(vec[piece] pieces, vec[@ast.expr] args) -> @ast.expr {
+
+ fn make_new_lit(common.span sp, ast.lit_ lit) -> @ast.expr {
+ auto sp_lit = @parser.spanned[ast.lit_](sp, sp, lit);
+ auto expr = ast.expr_lit(sp_lit, ast.ann_none);
+ ret @parser.spanned[ast.expr_](sp, sp, expr);
+ }
+
+ fn make_new_str(common.span sp, str s) -> @ast.expr {
+ auto lit = ast.lit_str(s);
+ ret make_new_lit(sp, lit);
+ }
+
+ fn make_new_uint(common.span sp, uint u) -> @ast.expr {
+ auto lit = ast.lit_uint(u);
+ ret make_new_lit(sp, lit);
+ }
+
+ fn make_add_expr(common.span sp,
+ @ast.expr lhs, @ast.expr rhs) -> @ast.expr {
+ auto binexpr = ast.expr_binary(ast.add, lhs, rhs, ast.ann_none);
+ ret @parser.spanned[ast.expr_](sp, sp, binexpr);
+ }
+
+ fn make_call(common.span sp, vec[ast.ident] fn_path,
+ vec[@ast.expr] args) -> @ast.expr {
+ let vec[ast.ident] path_idents = fn_path;
+ let vec[@ast.ty] path_types = vec();
+ auto path = rec(idents = path_idents, types = path_types);
+ auto sp_path = parser.spanned[ast.path_](sp, sp, path);
+ auto pathexpr = ast.expr_path(sp_path, none[ast.def], ast.ann_none);
+ auto sp_pathexpr = @parser.spanned[ast.expr_](sp, sp, pathexpr);
+ auto callexpr = ast.expr_call(sp_pathexpr, args, ast.ann_none);
+ auto sp_callexpr = @parser.spanned[ast.expr_](sp, sp, callexpr);
+ ret sp_callexpr;
+ }
+
+ fn make_new_conv(conv cnv, @ast.expr arg) -> @ast.expr {
+
+ auto unsupported = "conversion not supported in #fmt string";
+
+ alt (cnv.param) {
+ case (option.none[int]) {
+ }
+ case (_) {
+ log unsupported;
+ fail;
+ }
+ }
+
+ if (_vec.len[flag](cnv.flags) != 0u) {
+ log unsupported;
+ fail;
+ }
+
+ alt (cnv.width) {
+ case (count_implied) {
+ }
+ case (_) {
+ log unsupported;
+ fail;
+ }
+ }
+
+ alt (cnv.precision) {
+ case (count_implied) {
+ }
+ case (_) {
+ log unsupported;
+ fail;
+ }
+ }
+
+ alt (cnv.ty) {
+ case (ty_str) {
+ ret arg;
+ }
+ case (ty_int(?sign)) {
+ alt (sign) {
+ case (signed) {
+ let vec[str] path = vec("std", "_int", "to_str");
+ auto radix_expr = make_new_uint(arg.span, 10u);
+ let vec[@ast.expr] args = vec(arg, radix_expr);
+ ret make_call(arg.span, path, args);
+ }
+ case (unsigned) {
+ let vec[str] path = vec("std", "_uint", "to_str");
+ auto radix_expr = make_new_uint(arg.span, 10u);
+ let vec[@ast.expr] args = vec(arg, radix_expr);
+ ret make_call(arg.span, path, args);
+ }
+ }
+ }
+ case (_) {
+ log unsupported;
+ fail;
+ }
+ }
+ }
+
+ fn log_conv(conv c) {
+ alt (c.param) {
+ case (some[int](?p)) {
+ log "param: " + std._int.to_str(p, 10u);
+ }
+ case (_) {
+ log "param: none";
+ }
+ }
+ for (flag f in c.flags) {
+ alt (f) {
+ case (flag_left_justify) {
+ log "flag: left justify";
+ }
+ case (flag_left_zero_pad) {
+ log "flag: left zero pad";
+ }
+ case (flag_left_space_pad) {
+ log "flag: left space pad";
+ }
+ case (flag_plus_if_positive) {
+ log "flag: plus if positive";
+ }
+ case (flag_alternate) {
+ log "flag: alternate";
+ }
+ }
+ }
+ alt (c.width) {
+ case (count_is(?i)) {
+ log "width: count is " + std._int.to_str(i, 10u);
+ }
+ case (count_is_param(?i)) {
+ log "width: count is param " + std._int.to_str(i, 10u);
+ }
+ case (count_is_next_param) {
+ log "width: count is next param";
+ }
+ case (count_implied) {
+ log "width: count is implied";
+ }
+ }
+ alt (c.precision) {
+ case (count_is(?i)) {
+ log "prec: count is " + std._int.to_str(i, 10u);
+ }
+ case (count_is_param(?i)) {
+ log "prec: count is param " + std._int.to_str(i, 10u);
+ }
+ case (count_is_next_param) {
+ log "prec: count is next param";
+ }
+ case (count_implied) {
+ log "prec: count is implied";
+ }
+ }
+ alt (c.ty) {
+ case (ty_bool) {
+ log "type: bool";
+ }
+ case (ty_str) {
+ log "type: str";
+ }
+ case (ty_char) {
+ log "type: char";
+ }
+ case (ty_int(?s)) {
+ alt (s) {
+ case (signed) {
+ log "type: signed";
+ }
+ case (unsigned) {
+ log "type: unsigned";
+ }
+ }
+ }
+ case (ty_bits) {
+ log "type: bits";
+ }
+ case (ty_hex(?cs)) {
+ alt (cs) {
+ case (case_upper) {
+ log "type: uhex";
+ }
+ case (case_lower) {
+ log "type: lhex";
+ }
+ }
+ }
+ }
+ }
+
+ auto sp = args.(0).span;
+ auto n = 0u;
+ auto tmp_expr = make_new_str(sp, "");
+
+ for (piece p in pieces) {
+ alt (p) {
+ case (piece_string(?s)) {
+ auto s_expr = make_new_str(sp, s);
+ tmp_expr = make_add_expr(sp, tmp_expr, s_expr);
+ }
+ case (piece_conv(?conv)) {
+ if (n >= _vec.len[@ast.expr](args)) {
+ log "too many conversions in #fmt string";
+ fail;
+ }
+
+ // TODO: Remove debug logging
+ // log "Building conversion:";
+ // log_conv(conv);
+
+ n += 1u;
+ auto arg_expr = args.(n);
+ auto c_expr = make_new_conv(conv, arg_expr);
+ tmp_expr = make_add_expr(sp, tmp_expr, c_expr);
+ }
+ }
+ }
+
+ // TODO: Remove this debug logging
+ // log "dumping expanded ast:";
+ // log pretty.print_expr(tmp_expr);
+ ret tmp_expr;
+}
+
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C ../.. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/comp/front/lexer.rs b/src/comp/front/lexer.rs
index a45b1113..0e15e3d8 100644
--- a/src/comp/front/lexer.rs
+++ b/src/comp/front/lexer.rs
@@ -1,4 +1,4 @@
-import std._io.stdio_reader;
+import std.io.stdio_reader;
import std._str;
import std.map;
import std.map.hashmap;
@@ -90,7 +90,6 @@ fn new_reader(stdio_reader rdr, str filename) -> reader
}
auto keywords = new_str_hash[token.token]();
- auto reserved = new_str_hash[()]();
keywords.insert("mod", token.MOD);
keywords.insert("use", token.USE);
@@ -191,6 +190,16 @@ fn new_reader(stdio_reader rdr, str filename) -> reader
keywords.insert("f32", token.MACH(common.ty_f32));
keywords.insert("f64", token.MACH(common.ty_f64));
+ auto reserved = new_str_hash[()]();
+
+ reserved.insert("f16", ()); // IEEE 754-2008 'binary16' interchange fmt
+ reserved.insert("f80", ()); // IEEE 754-1985 'extended'
+ reserved.insert("f128", ()); // IEEE 754-2008 'binary128'
+ reserved.insert("m32", ()); // IEEE 754-2008 'decimal32'
+ reserved.insert("m64", ()); // IEEE 754-2008 'decimal64'
+ reserved.insert("m128", ()); // IEEE 754-2008 'decimal128'
+ reserved.insert("dec", ()); // One of m32, m64, m128
+
ret reader(rdr, filename, rdr.getc() as char, rdr.getc() as char,
1u, 0u, 1u, 0u, keywords, reserved);
}
@@ -425,6 +434,12 @@ impure fn next_token(reader rdr) -> token.token {
ret kwds.get(accum_str);
}
+ auto rsvd = rdr.get_reserved();
+ if (rsvd.contains_key(accum_str)) {
+ log "reserved keyword";
+ fail;
+ }
+
ret token.IDENT(accum_str);
}
@@ -650,12 +665,9 @@ impure fn next_token(reader rdr) -> token.token {
case ('%') {
ret binop(rdr, token.PERCENT);
}
-
}
- log "lexer stopping at ";
- log c;
- ret token.EOF;
+ fail;
}
diff --git a/src/comp/front/parser.rs b/src/comp/front/parser.rs
index e629683c..bb7b8558 100644
--- a/src/comp/front/parser.rs
+++ b/src/comp/front/parser.rs
@@ -1,4 +1,4 @@
-import std._io;
+import std.io;
import std._vec;
import std._str;
import std.option;
@@ -8,6 +8,7 @@ import std.map.hashmap;
import driver.session;
import util.common;
+import util.common.filename;
import util.common.append;
import util.common.span;
import util.common.new_str_hash;
@@ -17,6 +18,11 @@ tag restriction {
RESTRICT_NO_CALL_EXPRS;
}
+tag file_type {
+ CRATE_FILE;
+ SOURCE_FILE;
+}
+
state type parser =
state obj {
fn peek() -> token.token;
@@ -24,28 +30,37 @@ state type parser =
impure fn err(str s);
impure fn restrict(restriction r);
fn get_restriction() -> restriction;
+ fn get_file_type() -> file_type;
+ fn get_env() -> eval.env;
fn get_session() -> session.session;
fn get_span() -> common.span;
fn next_def_id() -> ast.def_id;
+ fn get_prec_table() -> vec[op_spec];
};
impure fn new_parser(session.session sess,
- ast.crate_num crate, str path) -> parser {
+ eval.env env,
+ ast.crate_num crate,
+ str path) -> parser {
state obj stdio_parser(session.session sess,
+ eval.env env,
+ file_type ftype,
mutable token.token tok,
mutable common.pos lo,
mutable common.pos hi,
mutable ast.def_num def,
mutable restriction res,
ast.crate_num crate,
- lexer.reader rdr)
+ lexer.reader rdr,
+ vec[op_spec] precs)
{
fn peek() -> token.token {
- // log token.to_str(tok);
ret tok;
}
impure fn bump() {
+ // log rdr.get_filename()
+ // + ":" + common.istr(lo.line as int);
tok = lexer.next_token(rdr);
lo = rdr.get_mark_pos();
hi = rdr.get_curr_pos();
@@ -78,12 +93,29 @@ impure fn new_parser(session.session sess,
def += 1;
ret tup(crate, def);
}
+
+ fn get_file_type() -> file_type {
+ ret ftype;
+ }
+
+ fn get_env() -> eval.env {
+ ret env;
+ }
+
+ fn get_prec_table() -> vec[op_spec] {
+ ret precs;
+ }
}
- auto srdr = _io.new_stdio_reader(path);
+ auto ftype = SOURCE_FILE;
+ if (_str.ends_with(path, ".rc")) {
+ ftype = CRATE_FILE;
+ }
+ auto srdr = io.new_stdio_reader(path);
auto rdr = lexer.new_reader(srdr, path);
auto npos = rdr.get_curr_pos();
- ret stdio_parser(sess, lexer.next_token(rdr),
- npos, npos, 0, UNRESTRICTED, crate, rdr);
+ ret stdio_parser(sess, env, ftype, lexer.next_token(rdr),
+ npos, npos, 0, UNRESTRICTED, crate, rdr,
+ prec_table());
}
impure fn unexpected(parser p, token.token t) {
@@ -121,9 +153,23 @@ impure fn parse_ident(parser p) -> ast.ident {
}
-impure fn parse_str_lit(parser p) -> ast.ident {
+/* FIXME: gross hack copied from rustboot to make certain configuration-based
+ * decisions work at build-time. We should probably change it to use a
+ * lexical sytnax-extension or something similar. For now we just imitate
+ * rustboot.
+ */
+impure fn parse_str_lit_or_env_ident(parser p) -> ast.ident {
alt (p.peek()) {
case (token.LIT_STR(?s)) { p.bump(); ret s; }
+ case (token.IDENT(?i)) {
+ auto v = eval.lookup(p.get_session(), p.get_env(),
+ p.get_span(), i);
+ if (!eval.val_is_str(v)) {
+ p.err("expecting string-valued variable");
+ }
+ p.bump();
+ ret eval.val_as_str(v);
+ }
case (_) {
p.err("expecting string literal");
fail;
@@ -132,7 +178,8 @@ impure fn parse_str_lit(parser p) -> ast.ident {
}
-impure fn parse_ty_fn(parser p, ast.span lo) -> ast.ty_ {
+impure fn parse_ty_fn(ast.proto proto, parser p,
+ ast.span lo) -> ast.ty_ {
impure fn parse_fn_input_ty(parser p) -> rec(ast.mode mode, @ast.ty ty) {
auto mode;
if (p.peek() == token.BINOP(token.AND)) {
@@ -158,6 +205,10 @@ impure fn parse_ty_fn(parser p, ast.span lo) -> ast.ty_ {
auto inputs = parse_seq[rec(ast.mode mode, @ast.ty ty)](token.LPAREN,
token.RPAREN, some(token.COMMA), f, p);
+ // FIXME: dropping constrs on the floor at the moment.
+ // pick them up when they're used by typestate pass.
+ parse_constrs(p);
+
let @ast.ty output;
if (p.peek() == token.RARROW) {
p.bump();
@@ -166,20 +217,33 @@ impure fn parse_ty_fn(parser p, ast.span lo) -> ast.ty_ {
output = @spanned(lo, inputs.span, ast.ty_nil);
}
- ret ast.ty_fn(inputs.node, output);
+ ret ast.ty_fn(proto, inputs.node, output);
+}
+
+impure fn parse_proto(parser p) -> ast.proto {
+ alt (p.peek()) {
+ case (token.ITER) { p.bump(); ret ast.proto_iter; }
+ case (token.FN) { p.bump(); ret ast.proto_fn; }
+ case (?t) { unexpected(p, t); }
+ }
+ fail;
}
impure fn parse_ty_obj(parser p, &mutable ast.span hi) -> ast.ty_ {
expect(p, token.OBJ);
impure fn parse_method_sig(parser p) -> ast.ty_method {
auto flo = p.get_span();
- expect(p, token.FN);
+
+ // FIXME: do something with this, currently it's dropped on the floor.
+ let ast.effect eff = parse_effect(p);
+ let ast.proto proto = parse_proto(p);
auto ident = parse_ident(p);
- auto f = parse_ty_fn(p, flo);
+ auto f = parse_ty_fn(proto, p, flo);
expect(p, token.SEMI);
alt (f) {
- case (ast.ty_fn(?inputs, ?output)) {
- ret rec(ident=ident, inputs=inputs, output=output);
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
+ ret rec(proto=proto, ident=ident,
+ inputs=inputs, output=output);
}
}
fail;
@@ -200,10 +264,72 @@ impure fn parse_ty_field(parser p) -> ast.ty_field {
ret rec(ident=id, ty=ty);
}
+impure fn parse_constr_arg(parser p) -> @ast.constr_arg {
+ auto lo = p.get_span();
+ auto carg = ast.carg_base;
+ if (p.peek() == token.BINOP(token.STAR)) {
+ p.bump();
+ } else {
+ carg = ast.carg_ident(parse_ident(p));
+ }
+ ret @spanned(lo, lo, carg);
+}
+
+impure fn parse_ty_constr(parser p) -> @ast.constr {
+ auto lo = p.get_span();
+ auto path = parse_path(p, GREEDY);
+ auto pf = parse_constr_arg;
+ auto args = parse_seq[@ast.constr_arg](token.LPAREN,
+ token.RPAREN,
+ some(token.COMMA), pf, p);
+ auto hi = args.span;
+ ret @spanned(lo, hi, rec(path=path, args=args.node));
+}
+
+impure fn parse_constrs(parser p) -> common.spanned[vec[@ast.constr]] {
+ auto lo = p.get_span();
+ auto hi = lo;
+ let vec[@ast.constr] constrs = vec();
+ if (p.peek() == token.COLON) {
+ p.bump();
+ let bool more = true;
+ while (more) {
+ alt (p.peek()) {
+ case (token.IDENT(_)) {
+ auto constr = parse_ty_constr(p);
+ hi = constr.span;
+ append[@ast.constr](constrs, constr);
+ if (p.peek() == token.COMMA) {
+ p.bump();
+ more = false;
+ }
+ }
+ case (_) { more = false; }
+ }
+ }
+ }
+ ret spanned(lo, hi, constrs);
+}
+
+impure fn parse_ty_constrs(@ast.ty t, parser p) -> @ast.ty {
+ if (p.peek() == token.COLON) {
+ auto constrs = parse_constrs(p);
+ ret @spanned(t.span, constrs.span,
+ ast.ty_constr(t, constrs.node));
+ }
+ ret t;
+}
+
impure fn parse_ty(parser p) -> @ast.ty {
auto lo = p.get_span();
auto hi = lo;
let ast.ty_ t;
+
+ // FIXME: do something with these; currently they're
+ // dropped on the floor.
+ let ast.effect eff = parse_effect(p);
+ let ast.layer lyr = parse_layer(p);
+
alt (p.peek()) {
case (token.BOOL) { p.bump(); t = ast.ty_bool; }
case (token.INT) { p.bump(); t = ast.ty_int; }
@@ -275,9 +401,20 @@ impure fn parse_ty(parser p) -> @ast.ty {
case (token.FN) {
auto flo = p.get_span();
p.bump();
- t = parse_ty_fn(p, flo);
+ t = parse_ty_fn(ast.proto_fn, p, flo);
+ alt (t) {
+ case (ast.ty_fn(_, _, ?out)) {
+ hi = out.span;
+ }
+ }
+ }
+
+ case (token.ITER) {
+ auto flo = p.get_span();
+ p.bump();
+ t = parse_ty_fn(ast.proto_iter, p, flo);
alt (t) {
- case (ast.ty_fn(_, ?out)) {
+ case (ast.ty_fn(_, _, ?out)) {
hi = out.span;
}
}
@@ -297,7 +434,8 @@ impure fn parse_ty(parser p) -> @ast.ty {
fail;
}
}
- ret @spanned(lo, hi, t);
+
+ ret parse_ty_constrs(@spanned(lo, hi, t), p);
}
impure fn parse_arg(parser p) -> ast.arg {
@@ -341,9 +479,9 @@ impure fn parse_seq[T](token.token bra,
ret spanned(lo, hi, v);
}
-impure fn parse_lit(parser p) -> option.t[ast.lit] {
+impure fn parse_lit(parser p) -> ast.lit {
auto lo = p.get_span();
- let ast.lit_ lit;
+ let ast.lit_ lit = ast.lit_nil;
alt (p.peek()) {
case (token.LIT_INT(?i)) {
p.bump();
@@ -369,12 +507,11 @@ impure fn parse_lit(parser p) -> option.t[ast.lit] {
p.bump();
lit = ast.lit_str(s);
}
- case (_) {
- lit = ast.lit_nil; // FIXME: typestate bug requires this
- ret none[ast.lit];
+ case (?t) {
+ unexpected(p, t);
}
}
- ret some(spanned(lo, lo, lit));
+ ret spanned(lo, lo, lit);
}
fn is_ident(token.token t) -> bool {
@@ -520,14 +657,37 @@ impure fn parse_bottom_expr(parser p) -> @ast.expr {
case (token.REC) {
p.bump();
- auto pf = parse_field;
- auto fs =
- parse_seq[ast.field](token.LPAREN,
- token.RPAREN,
- some(token.COMMA),
- pf, p);
- hi = fs.span;
- ex = ast.expr_rec(fs.node, ast.ann_none);
+ expect(p, token.LPAREN);
+ auto fields = vec(parse_field(p));
+
+ auto more = true;
+ auto base = none[@ast.expr];
+ while (more) {
+ alt (p.peek()) {
+ case (token.RPAREN) {
+ hi = p.get_span();
+ p.bump();
+ more = false;
+ }
+ case (token.WITH) {
+ p.bump();
+ base = some[@ast.expr](parse_expr(p));
+ hi = p.get_span();
+ expect(p, token.RPAREN);
+ more = false;
+ }
+ case (token.COMMA) {
+ p.bump();
+ fields += parse_field(p);
+ }
+ case (?t) {
+ unexpected(p, t);
+ }
+ }
+
+ }
+
+ ex = ast.expr_rec(fields, base, ast.ann_none);
}
case (token.BIND) {
@@ -554,22 +714,124 @@ impure fn parse_bottom_expr(parser p) -> @ast.expr {
ex = ast.expr_bind(e, es.node, ast.ann_none);
}
- case (_) {
- alt (parse_lit(p)) {
- case (some[ast.lit](?lit)) {
- hi = lit.span;
- ex = ast.expr_lit(@lit, ast.ann_none);
+ case (token.POUND) {
+ p.bump();
+ auto pth = parse_path(p, GREEDY);
+ auto pf = parse_expr;
+ auto es = parse_seq[@ast.expr](token.LPAREN,
+ token.RPAREN,
+ some(token.COMMA),
+ pf, p);
+ hi = es.span;
+ ex = expand_syntax_ext(p, es.span, pth, es.node,
+ none[@ast.expr]);
+ }
+
+ case (token.FAIL) {
+ p.bump();
+ ex = ast.expr_fail;
+ }
+
+ case (token.LOG) {
+ p.bump();
+ auto e = parse_expr(p);
+ auto hi = e.span;
+ ex = ast.expr_log(e);
+ }
+
+ case (token.CHECK) {
+ p.bump();
+ alt (p.peek()) {
+ case (token.LPAREN) {
+ auto e = parse_expr(p);
+ auto hi = e.span;
+ ex = ast.expr_check_expr(e);
}
- case (none[ast.lit]) {
- p.err("expecting expression");
+ case (_) {
+ p.get_session().unimpl("constraint-check stmt");
}
}
}
+
+ case (token.RET) {
+ p.bump();
+ alt (p.peek()) {
+ case (token.SEMI) {
+ ex = ast.expr_ret(none[@ast.expr]);
+ }
+ case (_) {
+ auto e = parse_expr(p);
+ hi = e.span;
+ ex = ast.expr_ret(some[@ast.expr](e));
+ }
+ }
+ }
+
+ case (token.PUT) {
+ p.bump();
+ alt (p.peek()) {
+ case (token.SEMI) {
+ ex = ast.expr_put(none[@ast.expr]);
+ }
+ case (_) {
+ auto e = parse_expr(p);
+ hi = e.span;
+ ex = ast.expr_put(some[@ast.expr](e));
+ }
+ }
+ }
+
+ case (token.BE) {
+ p.bump();
+ auto e = parse_expr(p);
+ // FIXME: Is this the right place for this check?
+ if /*check*/ (ast.is_call_expr(e)) {
+ hi = e.span;
+ ex = ast.expr_be(e);
+ }
+ else {
+ p.err("Non-call expression in tail call");
+ }
+ }
+
+ case (_) {
+ auto lit = parse_lit(p);
+ hi = lit.span;
+ ex = ast.expr_lit(@lit, ast.ann_none);
+ }
}
ret @spanned(lo, hi, ex);
}
+/*
+ * FIXME: This is a crude approximation of the syntax-extension system,
+ * for purposes of prototyping and/or hard-wiring any extensions we
+ * wish to use while bootstrapping. The eventual aim is to permit
+ * loading rust crates to process extensions, but this will likely
+ * require a rust-based frontend, or an ocaml-FFI-based connection to
+ * rust crates. At the moment we have neither.
+ */
+
+impure fn expand_syntax_ext(parser p, ast.span sp,
+ &ast.path path, vec[@ast.expr] args,
+ option.t[@ast.expr] body) -> ast.expr_ {
+
+ check (_vec.len[ast.ident](path.node.idents) > 0u);
+ auto extname = path.node.idents.(0);
+ if (_str.eq(extname, "fmt")) {
+ auto expanded = extfmt.expand_syntax_ext(args, body);
+ auto newexpr = ast.expr_ext(path, args, body,
+ expanded,
+ ast.ann_none);
+
+ ret newexpr;
+ } else {
+ p.err("unknown syntax extension");
+ fail;
+ }
+}
+
impure fn extend_expr_by_ident(parser p, span lo, span hi,
@ast.expr e, ast.ident i) -> @ast.expr {
auto e_ = e.node;
@@ -705,6 +967,13 @@ impure fn parse_prefix_expr(parser p) -> @ast.expr {
ex = ast.expr_unary(ast.box, e, ast.ann_none);
}
+ case (token.MUTABLE) {
+ p.bump();
+ auto e = parse_prefix_expr(p);
+ hi = e.span;
+ ex = ast.expr_unary(ast._mutable, e, ast.ann_none);
+ }
+
case (_) {
ret parse_dot_or_call_expr(p);
}
@@ -712,144 +981,73 @@ impure fn parse_prefix_expr(parser p) -> @ast.expr {
ret @spanned(lo, hi, ex);
}
-impure fn parse_binops(parser p,
- (impure fn(parser) -> @ast.expr) sub,
- vec[tup(token.binop, ast.binop)] ops)
+type op_spec = rec(token.token tok, ast.binop op, int prec);
+
+// FIXME make this a const, don't store it in parser state
+fn prec_table() -> vec[op_spec] {
+ ret vec(rec(tok=token.BINOP(token.STAR), op=ast.mul, prec=11),
+ rec(tok=token.BINOP(token.SLASH), op=ast.div, prec=11),
+ rec(tok=token.BINOP(token.PERCENT), op=ast.rem, prec=11),
+ rec(tok=token.BINOP(token.PLUS), op=ast.add, prec=10),
+ rec(tok=token.BINOP(token.MINUS), op=ast.sub, prec=10),
+ rec(tok=token.BINOP(token.LSL), op=ast.lsl, prec=9),
+ rec(tok=token.BINOP(token.LSR), op=ast.lsr, prec=9),
+ rec(tok=token.BINOP(token.ASR), op=ast.asr, prec=9),
+ rec(tok=token.BINOP(token.AND), op=ast.bitand, prec=8),
+ rec(tok=token.BINOP(token.CARET), op=ast.bitxor, prec=6),
+ rec(tok=token.BINOP(token.OR), op=ast.bitor, prec=6),
+ // ast.mul is a bogus placeholder here, AS is special
+ // cased in parse_more_binops
+ rec(tok=token.AS, op=ast.mul, prec=5),
+ rec(tok=token.LT, op=ast.lt, prec=4),
+ rec(tok=token.LE, op=ast.le, prec=4),
+ rec(tok=token.GE, op=ast.ge, prec=4),
+ rec(tok=token.GT, op=ast.gt, prec=4),
+ rec(tok=token.EQEQ, op=ast.eq, prec=3),
+ rec(tok=token.NE, op=ast.ne, prec=3),
+ rec(tok=token.ANDAND, op=ast.and, prec=2),
+ rec(tok=token.OROR, op=ast.or, prec=1));
+}
+
+impure fn parse_binops(parser p) -> @ast.expr {
+ ret parse_more_binops(p, parse_prefix_expr(p), 0);
+}
+
+impure fn parse_more_binops(parser p, @ast.expr lhs, int min_prec)
-> @ast.expr {
- auto lo = p.get_span();
- auto hi = lo;
- auto e = sub(p);
- auto more = true;
- while (more) {
- more = false;
- for (tup(token.binop, ast.binop) pair in ops) {
- alt (p.peek()) {
- case (token.BINOP(?op)) {
- if (pair._0 == op) {
- p.bump();
- auto rhs = sub(p);
- hi = rhs.span;
- auto exp = ast.expr_binary(pair._1, e, rhs,
- ast.ann_none);
- e = @spanned(lo, hi, exp);
- more = true;
- }
- }
- case (_) { /* fall through */ }
- }
- }
+ // Magic nonsense to work around rustboot bug
+ fn op_eq(token.token a, token.token b) -> bool {
+ if (a == b) {ret true;}
+ else {ret false;}
}
- ret e;
-}
-
-impure fn parse_binary_exprs(parser p,
- (impure fn(parser) -> @ast.expr) sub,
- vec[tup(token.token, ast.binop)] ops)
- -> @ast.expr {
- auto lo = p.get_span();
- auto hi = lo;
- auto e = sub(p);
- auto more = true;
- while (more) {
- more = false;
- for (tup(token.token, ast.binop) pair in ops) {
- if (pair._0 == p.peek()) {
- p.bump();
- auto rhs = sub(p);
- hi = rhs.span;
- auto exp = ast.expr_binary(pair._1, e, rhs, ast.ann_none);
- e = @spanned(lo, hi, exp);
- more = true;
- }
- }
- }
- ret e;
-}
-
-impure fn parse_factor_expr(parser p) -> @ast.expr {
- auto sub = parse_prefix_expr;
- ret parse_binops(p, sub, vec(tup(token.STAR, ast.mul),
- tup(token.SLASH, ast.div),
- tup(token.PERCENT, ast.rem)));
-}
-
-impure fn parse_term_expr(parser p) -> @ast.expr {
- auto sub = parse_factor_expr;
- ret parse_binops(p, sub, vec(tup(token.PLUS, ast.add),
- tup(token.MINUS, ast.sub)));
-}
-
-impure fn parse_shift_expr(parser p) -> @ast.expr {
- auto sub = parse_term_expr;
- ret parse_binops(p, sub, vec(tup(token.LSL, ast.lsl),
- tup(token.LSR, ast.lsr),
- tup(token.ASR, ast.asr)));
-}
-
-impure fn parse_bitand_expr(parser p) -> @ast.expr {
- auto sub = parse_shift_expr;
- ret parse_binops(p, sub, vec(tup(token.AND, ast.bitand)));
-}
-
-impure fn parse_bitxor_expr(parser p) -> @ast.expr {
- auto sub = parse_bitand_expr;
- ret parse_binops(p, sub, vec(tup(token.CARET, ast.bitxor)));
-}
-
-impure fn parse_bitor_expr(parser p) -> @ast.expr {
- auto sub = parse_bitxor_expr;
- ret parse_binops(p, sub, vec(tup(token.OR, ast.bitor)));
-}
-
-impure fn parse_cast_expr(parser p) -> @ast.expr {
- auto lo = p.get_span();
- auto e = parse_bitor_expr(p);
- auto hi = e.span;
- while (true) {
- alt (p.peek()) {
- case (token.AS) {
- p.bump();
- auto t = parse_ty(p);
- hi = t.span;
- e = @spanned(lo, hi, ast.expr_cast(e, t, ast.ann_none));
- }
-
- case (_) {
- ret e;
+ auto peeked = p.peek();
+ for (op_spec cur in p.get_prec_table()) {
+ if (cur.prec > min_prec && op_eq(cur.tok, peeked)) {
+ p.bump();
+ alt (cur.tok) {
+ case (token.AS) {
+ auto rhs = parse_ty(p);
+ auto _as = ast.expr_cast(lhs, rhs, ast.ann_none);
+ auto span = @spanned(lhs.span, rhs.span, _as);
+ ret parse_more_binops(p, span, min_prec);
+ }
+ case (_) {
+ auto rhs = parse_more_binops(p, parse_prefix_expr(p),
+ cur.prec);
+ auto bin = ast.expr_binary(cur.op, lhs, rhs,
+ ast.ann_none);
+ auto span = @spanned(lhs.span, rhs.span, bin);
+ ret parse_more_binops(p, span, min_prec);
+ }
}
}
}
- ret e;
-}
-
-impure fn parse_relational_expr(parser p) -> @ast.expr {
- auto sub = parse_cast_expr;
- ret parse_binary_exprs(p, sub, vec(tup(token.LT, ast.lt),
- tup(token.LE, ast.le),
- tup(token.GE, ast.ge),
- tup(token.GT, ast.gt)));
-}
-
-
-impure fn parse_equality_expr(parser p) -> @ast.expr {
- auto sub = parse_relational_expr;
- ret parse_binary_exprs(p, sub, vec(tup(token.EQEQ, ast.eq),
- tup(token.NE, ast.ne)));
-}
-
-impure fn parse_and_expr(parser p) -> @ast.expr {
- auto sub = parse_equality_expr;
- ret parse_binary_exprs(p, sub, vec(tup(token.ANDAND, ast.and)));
-}
-
-impure fn parse_or_expr(parser p) -> @ast.expr {
- auto sub = parse_and_expr;
- ret parse_binary_exprs(p, sub, vec(tup(token.OROR, ast.or)));
+ ret lhs;
}
impure fn parse_assign_expr(parser p) -> @ast.expr {
auto lo = p.get_span();
- auto lhs = parse_or_expr(p);
+ auto lhs = parse_binops(p);
alt (p.peek()) {
case (token.EQ) {
p.bump();
@@ -901,6 +1099,7 @@ impure fn parse_if_expr(parser p) -> @ast.expr {
}
case (_) { /* fall through */ }
}
+
ret @spanned(lo, hi, ast.expr_if(cond, thn, els, ast.ann_none));
}
@@ -935,8 +1134,14 @@ impure fn parse_head_local(parser p) -> @ast.decl {
impure fn parse_for_expr(parser p) -> @ast.expr {
auto lo = p.get_span();
auto hi = lo;
+ auto is_each = false;
expect(p, token.FOR);
+ if (p.peek() == token.EACH) {
+ is_each = true;
+ p.bump();
+ }
+
expect (p, token.LPAREN);
auto decl = parse_head_local(p);
@@ -946,9 +1151,16 @@ impure fn parse_for_expr(parser p) -> @ast.expr {
expect(p, token.RPAREN);
auto body = parse_block(p);
hi = body.span;
- ret @spanned(lo, hi, ast.expr_for(decl, seq, body, ast.ann_none));
+ if (is_each) {
+ ret @spanned(lo, hi, ast.expr_for_each(decl, seq, body,
+ ast.ann_none));
+ } else {
+ ret @spanned(lo, hi, ast.expr_for(decl, seq, body,
+ ast.ann_none));
+ }
}
+
impure fn parse_while_expr(parser p) -> @ast.expr {
auto lo = p.get_span();
auto hi = lo;
@@ -996,6 +1208,23 @@ impure fn parse_alt_expr(parser p) -> @ast.expr {
auto block = parse_block(p);
arms += vec(rec(pat=pat, block=block, index=index));
}
+
+ // FIXME: this is a vestigial form left over from
+ // rustboot, we're keeping it here for source-compat
+ // for the time being but it should be flushed out
+ // once we've bootstrapped. When we see 'else {' here,
+ // we pretend we saw 'case (_) {'. It has the same
+ // meaning, and only exists due to the cexp/pexp split
+ // in rustboot, which we're not maintaining.
+
+ case (token.ELSE) {
+ p.bump();
+ auto hi = p.get_span();
+ auto pat = @spanned(lo, hi, ast.pat_wild(ast.ann_none));
+ auto index = index_arm(pat);
+ auto block = parse_block(p);
+ arms += vec(rec(pat=pat, block=block, index=index));
+ }
case (token.RBRACE) { /* empty */ }
case (?tok) {
p.err("expected 'case' or '}' when parsing 'alt' statement " +
@@ -1062,10 +1291,12 @@ impure fn parse_initializer(parser p) -> option.t[@ast.expr] {
impure fn parse_pat(parser p) -> @ast.pat {
auto lo = p.get_span();
+ auto hi = lo;
+ auto pat;
- auto pat = ast.pat_wild(ast.ann_none); // FIXME: typestate bug
alt (p.peek()) {
case (token.UNDERSCORE) {
+ hi = p.get_span();
p.bump();
pat = ast.pat_wild(ast.ann_none);
}
@@ -1073,6 +1304,7 @@ impure fn parse_pat(parser p) -> @ast.pat {
p.bump();
alt (p.peek()) {
case (token.IDENT(?id)) {
+ hi = p.get_span();
p.bump();
pat = ast.pat_bind(id, p.next_def_id(), ast.ann_none);
}
@@ -1085,13 +1317,16 @@ impure fn parse_pat(parser p) -> @ast.pat {
}
case (token.IDENT(?id)) {
auto tag_path = parse_path(p, GREEDY);
+ hi = tag_path.span;
let vec[@ast.pat] args;
alt (p.peek()) {
case (token.LPAREN) {
auto f = parse_pat;
- args = parse_seq[@ast.pat](token.LPAREN, token.RPAREN,
- some(token.COMMA), f, p).node;
+ auto a = parse_seq[@ast.pat](token.LPAREN, token.RPAREN,
+ some(token.COMMA), f, p);
+ args = a.node;
+ hi = a.span;
}
case (_) { args = vec(); }
}
@@ -1099,13 +1334,13 @@ impure fn parse_pat(parser p) -> @ast.pat {
pat = ast.pat_tag(tag_path, args, none[ast.variant_def],
ast.ann_none);
}
- case (?tok) {
- p.err("expected pattern but found " + token.to_str(tok));
- fail;
+ case (_) {
+ auto lit = parse_lit(p);
+ hi = lit.span;
+ pat = ast.pat_lit(@lit, ast.ann_none);
}
}
- auto hi = p.get_span();
ret @spanned(lo, hi, pat);
}
@@ -1147,49 +1382,22 @@ impure fn parse_auto(parser p) -> @ast.decl {
}
impure fn parse_stmt(parser p) -> @ast.stmt {
- auto lo = p.get_span();
- alt (p.peek()) {
-
- case (token.LOG) {
- p.bump();
- auto e = parse_expr(p);
- auto hi = p.get_span();
- ret @spanned(lo, hi, ast.stmt_log(e));
- }
-
- case (token.CHECK) {
- p.bump();
- alt (p.peek()) {
- case (token.LPAREN) {
- auto e = parse_expr(p);
- auto hi = p.get_span();
- ret @spanned(lo, hi, ast.stmt_check_expr(e));
- }
- case (_) {
- p.get_session().unimpl("constraint-check stmt");
- }
- }
- }
+ if (p.get_file_type() == SOURCE_FILE) {
+ ret parse_source_stmt(p);
+ } else {
+ ret parse_crate_stmt(p);
+ }
+}
- case (token.FAIL) {
- p.bump();
- ret @spanned(lo, p.get_span(), ast.stmt_fail);
- }
+impure fn parse_crate_stmt(parser p) -> @ast.stmt {
+ auto cdir = parse_crate_directive(p);
+ ret @spanned(cdir.span, cdir.span,
+ ast.stmt_crate_directive(@cdir));
+}
- case (token.RET) {
- p.bump();
- alt (p.peek()) {
- case (token.SEMI) {
- ret @spanned(lo, p.get_span(),
- ast.stmt_ret(none[@ast.expr]));
- }
- case (_) {
- auto e = parse_expr(p);
- ret @spanned(lo, e.span,
- ast.stmt_ret(some[@ast.expr](e)));
- }
- }
- }
+impure fn parse_source_stmt(parser p) -> @ast.stmt {
+ auto lo = p.get_span();
+ alt (p.peek()) {
case (token.LET) {
auto decl = parse_let(p);
@@ -1260,31 +1468,28 @@ fn index_block(vec[@ast.stmt] stmts, option.t[@ast.expr] expr) -> ast.block_ {
auto index = new_str_hash[uint]();
auto u = 0u;
for (@ast.stmt s in stmts) {
- // FIXME: typestate bug requires we do this up top, not
- // down below loop. Sigh.
- u += 1u;
alt (s.node) {
case (ast.stmt_decl(?d)) {
alt (d.node) {
case (ast.decl_local(?loc)) {
- index.insert(loc.ident, u-1u);
+ index.insert(loc.ident, u);
}
case (ast.decl_item(?it)) {
alt (it.node) {
case (ast.item_fn(?i, _, _, _, _)) {
- index.insert(i, u-1u);
+ index.insert(i, u);
}
case (ast.item_mod(?i, _, _)) {
- index.insert(i, u-1u);
+ index.insert(i, u);
}
case (ast.item_ty(?i, _, _, _, _)) {
- index.insert(i, u-1u);
+ index.insert(i, u);
}
case (ast.item_tag(?i, _, _, _)) {
- index.insert(i, u-1u);
+ index.insert(i, u);
}
case (ast.item_obj(?i, _, _, _, _)) {
- index.insert(i, u-1u);
+ index.insert(i, u);
}
}
}
@@ -1292,6 +1497,7 @@ fn index_block(vec[@ast.stmt] stmts, option.t[@ast.expr] expr) -> ast.block_ {
}
case (_) { /* fall through */ }
}
+ u += 1u;
}
ret rec(stmts=stmts, expr=expr, index=index);
}
@@ -1301,6 +1507,7 @@ fn index_arm(@ast.pat pat) -> hashmap[ast.ident,ast.def_id] {
alt (pat.node) {
case (ast.pat_bind(?i, ?def_id, _)) { index.insert(i, def_id); }
case (ast.pat_wild(_)) { /* empty */ }
+ case (ast.pat_lit(_, _)) { /* empty */ }
case (ast.pat_tag(_, ?pats, _, _)) {
for (@ast.pat p in pats) {
do_index_arm(index, p);
@@ -1330,15 +1537,11 @@ fn stmt_ends_with_semi(@ast.stmt stmt) -> bool {
case (ast.decl_item(_)) { ret false; }
}
}
- case (ast.stmt_ret(_)) { ret true; }
- case (ast.stmt_log(_)) { ret true; }
- case (ast.stmt_check_expr(_)) { ret true; }
- case (ast.stmt_fail) { ret true; }
case (ast.stmt_expr(?e)) {
alt (e.node) {
case (ast.expr_vec(_,_)) { ret true; }
case (ast.expr_tup(_,_)) { ret true; }
- case (ast.expr_rec(_,_)) { ret true; }
+ case (ast.expr_rec(_,_,_)) { ret true; }
case (ast.expr_call(_,_,_)) { ret true; }
case (ast.expr_binary(_,_,_,_)) { ret true; }
case (ast.expr_unary(_,_,_)) { ret true; }
@@ -1346,18 +1549,28 @@ fn stmt_ends_with_semi(@ast.stmt stmt) -> bool {
case (ast.expr_cast(_,_,_)) { ret true; }
case (ast.expr_if(_,_,_,_)) { ret false; }
case (ast.expr_for(_,_,_,_)) { ret false; }
+ case (ast.expr_for_each(_,_,_,_))
+ { ret false; }
case (ast.expr_while(_,_,_)) { ret false; }
case (ast.expr_do_while(_,_,_)) { ret false; }
case (ast.expr_alt(_,_,_)) { ret false; }
case (ast.expr_block(_,_)) { ret false; }
case (ast.expr_assign(_,_,_)) { ret true; }
case (ast.expr_assign_op(_,_,_,_))
- { ret true; }
+ { ret true; }
case (ast.expr_field(_,_,_)) { ret true; }
case (ast.expr_index(_,_,_)) { ret true; }
case (ast.expr_path(_,_,_)) { ret true; }
+ case (ast.expr_fail) { ret true; }
+ case (ast.expr_ret(_)) { ret true; }
+ case (ast.expr_put(_)) { ret true; }
+ case (ast.expr_be(_)) { ret true; }
+ case (ast.expr_log(_)) { ret true; }
+ case (ast.expr_check_expr(_)) { ret true; }
}
}
+ // We should not be calling this on a cdir.
+ case (ast.stmt_crate_directive(?cdir)) { fail; }
}
}
@@ -1401,8 +1614,13 @@ impure fn parse_block(parser p) -> ast.block {
case (none[@ast.expr]) {
// Not an expression statement.
stmts += vec(stmt);
- if (stmt_ends_with_semi(stmt)) {
- expect(p, token.SEMI);
+ // FIXME: crazy differentiation between conditions
+ // used in branches and binary expressions in rustboot
+ // means we cannot use && here. I know, right?
+ if (p.get_file_type() == SOURCE_FILE) {
+ if (stmt_ends_with_semi(stmt)) {
+ expect(p, token.SEMI);
+ }
}
}
}
@@ -1432,7 +1650,7 @@ impure fn parse_ty_params(parser p) -> vec[ast.ty_param] {
ret ty_params;
}
-impure fn parse_fn(parser p, ast.effect eff, bool is_iter) -> ast._fn {
+impure fn parse_fn_decl(parser p, ast.effect eff) -> ast.fn_decl {
auto pf = parse_arg;
let util.common.spanned[vec[ast.arg]] inputs =
// FIXME: passing parse_arg as an lval doesn't work at the
@@ -1444,34 +1662,41 @@ impure fn parse_fn(parser p, ast.effect eff, bool is_iter) -> ast._fn {
pf, p);
let @ast.ty output;
+
+ // FIXME: dropping constrs on the floor at the moment.
+ // pick them up when they're used by typestate pass.
+ parse_constrs(p);
+
if (p.peek() == token.RARROW) {
p.bump();
output = parse_ty(p);
} else {
output = @spanned(inputs.span, inputs.span, ast.ty_nil);
}
+ ret rec(effect=eff, inputs=inputs.node, output=output);
+}
+impure fn parse_fn(parser p, ast.effect eff, ast.proto proto) -> ast._fn {
+ auto decl = parse_fn_decl(p, eff);
auto body = parse_block(p);
-
- ret rec(effect = eff,
- is_iter = is_iter,
- inputs = inputs.node,
- output = output,
+ ret rec(decl = decl,
+ proto = proto,
body = body);
}
-impure fn parse_item_fn_or_iter(parser p, ast.effect eff,
- bool is_iter) -> @ast.item {
- auto lo = p.get_span();
- if (is_iter) {
- expect(p, token.ITER);
- } else {
- expect(p, token.FN);
- }
+impure fn parse_fn_header(parser p)
+ -> tup(ast.ident, vec[ast.ty_param]) {
auto id = parse_ident(p);
auto ty_params = parse_ty_params(p);
- auto f = parse_fn(p, eff, is_iter);
- auto item = ast.item_fn(id, f, ty_params,
+ ret tup(id, ty_params);
+}
+
+impure fn parse_item_fn_or_iter(parser p, ast.effect eff) -> @ast.item {
+ auto lo = p.get_span();
+ auto proto = parse_proto(p);
+ auto t = parse_fn_header(p);
+ auto f = parse_fn(p, eff, proto);
+ auto item = ast.item_fn(t._0, f, t._1,
p.next_def_id(), ast.ann_none);
ret @spanned(lo, f.body.span, item);
}
@@ -1486,14 +1711,9 @@ impure fn parse_obj_field(parser p) -> ast.obj_field {
impure fn parse_method(parser p) -> @ast.method {
auto lo = p.get_span();
auto eff = parse_effect(p);
- auto is_iter = false;
- alt (p.peek()) {
- case (token.FN) { p.bump(); }
- case (token.ITER) { p.bump(); is_iter = true; }
- case (?t) { unexpected(p, t); }
- }
+ auto proto = parse_proto(p);
auto ident = parse_ident(p);
- auto f = parse_fn(p, eff, is_iter);
+ auto f = parse_fn(p, eff, proto);
auto meth = rec(ident=ident, meth=f,
id=p.next_def_id(), ann=ast.ann_none);
ret @spanned(lo, f.body.span, meth);
@@ -1512,21 +1732,33 @@ impure fn parse_item_obj(parser p, ast.layer lyr) -> @ast.item {
some(token.COMMA),
pf, p);
- auto pm = parse_method;
- let util.common.spanned[vec[@ast.method]] meths =
- parse_seq[@ast.method]
- (token.LBRACE,
- token.RBRACE,
- none[token.token],
- pm, p);
+ let vec[@ast.method] meths = vec();
+ let option.t[ast.block] dtor = none[ast.block];
+
+ expect(p, token.LBRACE);
+ while (p.peek() != token.RBRACE) {
+ alt (p.peek()) {
+ case (token.DROP) {
+ p.bump();
+ dtor = some[ast.block](parse_block(p));
+ }
+ case (_) {
+ append[@ast.method](meths,
+ parse_method(p));
+ }
+ }
+ }
+ auto hi = p.get_span();
+ expect(p, token.RBRACE);
let ast._obj ob = rec(fields=fields.node,
- methods=meths.node);
+ methods=meths,
+ dtor=dtor);
auto item = ast.item_obj(ident, ob, ty_params,
p.next_def_id(), ast.ann_none);
- ret @spanned(lo, meths.span, item);
+ ret @spanned(lo, hi, item);
}
impure fn parse_mod_items(parser p, token.token term) -> ast._mod {
@@ -1568,18 +1800,127 @@ impure fn parse_item_mod(parser p) -> @ast.item {
ret @spanned(lo, hi, item);
}
-impure fn parse_item_type(parser p) -> @ast.item {
+impure fn parse_item_native_type(parser p) -> @ast.native_item {
+ auto t = parse_type_decl(p);
+ auto hi = p.get_span();
+ expect(p, token.SEMI);
+ auto item = ast.native_item_ty(t._1, p.next_def_id());
+ ret @spanned(t._0, hi, item);
+}
+
+impure fn parse_item_native_fn(parser p, ast.effect eff) -> @ast.native_item {
+ auto lo = p.get_span();
+ expect(p, token.FN);
+ auto t = parse_fn_header(p);
+ auto decl = parse_fn_decl(p, eff);
+ auto hi = p.get_span();
+ expect(p, token.SEMI);
+ auto item = ast.native_item_fn(t._0, decl, t._1, p.next_def_id(),
+ ast.ann_none);
+ ret @spanned(lo, hi, item);
+}
+
+impure fn parse_native_item(parser p) -> @ast.native_item {
+ let ast.effect eff = parse_effect(p);
+ let ast.opacity opa = parse_opacity(p);
+ let ast.layer lyr = parse_layer(p);
+ alt (p.peek()) {
+ case (token.TYPE) {
+ ret parse_item_native_type(p);
+ }
+ case (token.FN) {
+ ret parse_item_native_fn(p, eff);
+ }
+ case (?t) {
+ unexpected(p, t);
+ fail;
+ }
+ }
+}
+
+impure fn parse_native_mod_items(parser p,
+ str native_name,
+ ast.native_abi abi) -> ast.native_mod {
+ auto index = new_str_hash[ast.native_mod_index_entry]();
+ let vec[@ast.native_item] items = vec();
+
+ auto view_items = parse_native_view(p, index);
+
+ while (p.peek() != token.RBRACE) {
+ auto item = parse_native_item(p);
+ items += vec(item);
+
+ // Index the item.
+ ast.index_native_item(index, item);
+ }
+ ret rec(native_name=native_name, abi=abi,
+ view_items=view_items,
+ items=items,
+ index=index);
+}
+
+fn default_native_name(session.session sess, str id) -> str {
+ alt (sess.get_targ_cfg().os) {
+ case (session.os_win32) {
+ ret id + ".dll";
+ }
+ case (session.os_macos) {
+ ret "lib" + id + ".dylib";
+ }
+ case (session.os_linux) {
+ ret "lib" + id + ".so";
+ }
+ }
+}
+
+impure fn parse_item_native_mod(parser p) -> @ast.item {
+ auto lo = p.get_span();
+ expect(p, token.NATIVE);
+ auto abi = ast.native_abi_cdecl;
+ if (p.peek() != token.MOD) {
+ auto t = parse_str_lit_or_env_ident(p);
+ if (_str.eq(t, "cdecl")) {
+ } else if (_str.eq(t, "rust")) {
+ abi = ast.native_abi_rust;
+ } else {
+ p.err("unsupported abi: " + t);
+ fail;
+ }
+ }
+ expect(p, token.MOD);
+ auto id = parse_ident(p);
+ auto native_name;
+ if (p.peek() == token.EQ) {
+ expect(p, token.EQ);
+ native_name = parse_str_lit_or_env_ident(p);
+ } else {
+ native_name = default_native_name(p.get_session(), id);
+ }
+ expect(p, token.LBRACE);
+ auto m = parse_native_mod_items(p, native_name, abi);
+ auto hi = p.get_span();
+ expect(p, token.RBRACE);
+ auto item = ast.item_native_mod(id, m, p.next_def_id());
+ ret @spanned(lo, hi, item);
+}
+
+impure fn parse_type_decl(parser p) -> tup(span, ast.ident) {
auto lo = p.get_span();
expect(p, token.TYPE);
auto id = parse_ident(p);
+ ret tup(lo, id);
+}
+
+impure fn parse_item_type(parser p) -> @ast.item {
+ auto t = parse_type_decl(p);
auto tps = parse_ty_params(p);
expect(p, token.EQ);
auto ty = parse_ty(p);
auto hi = p.get_span();
expect(p, token.SEMI);
- auto item = ast.item_ty(id, ty, tps, p.next_def_id(), ast.ann_none);
- ret @spanned(lo, hi, item);
+ auto item = ast.item_ty(t._1, ty, tps, p.next_def_id(), ast.ann_none);
+ ret @spanned(t._0, hi, item);
}
impure fn parse_item_tag(parser p) -> @ast.item {
@@ -1631,6 +1972,19 @@ impure fn parse_item_tag(parser p) -> @ast.item {
ret @spanned(lo, hi, item);
}
+impure fn parse_opacity(parser p) -> ast.opacity {
+ alt (p.peek()) {
+ case (token.ABS) {
+ p.bump();
+ ret ast.op_abstract;
+ }
+ case (_) {
+ ret ast.op_transparent;
+ }
+ }
+ fail;
+}
+
impure fn parse_layer(parser p) -> ast.layer {
alt (p.peek()) {
case (token.STATE) {
@@ -1686,6 +2040,7 @@ fn peeking_at_item(parser p) -> bool {
impure fn parse_item(parser p) -> @ast.item {
let ast.effect eff = parse_effect(p);
+ let ast.opacity opa = parse_opacity(p);
let ast.layer lyr = parse_layer(p);
alt (p.peek()) {
@@ -1697,17 +2052,22 @@ impure fn parse_item(parser p) -> @ast.item {
case (token.FN) {
check (lyr == ast.layer_value);
- ret parse_item_fn_or_iter(p, eff, false);
+ ret parse_item_fn_or_iter(p, eff);
}
case (token.ITER) {
check (lyr == ast.layer_value);
- ret parse_item_fn_or_iter(p, eff, true);
+ ret parse_item_fn_or_iter(p, eff);
}
case (token.MOD) {
check (eff == ast.eff_pure);
check (lyr == ast.layer_value);
ret parse_item_mod(p);
}
+ case (token.NATIVE) {
+ check (eff == ast.eff_pure);
+ check (lyr == ast.layer_value);
+ ret parse_item_native_mod(p);
+ }
case (token.TYPE) {
check (eff == ast.eff_pure);
ret parse_item_type(p);
@@ -1840,7 +2200,16 @@ impure fn parse_import(parser p) -> @ast.view_item {
fail;
}
-impure fn parse_use_or_import(parser p) -> @ast.view_item {
+impure fn parse_export(parser p) -> @ast.view_item {
+ auto lo = p.get_span();
+ expect(p, token.EXPORT);
+ auto id = parse_ident(p);
+ auto hi = p.get_span();
+ expect(p, token.SEMI);
+ ret @spanned(lo, hi, ast.view_item_export(id));
+}
+
+impure fn parse_view_item(parser p) -> @ast.view_item {
alt (p.peek()) {
case (token.USE) {
ret parse_use(p);
@@ -1848,23 +2217,26 @@ impure fn parse_use_or_import(parser p) -> @ast.view_item {
case (token.IMPORT) {
ret parse_import(p);
}
+ case (token.EXPORT) {
+ ret parse_export(p);
+ }
}
}
-fn is_use_or_import(token.token t) -> bool {
- if (t == token.USE) {
- ret true;
- }
- if (t == token.IMPORT) {
- ret true;
+fn is_view_item(token.token t) -> bool {
+ alt (t) {
+ case (token.USE) { ret true; }
+ case (token.IMPORT) { ret true; }
+ case (token.EXPORT) { ret true; }
+ case (_) {}
}
ret false;
}
impure fn parse_view(parser p, ast.mod_index index) -> vec[@ast.view_item] {
let vec[@ast.view_item] items = vec();
- while (is_use_or_import(p.peek())) {
- auto item = parse_use_or_import(p);
+ while (is_view_item(p.peek())) {
+ auto item = parse_view_item(p);
items += vec(item);
ast.index_view_item(index, item);
@@ -1872,6 +2244,19 @@ impure fn parse_view(parser p, ast.mod_index index) -> vec[@ast.view_item] {
ret items;
}
+impure fn parse_native_view(parser p, ast.native_mod_index index)
+ -> vec[@ast.view_item] {
+ let vec[@ast.view_item] items = vec();
+ while (is_view_item(p.peek())) {
+ auto item = parse_view_item(p);
+ items += vec(item);
+
+ ast.index_native_view_item(index, item);
+ }
+ ret items;
+}
+
+
impure fn parse_crate_from_source_file(parser p) -> @ast.crate {
auto lo = p.get_span();
auto hi = lo;
@@ -1885,33 +2270,46 @@ impure fn parse_crate_from_source_file(parser p) -> @ast.crate {
//
// Each directive imperatively extends its environment with 0 or more items.
-impure fn parse_crate_directive(str prefix, parser p,
- &mutable vec[@ast.item] items,
- hashmap[ast.ident,ast.mod_index_entry] index)
+impure fn parse_crate_directive(parser p) -> ast.crate_directive
{
auto lo = p.get_span();
auto hi = lo;
alt (p.peek()) {
- case (token.CONST) {
- auto c = parse_item_const(p);
- ast.index_item(index, c);
- append[@ast.item](items, c);
- }
+ case (token.AUTH) {
+ // FIXME: currently dropping auth clauses on the floor,
+ // as there is no effect-checking pass.
+ p.bump();
+ auto n = parse_path(p, GREEDY);
+ expect(p, token.EQ);
+ auto e = parse_effect(p);
+ hi = p.get_span();
+ expect(p, token.SEMI);
+ ret spanned(lo, hi, ast.cdir_auth(n, e));
+ }
+
+ case (token.META) {
+ // FIXME: currently dropping meta clauses on the floor,
+ // as there is no crate metadata system
+ p.bump();
+ auto mis = parse_meta(p);
+ hi = p.get_span();
+ expect(p, token.SEMI);
+ ret spanned(lo, hi, ast.cdir_meta(mis));
+ }
+
case (token.MOD) {
p.bump();
auto id = parse_ident(p);
- auto file_path = id;
+ auto file_opt = none[filename];
alt (p.peek()) {
case (token.EQ) {
p.bump();
// FIXME: turn this into parse+eval expr
- file_path = parse_str_lit(p);
+ file_opt = some[filename](parse_str_lit_or_env_ident(p));
}
case (_) {}
}
- // dir-qualify file path.
- auto full_path = prefix + std.os.path_sep() + file_path;
alt (p.peek()) {
@@ -1920,29 +2318,18 @@ impure fn parse_crate_directive(str prefix, parser p,
case (token.SEMI) {
hi = p.get_span();
p.bump();
- if (!_str.ends_with(full_path, ".rs")) {
- full_path += ".rs";
- }
- auto p0 = new_parser(p.get_session(), 0, full_path);
- auto m0 = parse_mod_items(p0, token.EOF);
- auto im = ast.item_mod(id, m0, p.next_def_id());
- auto i = @spanned(lo, hi, im);
- ast.index_item(index, i);
- append[@ast.item](items, i);
+ ret spanned(lo, hi, ast.cdir_src_mod(id, file_opt));
}
// mod x = "foo_dir" { ...directives... }
case (token.LBRACE) {
p.bump();
- auto m0 = parse_crate_directives(full_path, p,
- token.RBRACE);
+ auto cdirs = parse_crate_directives(p, token.RBRACE);
hi = p.get_span();
expect(p, token.RBRACE);
- auto im = ast.item_mod(id, m0, p.next_def_id());
- auto i = @spanned(lo, hi, im);
- ast.index_item(index, i);
- append[@ast.item](items, i);
+ ret spanned(lo, hi,
+ ast.cdir_dir_mod(id, file_opt, cdirs));
}
case (?t) {
@@ -1950,28 +2337,65 @@ impure fn parse_crate_directive(str prefix, parser p,
}
}
}
+
+ case (token.LET) {
+ p.bump();
+ expect(p, token.LPAREN);
+ auto id = parse_ident(p);
+ expect(p, token.EQ);
+ auto x = parse_expr(p);
+ expect(p, token.RPAREN);
+ expect(p, token.LBRACE);
+ auto v = parse_crate_directives(p, token.RBRACE);
+ hi = p.get_span();
+ expect(p, token.RBRACE);
+ ret spanned(lo, hi, ast.cdir_let(id, x, v));
+ }
+
+ case (token.USE) {
+ auto vi = parse_view_item(p);
+ ret spanned(lo, vi.span, ast.cdir_view_item(vi));
+ }
+
+ case (token.IMPORT) {
+ auto vi = parse_view_item(p);
+ ret spanned(lo, vi.span, ast.cdir_view_item(vi));
+ }
+
+ case (token.EXPORT) {
+ auto vi = parse_view_item(p);
+ ret spanned(lo, vi.span, ast.cdir_view_item(vi));
+ }
+
+ case (_) {
+ auto x = parse_expr(p);
+ ret spanned(lo, x.span, ast.cdir_expr(x));
+ }
}
+ fail;
}
-impure fn parse_crate_directives(str prefix, parser p,
- token.token term) -> ast._mod {
- auto index = new_str_hash[ast.mod_index_entry]();
- auto view_items = parse_view(p, index);
- let vec[@ast.item] items = vec();
+impure fn parse_crate_directives(parser p, token.token term)
+ -> vec[@ast.crate_directive] {
+
+ let vec[@ast.crate_directive] cdirs = vec();
while (p.peek() != term) {
- parse_crate_directive(prefix, p, items, index);
+ auto cdir = @parse_crate_directive(p);
+ append[@ast.crate_directive](cdirs, cdir);
}
- ret rec(view_items=view_items, items=items, index=index);
+ ret cdirs;
}
impure fn parse_crate_from_crate_file(parser p) -> @ast.crate {
auto lo = p.get_span();
auto hi = lo;
auto prefix = std.path.dirname(lo.filename);
- auto m = parse_crate_directives(prefix, p, token.EOF);
+ auto cdirs = parse_crate_directives(p, token.EOF);
+ auto m = eval.eval_crate_directives_to_mod(p, p.get_env(),
+ cdirs, prefix);
hi = p.get_span();
expect(p, token.EOF);
ret @spanned(lo, hi, rec(module=m));
diff --git a/src/comp/front/pretty.rs b/src/comp/front/pretty.rs
new file mode 100644
index 00000000..2fd58126
--- /dev/null
+++ b/src/comp/front/pretty.rs
@@ -0,0 +1,87 @@
+import std._int;
+import std._str;
+import std._uint;
+import std._vec;
+
+export print_expr;
+
+// FIXME this is superseded by ../pretty/pprust.rs. can it be dropped?
+
+fn unknown() -> str {
+ ret "<unknown ast node>";
+}
+
+fn print_expr(@ast.expr expr) -> str {
+ alt (expr.node) {
+ case (ast.expr_lit(?lit, _)) {
+ ret print_lit(lit);
+ }
+ case (ast.expr_binary(?op, ?lhs, ?rhs, _)) {
+ ret print_expr_binary(op, lhs, rhs);
+ }
+ case (ast.expr_call(?path, ?args, _)) {
+ ret print_expr_call(path, args);
+ }
+ case (ast.expr_path(?path, _, _)) {
+ ret print_path(path);
+ }
+ case (_) {
+ ret unknown();
+ }
+ }
+}
+
+fn print_lit(@ast.lit lit) -> str {
+ alt (lit.node) {
+ case (ast.lit_str(?s)) {
+ ret "\"" + s + "\"";
+ }
+ case (ast.lit_int(?i)) {
+ ret _int.to_str(i, 10u);
+ }
+ case (ast.lit_uint(?u)) {
+ ret _uint.to_str(u, 10u);
+ }
+ case (_) {
+ ret unknown();
+ }
+ }
+}
+
+fn print_expr_binary(ast.binop op, @ast.expr lhs, @ast.expr rhs) -> str {
+ alt (op) {
+ case (ast.add) {
+ auto l = print_expr(lhs);
+ auto r = print_expr(rhs);
+ ret l + " + " + r;
+ }
+ }
+}
+
+fn print_expr_call(@ast.expr path_expr, vec[@ast.expr] args) -> str {
+ auto s = print_expr(path_expr);
+
+ s += "(";
+ fn print_expr_ref(&@ast.expr e) -> str { ret print_expr(e); }
+ auto mapfn = print_expr_ref;
+ auto argstrs = _vec.map[@ast.expr, str](mapfn, args);
+ s += _str.connect(argstrs, ", ");
+ s += ")";
+
+ ret s;
+}
+
+fn print_path(ast.path path) -> str {
+ ret _str.connect(path.node.idents, ".");
+}
+
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C ../.. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
+//
diff --git a/src/comp/lib/llvm.rs b/src/comp/lib/llvm.rs
index 624c56f9..f75bdbe1 100644
--- a/src/comp/lib/llvm.rs
+++ b/src/comp/lib/llvm.rs
@@ -76,6 +76,25 @@ const uint LLVMIntSLT = 40u;
const uint LLVMIntSLE = 41u;
+// Consts for the LLVM RealPredicate type, pre-case to uint.
+// FIXME: as above.
+
+const uint LLVMRealOEQ = 1u;
+const uint LLVMRealOGT = 2u;
+const uint LLVMRealOGE = 3u;
+const uint LLVMRealOLT = 4u;
+const uint LLVMRealOLE = 5u;
+const uint LLVMRealONE = 6u;
+
+const uint LLVMRealORD = 7u;
+const uint LLVMRealUNO = 8u;
+const uint LLVMRealUEQ = 9u;
+const uint LLVMRealUGT = 10u;
+const uint LLVMRealUGE = 11u;
+const uint LLVMRealULT = 12u;
+const uint LLVMRealULE = 13u;
+const uint LLVMRealUNE = 14u;
+
native mod llvm = llvm_lib {
type ModuleRef;
@@ -657,7 +676,7 @@ native mod llvm = llvm_lib {
fn LLVMBuildICmp(BuilderRef B, uint Op,
ValueRef LHS, ValueRef RHS,
sbuf Name) -> ValueRef;
- fn LLVMBuildFCmp(BuilderRef B, RealPredicate Op,
+ fn LLVMBuildFCmp(BuilderRef B, uint Op,
ValueRef LHS, ValueRef RHS,
sbuf Name) -> ValueRef;
@@ -1034,7 +1053,7 @@ obj builder(BuilderRef B) {
ret llvm.LLVMBuildICmp(B, Op, LHS, RHS, _str.buf(""));
}
- fn FCmp(RealPredicate Op, ValueRef LHS, ValueRef RHS) -> ValueRef {
+ fn FCmp(uint Op, ValueRef LHS, ValueRef RHS) -> ValueRef {
ret llvm.LLVMBuildFCmp(B, Op, LHS, RHS, _str.buf(""));
}
@@ -1151,18 +1170,70 @@ fn mk_type_handle() -> type_handle {
ret rec(llth=th, dtor=type_handle_dtor(th));
}
-fn type_to_str(TypeRef ty) -> str {
+
+state obj type_names(std.map.hashmap[TypeRef, str] type_names,
+ std.map.hashmap[str, TypeRef] named_types) {
+
+ fn associate(str s, TypeRef t) {
+ check (!named_types.contains_key(s));
+ check (!type_names.contains_key(t));
+ type_names.insert(t, s);
+ named_types.insert(s, t);
+ }
+
+ fn type_has_name(TypeRef t) -> bool {
+ ret type_names.contains_key(t);
+ }
+
+ fn get_name(TypeRef t) -> str {
+ ret type_names.get(t);
+ }
+
+ fn name_has_type(str s) -> bool {
+ ret named_types.contains_key(s);
+ }
+
+ fn get_type(str s) -> TypeRef {
+ ret named_types.get(s);
+ }
+}
+
+fn mk_type_names() -> type_names {
+ auto nt = util.common.new_str_hash[TypeRef]();
+
+ fn hash(&TypeRef t) -> uint {
+ ret t as uint;
+ }
+
+ fn eq(&TypeRef a, &TypeRef b) -> bool {
+ ret (a as uint) == (b as uint);
+ }
+
+ let std.map.hashfn[TypeRef] hasher = hash;
+ let std.map.eqfn[TypeRef] eqer = eq;
+ auto tn = std.map.mk_hashmap[TypeRef,str](hasher, eqer);
+
+ ret type_names(tn, nt);
+}
+
+fn type_to_str(type_names names, TypeRef ty) -> str {
let vec[TypeRef] v = vec();
- ret type_to_str_inner(v, ty);
+ ret type_to_str_inner(names, v, ty);
}
-fn type_to_str_inner(vec[TypeRef] outer0, TypeRef ty) -> str {
+fn type_to_str_inner(type_names names,
+ vec[TypeRef] outer0, TypeRef ty) -> str {
+
+ if (names.type_has_name(ty)) {
+ ret names.get_name(ty);
+ }
auto outer = outer0 + vec(ty);
let int kind = llvm.LLVMGetTypeKind(ty);
- fn tys_str(vec[TypeRef] outer, vec[TypeRef] tys) -> str {
+ fn tys_str(type_names names,
+ vec[TypeRef] outer, vec[TypeRef] tys) -> str {
let str s = "";
let bool first = true;
for (TypeRef t in tys) {
@@ -1171,7 +1242,7 @@ fn type_to_str_inner(vec[TypeRef] outer0, TypeRef ty) -> str {
} else {
s += ", ";
}
- s += type_to_str_inner(outer, t);
+ s += type_to_str_inner(names, outer, t);
}
ret s;
}
@@ -1200,9 +1271,9 @@ fn type_to_str_inner(vec[TypeRef] outer0, TypeRef ty) -> str {
let vec[TypeRef] args =
_vec.init_elt[TypeRef](0 as TypeRef, n_args);
llvm.LLVMGetParamTypes(ty, _vec.buf[TypeRef](args));
- s += tys_str(outer, args);
+ s += tys_str(names, outer, args);
s += ") -> ";
- s += type_to_str_inner(outer, out_ty);
+ s += type_to_str_inner(names, outer, out_ty);
ret s;
}
@@ -1212,7 +1283,7 @@ fn type_to_str_inner(vec[TypeRef] outer0, TypeRef ty) -> str {
let vec[TypeRef] elts =
_vec.init_elt[TypeRef](0 as TypeRef, n_elts);
llvm.LLVMGetStructElementTypes(ty, _vec.buf[TypeRef](elts));
- s += tys_str(outer, elts);
+ s += tys_str(names, outer, elts);
s += "}";
ret s;
}
@@ -1228,7 +1299,8 @@ fn type_to_str_inner(vec[TypeRef] outer0, TypeRef ty) -> str {
ret "*\\" + util.common.istr(n as int);
}
}
- ret "*" + type_to_str_inner(outer, llvm.LLVMGetElementType(ty));
+ ret "*" + type_to_str_inner(names, outer,
+ llvm.LLVMGetElementType(ty));
}
case (12) { ret "Opaque"; }
diff --git a/src/comp/middle/fold.rs b/src/comp/middle/fold.rs
index ca10e79f..d7660460 100644
--- a/src/comp/middle/fold.rs
+++ b/src/comp/middle/fold.rs
@@ -10,6 +10,7 @@ import util.common.ty_mach;
import util.common.append;
import front.ast;
+import front.ast.fn_decl;
import front.ast.ident;
import front.ast.path;
import front.ast.mutability;
@@ -20,6 +21,7 @@ import front.ast.block;
import front.ast.item;
import front.ast.view_item;
import front.ast.meta_item;
+import front.ast.native_item;
import front.ast.arg;
import front.ast.pat;
import front.ast.decl;
@@ -28,6 +30,7 @@ import front.ast.def;
import front.ast.def_id;
import front.ast.ann;
+import std._uint;
import std._vec;
type ast_fold[ENV] =
@@ -56,6 +59,7 @@ type ast_fold[ENV] =
vec[ast.ty_method] meths) -> @ty) fold_ty_obj,
(fn(&ENV e, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty) fold_ty_fn,
@@ -72,7 +76,8 @@ type ast_fold[ENV] =
vec[ast.elt] es, ann a) -> @expr) fold_expr_tup,
(fn(&ENV e, &span sp,
- vec[ast.field] fields, ann a) -> @expr) fold_expr_rec,
+ vec[ast.field] fields,
+ option.t[@expr] base, ann a) -> @expr) fold_expr_rec,
(fn(&ENV e, &span sp,
@expr f, vec[@expr] args,
@@ -108,6 +113,10 @@ type ast_fold[ENV] =
ann a) -> @expr) fold_expr_for,
(fn(&ENV e, &span sp,
+ @decl decl, @expr seq, &block body,
+ ann a) -> @expr) fold_expr_for_each,
+
+ (fn(&ENV e, &span sp,
@expr cond, &block body,
ann a) -> @expr) fold_expr_while,
@@ -144,6 +153,29 @@ type ast_fold[ENV] =
&option.t[def] d,
ann a) -> @expr) fold_expr_path,
+ (fn(&ENV e, &span sp,
+ &path p, vec[@expr] args,
+ option.t[@expr] body,
+ @expr expanded,
+ ann a) -> @expr) fold_expr_ext,
+
+ (fn(&ENV e, &span sp) -> @expr) fold_expr_fail,
+
+ (fn(&ENV e, &span sp,
+ &option.t[@expr] rv) -> @expr) fold_expr_ret,
+
+ (fn(&ENV e, &span sp,
+ &option.t[@expr] rv) -> @expr) fold_expr_put,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_be,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_log,
+
+ (fn(&ENV e, &span sp,
+ @expr e) -> @expr) fold_expr_check_expr,
+
// Decl folds.
(fn(&ENV e, &span sp,
@ast.local local) -> @decl) fold_decl_local,
@@ -157,6 +189,9 @@ type ast_fold[ENV] =
ann a) -> @pat) fold_pat_wild,
(fn(&ENV e, &span sp,
+ @ast.lit lit, ann a) -> @pat) fold_pat_lit,
+
+ (fn(&ENV e, &span sp,
ident i, def_id did, ann a) -> @pat) fold_pat_bind,
(fn(&ENV e, &span sp,
@@ -170,15 +205,6 @@ type ast_fold[ENV] =
@decl decl) -> @stmt) fold_stmt_decl,
(fn(&ENV e, &span sp,
- &option.t[@expr] rv) -> @stmt) fold_stmt_ret,
-
- (fn(&ENV e, &span sp,
- @expr e) -> @stmt) fold_stmt_log,
-
- (fn(&ENV e, &span sp,
- @expr e) -> @stmt) fold_stmt_check_expr,
-
- (fn(&ENV e, &span sp,
@expr e) -> @stmt) fold_stmt_expr,
// Item folds.
@@ -192,13 +218,24 @@ type ast_fold[ENV] =
def_id id, ann a) -> @item) fold_item_fn,
(fn(&ENV e, &span sp, ident ident,
+ &ast.fn_decl decl,
+ vec[ast.ty_param] ty_params,
+ def_id id, ann a) -> @native_item) fold_native_item_fn,
+
+ (fn(&ENV e, &span sp, ident ident,
&ast._mod m, def_id id) -> @item) fold_item_mod,
(fn(&ENV e, &span sp, ident ident,
+ &ast.native_mod m, def_id id) -> @item) fold_item_native_mod,
+
+ (fn(&ENV e, &span sp, ident ident,
@ty t, vec[ast.ty_param] ty_params,
def_id id, ann a) -> @item) fold_item_ty,
(fn(&ENV e, &span sp, ident ident,
+ def_id id) -> @native_item) fold_native_item_ty,
+
+ (fn(&ENV e, &span sp, ident ident,
vec[ast.variant] variants,
vec[ast.ty_param] ty_params,
def_id id) -> @item) fold_item_tag,
@@ -220,23 +257,30 @@ type ast_fold[ENV] =
(fn(&ENV e, &span sp,
&ast.block_) -> block) fold_block,
+ (fn(&ENV e, &fn_decl decl,
+ ast.proto proto,
+ &block body) -> ast._fn) fold_fn,
+
(fn(&ENV e, ast.effect effect,
- bool is_iter,
vec[arg] inputs,
- @ty output, &block body) -> ast._fn) fold_fn,
+ @ty output) -> ast.fn_decl) fold_fn_decl,
(fn(&ENV e, &ast._mod m) -> ast._mod) fold_mod,
+ (fn(&ENV e, &ast.native_mod m) -> ast.native_mod) fold_native_mod,
+
(fn(&ENV e, &span sp,
&ast._mod m) -> @ast.crate) fold_crate,
(fn(&ENV e,
vec[ast.obj_field] fields,
- vec[@ast.method] methods) -> ast._obj) fold_obj,
+ vec[@ast.method] methods,
+ option.t[block] dtor) -> ast._obj) fold_obj,
// Env updates.
(fn(&ENV e, @ast.crate c) -> ENV) update_env_for_crate,
(fn(&ENV e, @item i) -> ENV) update_env_for_item,
+ (fn(&ENV e, @native_item i) -> ENV) update_env_for_native_item,
(fn(&ENV e, @view_item i) -> ENV) update_env_for_view_item,
(fn(&ENV e, &block b) -> ENV) update_env_for_block,
(fn(&ENV e, @stmt s) -> ENV) update_env_for_stmt,
@@ -312,11 +356,13 @@ fn fold_ty[ENV](&ENV env, ast_fold[ENV] fld, @ty t) -> @ty {
case (ast.ty_obj(?meths)) {
let vec[ast.ty_method] meths_ = vec();
for (ast.ty_method m in meths) {
- auto tfn = fold_ty_fn(env_, fld, t.span, m.inputs, m.output);
+ auto tfn = fold_ty_fn(env_, fld, t.span, m.proto,
+ m.inputs, m.output);
alt (tfn.node) {
- case (ast.ty_fn(?ins, ?out)) {
+ case (ast.ty_fn(?p, ?ins, ?out)) {
append[ast.ty_method]
- (meths_, rec(inputs=ins, output=out with m));
+ (meths_, rec(proto=p, inputs=ins, output=out
+ with m));
}
}
}
@@ -333,13 +379,14 @@ fn fold_ty[ENV](&ENV env, ast_fold[ENV] fld, @ty t) -> @ty {
ret fld.fold_ty_mutable(env_, t.span, ty_);
}
- case (ast.ty_fn(?inputs, ?output)) {
- ret fold_ty_fn(env_, fld, t.span, inputs, output);
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
+ ret fold_ty_fn(env_, fld, t.span, proto, inputs, output);
}
}
}
fn fold_ty_fn[ENV](&ENV env, ast_fold[ENV] fld, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty {
auto output_ = fold_ty(env, fld, output);
@@ -349,7 +396,7 @@ fn fold_ty_fn[ENV](&ENV env, ast_fold[ENV] fld, &span sp,
auto input_ = rec(ty=ty_ with input);
inputs_ += vec(input_);
}
- ret fld.fold_ty_fn(env, sp, inputs_, output_);
+ ret fld.fold_ty_fn(env, sp, proto, inputs_, output_);
}
fn fold_decl[ENV](&ENV env, ast_fold[ENV] fld, @decl d) -> @decl {
@@ -397,6 +444,9 @@ fn fold_pat[ENV](&ENV env, ast_fold[ENV] fld, @ast.pat p) -> @ast.pat {
alt (p.node) {
case (ast.pat_wild(?t)) { ret fld.fold_pat_wild(env_, p.span, t); }
+ case (ast.pat_lit(?lt, ?t)) {
+ ret fld.fold_pat_lit(env_, p.span, lt, t);
+ }
case (ast.pat_bind(?id, ?did, ?t)) {
ret fld.fold_pat_bind(env_, p.span, id, did, t);
}
@@ -449,12 +499,19 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
ret fld.fold_expr_tup(env_, e.span, elts, t);
}
- case (ast.expr_rec(?fs, ?t)) {
+ case (ast.expr_rec(?fs, ?base, ?t)) {
let vec[ast.field] fields = vec();
+ let option.t[@expr] b = none[@expr];
for (ast.field f in fs) {
fields += fold_rec_field(env, fld, f);
}
- ret fld.fold_expr_rec(env_, e.span, fields, t);
+ alt (base) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?eb)) {
+ b = some[@expr](fold_expr(env_, fld, eb));
+ }
+ }
+ ret fld.fold_expr_rec(env_, e.span, fields, b, t);
}
case (ast.expr_call(?f, ?args, ?t)) {
@@ -521,6 +578,13 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
ret fld.fold_expr_for(env_, e.span, ddecl, sseq, bbody, t);
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, ?t)) {
+ auto ddecl = fold_decl(env_, fld, decl);
+ auto sseq = fold_expr(env_, fld, seq);
+ auto bbody = fold_block(env_, fld, body);
+ ret fld.fold_expr_for_each(env_, e.span, ddecl, sseq, bbody, t);
+ }
+
case (ast.expr_while(?cnd, ?body, ?t)) {
auto ccnd = fold_expr(env_, fld, cnd);
auto bbody = fold_block(env_, fld, body);
@@ -574,9 +638,59 @@ fn fold_expr[ENV](&ENV env, ast_fold[ENV] fld, &@expr e) -> @expr {
auto p_ = fold_path(env_, fld, p);
ret fld.fold_expr_path(env_, e.span, p_, r, t);
}
+
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, ?t)) {
+ // Only fold the expanded expression, not the
+ // expressions involved in syntax extension
+ auto exp = fold_expr(env_, fld, expanded);
+ ret fld.fold_expr_ext(env_, e.span, p, args, body,
+ exp, t);
+ }
+
+ case (ast.expr_fail) {
+ ret fld.fold_expr_fail(env_, e.span);
+ }
+
+ case (ast.expr_ret(?oe)) {
+ auto oee = none[@expr];
+ alt (oe) {
+ case (some[@expr](?x)) {
+ oee = some(fold_expr(env_, fld, x));
+ }
+ case (_) { /* fall through */ }
+ }
+ ret fld.fold_expr_ret(env_, e.span, oee);
+ }
+
+ case (ast.expr_put(?oe)) {
+ auto oee = none[@expr];
+ alt (oe) {
+ case (some[@expr](?x)) {
+ oee = some(fold_expr(env_, fld, x));
+ }
+ case (_) { /* fall through */ }
+ }
+ ret fld.fold_expr_put(env_, e.span, oee);
+ }
+
+ case (ast.expr_be(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_be(env_, e.span, ee);
+ }
+
+ case (ast.expr_log(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_log(env_, e.span, ee);
+ }
+
+ case (ast.expr_check_expr(?x)) {
+ auto ee = fold_expr(env_, fld, x);
+ ret fld.fold_expr_check_expr(env_, e.span, ee);
+ }
+
}
- ret e;
+ fail;
}
@@ -594,37 +708,12 @@ fn fold_stmt[ENV](&ENV env, ast_fold[ENV] fld, &@stmt s) -> @stmt {
ret fld.fold_stmt_decl(env_, s.span, dd);
}
- case (ast.stmt_ret(?oe)) {
- auto oee = none[@expr];
- alt (oe) {
- case (some[@expr](?e)) {
- oee = some(fold_expr(env_, fld, e));
- }
- case (_) { /* fall through */ }
- }
- ret fld.fold_stmt_ret(env_, s.span, oee);
- }
-
- case (ast.stmt_log(?e)) {
- auto ee = fold_expr(env_, fld, e);
- ret fld.fold_stmt_log(env_, s.span, ee);
- }
-
- case (ast.stmt_check_expr(?e)) {
- auto ee = fold_expr(env_, fld, e);
- ret fld.fold_stmt_check_expr(env_, s.span, ee);
- }
-
- case (ast.stmt_fail) {
- ret s;
- }
-
case (ast.stmt_expr(?e)) {
auto ee = fold_expr(env_, fld, e);
ret fld.fold_stmt_expr(env_, s.span, ee);
}
}
- ret s;
+ fail;
}
fn fold_block[ENV](&ENV env, ast_fold[ENV] fld, &block blk) -> block {
@@ -666,17 +755,22 @@ fn fold_arg[ENV](&ENV env, ast_fold[ENV] fld, &arg a) -> arg {
ret rec(ty=ty with a);
}
-
-fn fold_fn[ENV](&ENV env, ast_fold[ENV] fld, &ast._fn f) -> ast._fn {
-
+fn fold_fn_decl[ENV](&ENV env, ast_fold[ENV] fld,
+ &ast.fn_decl decl) -> ast.fn_decl {
let vec[ast.arg] inputs = vec();
- for (ast.arg a in f.inputs) {
+ for (ast.arg a in decl.inputs) {
inputs += fold_arg(env, fld, a);
}
- auto output = fold_ty[ENV](env, fld, f.output);
+ auto output = fold_ty[ENV](env, fld, decl.output);
+ ret fld.fold_fn_decl(env, decl.effect, inputs, output);
+}
+
+fn fold_fn[ENV](&ENV env, ast_fold[ENV] fld, &ast._fn f) -> ast._fn {
+ auto decl = fold_fn_decl(env, fld, f.decl);
+
auto body = fold_block[ENV](env, fld, f.body);
- ret fld.fold_fn(env, f.effect, f.is_iter, inputs, output, body);
+ ret fld.fold_fn(env, decl, f.proto, body);
}
@@ -701,6 +795,13 @@ fn fold_obj[ENV](&ENV env, ast_fold[ENV] fld, &ast._obj ob) -> ast._obj {
for (ast.obj_field f in ob.fields) {
fields += fold_obj_field(env, fld, f);
}
+ let option.t[block] dtor = none[block];
+ alt (ob.dtor) {
+ case (none[block]) { }
+ case (some[block](?b)) {
+ dtor = some[block](fold_block[ENV](env, fld, b));
+ }
+ }
let vec[ast.ty_param] tp = vec();
for (@ast.method m in ob.methods) {
// Fake-up an ast.item for this method.
@@ -715,7 +816,7 @@ fn fold_obj[ENV](&ENV env, ast_fold[ENV] fld, &ast._obj ob) -> ast._obj {
let ENV _env = fld.update_env_for_item(env, i);
append[@ast.method](meths, fold_method(_env, fld, m));
}
- ret fld.fold_obj(env, fields, meths);
+ ret fld.fold_obj(env, fields, meths, dtor);
}
fn fold_view_item[ENV](&ENV env, ast_fold[ENV] fld, @view_item vi)
@@ -768,6 +869,11 @@ fn fold_item[ENV](&ENV env, ast_fold[ENV] fld, @item i) -> @item {
ret fld.fold_item_mod(env_, i.span, ident, mm_, id);
}
+ case (ast.item_native_mod(?ident, ?mm, ?id)) {
+ let ast.native_mod mm_ = fold_native_mod[ENV](env_, fld, mm);
+ ret fld.fold_item_native_mod(env_, i.span, ident, mm_, id);
+ }
+
case (ast.item_ty(?ident, ?ty, ?params, ?id, ?ann)) {
let @ast.ty ty_ = fold_ty[ENV](env_, fld, ty);
ret fld.fold_item_ty(env_, i.span, ident, ty_, params, id, ann);
@@ -798,7 +904,6 @@ fn fold_item[ENV](&ENV env, ast_fold[ENV] fld, @item i) -> @item {
fail;
}
-
fn fold_mod[ENV](&ENV e, ast_fold[ENV] fld, &ast._mod m) -> ast._mod {
let vec[@view_item] view_items = vec();
@@ -818,7 +923,50 @@ fn fold_mod[ENV](&ENV e, ast_fold[ENV] fld, &ast._mod m) -> ast._mod {
}
ret fld.fold_mod(e, rec(view_items=view_items, items=items, index=index));
- }
+}
+
+fn fold_native_item[ENV](&ENV env, ast_fold[ENV] fld,
+ @native_item i) -> @native_item {
+ let ENV env_ = fld.update_env_for_native_item(env, i);
+
+ if (!fld.keep_going(env_)) {
+ ret i;
+ }
+ alt (i.node) {
+ case (ast.native_item_ty(?ident, ?id)) {
+ ret fld.fold_native_item_ty(env_, i.span, ident, id);
+ }
+ case (ast.native_item_fn(?ident, ?fn_decl, ?ty_params, ?id, ?ann)) {
+ auto d = fold_fn_decl[ENV](env_, fld, fn_decl);
+ ret fld.fold_native_item_fn(env_, i.span, ident, d,
+ ty_params, id, ann);
+ }
+ }
+}
+
+fn fold_native_mod[ENV](&ENV e, ast_fold[ENV] fld,
+ &ast.native_mod m) -> ast.native_mod {
+ let vec[@view_item] view_items = vec();
+ let vec[@native_item] items = vec();
+ auto index = new_str_hash[ast.native_mod_index_entry]();
+
+ for (@view_item vi in m.view_items) {
+ auto new_vi = fold_view_item[ENV](e, fld, vi);
+ append[@view_item](view_items, new_vi);
+ }
+
+ for (@native_item i in m.items) {
+ auto new_item = fold_native_item[ENV](e, fld, i);
+ append[@native_item](items, new_item);
+ ast.index_native_item(index, new_item);
+ }
+
+ ret fld.fold_native_mod(e, rec(native_name=m.native_name,
+ abi=m.abi,
+ view_items=view_items,
+ items=items,
+ index=index));
+}
fn fold_crate[ENV](&ENV env, ast_fold[ENV] fld, @ast.crate c) -> @ast.crate {
let ENV env_ = fld.update_env_for_crate(env, c);
@@ -894,9 +1042,10 @@ fn identity_fold_ty_obj[ENV](&ENV env, &span sp,
}
fn identity_fold_ty_fn[ENV](&ENV env, &span sp,
+ ast.proto proto,
vec[rec(ast.mode mode, @ty ty)] inputs,
@ty output) -> @ty {
- ret @respan(sp, ast.ty_fn(inputs, output));
+ ret @respan(sp, ast.ty_fn(proto, inputs, output));
}
fn identity_fold_ty_path[ENV](&ENV env, &span sp, ast.path p,
@@ -922,8 +1071,9 @@ fn identity_fold_expr_tup[ENV](&ENV env, &span sp,
}
fn identity_fold_expr_rec[ENV](&ENV env, &span sp,
- vec[ast.field] fields, ann a) -> @expr {
- ret @respan(sp, ast.expr_rec(fields, a));
+ vec[ast.field] fields,
+ option.t[@expr] base, ann a) -> @expr {
+ ret @respan(sp, ast.expr_rec(fields, base, a));
}
fn identity_fold_expr_call[ENV](&ENV env, &span sp, @expr f,
@@ -971,6 +1121,12 @@ fn identity_fold_expr_for[ENV](&ENV env, &span sp,
ret @respan(sp, ast.expr_for(d, seq, body, a));
}
+fn identity_fold_expr_for_each[ENV](&ENV env, &span sp,
+ @decl d, @expr seq,
+ &block body, ann a) -> @expr {
+ ret @respan(sp, ast.expr_for_each(d, seq, body, a));
+}
+
fn identity_fold_expr_while[ENV](&ENV env, &span sp,
@expr cond, &block body, ann a) -> @expr {
ret @respan(sp, ast.expr_while(cond, body, a));
@@ -1019,6 +1175,40 @@ fn identity_fold_expr_path[ENV](&ENV env, &span sp,
ret @respan(sp, ast.expr_path(p, d, a));
}
+fn identity_fold_expr_ext[ENV](&ENV env, &span sp,
+ &path p, vec[@expr] args,
+ option.t[@expr] body,
+ @expr expanded,
+ ann a) -> @expr {
+ ret @respan(sp, ast.expr_ext(p, args, body, expanded, a));
+}
+
+fn identity_fold_expr_fail[ENV](&ENV env, &span sp) -> @expr {
+ ret @respan(sp, ast.expr_fail);
+}
+
+fn identity_fold_expr_ret[ENV](&ENV env, &span sp,
+ &option.t[@expr] rv) -> @expr {
+ ret @respan(sp, ast.expr_ret(rv));
+}
+
+fn identity_fold_expr_put[ENV](&ENV env, &span sp,
+ &option.t[@expr] rv) -> @expr {
+ ret @respan(sp, ast.expr_put(rv));
+}
+
+fn identity_fold_expr_be[ENV](&ENV env, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_be(x));
+}
+
+fn identity_fold_expr_log[ENV](&ENV e, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_log(x));
+}
+
+fn identity_fold_expr_check_expr[ENV](&ENV e, &span sp, @expr x) -> @expr {
+ ret @respan(sp, ast.expr_check_expr(x));
+}
+
// Decl identities.
@@ -1038,6 +1228,10 @@ fn identity_fold_pat_wild[ENV](&ENV e, &span sp, ann a) -> @pat {
ret @respan(sp, ast.pat_wild(a));
}
+fn identity_fold_pat_lit[ENV](&ENV e, &span sp, @ast.lit lit, ann a) -> @pat {
+ ret @respan(sp, ast.pat_lit(lit, a));
+}
+
fn identity_fold_pat_bind[ENV](&ENV e, &span sp, ident i, def_id did, ann a)
-> @pat {
ret @respan(sp, ast.pat_bind(i, did, a));
@@ -1055,19 +1249,6 @@ fn identity_fold_stmt_decl[ENV](&ENV env, &span sp, @decl d) -> @stmt {
ret @respan(sp, ast.stmt_decl(d));
}
-fn identity_fold_stmt_ret[ENV](&ENV env, &span sp,
- &option.t[@expr] rv) -> @stmt {
- ret @respan(sp, ast.stmt_ret(rv));
-}
-
-fn identity_fold_stmt_log[ENV](&ENV e, &span sp, @expr x) -> @stmt {
- ret @respan(sp, ast.stmt_log(x));
-}
-
-fn identity_fold_stmt_check_expr[ENV](&ENV e, &span sp, @expr x) -> @stmt {
- ret @respan(sp, ast.stmt_check_expr(x));
-}
-
fn identity_fold_stmt_expr[ENV](&ENV e, &span sp, @expr x) -> @stmt {
ret @respan(sp, ast.stmt_expr(x));
}
@@ -1087,17 +1268,34 @@ fn identity_fold_item_fn[ENV](&ENV e, &span sp, ident i,
ret @respan(sp, ast.item_fn(i, f, ty_params, id, a));
}
+fn identity_fold_native_item_fn[ENV](&ENV e, &span sp, ident i,
+ &ast.fn_decl decl,
+ vec[ast.ty_param] ty_params,
+ def_id id, ann a) -> @native_item {
+ ret @respan(sp, ast.native_item_fn(i, decl, ty_params, id, a));
+}
+
fn identity_fold_item_mod[ENV](&ENV e, &span sp, ident i,
&ast._mod m, def_id id) -> @item {
ret @respan(sp, ast.item_mod(i, m, id));
}
+fn identity_fold_item_native_mod[ENV](&ENV e, &span sp, ident i,
+ &ast.native_mod m, def_id id) -> @item {
+ ret @respan(sp, ast.item_native_mod(i, m, id));
+}
+
fn identity_fold_item_ty[ENV](&ENV e, &span sp, ident i,
@ty t, vec[ast.ty_param] ty_params,
def_id id, ann a) -> @item {
ret @respan(sp, ast.item_ty(i, t, ty_params, id, a));
}
+fn identity_fold_native_item_ty[ENV](&ENV e, &span sp, ident i,
+ def_id id) -> @native_item {
+ ret @respan(sp, ast.native_item_ty(i, id));
+}
+
fn identity_fold_item_tag[ENV](&ENV e, &span sp, ident i,
vec[ast.variant] variants,
vec[ast.ty_param] ty_params,
@@ -1132,28 +1330,38 @@ fn identity_fold_block[ENV](&ENV e, &span sp, &ast.block_ blk) -> block {
ret respan(sp, blk);
}
+fn identity_fold_fn_decl[ENV](&ENV e,
+ ast.effect effect,
+ vec[arg] inputs,
+ @ty output) -> ast.fn_decl {
+ ret rec(effect=effect, inputs=inputs, output=output);
+}
+
fn identity_fold_fn[ENV](&ENV e,
- ast.effect effect,
- bool is_iter,
- vec[arg] inputs,
- @ast.ty output,
+ &fn_decl decl,
+ ast.proto proto,
&block body) -> ast._fn {
- ret rec(effect=effect, is_iter=is_iter, inputs=inputs,
- output=output, body=body);
+ ret rec(decl=decl, proto=proto, body=body);
}
fn identity_fold_mod[ENV](&ENV e, &ast._mod m) -> ast._mod {
ret m;
}
+fn identity_fold_native_mod[ENV](&ENV e,
+ &ast.native_mod m) -> ast.native_mod {
+ ret m;
+}
+
fn identity_fold_crate[ENV](&ENV e, &span sp, &ast._mod m) -> @ast.crate {
ret @respan(sp, rec(module=m));
}
fn identity_fold_obj[ENV](&ENV e,
vec[ast.obj_field] fields,
- vec[@ast.method] methods) -> ast._obj {
- ret rec(fields=fields, methods=methods);
+ vec[@ast.method] methods,
+ option.t[block] dtor) -> ast._obj {
+ ret rec(fields=fields, methods=methods, dtor=dtor);
}
@@ -1167,6 +1375,10 @@ fn identity_update_env_for_item[ENV](&ENV e, @item i) -> ENV {
ret e;
}
+fn identity_update_env_for_native_item[ENV](&ENV e, @native_item i) -> ENV {
+ ret e;
+}
+
fn identity_update_env_for_view_item[ENV](&ENV e, @view_item i) -> ENV {
ret e;
}
@@ -1224,13 +1436,13 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_ty_tup = bind identity_fold_ty_tup[ENV](_,_,_),
fold_ty_rec = bind identity_fold_ty_rec[ENV](_,_,_),
fold_ty_obj = bind identity_fold_ty_obj[ENV](_,_,_),
- fold_ty_fn = bind identity_fold_ty_fn[ENV](_,_,_,_),
+ fold_ty_fn = bind identity_fold_ty_fn[ENV](_,_,_,_,_),
fold_ty_path = bind identity_fold_ty_path[ENV](_,_,_,_),
fold_ty_mutable = bind identity_fold_ty_mutable[ENV](_,_,_),
fold_expr_vec = bind identity_fold_expr_vec[ENV](_,_,_,_),
fold_expr_tup = bind identity_fold_expr_tup[ENV](_,_,_,_),
- fold_expr_rec = bind identity_fold_expr_rec[ENV](_,_,_,_),
+ fold_expr_rec = bind identity_fold_expr_rec[ENV](_,_,_,_,_),
fold_expr_call = bind identity_fold_expr_call[ENV](_,_,_,_,_),
fold_expr_bind = bind identity_fold_expr_bind[ENV](_,_,_,_,_),
fold_expr_binary = bind identity_fold_expr_binary[ENV](_,_,_,_,_,_),
@@ -1239,6 +1451,8 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_expr_cast = bind identity_fold_expr_cast[ENV](_,_,_,_,_),
fold_expr_if = bind identity_fold_expr_if[ENV](_,_,_,_,_,_),
fold_expr_for = bind identity_fold_expr_for[ENV](_,_,_,_,_,_),
+ fold_expr_for_each
+ = bind identity_fold_expr_for_each[ENV](_,_,_,_,_,_),
fold_expr_while = bind identity_fold_expr_while[ENV](_,_,_,_,_),
fold_expr_do_while
= bind identity_fold_expr_do_while[ENV](_,_,_,_,_),
@@ -1250,25 +1464,36 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
fold_expr_field = bind identity_fold_expr_field[ENV](_,_,_,_,_),
fold_expr_index = bind identity_fold_expr_index[ENV](_,_,_,_,_),
fold_expr_path = bind identity_fold_expr_path[ENV](_,_,_,_,_),
+ fold_expr_ext = bind identity_fold_expr_ext[ENV](_,_,_,_,_,_,_),
+ fold_expr_fail = bind identity_fold_expr_fail[ENV](_,_),
+ fold_expr_ret = bind identity_fold_expr_ret[ENV](_,_,_),
+ fold_expr_put = bind identity_fold_expr_put[ENV](_,_,_),
+ fold_expr_be = bind identity_fold_expr_be[ENV](_,_,_),
+ fold_expr_log = bind identity_fold_expr_log[ENV](_,_,_),
+ fold_expr_check_expr
+ = bind identity_fold_expr_check_expr[ENV](_,_,_),
fold_decl_local = bind identity_fold_decl_local[ENV](_,_,_),
fold_decl_item = bind identity_fold_decl_item[ENV](_,_,_),
fold_pat_wild = bind identity_fold_pat_wild[ENV](_,_,_),
+ fold_pat_lit = bind identity_fold_pat_lit[ENV](_,_,_,_),
fold_pat_bind = bind identity_fold_pat_bind[ENV](_,_,_,_,_),
fold_pat_tag = bind identity_fold_pat_tag[ENV](_,_,_,_,_,_),
fold_stmt_decl = bind identity_fold_stmt_decl[ENV](_,_,_),
- fold_stmt_ret = bind identity_fold_stmt_ret[ENV](_,_,_),
- fold_stmt_log = bind identity_fold_stmt_log[ENV](_,_,_),
- fold_stmt_check_expr
- = bind identity_fold_stmt_check_expr[ENV](_,_,_),
fold_stmt_expr = bind identity_fold_stmt_expr[ENV](_,_,_),
fold_item_const= bind identity_fold_item_const[ENV](_,_,_,_,_,_,_),
fold_item_fn = bind identity_fold_item_fn[ENV](_,_,_,_,_,_,_),
+ fold_native_item_fn =
+ bind identity_fold_native_item_fn[ENV](_,_,_,_,_,_,_),
fold_item_mod = bind identity_fold_item_mod[ENV](_,_,_,_,_),
+ fold_item_native_mod =
+ bind identity_fold_item_native_mod[ENV](_,_,_,_,_),
fold_item_ty = bind identity_fold_item_ty[ENV](_,_,_,_,_,_,_),
+ fold_native_item_ty =
+ bind identity_fold_native_item_ty[ENV](_,_,_,_),
fold_item_tag = bind identity_fold_item_tag[ENV](_,_,_,_,_,_),
fold_item_obj = bind identity_fold_item_obj[ENV](_,_,_,_,_,_,_),
@@ -1278,13 +1503,17 @@ fn new_identity_fold[ENV]() -> ast_fold[ENV] {
bind identity_fold_view_item_import[ENV](_,_,_,_,_,_),
fold_block = bind identity_fold_block[ENV](_,_,_),
- fold_fn = bind identity_fold_fn[ENV](_,_,_,_,_,_),
+ fold_fn = bind identity_fold_fn[ENV](_,_,_,_),
+ fold_fn_decl = bind identity_fold_fn_decl[ENV](_,_,_,_),
fold_mod = bind identity_fold_mod[ENV](_,_),
+ fold_native_mod = bind identity_fold_native_mod[ENV](_,_),
fold_crate = bind identity_fold_crate[ENV](_,_,_),
- fold_obj = bind identity_fold_obj[ENV](_,_,_),
+ fold_obj = bind identity_fold_obj[ENV](_,_,_,_),
update_env_for_crate = bind identity_update_env_for_crate[ENV](_,_),
update_env_for_item = bind identity_update_env_for_item[ENV](_,_),
+ update_env_for_native_item =
+ bind identity_update_env_for_native_item[ENV](_,_),
update_env_for_view_item =
bind identity_update_env_for_view_item[ENV](_,_),
update_env_for_block = bind identity_update_env_for_block[ENV](_,_),
diff --git a/src/comp/middle/resolve.rs b/src/comp/middle/resolve.rs
index 1af3b205..5b6db631 100644
--- a/src/comp/middle/resolve.rs
+++ b/src/comp/middle/resolve.rs
@@ -18,6 +18,7 @@ import std._vec;
tag scope {
scope_crate(@ast.crate);
scope_item(@ast.item);
+ scope_native_item(@ast.native_item);
scope_loop(@ast.decl); // there's only 1 decl per loop.
scope_block(ast.block);
scope_arm(ast.arm);
@@ -34,6 +35,7 @@ tag def_wrap {
def_wrap_use(@ast.view_item);
def_wrap_import(@ast.view_item);
def_wrap_mod(@ast.item);
+ def_wrap_native_mod(@ast.item);
def_wrap_other(def);
def_wrap_expr_field(uint, def);
def_wrap_resolving;
@@ -103,6 +105,29 @@ fn find_final_def(&env e, import_map index,
// should return what a.b.c.d points to in the end.
fn found_something(&env e, import_map index,
&span sp, vec[ident] idents, def_wrap d) -> def_wrap {
+
+ fn found_mod(&env e, &import_map index, &span sp,
+ vec[ident] idents, @ast.item i) -> def_wrap {
+ auto len = _vec.len[ident](idents);
+ auto rest_idents = _vec.slice[ident](idents, 1u, len);
+ auto empty_e = rec(scopes = nil[scope],
+ sess = e.sess);
+ auto tmp_e = update_env_for_item(empty_e, i);
+ auto next_i = rest_idents.(0);
+ auto next_ = lookup_name_wrapped(tmp_e, next_i);
+ alt (next_) {
+ case (none[tup(@env, def_wrap)]) {
+ e.sess.span_err(sp, "unresolved name: " + next_i);
+ fail;
+ }
+ case (some[tup(@env, def_wrap)](?next)) {
+ auto combined_e = update_env_for_item(e, i);
+ ret found_something(combined_e, index, sp,
+ rest_idents, next._1);
+ }
+ }
+ }
+
alt (d) {
case (def_wrap_import(?imp)) {
alt (imp.node) {
@@ -122,23 +147,10 @@ fn find_final_def(&env e, import_map index,
}
alt (d) {
case (def_wrap_mod(?i)) {
- auto rest_idents = _vec.slice[ident](idents, 1u, len);
- auto empty_e = rec(scopes = nil[scope],
- sess = e.sess);
- auto tmp_e = update_env_for_item(empty_e, i);
- auto next_i = rest_idents.(0);
- auto next_ = lookup_name_wrapped(tmp_e, next_i);
- alt (next_) {
- case (none[tup(@env, def_wrap)]) {
- e.sess.span_err(sp, "unresolved name: " + next_i);
- fail;
- }
- case (some[tup(@env, def_wrap)](?next)) {
- auto combined_e = update_env_for_item(e, i);
- ret found_something(combined_e, index, sp,
- rest_idents, next._1);
- }
- }
+ ret found_mod(e, index, sp, idents, i);
+ }
+ case (def_wrap_native_mod(?i)) {
+ ret found_mod(e, index, sp, idents, i);
}
case (def_wrap_use(?c)) {
e.sess.span_err(sp, "Crate access is not implemented");
@@ -201,6 +213,9 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (ast.item_mod(_, _, ?id)) {
ret def_wrap_mod(i);
}
+ case (ast.item_native_mod(_, _, ?id)) {
+ ret def_wrap_native_mod(i);
+ }
case (ast.item_ty(_, _, _, ?id, _)) {
ret def_wrap_other(ast.def_ty(id));
}
@@ -213,6 +228,17 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
+ fn found_def_native_item(@ast.native_item i) -> def_wrap {
+ alt (i.node) {
+ case (ast.native_item_ty(_, ?id)) {
+ ret def_wrap_other(ast.def_native_ty(id));
+ }
+ case (ast.native_item_fn(_, _, _, ?id, _)) {
+ ret def_wrap_other(ast.def_native_fn(id));
+ }
+ }
+ }
+
fn found_decl_stmt(@ast.stmt s) -> def_wrap {
alt (s.node) {
case (ast.stmt_decl(?d)) {
@@ -267,11 +293,47 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
}
- case (none[ast.mod_index_entry]) { /* fall through */ }
+ case (none[ast.mod_index_entry]) {
+ ret none[def_wrap];
+ }
+ }
+ }
+
+ fn check_native_mod(ast.ident i, ast.native_mod m) -> option.t[def_wrap] {
+
+ alt (m.index.find(i)) {
+ case (some[ast.native_mod_index_entry](?ent)) {
+ alt (ent) {
+ case (ast.nmie_view_item(?view_item)) {
+ ret some(found_def_view(view_item));
+ }
+ case (ast.nmie_item(?item)) {
+ ret some(found_def_native_item(item));
+ }
+ }
+ }
+ case (none[ast.native_mod_index_entry]) {
+ ret none[def_wrap];
+ }
}
- ret none[def_wrap];
}
+ fn handle_fn_decl(ast.ident i, &ast.fn_decl decl,
+ &vec[ast.ty_param] ty_params) -> option.t[def_wrap] {
+ for (ast.arg a in decl.inputs) {
+ if (_str.eq(a.ident, i)) {
+ auto t = ast.def_arg(a.id);
+ ret some(def_wrap_other(t));
+ }
+ }
+ for (ast.ty_param tp in ty_params) {
+ if (_str.eq(tp.ident, i)) {
+ auto t = ast.def_ty_arg(tp.id);
+ ret some(def_wrap_other(t));
+ }
+ }
+ ret none[def_wrap];
+ }
fn in_scope(ast.ident i, &scope s) -> option.t[def_wrap] {
alt (s) {
@@ -283,9 +345,12 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (scope_item(?it)) {
alt (it.node) {
case (ast.item_fn(_, ?f, ?ty_params, _, _)) {
- for (ast.arg a in f.inputs) {
- if (_str.eq(a.ident, i)) {
- auto t = ast.def_arg(a.id);
+ ret handle_fn_decl(i, f.decl, ty_params);
+ }
+ case (ast.item_obj(_, ?ob, ?ty_params, _, _)) {
+ for (ast.obj_field f in ob.fields) {
+ if (_str.eq(f.ident, i)) {
+ auto t = ast.def_obj_field(f.id);
ret some(def_wrap_other(t));
}
}
@@ -296,13 +361,7 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
}
- case (ast.item_obj(_, ?ob, ?ty_params, _, _)) {
- for (ast.obj_field f in ob.fields) {
- if (_str.eq(f.ident, i)) {
- auto t = ast.def_obj_field(f.id);
- ret some(def_wrap_other(t));
- }
- }
+ case (ast.item_tag(_, _, ?ty_params, _)) {
for (ast.ty_param tp in ty_params) {
if (_str.eq(tp.ident, i)) {
auto t = ast.def_ty_arg(tp.id);
@@ -313,6 +372,9 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
case (ast.item_mod(_, ?m, _)) {
ret check_mod(i, m);
}
+ case (ast.item_native_mod(_, ?m, _)) {
+ ret check_native_mod(i, m);
+ }
case (ast.item_ty(_, _, ?ty_params, _, _)) {
for (ast.ty_param tp in ty_params) {
if (_str.eq(tp.ident, i)) {
@@ -325,6 +387,14 @@ fn lookup_name_wrapped(&env e, ast.ident i) -> option.t[tup(@env, def_wrap)] {
}
}
+ case (scope_native_item(?it)) {
+ alt (it.node) {
+ case (ast.native_item_fn(_, ?decl, ?ty_params, _, _)) {
+ ret handle_fn_decl(i, decl, ty_params);
+ }
+ }
+ }
+
case (scope_loop(?d)) {
alt (d.node) {
case (ast.decl_local(?local)) {
@@ -432,8 +502,7 @@ fn fold_expr_path(&env e, &span sp, &ast.path p, &option.t[def] d,
path_len = n_idents - remaining + 1u;
}
case (def_wrap_other(_)) {
- check (n_idents == 1u);
- path_len = 1u;
+ path_len = n_idents;
}
case (def_wrap_mod(?m)) {
e.sess.span_err(sp,
@@ -491,6 +560,10 @@ fn update_env_for_item(&env e, @ast.item i) -> env {
ret rec(scopes = cons[scope](scope_item(i), @e.scopes) with e);
}
+fn update_env_for_native_item(&env e, @ast.native_item i) -> env {
+ ret rec(scopes = cons[scope](scope_native_item(i), @e.scopes) with e);
+}
+
fn update_env_for_block(&env e, &ast.block b) -> env {
ret rec(scopes = cons[scope](scope_block(b), @e.scopes) with e);
}
@@ -500,6 +573,9 @@ fn update_env_for_expr(&env e, @ast.expr x) -> env {
case (ast.expr_for(?d, _, _, _)) {
ret rec(scopes = cons[scope](scope_loop(d), @e.scopes) with e);
}
+ case (ast.expr_for_each(?d, _, _, _)) {
+ ret rec(scopes = cons[scope](scope_loop(d), @e.scopes) with e);
+ }
case (_) { }
}
ret e;
@@ -517,6 +593,8 @@ fn resolve_imports(session.session sess, @ast.crate crate) -> @ast.crate {
= bind fold_view_item_import(_,_,import_index,_,_,_,_),
update_env_for_crate = bind update_env_for_crate(_,_),
update_env_for_item = bind update_env_for_item(_,_),
+ update_env_for_native_item =
+ bind update_env_for_native_item(_,_),
update_env_for_block = bind update_env_for_block(_,_),
update_env_for_arm = bind update_env_for_arm(_,_),
update_env_for_expr = bind update_env_for_expr(_,_)
@@ -539,6 +617,8 @@ fn resolve_crate(session.session sess, @ast.crate crate) -> @ast.crate {
fold_ty_path = bind fold_ty_path(_,_,_,_),
update_env_for_crate = bind update_env_for_crate(_,_),
update_env_for_item = bind update_env_for_item(_,_),
+ update_env_for_native_item =
+ bind update_env_for_native_item(_,_),
update_env_for_block = bind update_env_for_block(_,_),
update_env_for_arm = bind update_env_for_arm(_,_),
update_env_for_expr = bind update_env_for_expr(_,_)
diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs
index c2b0ae48..728f20dd 100644
--- a/src/comp/middle/trans.rs
+++ b/src/comp/middle/trans.rs
@@ -1,3 +1,4 @@
+import std._int;
import std._str;
import std._uint;
import std._vec;
@@ -16,6 +17,7 @@ import back.x86;
import back.abi;
import middle.ty.pat_ty;
+import middle.ty.plain_ty;
import util.common;
import util.common.append;
@@ -27,9 +29,11 @@ import lib.llvm.llvm;
import lib.llvm.builder;
import lib.llvm.target_data;
import lib.llvm.type_handle;
+import lib.llvm.type_names;
import lib.llvm.mk_pass_manager;
import lib.llvm.mk_target_data;
import lib.llvm.mk_type_handle;
+import lib.llvm.mk_type_names;
import lib.llvm.llvm.ModuleRef;
import lib.llvm.llvm.ValueRef;
import lib.llvm.llvm.TypeRef;
@@ -53,27 +57,33 @@ type glue_fns = rec(ValueRef activate_glue,
vec[ValueRef] upcall_glues,
ValueRef no_op_type_glue,
ValueRef memcpy_glue,
- ValueRef bzero_glue);
+ ValueRef bzero_glue,
+ ValueRef vec_append_glue);
-tag arity { nullary; n_ary; }
-type tag_info = rec(type_handle th,
- mutable vec[tup(ast.def_id,arity)] variants,
- mutable uint size);
+type tydesc_info = rec(ValueRef tydesc,
+ ValueRef take_glue,
+ ValueRef drop_glue);
state type crate_ctxt = rec(session.session sess,
ModuleRef llmod,
target_data td,
+ type_names tn,
ValueRef crate_ptr,
hashmap[str, ValueRef] upcalls,
hashmap[str, ValueRef] intrinsics,
hashmap[str, ValueRef] item_names,
hashmap[ast.def_id, ValueRef] item_ids,
hashmap[ast.def_id, @ast.item] items,
- hashmap[ast.def_id, @tag_info] tags,
+ hashmap[ast.def_id,
+ @ast.native_item] native_items,
+ // TODO: hashmap[tup(tag_id,subtys), @tag_info]
+ hashmap[@ty.t, uint] tag_sizes,
+ hashmap[ast.def_id, ValueRef] discrims,
hashmap[ast.def_id, ValueRef] fn_pairs,
hashmap[ast.def_id, ValueRef] consts,
hashmap[ast.def_id,()] obj_methods,
- hashmap[@ty.t, ValueRef] tydescs,
+ hashmap[@ty.t, @tydesc_info] tydescs,
+ vec[ast.ty_param] obj_typarams,
vec[ast.obj_field] obj_fields,
@glue_fns glues,
namegen names,
@@ -81,9 +91,10 @@ state type crate_ctxt = rec(session.session sess,
state type fn_ctxt = rec(ValueRef llfn,
ValueRef lltaskptr,
- ValueRef llclosure,
+ ValueRef llenv,
+ ValueRef llretptr,
mutable option.t[ValueRef] llself,
- mutable option.t[ValueRef] llretptr,
+ mutable option.t[ValueRef] lliterbody,
hashmap[ast.def_id, ValueRef] llargs,
hashmap[ast.def_id, ValueRef] llobjfields,
hashmap[ast.def_id, ValueRef] lllocals,
@@ -119,21 +130,25 @@ tag block_parent {
state type result = rec(mutable @block_ctxt bcx,
mutable ValueRef val);
+fn sep() -> str {
+ ret "_";
+}
+
fn res(@block_ctxt bcx, ValueRef val) -> result {
ret rec(mutable bcx = bcx,
mutable val = val);
}
-fn ty_str(TypeRef t) -> str {
- ret lib.llvm.type_to_str(t);
+fn ty_str(type_names tn, TypeRef t) -> str {
+ ret lib.llvm.type_to_str(tn, t);
}
fn val_ty(ValueRef v) -> TypeRef {
ret llvm.LLVMTypeOf(v);
}
-fn val_str(ValueRef v) -> str {
- ret ty_str(val_ty(v));
+fn val_str(type_names tn, ValueRef v) -> str {
+ ret ty_str(tn, val_ty(v));
}
@@ -206,9 +221,9 @@ fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
False);
}
-fn T_fn_pair(TypeRef tfn) -> TypeRef {
+fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
ret T_struct(vec(T_ptr(tfn),
- T_opaque_closure_ptr()));
+ T_opaque_closure_ptr(tn)));
}
fn T_ptr(TypeRef t) -> TypeRef {
@@ -225,25 +240,56 @@ fn T_opaque() -> TypeRef {
ret llvm.LLVMOpaqueType();
}
-fn T_task() -> TypeRef {
- ret T_struct(vec(T_int(), // Refcount
- T_int(), // Delegate pointer
- T_int(), // Stack segment pointer
- T_int(), // Runtime SP
- T_int(), // Rust SP
- T_int(), // GC chain
- T_int(), // Domain pointer
- T_int() // Crate cache pointer
- ));
+fn T_task(type_names tn) -> TypeRef {
+ auto s = "task";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_struct(vec(T_int(), // Refcount
+ T_int(), // Delegate pointer
+ T_int(), // Stack segment pointer
+ T_int(), // Runtime SP
+ T_int(), // Rust SP
+ T_int(), // GC chain
+ T_int(), // Domain pointer
+ T_int() // Crate cache pointer
+ ));
+ tn.associate(s, t);
+ ret t;
}
-fn T_tydesc() -> TypeRef {
+fn T_glue_fn(type_names tn) -> TypeRef {
+ auto s = "glue_fn";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ // Bit of a kludge: pick the fn typeref out of the tydesc..
+ let vec[TypeRef] tydesc_elts = _vec.init_elt[TypeRef](T_nil(), 10u);
+ llvm.LLVMGetStructElementTypes(T_tydesc(tn),
+ _vec.buf[TypeRef](tydesc_elts));
+ auto t =
+ llvm.LLVMGetElementType
+ (tydesc_elts.(abi.tydesc_field_drop_glue_off));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_tydesc(type_names tn) -> TypeRef {
+
+ auto s = "tydesc";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
auto th = mk_type_handle();
auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
auto tydescpp = T_ptr(T_ptr(abs_tydesc));
auto pvoid = T_ptr(T_i8());
- auto glue_fn_ty = T_ptr(T_fn(vec(T_taskptr(),
+ auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
+ T_taskptr(tn),
+ T_ptr(T_nil()),
tydescpp,
pvoid), T_void()));
auto tydesc = T_struct(vec(tydescpp, // first_param
@@ -258,7 +304,9 @@ fn T_tydesc() -> TypeRef {
glue_fn_ty)); // is_stateful
llvm.LLVMRefineType(abs_tydesc, tydesc);
- ret llvm.LLVMResolveTypeHandle(th.llth);
+ auto t = llvm.LLVMResolveTypeHandle(th.llth);
+ tn.associate(s, t);
+ ret t;
}
fn T_array(TypeRef t, uint n) -> TypeRef {
@@ -273,6 +321,10 @@ fn T_vec(TypeRef t) -> TypeRef {
));
}
+fn T_opaque_vec_ptr() -> TypeRef {
+ ret T_ptr(T_vec(T_int()));
+}
+
fn T_str() -> TypeRef {
ret T_vec(T_i8());
}
@@ -281,165 +333,307 @@ fn T_box(TypeRef t) -> TypeRef {
ret T_struct(vec(T_int(), t));
}
-fn T_crate() -> TypeRef {
- ret T_struct(vec(T_int(), // ptrdiff_t image_base_off
- T_int(), // uintptr_t self_addr
- T_int(), // ptrdiff_t debug_abbrev_off
- T_int(), // size_t debug_abbrev_sz
- T_int(), // ptrdiff_t debug_info_off
- T_int(), // size_t debug_info_sz
- T_int(), // size_t activate_glue_off
- T_int(), // size_t yield_glue_off
- T_int(), // size_t unwind_glue_off
- T_int(), // size_t gc_glue_off
- T_int(), // size_t main_exit_task_glue_off
- T_int(), // int n_rust_syms
- T_int(), // int n_c_syms
- T_int() // int n_libs
- ));
+fn T_crate(type_names tn) -> TypeRef {
+ auto s = "crate";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_struct(vec(T_int(), // ptrdiff_t image_base_off
+ T_int(), // uintptr_t self_addr
+ T_int(), // ptrdiff_t debug_abbrev_off
+ T_int(), // size_t debug_abbrev_sz
+ T_int(), // ptrdiff_t debug_info_off
+ T_int(), // size_t debug_info_sz
+ T_int(), // size_t activate_glue_off
+ T_int(), // size_t yield_glue_off
+ T_int(), // size_t unwind_glue_off
+ T_int(), // size_t gc_glue_off
+ T_int(), // size_t main_exit_task_glue_off
+ T_int(), // int n_rust_syms
+ T_int(), // int n_c_syms
+ T_int(), // int n_libs
+ T_int() // uintptr_t abi_tag
+ ));
+ tn.associate(s, t);
+ ret t;
}
fn T_double() -> TypeRef {
ret llvm.LLVMDoubleType();
}
-fn T_taskptr() -> TypeRef {
- ret T_ptr(T_task());
+fn T_taskptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_task(tn));
}
-fn T_typaram_ptr() -> TypeRef {
- ret T_ptr(T_i8());
+// This type must never be used directly; it must always be cast away.
+fn T_typaram(type_names tn) -> TypeRef {
+ auto s = "typaram";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+
+ auto t = T_i8();
+ tn.associate(s, t);
+ ret t;
}
-fn T_closure_ptr(TypeRef lltarget_ty,
- TypeRef llbindings_ty) -> TypeRef {
- ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
+fn T_typaram_ptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_typaram(tn));
+}
+
+fn T_closure_ptr(type_names tn,
+ TypeRef lltarget_ty,
+ TypeRef llbindings_ty,
+ uint n_ty_params) -> TypeRef {
+ ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
lltarget_ty,
- llbindings_ty)
- // FIXME: add captured typarams.
+ llbindings_ty,
+ T_captured_tydescs(tn, n_ty_params))
)));
}
-fn T_opaque_closure_ptr() -> TypeRef {
- ret T_closure_ptr(T_struct(vec(T_ptr(T_nil()),
- T_ptr(T_nil()))),
- T_nil());
+fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
+ auto s = "*closure";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
+ T_ptr(T_nil()))),
+ T_nil(),
+ 0u);
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_tag(type_names tn, uint size) -> TypeRef {
+ auto s = "tag_" + _uint.to_str(size, 10u);
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_struct(vec(T_int(), T_array(T_i8(), size)));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_opaque_tag(type_names tn) -> TypeRef {
+ auto s = "tag";
+ if (tn.name_has_type(s)) {
+ ret tn.get_type(s);
+ }
+ auto t = T_struct(vec(T_int(), T_i8()));
+ tn.associate(s, t);
+ ret t;
+}
+
+fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
+ ret T_ptr(T_opaque_tag(tn));
+}
+
+fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
+ ret T_struct(_vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
+}
+
+fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
+ // This function is not publicly exposed because it returns an incomplete
+ // type. The dynamically-sized fields follow the captured tydescs.
+ fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
+ ret T_struct(vec(T_ptr(T_tydesc(tn)),
+ T_captured_tydescs(tn, n_captured_tydescs)));
+ }
+
+ ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
+}
+
+fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
+ ret T_obj_ptr(tn, 0u);
}
+// This function now fails if called on a type with dynamic size (as its
+// return value was always meaningless in that case anyhow). Beware!
+//
+// TODO: Enforce via a predicate.
fn type_of(@crate_ctxt cx, @ty.t t) -> TypeRef {
- let TypeRef llty = type_of_inner(cx, t);
- check (llty as int != 0);
- llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
- ret llty;
+ if (ty.type_has_dynamic_size(t)) {
+ log "type_of() called on a type with dynamic size: " +
+ ty.ty_to_str(t);
+ fail;
+ }
+
+ ret type_of_inner(cx, t, false);
+}
+
+fn type_of_explicit_args(@crate_ctxt cx,
+ vec[ty.arg] inputs) -> vec[TypeRef] {
+ let vec[TypeRef] atys = vec();
+ for (ty.arg arg in inputs) {
+ if (ty.type_has_dynamic_size(arg.ty)) {
+ check (arg.mode == ast.alias);
+ atys += T_typaram_ptr(cx.tn);
+ } else {
+ let TypeRef t;
+ alt (arg.mode) {
+ case (ast.alias) {
+ t = T_ptr(type_of_inner(cx, arg.ty, true));
+ }
+ case (_) {
+ t = type_of_inner(cx, arg.ty, false);
+ }
+ }
+ atys += t;
+ }
+ }
+ ret atys;
}
-// NB: this must match trans_args and create_llargs_for_fn_args.
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn type_of_fn_full(@crate_ctxt cx,
+ ast.proto proto,
option.t[TypeRef] obj_self,
vec[ty.arg] inputs,
@ty.t output) -> TypeRef {
- let vec[TypeRef] atys = vec(T_taskptr());
-
- auto fn_ty = ty.plain_ty(ty.ty_fn(inputs, output));
- auto ty_param_count = ty.count_ty_params(fn_ty);
- auto i = 0u;
- while (i < ty_param_count) {
- atys += T_ptr(T_tydesc());
- i += 1u;
- }
+ let vec[TypeRef] atys = vec();
+ // Arg 0: Output pointer.
if (ty.type_has_dynamic_size(output)) {
- atys += T_typaram_ptr();
+ atys += T_typaram_ptr(cx.tn);
+ } else {
+ atys += T_ptr(type_of_inner(cx, output, false));
}
+ // Arg 1: Task pointer.
+ atys += T_taskptr(cx.tn);
+
+ // Arg 2: Env (closure-bindings / self-obj)
alt (obj_self) {
case (some[TypeRef](?t)) {
check (t as int != 0);
atys += t;
}
case (_) {
- atys += T_opaque_closure_ptr();
+ atys += T_opaque_closure_ptr(cx.tn);
}
}
- for (ty.arg arg in inputs) {
- if (ty.type_has_dynamic_size(arg.ty)) {
- check (arg.mode == ast.alias);
- atys += T_typaram_ptr();
- } else {
- let TypeRef t = type_of(cx, arg.ty);
- alt (arg.mode) {
- case (ast.alias) {
- t = T_ptr(t);
- }
- case (_) { /* fall through */ }
- }
- atys += t;
+ // Args >3: ty params, if not acquired via capture...
+ if (obj_self == none[TypeRef]) {
+ auto ty_param_count =
+ ty.count_ty_params(plain_ty(ty.ty_fn(proto,
+ inputs,
+ output)));
+ auto i = 0u;
+ while (i < ty_param_count) {
+ atys += T_ptr(T_tydesc(cx.tn));
+ i += 1u;
}
}
- auto ret_ty;
- if (ty.type_is_nil(output) || ty.type_has_dynamic_size(output)) {
- ret_ty = llvm.LLVMVoidType();
- } else {
- ret_ty = type_of(cx, output);
+ if (proto == ast.proto_iter) {
+ // If it's an iter, the 'output' type of the iter is actually the
+ // *input* type of the function we're given as our iter-block
+ // argument.
+ atys += T_fn_pair(cx.tn,
+ type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
+ vec(rec(mode=ast.val, ty=output)),
+ plain_ty(ty.ty_nil)));
}
- ret T_fn(atys, ret_ty);
+ // ... then explicit args.
+ atys += type_of_explicit_args(cx, inputs);
+
+ ret T_fn(atys, llvm.LLVMVoidType());
}
-fn type_of_fn(@crate_ctxt cx, vec[ty.arg] inputs, @ty.t output) -> TypeRef {
- ret type_of_fn_full(cx, none[TypeRef], inputs, output);
+fn type_of_fn(@crate_ctxt cx,
+ ast.proto proto,
+ vec[ty.arg] inputs, @ty.t output) -> TypeRef {
+ ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output);
+}
+
+fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
+ vec[ty.arg] inputs,
+ @ty.t output) -> TypeRef {
+ let vec[TypeRef] atys = vec();
+ if (abi == ast.native_abi_rust) {
+ atys += T_taskptr(cx.tn);
+ auto t = ty.ty_native_fn(abi, inputs, output);
+ auto ty_param_count = ty.count_ty_params(plain_ty(t));
+ auto i = 0u;
+ while (i < ty_param_count) {
+ atys += T_ptr(T_tydesc(cx.tn));
+ i += 1u;
+ }
+ }
+ atys += type_of_explicit_args(cx, inputs);
+ ret T_fn(atys, type_of_inner(cx, output, false));
}
-fn type_of_inner(@crate_ctxt cx, @ty.t t) -> TypeRef {
+fn type_of_inner(@crate_ctxt cx, @ty.t t, bool boxed) -> TypeRef {
+ let TypeRef llty = 0 as TypeRef;
+
alt (t.struct) {
- case (ty.ty_nil) { ret T_nil(); }
- case (ty.ty_bool) { ret T_bool(); }
- case (ty.ty_int) { ret T_int(); }
- case (ty.ty_uint) { ret T_int(); }
+ case (ty.ty_native) { llty = T_ptr(T_i8()); }
+ case (ty.ty_nil) { llty = T_nil(); }
+ case (ty.ty_bool) { llty = T_bool(); }
+ case (ty.ty_int) { llty = T_int(); }
+ case (ty.ty_uint) { llty = T_int(); }
case (ty.ty_machine(?tm)) {
alt (tm) {
- case (common.ty_i8) { ret T_i8(); }
- case (common.ty_u8) { ret T_i8(); }
- case (common.ty_i16) { ret T_i16(); }
- case (common.ty_u16) { ret T_i16(); }
- case (common.ty_i32) { ret T_i32(); }
- case (common.ty_u32) { ret T_i32(); }
- case (common.ty_i64) { ret T_i64(); }
- case (common.ty_u64) { ret T_i64(); }
- case (common.ty_f32) { ret T_f32(); }
- case (common.ty_f64) { ret T_f64(); }
+ case (common.ty_i8) { llty = T_i8(); }
+ case (common.ty_u8) { llty = T_i8(); }
+ case (common.ty_i16) { llty = T_i16(); }
+ case (common.ty_u16) { llty = T_i16(); }
+ case (common.ty_i32) { llty = T_i32(); }
+ case (common.ty_u32) { llty = T_i32(); }
+ case (common.ty_i64) { llty = T_i64(); }
+ case (common.ty_u64) { llty = T_i64(); }
+ case (common.ty_f32) { llty = T_f32(); }
+ case (common.ty_f64) { llty = T_f64(); }
}
}
- case (ty.ty_char) { ret T_char(); }
- case (ty.ty_str) { ret T_ptr(T_str()); }
- case (ty.ty_tag(?tag_id)) {
- ret llvm.LLVMResolveTypeHandle(cx.tags.get(tag_id).th.llth);
+ case (ty.ty_char) { llty = T_char(); }
+ case (ty.ty_str) { llty = T_ptr(T_str()); }
+ case (ty.ty_tag(_, _)) {
+ if (boxed) {
+ llty = T_opaque_tag(cx.tn);
+ } else {
+ auto size = static_size_of_tag(cx, t);
+ llty = T_tag(cx.tn, size);
+ }
}
case (ty.ty_box(?t)) {
- ret T_ptr(T_box(type_of(cx, t)));
+ llty = T_ptr(T_box(type_of_inner(cx, t, true)));
}
case (ty.ty_vec(?t)) {
- ret T_ptr(T_vec(type_of(cx, t)));
+ llty = T_ptr(T_vec(type_of_inner(cx, t, true)));
}
case (ty.ty_tup(?elts)) {
let vec[TypeRef] tys = vec();
for (@ty.t elt in elts) {
- tys += type_of(cx, elt);
+ tys += type_of_inner(cx, elt, boxed);
}
- ret T_struct(tys);
+ llty = T_struct(tys);
}
case (ty.ty_rec(?fields)) {
let vec[TypeRef] tys = vec();
for (ty.field f in fields) {
- tys += type_of(cx, f.ty);
+ tys += type_of_inner(cx, f.ty, boxed);
}
- ret T_struct(tys);
+ llty = T_struct(tys);
}
- case (ty.ty_fn(?args, ?out)) {
- ret T_fn_pair(type_of_fn(cx, args, out));
+ case (ty.ty_fn(?proto, ?args, ?out)) {
+ llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out));
+ }
+ case (ty.ty_native_fn(?abi, ?args, ?out)) {
+ llty = T_fn_pair(cx.tn, type_of_native_fn(cx, abi, args, out));
}
case (ty.ty_obj(?meths)) {
auto th = mk_type_handle();
@@ -448,39 +642,54 @@ fn type_of_inner(@crate_ctxt cx, @ty.t t) -> TypeRef {
let vec[TypeRef] mtys = vec();
for (ty.method m in meths) {
let TypeRef mty =
- type_of_fn_full(cx,
+ type_of_fn_full(cx, m.proto,
some[TypeRef](self_ty),
m.inputs, m.output);
mtys += T_ptr(mty);
}
let TypeRef vtbl = T_struct(mtys);
- let TypeRef body = T_struct(vec(T_ptr(T_tydesc()),
- T_nil()));
- let TypeRef pair =
- T_struct(vec(T_ptr(vtbl),
- T_ptr(T_box(body))));
+ let TypeRef pair = T_struct(vec(T_ptr(vtbl),
+ T_opaque_obj_ptr(cx.tn)));
+
auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
llvm.LLVMRefineType(abs_pair, pair);
abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
- ret abs_pair;
+ llty = abs_pair;
}
case (ty.ty_var(_)) {
log "ty_var in trans.type_of";
fail;
}
case (ty.ty_param(_)) {
- ret T_typaram_ptr();
+ llty = T_i8();
}
+ case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
}
- fail;
+
+ check (llty as int != 0);
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
+ ret llty;
}
fn type_of_arg(@crate_ctxt cx, &ty.arg arg) -> TypeRef {
- auto ty = type_of(cx, arg.ty);
+ alt (arg.ty.struct) {
+ case (ty.ty_param(_)) {
+ if (arg.mode == ast.alias) {
+ ret T_typaram_ptr(cx.tn);
+ }
+ }
+ case (_) {
+ // fall through
+ }
+ }
+
+ auto typ;
if (arg.mode == ast.alias) {
- ty = T_ptr(ty);
+ typ = T_ptr(type_of_inner(cx, arg.ty, true));
+ } else {
+ typ = type_of_inner(cx, arg.ty, false);
}
- ret ty;
+ ret typ;
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
@@ -606,11 +815,11 @@ fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
}
-fn decl_glue(ModuleRef llmod, str s) -> ValueRef {
- ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr()), T_void()));
+fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
+ ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
}
-fn decl_upcall(ModuleRef llmod, uint _n) -> ValueRef {
+fn decl_upcall_glue(ModuleRef llmod, type_names tn, uint _n) -> ValueRef {
// It doesn't actually matter what type we come up with here, at the
// moment, as we cast the upcall function pointers to int before passing
// them to the indirect upcall-invocation glue. But eventually we'd like
@@ -618,7 +827,7 @@ fn decl_upcall(ModuleRef llmod, uint _n) -> ValueRef {
let int n = _n as int;
let str s = abi.upcall_glue_name(n);
let vec[TypeRef] args =
- vec(T_taskptr(), // taskptr
+ vec(T_taskptr(tn), // taskptr
T_int()) // callee
+ _vec.init_elt[TypeRef](T_int(), n as uint);
@@ -629,7 +838,7 @@ fn get_upcall(@crate_ctxt cx, str name, int n_args) -> ValueRef {
if (cx.upcalls.contains_key(name)) {
ret cx.upcalls.get(name);
}
- auto inputs = vec(T_taskptr());
+ auto inputs = vec(T_taskptr(cx.tn));
inputs += _vec.init_elt[TypeRef](T_int(), n_args as uint);
auto output = T_int();
auto f = decl_cdecl_fn(cx.llmod, name, T_fn(inputs, output));
@@ -644,14 +853,16 @@ fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args) -> result {
let ValueRef llglue = cx.fcx.ccx.glues.upcall_glues.(n);
let vec[ValueRef] call_args = vec(cx.fcx.lltaskptr, llupcall);
+
for (ValueRef a in args) {
call_args += cx.build.ZExtOrBitCast(a, T_int());
}
+
ret res(cx, cx.build.FastCall(llglue, call_args));
}
fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
- ret trans_upcall(cx, "upcall_free", vec(cx.build.PtrToInt(v, T_int()),
+ ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v),
C_int(0)));
}
@@ -680,6 +891,11 @@ fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
ret cx.build.And(bumped, cx.build.Not(mask));
}
+// Returns the real size of the given type for the current target.
+fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
+ ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
+}
+
fn llsize_of(TypeRef t) -> ValueRef {
ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
}
@@ -702,51 +918,111 @@ fn align_of(@block_ctxt cx, @ty.t t) -> result {
ret dynamic_align_of(cx, t);
}
+// Computes the size of the data part of a non-dynamically-sized tag.
+fn static_size_of_tag(@crate_ctxt cx, @ty.t t) -> uint {
+ if (ty.type_has_dynamic_size(t)) {
+ log "dynamically sized type passed to static_size_of_tag()";
+ fail;
+ }
+
+ if (cx.tag_sizes.contains_key(t)) {
+ ret cx.tag_sizes.get(t);
+ }
+
+ auto tid;
+ let vec[@ty.t] subtys;
+ alt (t.struct) {
+ case (ty.ty_tag(?tid_, ?subtys_)) {
+ tid = tid_;
+ subtys = subtys_;
+ }
+ case (_) {
+ log "non-tag passed to static_size_of_tag()";
+ fail;
+ }
+ }
+
+ // Compute max(variant sizes).
+ auto max_size = 0u;
+ auto variants = tag_variants(cx, tid);
+ for (ast.variant variant in variants) {
+ let vec[@ty.t] tys = variant_types(cx, variant);
+ auto tup_ty = ty.plain_ty(ty.ty_tup(tys));
+
+ // Here we possibly do a recursive call.
+ auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));
+
+ if (max_size < this_size) {
+ max_size = this_size;
+ }
+ }
+
+ cx.tag_sizes.insert(t, max_size);
+ ret max_size;
+}
+
fn dynamic_size_of(@block_ctxt cx, @ty.t t) -> result {
+ fn align_elements(@block_ctxt cx, vec[@ty.t] elts) -> result {
+ //
+ // C padding rules:
+ //
+ //
+ // - Pad after each element so that next element is aligned.
+ // - Pad after final structure member so that whole structure
+ // is aligned to max alignment of interior.
+ //
+ auto off = C_int(0);
+ auto max_align = C_int(1);
+ auto bcx = cx;
+ for (@ty.t e in elts) {
+ auto elt_align = align_of(bcx, e);
+ bcx = elt_align.bcx;
+ auto elt_size = size_of(bcx, e);
+ bcx = elt_size.bcx;
+ auto aligned_off = align_to(bcx, off, elt_align.val);
+ off = cx.build.Add(aligned_off, elt_size.val);
+ max_align = umax(bcx, max_align, elt_align.val);
+ }
+ off = align_to(bcx, off, max_align);
+ ret res(bcx, off);
+ }
+
alt (t.struct) {
case (ty.ty_param(?p)) {
auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
}
case (ty.ty_tup(?elts)) {
- //
- // C padding rules:
- //
- //
- // - Pad after each element so that next element is aligned.
- // - Pad after final structure member so that whole structure
- // is aligned to max alignment of interior.
- //
- auto off = C_int(0);
- auto max_align = C_int(1);
- auto bcx = cx;
- for (@ty.t e in elts) {
- auto elt_align = align_of(bcx, e);
- bcx = elt_align.bcx;
- auto elt_size = size_of(bcx, e);
- bcx = elt_size.bcx;
- auto aligned_off = align_to(bcx, off, elt_align.val);
- off = cx.build.Add(aligned_off, elt_size.val);
- max_align = umax(bcx, max_align, elt_align.val);
- }
- off = align_to(bcx, off, max_align);
- ret res(bcx, off);
+ ret align_elements(cx, elts);
}
case (ty.ty_rec(?flds)) {
- auto off = C_int(0);
- auto max_align = C_int(1);
- auto bcx = cx;
+ let vec[@ty.t] tys = vec();
for (ty.field f in flds) {
- auto elt_align = align_of(bcx, f.ty);
- bcx = elt_align.bcx;
- auto elt_size = size_of(bcx, f.ty);
- bcx = elt_size.bcx;
- auto aligned_off = align_to(bcx, off, elt_align.val);
- off = cx.build.Add(aligned_off, elt_size.val);
- max_align = umax(bcx, max_align, elt_align.val);
+ tys += vec(f.ty);
}
- off = align_to(bcx, off, max_align);
- ret res(bcx, off);
+ ret align_elements(cx, tys);
+ }
+ case (ty.ty_tag(?tid, ?tps)) {
+ auto bcx = cx;
+
+ // Compute max(variant sizes).
+ let ValueRef max_size = bcx.build.Alloca(T_int());
+ bcx.build.Store(C_int(0), max_size);
+
+ auto variants = tag_variants(bcx.fcx.ccx, tid);
+ for (ast.variant variant in variants) {
+ let vec[@ty.t] tys = variant_types(bcx.fcx.ccx, variant);
+ auto rslt = align_elements(bcx, tys);
+ bcx = rslt.bcx;
+
+ auto this_size = rslt.val;
+ auto old_max_size = bcx.build.Load(max_size);
+ bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
+ }
+
+ auto max_size_val = bcx.build.Load(max_size);
+ auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
+ ret res(bcx, total_size);
}
}
}
@@ -781,10 +1057,10 @@ fn dynamic_align_of(@block_ctxt cx, @ty.t t) -> result {
}
// Replacement for the LLVM 'GEP' instruction when field-indexing into a
-// tuple-like structure (tup, rec, tag) with a static index. This one is
-// driven off ty.struct and knows what to do when it runs into a ty_param
-// stuck in the middle of the thing it's GEP'ing into. Much like size_of and
-// align_of, above.
+// tuple-like structure (tup, rec) with a static index. This one is driven off
+// ty.struct and knows what to do when it runs into a ty_param stuck in the
+// middle of the thing it's GEP'ing into. Much like size_of and align_of,
+// above.
fn GEP_tup_like(@block_ctxt cx, @ty.t t,
ValueRef base, vec[int] ixs) -> result {
@@ -868,38 +1144,86 @@ fn GEP_tup_like(@block_ctxt cx, @ty.t t,
// flattened the incoming structure.
auto s = split_type(t, ixs, 0u);
- auto prefix_ty = ty.plain_ty(ty.ty_tup(s.prefix));
+ auto prefix_ty = plain_ty(ty.ty_tup(s.prefix));
auto bcx = cx;
auto sz = size_of(bcx, prefix_ty);
bcx = sz.bcx;
auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
auto bumped = bcx.build.GEP(raw, vec(sz.val));
- alt (s.target.struct) {
- case (ty.ty_param(_)) { ret res(bcx, bumped); }
- case (_) {
- auto ty = T_ptr(type_of(bcx.fcx.ccx, s.target));
- ret res(bcx, bcx.build.PointerCast(bumped, ty));
+
+ if (ty.type_has_dynamic_size(s.target)) {
+ ret res(bcx, bumped);
+ }
+
+ auto typ = T_ptr(type_of(bcx.fcx.ccx, s.target));
+ ret res(bcx, bcx.build.PointerCast(bumped, typ));
+}
+
+// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
+// This function uses GEP_tup_like() above and automatically performs casts as
+// appropriate. @llblobptr is the data part of a tag value; its actual type is
+// meaningless, as it will be cast away.
+fn GEP_tag(@block_ctxt cx, ValueRef llblobptr, &ast.variant variant, int ix)
+ -> result {
+ // Synthesize a tuple type so that GEP_tup_like() can work its magic.
+ // Separately, store the type of the element we're interested in.
+ auto arg_tys = arg_tys_of_fn(variant.ann);
+ auto elem_ty = ty.plain_ty(ty.ty_nil); // typestate infelicity
+ auto i = 0;
+ let vec[@ty.t] true_arg_tys = vec();
+ for (ty.arg a in arg_tys) {
+ true_arg_tys += vec(a.ty);
+ if (i == ix) {
+ elem_ty = a.ty;
}
+
+ i += 1;
}
+ auto tup_ty = ty.plain_ty(ty.ty_tup(true_arg_tys));
+
+ // Cast the blob pointer to the appropriate type, if we need to (i.e. if
+ // the blob pointer isn't dynamically sized).
+ let ValueRef llunionptr;
+ if (!ty.type_has_dynamic_size(tup_ty)) {
+ auto llty = type_of(cx.fcx.ccx, tup_ty);
+ llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
+ } else {
+ llunionptr = llblobptr;
+ }
+
+ // Do the GEP_tup_like().
+ auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));
+
+ // Cast the result to the appropriate type, if necessary.
+ auto val;
+ if (!ty.type_has_dynamic_size(elem_ty)) {
+ auto llelemty = type_of(rslt.bcx.fcx.ccx, elem_ty);
+ val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
+ } else {
+ val = rslt.val;
+ }
+
+ ret res(rslt.bcx, val);
}
-fn trans_malloc_inner(@block_ctxt cx, TypeRef llptr_ty) -> result {
- auto llbody_ty = lib.llvm.llvm.LLVMGetElementType(llptr_ty);
+fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
+ -> result {
// FIXME: need a table to collect tydesc globals.
auto tydesc = C_int(0);
- auto sz = llsize_of(llbody_ty);
- auto sub = trans_upcall(cx, "upcall_malloc", vec(sz, tydesc));
- sub.val = sub.bcx.build.IntToPtr(sub.val, llptr_ty);
- ret sub;
+ auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc));
+ rslt = res(rslt.bcx, vi2p(cx, rslt.val, llptr_ty));
+ ret rslt;
}
-fn trans_malloc(@block_ctxt cx, @ty.t t) -> result {
- auto scope_cx = find_scope_cx(cx);
- auto llptr_ty = type_of(cx.fcx.ccx, t);
- auto sub = trans_malloc_inner(cx, llptr_ty);
- scope_cx.cleanups += clean(bind drop_ty(_, sub.val, t));
- ret sub;
+fn trans_malloc_boxed(@block_ctxt cx, @ty.t t) -> result {
+ // Synthesize a fake box type structurally so we have something
+ // to measure the size of.
+ auto boxed_body = plain_ty(ty.ty_tup(vec(plain_ty(ty.ty_int), t)));
+ auto box_ptr = plain_ty(ty.ty_box(t));
+ auto sz = size_of(cx, boxed_body);
+ auto llty = type_of(cx.fcx.ccx, box_ptr);
+ ret trans_raw_malloc(sz.bcx, llty, sz.val);
}
@@ -941,6 +1265,7 @@ fn linearize_ty_params(@block_ctxt cx, @ty.t t)
r.defs += pid;
}
}
+ case (_) { }
}
ret t;
}
@@ -960,6 +1285,7 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
// Is the supplied type a type param? If so, return the passed-in tydesc.
alt (ty.type_param(t)) {
case (some[ast.def_id](?id)) {
+ check (cx.fcx.lltydescs.contains_key(id));
ret res(cx, cx.fcx.lltydescs.get(id));
}
case (none[ast.def_id]) { /* fall through */ }
@@ -975,16 +1301,23 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
check (n_params == _vec.len[ValueRef](tys._1));
if (!cx.fcx.ccx.tydescs.contains_key(t)) {
- make_tydesc(cx.fcx.ccx, t, tys._0);
+ declare_tydesc(cx.fcx.ccx, t);
+ define_tydesc(cx.fcx.ccx, t, tys._0);
}
- auto root = cx.fcx.ccx.tydescs.get(t);
+ auto root = cx.fcx.ccx.tydescs.get(t).tydesc;
+
+ auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc(cx.fcx.ccx.tn)),
+ n_params));
- auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc()), n_params));
auto i = 0;
+ auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
+ cx.build.Store(root, tdp);
+ i += 1;
for (ValueRef td in tys._1) {
auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
cx.build.Store(td, tdp);
+ i += 1;
}
auto bcx = cx;
@@ -997,75 +1330,121 @@ fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
vec(p2i(bcx.fcx.ccx.crate_ptr),
sz.val,
align.val,
- C_int(n_params as int),
- bcx.build.PtrToInt(tydescs, T_int())));
+ C_int((1u + n_params) as int),
+ vp2i(bcx, tydescs)));
- ret res(v.bcx, v.bcx.build.IntToPtr(v.val, T_ptr(T_tydesc())));
+ ret res(v.bcx, vi2p(v.bcx, v.val,
+ T_ptr(T_tydesc(cx.fcx.ccx.tn))));
}
// Otherwise, generate a tydesc if necessary, and return it.
if (!cx.fcx.ccx.tydescs.contains_key(t)) {
let vec[ast.def_id] defs = vec();
- make_tydesc(cx.fcx.ccx, t, defs);
+ declare_tydesc(cx.fcx.ccx, t);
+ define_tydesc(cx.fcx.ccx, t, defs);
}
- ret res(cx, cx.fcx.ccx.tydescs.get(t));
+ ret res(cx, cx.fcx.ccx.tydescs.get(t).tydesc);
}
-fn make_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
- auto tg = make_take_glue;
- auto take_glue = make_generic_glue(cx, t, "take", tg, typaram_defs);
- auto dg = make_drop_glue;
- auto drop_glue = make_generic_glue(cx, t, "drop", dg, typaram_defs);
+// Generates the declaration for (but doesn't fill in) a type descriptor. This
+// needs to be separate from make_tydesc() below, because sometimes type glue
+// functions needs to refer to their own type descriptors.
+fn declare_tydesc(@crate_ctxt cx, @ty.t t) {
+ auto take_glue = declare_generic_glue(cx, t, "take");
+ auto drop_glue = declare_generic_glue(cx, t, "drop");
- auto llty = type_of(cx, t);
- auto pvoid = T_ptr(T_i8());
- auto glue_fn_ty = T_ptr(T_fn(vec(T_taskptr(),
- T_ptr(T_ptr(T_tydesc())),
- pvoid), T_void()));
- auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc()))),
- llsize_of(llty),
- llalign_of(llty),
- take_glue, // take_glue_off
- drop_glue, // drop_glue_off
+ auto llsize;
+ auto llalign;
+ if (!ty.type_has_dynamic_size(t)) {
+ auto llty = type_of(cx, t);
+ llsize = llsize_of(llty);
+ llalign = llalign_of(llty);
+ } else {
+ // These will be overwritten as the derived tydesc is generated, so
+ // we create placeholder values.
+ llsize = C_int(0);
+ llalign = C_int(0);
+ }
+
+ auto glue_fn_ty = T_ptr(T_glue_fn(cx.tn));
+
+ // FIXME: this adjustment has to do with the ridiculous encoding of
+ // glue-pointer-constants in the tydesc records: They are tydesc-relative
+ // displacements. This is purely for compatibility with rustboot and
+ // should go when it is discarded.
+ fn off(ValueRef tydescp,
+ ValueRef gluefn) -> ValueRef {
+ ret i2p(llvm.LLVMConstSub(p2i(gluefn), p2i(tydescp)),
+ val_ty(gluefn));
+ }
+
+ auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
+ auto gvar = llvm.LLVMAddGlobal(cx.llmod, T_tydesc(cx.tn),
+ _str.buf(name));
+ auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(cx.tn)))),
+ llsize,
+ llalign,
+ off(gvar, take_glue), // take_glue_off
+ off(gvar, drop_glue), // drop_glue_off
C_null(glue_fn_ty), // free_glue_off
C_null(glue_fn_ty), // sever_glue_off
C_null(glue_fn_ty), // mark_glue_off
C_null(glue_fn_ty), // obj_drop_glue_off
C_null(glue_fn_ty))); // is_stateful
- auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
- auto gvar = llvm.LLVMAddGlobal(cx.llmod, val_ty(tydesc), _str.buf(name));
llvm.LLVMSetInitializer(gvar, tydesc);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
as llvm.Linkage);
- cx.tydescs.insert(t, gvar);
+
+ auto info = rec(
+ tydesc=gvar,
+ take_glue=take_glue,
+ drop_glue=drop_glue
+ );
+
+ cx.tydescs.insert(t, @info);
}
-fn make_generic_glue(@crate_ctxt cx, @ty.t t, str name,
- val_and_ty_fn helper,
- vec[ast.def_id] typaram_defs) -> ValueRef {
- auto llfnty = T_fn(vec(T_taskptr(),
- T_ptr(T_ptr(T_tydesc())),
- T_ptr(T_i8())), T_void());
+// declare_tydesc() above must have been called first.
+fn define_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
+ auto info = cx.tydescs.get(t);
+ auto gvar = info.tydesc;
+
+ auto tg = make_take_glue;
+ auto take_glue = make_generic_glue(cx, t, info.take_glue, tg,
+ typaram_defs);
+ auto dg = make_drop_glue;
+ auto drop_glue = make_generic_glue(cx, t, info.drop_glue, dg,
+ typaram_defs);
+}
- auto fn_name = cx.names.next("_rust_" + name) + "." + ty.ty_to_str(t);
+fn declare_generic_glue(@crate_ctxt cx, @ty.t t, str name) -> ValueRef {
+ auto llfnty = T_glue_fn(cx.tn);
+
+ auto fn_name = cx.names.next("_rust_" + name) + sep() + ty.ty_to_str(t);
fn_name = sanitize(fn_name);
- auto llfn = decl_fastcall_fn(cx.llmod, fn_name, llfnty);
+ ret decl_fastcall_fn(cx.llmod, fn_name, llfnty);
+}
- auto fcx = new_fn_ctxt(cx, fn_name, llfn);
+fn make_generic_glue(@crate_ctxt cx, @ty.t t, ValueRef llfn,
+ val_and_ty_fn helper,
+ vec[ast.def_id] typaram_defs) -> ValueRef {
+ auto fcx = new_fn_ctxt(cx, llfn);
auto bcx = new_top_block_ctxt(fcx);
auto re;
if (!ty.type_is_scalar(t)) {
auto llty;
- if (ty.type_is_structural(t)) {
+ if (ty.type_has_dynamic_size(t)) {
+ llty = T_ptr(T_i8());
+ } else if (ty.type_is_structural(t)) {
llty = T_ptr(type_of(cx, t));
} else {
llty = type_of(cx, t);
}
- auto lltyparams = llvm.LLVMGetParam(llfn, 1u);
+ auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
auto p = 0;
for (ast.def_id d in typaram_defs) {
auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p)));
@@ -1074,7 +1453,7 @@ fn make_generic_glue(@crate_ctxt cx, @ty.t t, str name,
p += 1;
}
- auto llrawptr = llvm.LLVMGetParam(llfn, 2u);
+ auto llrawptr = llvm.LLVMGetParam(llfn, 4u);
auto llval = bcx.build.BitCast(llrawptr, llty);
re = helper(bcx, llval, t);
@@ -1166,16 +1545,12 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
vec(C_int(0),
C_int(abi.box_rc_field_body)));
- auto fields =
- cx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_fields)));
auto tydescptr =
cx.build.GEP(body,
vec(C_int(0),
C_int(abi.obj_body_elt_tydesc)));
- call_tydesc_glue_full(cx, fields, cx.build.Load(tydescptr),
+ call_tydesc_glue_full(cx, body, cx.build.Load(tydescptr),
abi.tydesc_field_drop_glue_off);
// Then free the body.
@@ -1195,7 +1570,7 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
T_int(), C_int(0));
}
- case (ty.ty_fn(_,_)) {
+ case (ty.ty_fn(_,_,_)) {
fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
// Call through the closure's own fields-drop glue first.
@@ -1203,7 +1578,6 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
cx.build.GEP(v,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
-
auto bindings =
cx.build.GEP(body,
vec(C_int(0),
@@ -1241,6 +1615,7 @@ fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
bind drop_ty(_, _, _));
} else if (ty.type_is_scalar(t) ||
+ ty.type_is_native(t) ||
ty.type_is_nil(t)) {
ret res(cx, C_nil());
}
@@ -1294,42 +1669,105 @@ fn decr_refcnt_and_if_zero(@block_ctxt cx,
ret res(next_cx, phi);
}
-fn type_of_variant(@crate_ctxt cx, &ast.variant v) -> TypeRef {
- let vec[TypeRef] lltys = vec();
+// Tag information
+
+fn variant_types(@crate_ctxt cx, &ast.variant v) -> vec[@ty.t] {
+ let vec[@ty.t] tys = vec();
alt (ty.ann_to_type(v.ann).struct) {
- case (ty.ty_fn(?args, _)) {
+ case (ty.ty_fn(_, ?args, _)) {
for (ty.arg arg in args) {
- lltys += vec(type_of(cx, arg.ty));
+ tys += vec(arg.ty);
}
}
+ case (ty.ty_tag(_, _)) { /* nothing */ }
case (_) { fail; }
}
+ ret tys;
+}
+
+fn type_of_variant(@crate_ctxt cx, &ast.variant v) -> TypeRef {
+ let vec[TypeRef] lltys = vec();
+ auto tys = variant_types(cx, v);
+ for (@ty.t typ in tys) {
+ lltys += vec(type_of(cx, typ));
+ }
ret T_struct(lltys);
}
+// Returns the type parameters of a tag.
+fn tag_ty_params(@crate_ctxt cx, ast.def_id id) -> vec[ast.ty_param] {
+ check (cx.items.contains_key(id));
+ alt (cx.items.get(id).node) {
+ case (ast.item_tag(_, _, ?tps, _)) { ret tps; }
+ }
+ fail; // not reached
+}
+
+// Returns the variants in a tag.
+fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[ast.variant] {
+ check (cx.items.contains_key(id));
+ alt (cx.items.get(id).node) {
+ case (ast.item_tag(_, ?variants, _, _)) { ret variants; }
+ }
+ fail; // not reached
+}
+
+// Returns a new plain tag type of the given ID with no type parameters. Don't
+// use this function in new code; it's a hack to keep things working for now.
+fn mk_plain_tag(ast.def_id tid) -> @ty.t {
+ let vec[@ty.t] tps = vec();
+ ret ty.plain_ty(ty.ty_tag(tid, tps));
+}
+
+
+type val_fn = fn(@block_ctxt cx, ValueRef v) -> result;
+
type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, @ty.t t) -> result;
+type val_pair_and_ty_fn =
+ fn(@block_ctxt cx, ValueRef av, ValueRef bv, @ty.t t) -> result;
+
// Iterates through the elements of a structural type.
fn iter_structural_ty(@block_ctxt cx,
ValueRef v,
@ty.t t,
val_and_ty_fn f)
-> result {
+ fn adaptor_fn(val_and_ty_fn f,
+ @block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t) -> result {
+ ret f(cx, av, t);
+ }
+ be iter_structural_ty_full(cx, v, v, t,
+ bind adaptor_fn(f, _, _, _, _));
+}
+
+
+fn iter_structural_ty_full(@block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t,
+ val_pair_and_ty_fn f)
+ -> result {
let result r = res(cx, C_nil());
fn iter_boxpp(@block_ctxt cx,
- ValueRef box_cell,
- val_and_ty_fn f) -> result {
- auto box_ptr = cx.build.Load(box_cell);
- auto tnil = ty.plain_ty(ty.ty_nil);
- auto tbox = ty.plain_ty(ty.ty_box(tnil));
+ ValueRef box_a_cell,
+ ValueRef box_b_cell,
+ val_pair_and_ty_fn f) -> result {
+ auto box_a_ptr = cx.build.Load(box_a_cell);
+ auto box_b_ptr = cx.build.Load(box_b_cell);
+ auto tnil = plain_ty(ty.ty_nil);
+ auto tbox = plain_ty(ty.ty_box(tnil));
auto inner_cx = new_sub_block_ctxt(cx, "iter box");
auto next_cx = new_sub_block_ctxt(cx, "next");
- auto null_test = cx.build.IsNull(box_ptr);
+ auto null_test = cx.build.IsNull(box_a_ptr);
cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);
- auto r = f(inner_cx, box_ptr, tbox);
+ auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
r.bcx.build.Br(next_cx.llbb);
ret res(next_cx, r.val);
}
@@ -1338,9 +1776,13 @@ fn iter_structural_ty(@block_ctxt cx,
case (ty.ty_tup(?args)) {
let int i = 0;
for (@ty.t arg in args) {
- auto elt = r.bcx.build.GEP(v, vec(C_int(0), C_int(i)));
+ r = GEP_tup_like(r.bcx, t, av, vec(0, i));
+ auto elt_a = r.val;
+ r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
+ auto elt_b = r.val;
r = f(r.bcx,
- load_scalar_or_boxed(r.bcx, elt, arg),
+ load_scalar_or_boxed(r.bcx, elt_a, arg),
+ load_scalar_or_boxed(r.bcx, elt_b, arg),
arg);
i += 1;
}
@@ -1348,90 +1790,101 @@ fn iter_structural_ty(@block_ctxt cx,
case (ty.ty_rec(?fields)) {
let int i = 0;
for (ty.field fld in fields) {
- auto llfld = r.bcx.build.GEP(v, vec(C_int(0), C_int(i)));
+ r = GEP_tup_like(r.bcx, t, av, vec(0, i));
+ auto llfld_a = r.val;
+ r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
+ auto llfld_b = r.val;
r = f(r.bcx,
- load_scalar_or_boxed(r.bcx, llfld, fld.ty),
+ load_scalar_or_boxed(r.bcx, llfld_a, fld.ty),
+ load_scalar_or_boxed(r.bcx, llfld_b, fld.ty),
fld.ty);
i += 1;
}
}
- case (ty.ty_tag(?tid)) {
- check (cx.fcx.ccx.tags.contains_key(tid));
- auto info = cx.fcx.ccx.tags.get(tid);
- auto n_variants = _vec.len[tup(ast.def_id,arity)](info.variants);
-
- // Look up the tag in the typechecked AST.
- check (cx.fcx.ccx.items.contains_key(tid));
- auto tag_item = cx.fcx.ccx.items.get(tid);
- let vec[ast.variant] variants = vec(); // FIXME: typestate bug
- alt (tag_item.node) {
- case (ast.item_tag(_, ?vs, _, _)) {
- variants = vs;
- }
- case (_) {
- log "trans: ty_tag doesn't actually refer to a tag";
- fail;
- }
- }
+ case (ty.ty_tag(?tid, ?tps)) {
+ auto variants = tag_variants(cx.fcx.ccx, tid);
+ auto n_variants = _vec.len[ast.variant](variants);
+
+ auto lldiscrim_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(0)));
+ auto llunion_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(1)));
+ auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);
- auto lldiscrim_ptr = cx.build.GEP(v, vec(C_int(0), C_int(0)));
- auto llunion_ptr = cx.build.GEP(v, vec(C_int(0), C_int(1)));
- auto lldiscrim = cx.build.Load(lldiscrim_ptr);
+ auto lldiscrim_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(0)));
+ auto llunion_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(1)));
+ auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);
- auto unr_cx = new_sub_block_ctxt(cx, "tag-iter-unr");
+ // NB: we must hit the discriminant first so that structural
+ // comparison know not to proceed when the discriminants differ.
+ auto bcx = cx;
+ bcx = f(bcx, lldiscrim_a, lldiscrim_b,
+ plain_ty(ty.ty_int)).bcx;
+
+ auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
unr_cx.build.Unreachable();
- auto llswitch = cx.build.Switch(lldiscrim, unr_cx.llbb,
- n_variants);
+ auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
+ n_variants);
- auto next_cx = new_sub_block_ctxt(cx, "tag-iter-next");
+ auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");
auto i = 0u;
- for (tup(ast.def_id,arity) variant in info.variants) {
- auto variant_cx = new_sub_block_ctxt(cx, "tag-iter-variant-" +
+ for (ast.variant variant in variants) {
+ auto variant_cx = new_sub_block_ctxt(bcx,
+ "tag-iter-variant-" +
_uint.to_str(i, 10u));
llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);
- alt (variant._1) {
- case (n_ary) {
- let vec[ValueRef] vals = vec(C_int(0), C_int(1),
- C_int(i as int));
- auto llvar = variant_cx.build.GEP(v, vals);
- auto llvarty = type_of_variant(cx.fcx.ccx,
- variants.(i));
-
- auto fn_ty = ty.ann_to_type(variants.(i).ann);
- alt (fn_ty.struct) {
- case (ty.ty_fn(?args, _)) {
- auto llvarp = variant_cx.build.
- TruncOrBitCast(llunion_ptr,
- T_ptr(llvarty));
-
- auto j = 0u;
- for (ty.arg a in args) {
- auto v = vec(C_int(0),
- C_int(j as int));
- auto llfldp =
- variant_cx.build.GEP(llvarp, v);
-
- auto llfld =
- load_scalar_or_boxed(variant_cx,
- llfldp, a.ty);
-
- auto res = f(variant_cx, llfld, a.ty);
- variant_cx = res.bcx;
- j += 1u;
- }
+ if (_vec.len[ast.variant_arg](variant.args) > 0u) {
+ // N-ary variant.
+ auto llvarty = type_of_variant(bcx.fcx.ccx, variants.(i));
+
+ auto fn_ty = ty.ann_to_type(variants.(i).ann);
+ alt (fn_ty.struct) {
+ case (ty.ty_fn(_, ?args, _)) {
+ auto llvarp_a = variant_cx.build.
+ TruncOrBitCast(llunion_a_ptr, T_ptr(llvarty));
+
+ auto llvarp_b = variant_cx.build.
+ TruncOrBitCast(llunion_b_ptr, T_ptr(llvarty));
+
+ auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);
+
+ auto j = 0u;
+ for (ty.arg a in args) {
+ auto v = vec(C_int(0), C_int(j as int));
+
+ auto llfldp_a =
+ variant_cx.build.GEP(llvarp_a, v);
+
+ auto llfldp_b =
+ variant_cx.build.GEP(llvarp_b, v);
+
+ auto ty_subst = ty.substitute_ty_params(
+ ty_params, tps, a.ty);
+
+ auto llfld_a =
+ load_scalar_or_boxed(variant_cx,
+ llfldp_a,
+ ty_subst);
+
+ auto llfld_b =
+ load_scalar_or_boxed(variant_cx,
+ llfldp_b,
+ ty_subst);
+
+ auto res = f(variant_cx,
+ llfld_a, llfld_b, ty_subst);
+ variant_cx = res.bcx;
+ j += 1u;
}
- case (_) { fail; }
}
-
- variant_cx.build.Br(next_cx.llbb);
- }
- case (nullary) {
- // Nothing to do.
- variant_cx.build.Br(next_cx.llbb);
+ case (_) { fail; }
}
+
+ variant_cx.build.Br(next_cx.llbb);
+ } else {
+ // Nullary variant; nothing to do.
+ variant_cx.build.Br(next_cx.llbb);
}
i += 1u;
@@ -1439,27 +1892,96 @@ fn iter_structural_ty(@block_ctxt cx,
ret res(next_cx, C_nil());
}
- case (ty.ty_fn(_,_)) {
- auto box_cell =
- cx.build.GEP(v,
+ case (ty.ty_fn(_,_,_)) {
+ auto box_cell_a =
+ cx.build.GEP(av,
+ vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ auto box_cell_b =
+ cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.fn_field_box)));
- ret iter_boxpp(cx, box_cell, f);
+ ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (ty.ty_obj(_)) {
- auto box_cell =
- cx.build.GEP(v,
+ auto box_cell_a =
+ cx.build.GEP(av,
+ vec(C_int(0),
+ C_int(abi.obj_field_box)));
+ auto box_cell_b =
+ cx.build.GEP(bv,
vec(C_int(0),
C_int(abi.obj_field_box)));
- ret iter_boxpp(cx, box_cell, f);
+ ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (_) {
- cx.fcx.ccx.sess.unimpl("type in iter_structural_ty");
+ cx.fcx.ccx.sess.unimpl("type in iter_structural_ty_full");
}
}
ret r;
}
+// Iterates through a pointer range, until the src* hits the src_lim*.
+fn iter_sequence_raw(@block_ctxt cx,
+ ValueRef src, // elt*
+ ValueRef src_lim, // elt*
+ ValueRef elt_sz,
+ val_fn f) -> result {
+
+ auto bcx = cx;
+
+ let ValueRef src_int = vp2i(bcx, src);
+ let ValueRef src_lim_int = vp2i(bcx, src_lim);
+
+ auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
+ auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
+ auto next_cx = new_sub_block_ctxt(cx, "next");
+
+ bcx.build.Br(cond_cx.llbb);
+
+ let ValueRef src_curr = cond_cx.build.Phi(T_int(),
+ vec(src_int), vec(bcx.llbb));
+
+ auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
+ src_curr, src_lim_int);
+
+ cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
+
+ auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
+
+ auto body_res = f(body_cx, src_curr_ptr);
+ body_cx = body_res.bcx;
+
+ auto src_next = body_cx.build.Add(src_curr, elt_sz);
+ body_cx.build.Br(cond_cx.llbb);
+
+ cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
+ vec(body_cx.llbb));
+
+ ret res(next_cx, C_nil());
+}
+
+
+fn iter_sequence_inner(@block_ctxt cx,
+ ValueRef src, // elt*
+ ValueRef src_lim, // elt*
+ @ty.t elt_ty,
+ val_and_ty_fn f) -> result {
+ fn adaptor_fn(val_and_ty_fn f,
+ @ty.t elt_ty,
+ @block_ctxt cx,
+ ValueRef v) -> result {
+ auto llty = type_of(cx.fcx.ccx, elt_ty);
+ auto p = cx.build.PointerCast(v, T_ptr(llty));
+ ret f(cx, load_scalar_or_boxed(cx, p, elt_ty), elt_ty);
+ }
+
+ auto elt_sz = size_of(cx, elt_ty);
+ be iter_sequence_raw(elt_sz.bcx, src, src_lim, elt_sz.val,
+ bind adaptor_fn(f, elt_ty, _, _));
+}
+
+
// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx,
ValueRef v,
@@ -1479,43 +2001,18 @@ fn iter_sequence(@block_ctxt cx,
auto llunit_ty = type_of(cx.fcx.ccx, elt_ty);
auto bcx = cx;
- auto unit_sz = size_of(bcx, elt_ty);
- bcx = unit_sz.bcx;
auto len = bcx.build.Load(lenptr);
if (trailing_null) {
+ auto unit_sz = size_of(bcx, elt_ty);
+ bcx = unit_sz.bcx;
len = bcx.build.Sub(len, unit_sz.val);
}
- auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
- auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
- auto next_cx = new_sub_block_ctxt(cx, "next");
-
- bcx.build.Br(cond_cx.llbb);
-
- auto ix = cond_cx.build.Phi(T_int(), vec(C_int(0)), vec(cx.llbb));
- auto scaled_ix = cond_cx.build.Phi(T_int(),
- vec(C_int(0)), vec(cx.llbb));
+ auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
+ T_ptr(llunit_ty));
- auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntNE,
- scaled_ix, len);
- cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
-
- auto elt = body_cx.build.GEP(p0, vec(C_int(0), ix));
- auto body_res = f(body_cx,
- load_scalar_or_boxed(body_cx, elt, elt_ty),
- elt_ty);
- auto next_ix = body_res.bcx.build.Add(ix, C_int(1));
- auto next_scaled_ix = body_res.bcx.build.Add(scaled_ix, unit_sz.val);
-
- cond_cx.build.AddIncomingToPhi(ix, vec(next_ix),
- vec(body_res.bcx.llbb));
-
- cond_cx.build.AddIncomingToPhi(scaled_ix, vec(next_scaled_ix),
- vec(body_res.bcx.llbb));
-
- body_res.bcx.build.Br(cond_cx.llbb);
- ret res(next_cx, C_nil());
+ ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
}
alt (t.struct) {
@@ -1523,7 +2020,7 @@ fn iter_sequence(@block_ctxt cx,
ret iter_sequence_body(cx, v, et, f, false);
}
case (ty.ty_str) {
- auto et = ty.plain_ty(ty.ty_machine(common.ty_u8));
+ auto et = plain_ty(ty.ty_machine(common.ty_u8));
ret iter_sequence_body(cx, v, et, f, true);
}
case (_) { fail; }
@@ -1541,7 +2038,20 @@ fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
lltydescs = cx.build.Load(lltydescs);
auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
auto llfn = cx.build.Load(llfnptr);
- cx.build.FastCall(llfn, vec(cx.fcx.lltaskptr, lltydescs, llrawptr));
+
+ // FIXME: this adjustment has to do with the ridiculous encoding of
+ // glue-pointer-constants in the tydesc records: They are tydesc-relative
+ // displacements. This is purely for compatibility with rustboot and
+ // should go when it is discarded.
+ llfn = vi2p(cx, cx.build.Add(vp2i(cx, llfn),
+ vp2i(cx, tydesc)),
+ val_ty(llfn));
+
+ cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
+ cx.fcx.lltaskptr,
+ C_null(T_ptr(T_nil())),
+ lltydescs,
+ llrawptr));
}
fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
@@ -1552,7 +2062,6 @@ fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
fn incr_all_refcnts(@block_ctxt cx,
ValueRef v,
@ty.t t) -> result {
-
if (!ty.type_is_scalar(t)) {
call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue_off);
}
@@ -1625,7 +2134,7 @@ fn copy_ty(@block_ctxt cx,
ValueRef dst,
ValueRef src,
@ty.t t) -> result {
- if (ty.type_is_scalar(t)) {
+ if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
ret res(cx, cx.build.Store(src, dst));
} else if (ty.type_is_nil(t)) {
@@ -1744,7 +2253,11 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
case (ast.box) {
auto e_ty = ty.expr_ty(e);
auto e_val = sub.val;
- sub = trans_malloc(sub.bcx, node_ann_type(sub.bcx.fcx.ccx, a));
+ auto box_ty = node_ann_type(sub.bcx.fcx.ccx, a);
+ sub = trans_malloc_boxed(sub.bcx, e_ty);
+ find_scope_cx(cx).cleanups +=
+ clean(bind drop_ty(_, sub.val, box_ty));
+
auto box = sub.val;
auto rc = sub.bcx.build.GEP(box,
vec(C_int(0),
@@ -1753,6 +2266,15 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
vec(C_int(0),
C_int(abi.box_rc_field_body)));
sub.bcx.build.Store(C_int(1), rc);
+
+ // Cast the body type to the type of the value. This is needed to
+ // make tags work, since tags have a different LLVM type depending
+ // on whether they're boxed or not.
+ if (!ty.type_has_dynamic_size(e_ty)) {
+ auto llety = T_ptr(type_of(sub.bcx.fcx.ccx, e_ty));
+ body = sub.bcx.build.PointerCast(body, llety);
+ }
+
sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
ret res(sub.bcx, box);
}
@@ -1767,41 +2289,241 @@ fn trans_unary(@block_ctxt cx, ast.unop op,
}
ret res(sub.bcx, val);
}
+ case (ast._mutable) {
+ ret trans_expr(cx, e);
+ }
}
fail;
}
-fn trans_eager_binop(@block_ctxt cx, ast.binop op,
- ValueRef lhs, ValueRef rhs) -> ValueRef {
+fn trans_compare(@block_ctxt cx, ast.binop op, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ if (ty.type_is_scalar(t)) {
+ ret res(cx, trans_scalar_compare(cx, op, t, lhs, rhs));
+
+ } else if (ty.type_is_structural(t)) {
+ auto scx = new_sub_block_ctxt(cx, "structural compare start");
+ auto next = new_sub_block_ctxt(cx, "structural compare end");
+ cx.build.Br(scx.llbb);
+
+ /*
+ * We're doing lexicographic comparison here. We start with the
+ * assumption that the two input elements are equal. Depending on
+ * operator, this means that the result is either true or false;
+ * equality produces 'true' for ==, <= and >=. It produces 'false' for
+ * !=, < and >.
+ *
+ * We then move one element at a time through the structure checking
+ * for pairwise element equality. If we have equality, our assumption
+ * about overall sequence equality is not modified, so we have to move
+ * to the next element.
+ *
+ * If we do not have pairwise element equality, we have reached an
+ * element that 'decides' the lexicographic comparison. So we exit the
+ * loop with a flag that indicates the true/false sense of that
+ * decision, by testing the element again with the operator we're
+ * interested in.
+ *
+ * When we're lucky, LLVM should be able to fold some of these two
+ * tests together (as they're applied to the same operands and in some
+ * cases are sometimes redundant). But we don't bother trying to
+ * optimize combinations like that, at this level.
+ */
+
+ auto flag = scx.build.Alloca(T_i1());
+
+ alt (op) {
+ // ==, <= and >= default to true if they find == all the way.
+ case (ast.eq) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (ast.le) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (ast.ge) { scx.build.Store(C_integral(1, T_i1()), flag); }
+ case (_) {
+ // ==, <= and >= default to false if they find == all the way.
+ scx.build.Store(C_integral(0, T_i1()), flag);
+ }
+ }
+
+ fn inner(@block_ctxt last_cx,
+ ValueRef flag,
+ ast.binop op,
+ @block_ctxt cx,
+ ValueRef av,
+ ValueRef bv,
+ @ty.t t) -> result {
+
+ auto cnt_cx = new_sub_block_ctxt(cx, "continue comparison");
+ auto stop_cx = new_sub_block_ctxt(cx, "stop comparison");
+
+ // First 'eq' comparison: if so, continue to next elts.
+ auto eq_r = trans_compare(cx, ast.eq, t, av, bv);
+ eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);
+
+ // Second 'op' comparison: find out how this elt-pair decides.
+ auto stop_r = trans_compare(stop_cx, op, t, av, bv);
+ stop_r.bcx.build.Store(stop_r.val, flag);
+ stop_r.bcx.build.Br(last_cx.llbb);
+ ret res(cnt_cx, C_nil());
+ }
+
+ auto r = iter_structural_ty_full(scx, lhs, rhs, t,
+ bind inner(next, flag, op,
+ _, _, _, _));
+
+ r.bcx.build.Br(next.llbb);
+ auto v = next.build.Load(flag);
+ ret res(next, v);
+
+ } else {
+ // FIXME: compare vec, str, box?
+ cx.fcx.ccx.sess.unimpl("type in trans_compare");
+ ret res(cx, C_bool(false));
+ }
+}
+
+fn trans_scalar_compare(@block_ctxt cx, ast.binop op, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ if (ty.type_is_fp(t)) {
+ ret trans_fp_compare(cx, op, t, lhs, rhs);
+ } else {
+ ret trans_integral_compare(cx, op, t, lhs, rhs);
+ }
+}
+
+fn trans_fp_compare(@block_ctxt cx, ast.binop op, @ty.t fptype,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ auto cmp = lib.llvm.LLVMIntEQ;
alt (op) {
- case (ast.add) { ret cx.build.Add(lhs, rhs); }
- case (ast.sub) { ret cx.build.Sub(lhs, rhs); }
-
- // FIXME (issue #57): switch by signedness.
- case (ast.mul) { ret cx.build.Mul(lhs, rhs); }
- case (ast.div) { ret cx.build.SDiv(lhs, rhs); }
- case (ast.rem) { ret cx.build.SRem(lhs, rhs); }
-
- case (ast.bitor) { ret cx.build.Or(lhs, rhs); }
- case (ast.bitand) { ret cx.build.And(lhs, rhs); }
- case (ast.bitxor) { ret cx.build.Xor(lhs, rhs); }
- case (ast.lsl) { ret cx.build.Shl(lhs, rhs); }
- case (ast.lsr) { ret cx.build.LShr(lhs, rhs); }
- case (ast.asr) { ret cx.build.AShr(lhs, rhs); }
- case (_) {
- auto cmp = lib.llvm.LLVMIntEQ;
- alt (op) {
- case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
- case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }
-
- // FIXME (issue #57): switch by signedness.
- case (ast.lt) { cmp = lib.llvm.LLVMIntSLT; }
- case (ast.le) { cmp = lib.llvm.LLVMIntSLE; }
- case (ast.ge) { cmp = lib.llvm.LLVMIntSGE; }
- case (ast.gt) { cmp = lib.llvm.LLVMIntSGT; }
+ // FIXME: possibly use the unordered-or-< predicates here,
+ // for now we're only going with ordered-and-< style (no NaNs).
+ case (ast.eq) { cmp = lib.llvm.LLVMRealOEQ; }
+ case (ast.ne) { cmp = lib.llvm.LLVMRealONE; }
+ case (ast.lt) { cmp = lib.llvm.LLVMRealOLT; }
+ case (ast.gt) { cmp = lib.llvm.LLVMRealOGT; }
+ case (ast.le) { cmp = lib.llvm.LLVMRealOLE; }
+ case (ast.ge) { cmp = lib.llvm.LLVMRealOGE; }
+ }
+
+ ret cx.build.FCmp(cmp, lhs, rhs);
+}
+
+fn trans_integral_compare(@block_ctxt cx, ast.binop op, @ty.t intype,
+ ValueRef lhs, ValueRef rhs) -> ValueRef {
+ auto cmp = lib.llvm.LLVMIntEQ;
+ alt (op) {
+ case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
+ case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }
+
+ case (ast.lt) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSLT;
+ } else {
+ cmp = lib.llvm.LLVMIntULT;
+ }
+ }
+ case (ast.le) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSLE;
+ } else {
+ cmp = lib.llvm.LLVMIntULE;
+ }
+ }
+ case (ast.gt) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSGT;
+ } else {
+ cmp = lib.llvm.LLVMIntUGT;
+ }
+ }
+ case (ast.ge) {
+ if (ty.type_is_signed(intype)) {
+ cmp = lib.llvm.LLVMIntSGE;
+ } else {
+ cmp = lib.llvm.LLVMIntUGE;
}
- ret cx.build.ICmp(cmp, lhs, rhs);
+ }
+ }
+ ret cx.build.ICmp(cmp, lhs, rhs);
+}
+
+fn trans_vec_append(@block_ctxt cx, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ auto elt_ty = ty.sequence_element_type(t);
+
+ auto skip_null = C_bool(false);
+ alt (t.struct) {
+ case (ty.ty_str) { skip_null = C_bool(true); }
+ case (_) { }
+ }
+
+ auto bcx = cx;
+
+ auto llvec_tydesc = get_tydesc(bcx, t);
+ bcx = llvec_tydesc.bcx;
+
+ auto llelt_tydesc = get_tydesc(bcx, elt_ty);
+ bcx = llelt_tydesc.bcx;
+
+ auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
+ auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());
+
+ ret res(bcx, bcx.build.FastCall(cx.fcx.ccx.glues.vec_append_glue,
+ vec(cx.fcx.lltaskptr,
+ llvec_tydesc.val,
+ llelt_tydesc.val,
+ dst, src, skip_null)));
+}
+
+fn trans_vec_add(@block_ctxt cx, @ty.t t,
+ ValueRef lhs, ValueRef rhs) -> result {
+ auto r = alloc_ty(cx, t);
+ auto tmp = r.val;
+ r = copy_ty(r.bcx, INIT, tmp, lhs, t);
+ auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
+ tmp = load_scalar_or_boxed(bcx, tmp, t);
+ find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tmp, t));
+ ret res(bcx, tmp);
+}
+
+
+fn trans_eager_binop(@block_ctxt cx, ast.binop op, @ty.t intype,
+ ValueRef lhs, ValueRef rhs) -> result {
+
+ alt (op) {
+ case (ast.add) {
+ if (ty.type_is_sequence(intype)) {
+ ret trans_vec_add(cx, intype, lhs, rhs);
+ }
+ ret res(cx, cx.build.Add(lhs, rhs));
+ }
+ case (ast.sub) { ret res(cx, cx.build.Sub(lhs, rhs)); }
+
+ case (ast.mul) { ret res(cx, cx.build.Mul(lhs, rhs)); }
+ case (ast.div) {
+ if (ty.type_is_signed(intype)) {
+ ret res(cx, cx.build.SDiv(lhs, rhs));
+ } else {
+ ret res(cx, cx.build.UDiv(lhs, rhs));
+ }
+ }
+ case (ast.rem) {
+ if (ty.type_is_signed(intype)) {
+ ret res(cx, cx.build.SRem(lhs, rhs));
+ } else {
+ ret res(cx, cx.build.URem(lhs, rhs));
+ }
+ }
+
+ case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
+ case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
+ case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
+ case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
+ case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
+ case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
+ case (_) {
+ ret trans_compare(cx, op, intype, lhs, rhs);
}
}
fail;
@@ -1827,6 +2549,21 @@ fn autoderef(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
}
}
+fn autoderefed_ty(@ty.t t) -> @ty.t {
+ let @ty.t t1 = t;
+
+ while (true) {
+ alt (t1.struct) {
+ case (ty.ty_box(?inner)) {
+ t1 = inner;
+ }
+ case (_) {
+ ret t1;
+ }
+ }
+ }
+}
+
fn trans_binary(@block_ctxt cx, ast.binop op,
@ast.expr a, @ast.expr b) -> result {
@@ -1876,11 +2613,14 @@ fn trans_binary(@block_ctxt cx, ast.binop op,
case (_) {
// Remaining cases are eager:
auto lhs = trans_expr(cx, a);
- lhs = autoderef(lhs.bcx, lhs.val, ty.expr_ty(a));
+ auto lhty = ty.expr_ty(a);
+ lhs = autoderef(lhs.bcx, lhs.val, lhty);
auto rhs = trans_expr(lhs.bcx, b);
- rhs = autoderef(rhs.bcx, rhs.val, ty.expr_ty(b));
- ret res(rhs.bcx, trans_eager_binop(rhs.bcx, op,
- lhs.val, rhs.val));
+ auto rhty = ty.expr_ty(b);
+ rhs = autoderef(rhs.bcx, rhs.val, rhty);
+ ret trans_eager_binop(rhs.bcx, op,
+ autoderefed_ty(lhty),
+ lhs.val, rhs.val);
}
}
fail;
@@ -1983,6 +2723,7 @@ fn trans_for(@block_ctxt cx,
cx.build.Br(scope_cx.llbb);
auto local_res = alloc_local(scope_cx, local);
auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
+ scope_cx.cleanups += clean(bind drop_slot(_, local_res.val, t));
bcx = trans_block(bcx, body).bcx;
bcx.build.Br(next_cx.llbb);
ret res(next_cx, C_nil());
@@ -2002,6 +2743,103 @@ fn trans_for(@block_ctxt cx,
bind inner(_, local, _, _, body));
}
+fn trans_for_each(@block_ctxt cx,
+ @ast.decl decl,
+ @ast.expr seq,
+ &ast.block body) -> result {
+
+ /*
+ * The translation is a little .. complex here. Code like:
+ *
+ * let ty1 p = ...;
+ *
+ * let ty1 q = ...;
+ *
+ * foreach (ty v in foo(a,b)) { body(p,q,v) }
+ *
+ *
+ * Turns into a something like so (C/Rust mishmash):
+ *
+ * type env = { *ty1 p, *ty2 q, ... };
+ *
+ * let env e = { &p, &q, ... };
+ *
+ * fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
+ *
+ * foo([foreach123_body, env*], a, b);
+ *
+ */
+
+ // Step 1: walk body and figure out which references it makes
+ // escape. This could be determined upstream, and probably ought
+ // to be so, eventualy. For first cut, skip this. Null env.
+
+ auto env_ty = T_opaque_closure_ptr(cx.fcx.ccx.tn);
+
+
+ // Step 2: Declare foreach body function.
+
+ // FIXME: possibly support alias-mode here?
+ auto decl_ty = plain_ty(ty.ty_nil);
+ alt (decl.node) {
+ case (ast.decl_local(?local)) {
+ decl_ty = node_ann_type(cx.fcx.ccx, local.ann);
+ }
+ }
+
+ let str s =
+ cx.fcx.ccx.names.next("_rust_foreach")
+ + sep() + cx.fcx.ccx.path;
+
+ // The 'env' arg entering the body function is a fake env member (as in
+ // the env-part of the normal rust calling convention) that actually
+ // points to a stack allocated env in this frame. We bundle that env
+ // pointer along with the foreach-body-fn pointer into a 'normal' fn pair
+ // and pass it in as a first class fn-arg to the iterator.
+
+ auto iter_body_llty = type_of_fn_full(cx.fcx.ccx, ast.proto_fn,
+ none[TypeRef],
+ vec(rec(mode=ast.val, ty=decl_ty)),
+ plain_ty(ty.ty_nil));
+
+ let ValueRef lliterbody = decl_fastcall_fn(cx.fcx.ccx.llmod,
+ s, iter_body_llty);
+
+ // FIXME: handle ty params properly.
+ let vec[ast.ty_param] ty_params = vec();
+
+ auto fcx = new_fn_ctxt(cx.fcx.ccx, lliterbody);
+ auto bcx = new_top_block_ctxt(fcx);
+
+ // FIXME: populate lllocals from llenv here.
+ auto res = trans_block(bcx, body);
+ res.bcx.build.RetVoid();
+
+
+ // Step 3: Call iter passing [lliterbody, llenv], plus other args.
+
+ alt (seq.node) {
+
+ case (ast.expr_call(?f, ?args, ?ann)) {
+
+ auto pair = cx.build.Alloca(T_fn_pair(cx.fcx.ccx.tn,
+ iter_body_llty));
+ auto code_cell = cx.build.GEP(pair,
+ vec(C_int(0),
+ C_int(abi.fn_field_code)));
+ cx.build.Store(lliterbody, code_cell);
+
+ // log "lliterbody: " + val_str(cx.fcx.ccx.tn, lliterbody);
+ ret trans_call(cx, f,
+ some[ValueRef](cx.build.Load(pair)),
+ args,
+ ann);
+ }
+ }
+ fail;
+}
+
+
fn trans_while(@block_ctxt cx, @ast.expr cond,
&ast.block body) -> result {
@@ -2061,17 +2899,29 @@ fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }
+
+ case (ast.pat_lit(?lt, ?ann)) {
+ auto lllit = trans_lit(cx.fcx.ccx, *lt, ann);
+ auto lltype = ty.ann_to_type(ann);
+ auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);
+
+ auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
+ lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
+ ret res(matched_cx, llval);
+ }
+
case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
auto lltagptr = cx.build.GEP(llval, vec(C_int(0), C_int(0)));
auto lltag = cx.build.Load(lltagptr);
auto vdef = option.get[ast.variant_def](vdef_opt);
auto variant_id = vdef._1;
- auto tinfo = cx.fcx.ccx.tags.get(vdef._0);
auto variant_tag = 0;
+
+ auto variants = tag_variants(cx.fcx.ccx, vdef._0);
auto i = 0;
- for (tup(ast.def_id,arity) vinfo in tinfo.variants) {
- auto this_variant_id = vinfo._0;
+ for (ast.variant v in variants) {
+ auto this_variant_id = v.id;
if (variant_id._0 == this_variant_id._0 &&
variant_id._1 == this_variant_id._1) {
variant_tag = i;
@@ -2113,6 +2963,7 @@ fn trans_pat_binding(@block_ctxt cx, @ast.pat pat, ValueRef llval)
-> result {
alt (pat.node) {
case (ast.pat_wild(_)) { ret res(cx, llval); }
+ case (ast.pat_lit(_, _)) { ret res(cx, llval); }
case (ast.pat_bind(?id, ?def_id, ?ann)) {
auto ty = node_ann_type(cx.fcx.ccx, ann);
auto llty = type_of(cx.fcx.ccx, ty);
@@ -2204,6 +3055,34 @@ fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
llobj=none[ValueRef]);
}
+fn lval_generic_fn(@block_ctxt cx,
+ ty.ty_params_and_ty tpt,
+ ast.def_id fn_id,
+ &ast.ann ann)
+ -> lval_result {
+
+ check (cx.fcx.ccx.fn_pairs.contains_key(fn_id));
+ auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(fn_id));
+ auto monoty = node_ann_type(cx.fcx.ccx, ann);
+ auto tys = ty.resolve_ty_params(tpt, monoty);
+
+ if (_vec.len[@ty.t](tys) != 0u) {
+ auto bcx = cx;
+ let vec[ValueRef] tydescs = vec();
+ for (@ty.t t in tys) {
+ auto td = get_tydesc(bcx, t);
+ bcx = td.bcx;
+ append[ValueRef](tydescs, td.val);
+ }
+ auto gen = rec( item_type = tpt._1,
+ tydescs = tydescs );
+ lv = rec(res = res(bcx, lv.res.val),
+ generic = some[generic_info](gen)
+ with lv);
+ }
+ ret lv;
+}
+
fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
&ast.ann ann) -> lval_result {
alt (dopt) {
@@ -2226,49 +3105,60 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
ret lval_mem(cx, cx.fcx.llobjfields.get(did));
}
case (ast.def_fn(?did)) {
- check (cx.fcx.ccx.fn_pairs.contains_key(did));
- check (cx.fcx.ccx.item_ids.contains_key(did));
-
+ check (cx.fcx.ccx.items.contains_key(did));
auto fn_item = cx.fcx.ccx.items.get(did);
- auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(did));
- auto monoty = node_ann_type(cx.fcx.ccx, ann);
- auto tys = ty.resolve_ty_params(fn_item, monoty);
-
- if (_vec.len[@ty.t](tys) != 0u) {
- auto bcx = cx;
- let vec[ValueRef] tydescs = vec();
- for (@ty.t t in tys) {
- auto td = get_tydesc(bcx, t);
- bcx = td.bcx;
- append[ValueRef](tydescs, td.val);
- }
- auto gen = rec( item_type = ty.item_ty(fn_item)._1,
- tydescs = tydescs );
- lv = rec(res = res(bcx, lv.res.val),
- generic = some[generic_info](gen)
- with lv);
- }
-
- ret lv;
+ ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
}
case (ast.def_obj(?did)) {
- check (cx.fcx.ccx.fn_pairs.contains_key(did));
- ret lval_val(cx, cx.fcx.ccx.fn_pairs.get(did));
+ check (cx.fcx.ccx.items.contains_key(did));
+ auto fn_item = cx.fcx.ccx.items.get(did);
+ ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
}
case (ast.def_variant(?tid, ?vid)) {
- check (cx.fcx.ccx.tags.contains_key(tid));
if (cx.fcx.ccx.fn_pairs.contains_key(vid)) {
- ret lval_val(cx, cx.fcx.ccx.fn_pairs.get(vid));
+ check (cx.fcx.ccx.items.contains_key(tid));
+ auto tag_item = cx.fcx.ccx.items.get(tid);
+ auto params = ty.item_ty(tag_item)._0;
+ auto fty = plain_ty(ty.ty_nil);
+ alt (tag_item.node) {
+ case (ast.item_tag(_, ?variants, _, _)) {
+ for (ast.variant v in variants) {
+ if (v.id == vid) {
+ fty = node_ann_type(cx.fcx.ccx,
+ v.ann);
+ }
+ }
+ }
+ }
+ ret lval_generic_fn(cx, tup(params, fty), vid, ann);
} else {
- // Nullary variants are just scalar constants.
- check (cx.fcx.ccx.item_ids.contains_key(vid));
- ret lval_val(cx, cx.fcx.ccx.item_ids.get(vid));
+ // Nullary variant.
+ auto tag_ty = node_ann_type(cx.fcx.ccx, ann);
+ auto lldiscrim_gv = cx.fcx.ccx.discrims.get(vid);
+ auto lldiscrim = cx.build.Load(lldiscrim_gv);
+
+ auto alloc_result = alloc_ty(cx, tag_ty);
+ auto lltagblob = alloc_result.val;
+ auto lltagptr = alloc_result.bcx.build.PointerCast(
+ lltagblob, T_ptr(type_of(cx.fcx.ccx, tag_ty)));
+
+ auto lldiscrimptr = alloc_result.bcx.build.GEP(
+ lltagptr, vec(C_int(0), C_int(0)));
+ alloc_result.bcx.build.Store(lldiscrim, lldiscrimptr);
+
+ ret lval_val(alloc_result.bcx, lltagptr);
}
}
case (ast.def_const(?did)) {
check (cx.fcx.ccx.consts.contains_key(did));
ret lval_mem(cx, cx.fcx.ccx.consts.get(did));
}
+ case (ast.def_native_fn(?did)) {
+ check (cx.fcx.ccx.native_items.contains_key(did));
+ auto fn_item = cx.fcx.ccx.native_items.get(did);
+ ret lval_generic_fn(cx, ty.native_item_ty(fn_item),
+ did, ann);
+ }
case (_) {
cx.fcx.ccx.sess.unimpl("def variant in trans");
}
@@ -2283,11 +3173,10 @@ fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
fn trans_field(@block_ctxt cx, &ast.span sp, @ast.expr base,
&ast.ident field, &ast.ann ann) -> lval_result {
- auto lv = trans_lval(cx, base);
- auto r = lv.res;
- r = autoderef(r.bcx, r.val, ty.expr_ty(base));
- check (lv.is_mem);
+ auto r = trans_expr(cx, base);
auto t = ty.expr_ty(base);
+ r = autoderef(r.bcx, r.val, t);
+ t = autoderefed_ty(t);
alt (t.struct) {
case (ty.ty_tup(?fields)) {
let uint ix = ty.field_num(cx.fcx.ccx.sess, sp, field);
@@ -2325,10 +3214,23 @@ fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
auto v = lv.val;
auto bcx = ix.bcx;
+ // Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
+ auto ix_val;
+ auto ix_size = llsize_of_real(cx.fcx.ccx, val_ty(ix.val));
+ auto int_size = llsize_of_real(cx.fcx.ccx, T_int());
+ if (ix_size < int_size) {
+ ix_val = bcx.build.ZExt(ix.val, T_int());
+ } else if (ix_size > int_size) {
+ ix_val = bcx.build.Trunc(ix.val, T_int());
+ } else {
+ ix_val = ix.val;
+ }
+
auto llunit_ty = node_type(cx.fcx.ccx, ann);
auto unit_sz = size_of(bcx, node_ann_type(cx.fcx.ccx, ann));
bcx = unit_sz.bcx;
- auto scaled_ix = bcx.build.Mul(ix.val, unit_sz.val);
+
+ auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
lim = bcx.build.Load(lim);
@@ -2345,7 +3247,7 @@ fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
fail_res.bcx.build.Br(next_cx.llbb);
auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
- auto elt = next_cx.build.GEP(body, vec(C_int(0), ix.val));
+ auto elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
ret lval_mem(next_cx, elt);
}
@@ -2400,125 +3302,24 @@ fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
ret e_res;
}
-
-// NB: this must match type_of_fn_full and create_llargs_for_fn_args.
-fn trans_args(@block_ctxt cx,
- ValueRef llclosure,
- option.t[ValueRef] llobj,
- option.t[generic_info] gen,
- &vec[@ast.expr] es,
- @ty.t fn_ty)
- -> tup(@block_ctxt, vec[ValueRef], option.t[ValueRef]) {
- let vec[ValueRef] vs = vec(cx.fcx.lltaskptr);
- let @block_ctxt bcx = cx;
-
- let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
-
- let option.t[ValueRef] llretslot_opt = none[ValueRef];
-
- alt (gen) {
- case (some[generic_info](?g)) {
- for (ValueRef t in g.tydescs) {
- vs += t;
- }
- args = ty.ty_fn_args(g.item_type);
- if (ty.type_has_dynamic_size(ty.ty_fn_ret(g.item_type))) {
- auto retty = ty.ty_fn_ret(fn_ty);
- auto llretty = type_of(cx.fcx.ccx, retty);
- auto llretslot = cx.build.Alloca(llretty);
- vs += cx.build.PointerCast(llretslot, T_ptr(T_i8()));
- llretslot_opt = some[ValueRef](llretslot);
- }
- }
- case (_) { }
- }
-
- alt (llobj) {
- case (some[ValueRef](?ob)) {
- // Every object is always found in memory,
- // and not-yet-loaded (as part of an lval x.y
- // doted method-call).
- vs += cx.build.Load(ob);
- }
- case (_) {
- vs += llclosure;
- }
- }
-
- auto i = 0u;
- for (@ast.expr e in es) {
- auto mode = args.(i).mode;
-
- auto val;
- if (ty.type_is_structural(ty.expr_ty(e))) {
- auto re = trans_expr(bcx, e);
- val = re.val;
- bcx = re.bcx;
- if (mode == ast.val) {
- // Until here we've been treating structures by pointer;
- // we are now passing it as an arg, so need to load it.
- val = bcx.build.Load(val);
- }
- } else if (mode == ast.alias) {
- let lval_result lv;
- if (ty.is_lval(e)) {
- lv = trans_lval(bcx, e);
- } else {
- auto r = trans_expr(bcx, e);
- lv = lval_val(r.bcx, r.val);
- }
- bcx = lv.res.bcx;
-
- if (lv.is_mem) {
- val = lv.res.val;
- } else {
- // Non-mem but we're trying to alias; synthesize an
- // alloca, spill to it and pass its address.
- auto llty = val_ty(lv.res.val);
- auto llptr = lv.res.bcx.build.Alloca(llty);
- lv.res.bcx.build.Store(lv.res.val, llptr);
- val = llptr;
- }
-
- } else {
- auto re = trans_expr(bcx, e);
- val = re.val;
- bcx = re.bcx;
- }
-
- if (ty.type_has_dynamic_size(args.(i).ty)) {
- val = bcx.build.PointerCast(val, T_typaram_ptr());
- }
-
- vs += val;
- i += 1u;
- }
-
- ret tup(bcx, vs, llretslot_opt);
-}
-
fn trans_bind_thunk(@crate_ctxt cx,
@ty.t incoming_fty,
@ty.t outgoing_fty,
vec[option.t[@ast.expr]] args,
TypeRef llclosure_ty,
- vec[@ty.t] bound_tys) -> ValueRef {
+ vec[@ty.t] bound_tys,
+ uint ty_param_count) -> ValueRef {
// Construct a thunk-call with signature incoming_fty, and that copies
// args forward into a call to outgoing_fty.
- let str s = cx.names.next("_rust_thunk") + "." + cx.path;
+ let str s = cx.names.next("_rust_thunk") + sep() + cx.path;
let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx, incoming_fty));
let ValueRef llthunk = decl_fastcall_fn(cx.llmod, s, llthunk_ty);
- let @ty.t rty = ret_ty_of_fn_ty(incoming_fty);
-
- // FIXME: handle ty params properly.
- let vec[ast.ty_param] ty_params = vec();
-
- auto fcx = new_fn_ctxt(cx, s, llthunk);
+ auto fcx = new_fn_ctxt(cx, llthunk);
auto bcx = new_top_block_ctxt(fcx);
- auto llclosure = bcx.build.PointerCast(fcx.llclosure, llclosure_ty);
+ auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ty);
auto llbody = bcx.build.GEP(llclosure,
vec(C_int(0),
@@ -2536,10 +3337,33 @@ fn trans_bind_thunk(@crate_ctxt cx,
vec(C_int(0),
C_int(abi.fn_field_box)));
lltargetclosure = bcx.build.Load(lltargetclosure);
- let vec[ValueRef] llargs = vec(fcx.lltaskptr,
+
+ auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
+ auto outgoing_arg_tys = ty.ty_fn_args(outgoing_fty);
+
+ auto llretptr = fcx.llretptr;
+ if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
+ llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.tn));
+ }
+
+ let vec[ValueRef] llargs = vec(llretptr,
+ fcx.lltaskptr,
lltargetclosure);
- let uint a = 0u;
+
+ // Copy in the type parameters.
+ let uint i = 0u;
+ while (i < ty_param_count) {
+ auto lltyparam_ptr =
+ bcx.build.GEP(llbody, vec(C_int(0),
+ C_int(abi.closure_elt_ty_params),
+ C_int(i as int)));
+ llargs += vec(bcx.build.Load(lltyparam_ptr));
+ i += 1u;
+ }
+
+ let uint a = 2u + i; // retptr, task ptr, env come first
let int b = 0;
+ let uint outgoing_arg_index = 0u;
for (option.t[@ast.expr] arg in args) {
alt (arg) {
@@ -2556,10 +3380,19 @@ fn trans_bind_thunk(@crate_ctxt cx,
// Arg will be provided when the thunk is invoked.
case (none[@ast.expr]) {
let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);
+ if (ty.type_has_dynamic_size(outgoing_arg_tys.
+ (outgoing_arg_index).ty)) {
+ // Cast to a generic typaram pointer in order to make a
+ // type-compatible call.
+ passed_arg = bcx.build.PointerCast(passed_arg,
+ T_typaram_ptr(cx.tn));
+ }
llargs += passed_arg;
a += 1u;
}
}
+
+        outgoing_arg_index += 1u;
}
// FIXME: turn this call + ret into a tail call.
@@ -2567,21 +3400,9 @@ fn trans_bind_thunk(@crate_ctxt cx,
vec(C_int(0),
C_int(abi.fn_field_code)));
lltargetfn = bcx.build.Load(lltargetfn);
- auto r = bcx.build.FastCall(lltargetfn, llargs);
- alt (fcx.llretptr) {
- case (some[ValueRef](?llptr)) {
- bcx.build.Store(bcx.build.Load(r), llptr);
- bcx.build.RetVoid();
- }
- case (none[ValueRef]) {
- if (ty.type_is_nil(rty)) {
- bcx.build.RetVoid();
- } else {
- bcx.build.Ret(r);
- }
- }
- }
+ auto r = bcx.build.FastCall(lltargetfn, llargs);
+ bcx.build.RetVoid();
ret llthunk;
}
@@ -2604,7 +3425,23 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
}
}
}
- if (_vec.len[@ast.expr](bound) == 0u) {
+
+ // Figure out which tydescs we need to pass, if any.
+ let @ty.t outgoing_fty;
+ let vec[ValueRef] lltydescs;
+ alt (f_res.generic) {
+ case (none[generic_info]) {
+ outgoing_fty = ty.expr_ty(f);
+ lltydescs = vec();
+ }
+ case (some[generic_info](?ginfo)) {
+ outgoing_fty = ginfo.item_type;
+ lltydescs = ginfo.tydescs;
+ }
+ }
+ auto ty_param_count = _vec.len[ValueRef](lltydescs);
+
+ if (_vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
// Trivial 'binding': just return the static pair-ptr.
ret f_res.res;
} else {
@@ -2615,22 +3452,32 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
// Translate the bound expressions.
let vec[@ty.t] bound_tys = vec();
let vec[ValueRef] bound_vals = vec();
+ auto i = 0u;
for (@ast.expr e in bound) {
auto arg = trans_expr(bcx, e);
bcx = arg.bcx;
+
append[ValueRef](bound_vals, arg.val);
append[@ty.t](bound_tys, ty.expr_ty(e));
+
+ i += 1u;
}
+ // Get the type of the bound function.
+ let TypeRef lltarget_ty = type_of(bcx.fcx.ccx, outgoing_fty);
+
// Synthesize a closure type.
- let @ty.t bindings_ty = ty.plain_ty(ty.ty_tup(bound_tys));
- let TypeRef lltarget_ty = type_of(bcx.fcx.ccx, ty.expr_ty(f));
+ let @ty.t bindings_ty = plain_ty(ty.ty_tup(bound_tys));
let TypeRef llbindings_ty = type_of(bcx.fcx.ccx, bindings_ty);
- let TypeRef llclosure_ty = T_closure_ptr(lltarget_ty,
- llbindings_ty);
+ let TypeRef llclosure_ty = T_closure_ptr(cx.fcx.ccx.tn,
+ lltarget_ty,
+ llbindings_ty,
+ ty_param_count);
// Malloc a box for the body.
- auto r = trans_malloc_inner(bcx, llclosure_ty);
+ // FIXME: this isn't generic-safe
+ auto r = trans_raw_malloc(bcx, llclosure_ty,
+ llsize_of(llvm.LLVMGetElementType(llclosure_ty)));
auto box = r.val;
bcx = r.bcx;
auto rc = bcx.build.GEP(box,
@@ -2656,19 +3503,40 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_target)));
- bcx.build.Store(bcx.build.Load(f_res.res.val), bound_target);
+ auto src = bcx.build.Load(f_res.res.val);
+ bcx.build.Store(src, bound_target);
// Copy expr values into boxed bindings.
- let int i = 0;
+ i = 0u;
auto bindings =
bcx.build.GEP(closure,
vec(C_int(0),
C_int(abi.closure_elt_bindings)));
for (ValueRef v in bound_vals) {
auto bound = bcx.build.GEP(bindings,
- vec(C_int(0),C_int(i)));
+ vec(C_int(0), C_int(i as int)));
bcx = copy_ty(r.bcx, INIT, bound, v, bound_tys.(i)).bcx;
- i += 1;
+ i += 1u;
+ }
+
+ // If necessary, copy tydescs describing type parameters into the
+ // appropriate slot in the closure.
+ alt (f_res.generic) {
+ case (none[generic_info]) { /* nothing to do */ }
+ case (some[generic_info](?ginfo)) {
+ auto ty_params_slot =
+ bcx.build.GEP(closure,
+ vec(C_int(0),
+ C_int(abi.closure_elt_ty_params)));
+ auto i = 0;
+ for (ValueRef td in ginfo.tydescs) {
+ auto ty_param_slot = bcx.build.GEP(ty_params_slot,
+ vec(C_int(0),
+ C_int(i)));
+ bcx.build.Store(td, ty_param_slot);
+ i += 1;
+ }
+ }
}
// Make thunk and store thunk-ptr in outer pair's code slot.
@@ -2678,8 +3546,9 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
let @ty.t pair_ty = node_ann_type(cx.fcx.ccx, ann);
let ValueRef llthunk =
- trans_bind_thunk(cx.fcx.ccx, pair_ty, ty.expr_ty(f),
- args, llclosure_ty, bound_tys);
+ trans_bind_thunk(cx.fcx.ccx, pair_ty, outgoing_fty,
+ args, llclosure_ty, bound_tys,
+ ty_param_count);
bcx.build.Store(llthunk, pair_code);
@@ -2687,9 +3556,11 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
auto pair_box = bcx.build.GEP(pair_v,
vec(C_int(0),
C_int(abi.fn_field_box)));
- bcx.build.Store(bcx.build.PointerCast(box,
- T_opaque_closure_ptr()),
- pair_box);
+ bcx.build.Store
+ (bcx.build.PointerCast
+ (box,
+ T_opaque_closure_ptr(bcx.fcx.ccx.tn)),
+ pair_box);
find_scope_cx(cx).cleanups +=
clean(bind drop_slot(_, pair_v, pair_ty));
@@ -2699,11 +3570,153 @@ fn trans_bind(@block_ctxt cx, @ast.expr f,
}
}
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
+fn trans_args(@block_ctxt cx,
+ ValueRef llenv,
+ option.t[ValueRef] llobj,
+ option.t[generic_info] gen,
+ option.t[ValueRef] lliterbody,
+ &vec[@ast.expr] es,
+ @ty.t fn_ty)
+ -> tup(@block_ctxt, vec[ValueRef], ValueRef) {
+
+ let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
+ let vec[ValueRef] llargs = vec();
+ let vec[ValueRef] lltydescs = vec();
+ let @block_ctxt bcx = cx;
+
+
+ // Arg 0: Output pointer.
+ auto retty = ty.ty_fn_ret(fn_ty);
+ auto llretslot_res = alloc_ty(bcx, retty);
+ bcx = llretslot_res.bcx;
+ auto llretslot = llretslot_res.val;
+
+ alt (gen) {
+ case (some[generic_info](?g)) {
+ lltydescs = g.tydescs;
+ args = ty.ty_fn_args(g.item_type);
+ retty = ty.ty_fn_ret(g.item_type);
+ }
+ case (_) {
+ }
+ }
+ if (ty.type_has_dynamic_size(retty)) {
+ llargs += bcx.build.PointerCast(llretslot,
+ T_typaram_ptr(cx.fcx.ccx.tn));
+ } else if (ty.count_ty_params(retty) != 0u) {
+ // It's possible that the callee has some generic-ness somewhere in
+ // its return value -- say a method signature within an obj or a fn
+ // type deep in a structure -- which the caller has a concrete view
+    // of. If so, cast the caller's view of the retslot to the callee's
+ // view, for the sake of making a type-compatible call.
+ llargs += cx.build.PointerCast(llretslot,
+ T_ptr(type_of(bcx.fcx.ccx, retty)));
+ } else {
+ llargs += llretslot;
+ }
+
+
+ // Arg 1: Task pointer.
+ llargs += bcx.fcx.lltaskptr;
+
+ // Arg 2: Env (closure-bindings / self-obj)
+ alt (llobj) {
+ case (some[ValueRef](?ob)) {
+ // Every object is always found in memory,
+ // and not-yet-loaded (as part of an lval x.y
+            // dotted method-call).
+ llargs += bcx.build.Load(ob);
+ }
+ case (_) {
+ llargs += llenv;
+ }
+ }
+
+ // Args >3: ty_params ...
+ llargs += lltydescs;
+
+ // ... then possibly an lliterbody argument.
+ alt (lliterbody) {
+ case (none[ValueRef]) {}
+ case (some[ValueRef](?lli)) {
+ llargs += lli;
+ }
+ }
+
+ // ... then explicit args.
+
+ // First we figure out the caller's view of the types of the arguments.
+ // This will be needed if this is a generic call, because the callee has
+ // to cast her view of the arguments to the caller's view.
+ auto arg_tys = type_of_explicit_args(cx.fcx.ccx, args);
+
+ auto i = 0u;
+ for (@ast.expr e in es) {
+ auto mode = args.(i).mode;
+
+ auto val;
+ if (ty.type_is_structural(ty.expr_ty(e))) {
+ auto re = trans_expr(bcx, e);
+ val = re.val;
+ bcx = re.bcx;
+ if (mode == ast.val) {
+ // Until here we've been treating structures by pointer;
+ // we are now passing it as an arg, so need to load it.
+ val = bcx.build.Load(val);
+ }
+ } else if (mode == ast.alias) {
+ let lval_result lv;
+ if (ty.is_lval(e)) {
+ lv = trans_lval(bcx, e);
+ } else {
+ auto r = trans_expr(bcx, e);
+ lv = lval_val(r.bcx, r.val);
+ }
+ bcx = lv.res.bcx;
+
+ if (lv.is_mem) {
+ val = lv.res.val;
+ } else {
+ // Non-mem but we're trying to alias; synthesize an
+ // alloca, spill to it and pass its address.
+ auto llty = val_ty(lv.res.val);
+ auto llptr = lv.res.bcx.build.Alloca(llty);
+ lv.res.bcx.build.Store(lv.res.val, llptr);
+ val = llptr;
+ }
+
+ } else {
+ auto re = trans_expr(bcx, e);
+ val = re.val;
+ bcx = re.bcx;
+ }
+
+ if (ty.count_ty_params(args.(i).ty) > 0u) {
+ auto lldestty = arg_tys.(i);
+ val = bcx.build.PointerCast(val, lldestty);
+ }
+
+ llargs += val;
+ i += 1u;
+ }
+
+ ret tup(bcx, llargs, llretslot);
+}
+
fn trans_call(@block_ctxt cx, @ast.expr f,
- vec[@ast.expr] args, &ast.ann ann) -> result {
+ option.t[ValueRef] lliterbody,
+ vec[@ast.expr] args,
+ &ast.ann ann) -> result {
auto f_res = trans_lval(cx, f);
auto faddr = f_res.res.val;
- auto llclosure = C_null(T_opaque_closure_ptr());
+ auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.ccx.tn));
alt (f_res.llobj) {
case (some[ValueRef](_)) {
@@ -2718,71 +3731,67 @@ fn trans_call(@block_ctxt cx, @ast.expr f,
C_int(abi.fn_field_code)));
faddr = bcx.build.Load(faddr);
- llclosure = bcx.build.GEP(pair, vec(C_int(0),
- C_int(abi.fn_field_box)));
- llclosure = bcx.build.Load(llclosure);
+ auto llclosure = bcx.build.GEP(pair,
+ vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ llenv = bcx.build.Load(llclosure);
}
}
auto fn_ty = ty.expr_ty(f);
auto ret_ty = ty.ann_to_type(ann);
auto args_res = trans_args(f_res.res.bcx,
- llclosure, f_res.llobj,
+ llenv, f_res.llobj,
f_res.generic,
+ lliterbody,
args, fn_ty);
auto bcx = args_res._0;
- auto real_retval = bcx.build.FastCall(faddr, args_res._1);
- auto retval = real_retval;
+ auto llargs = args_res._1;
+ auto llretslot = args_res._2;
- if (ty.type_is_nil(ret_ty)) {
- retval = C_nil();
- }
+ /*
+ log "calling: " + val_str(cx.fcx.ccx.tn, faddr);
- // Check for a generic retslot.
- alt (args_res._2) {
+ for (ValueRef arg in llargs) {
+ log "arg: " + val_str(cx.fcx.ccx.tn, arg);
+ }
+ */
- case (some[ValueRef](?llretslot)) {
- retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
- }
+ bcx.build.FastCall(faddr, llargs);
+ auto retval = C_nil();
- case (none[ValueRef]) {
- if (! (ty.type_is_scalar(ret_ty) ||
- ty.type_is_boxed(ret_ty))) {
- // Structured returns come back as first-class values. This is
- // nice for LLVM but wrong for us; we treat structured values
- // by pointer in most of our code here. So spill it to an
- // alloca.
- auto local = bcx.build.Alloca(type_of(cx.fcx.ccx, ret_ty));
- bcx.build.Store(retval, local);
- retval = local;
- }
- }
+ if (!ty.type_is_nil(ret_ty)) {
+ retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
+ // Retval doesn't correspond to anything really tangible in the frame,
+ // but it's a ref all the same, so we put a note here to drop it when
+ // we're done in this scope.
+ find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
}
- // Retval doesn't correspond to anything really tangible in the frame, but
- // it's a ref all the same, so we put a note here to drop it when we're
- // done in this scope.
- find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
-
ret res(bcx, retval);
}
fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
&ast.ann ann) -> result {
- auto t = node_ann_type(cx.fcx.ccx, ann);
- auto llty = type_of(cx.fcx.ccx, t);
- auto tup_val = cx.build.Alloca(llty);
+ auto bcx = cx;
+ auto t = node_ann_type(bcx.fcx.ccx, ann);
+ auto tup_res = alloc_ty(bcx, t);
+ auto tup_val = tup_res.val;
+ bcx = tup_res.bcx;
+
find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tup_val, t));
let int i = 0;
- auto r = res(cx, C_nil());
+
for (ast.elt e in elts) {
- auto t = ty.expr_ty(e.expr);
- auto src_res = trans_expr(r.bcx, e.expr);
- auto dst_elt = r.bcx.build.GEP(tup_val, vec(C_int(0), C_int(i)));
- r = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, t);
+ auto e_ty = ty.expr_ty(e.expr);
+ auto src_res = trans_expr(bcx, e.expr);
+ bcx = src_res.bcx;
+ auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
+ bcx = dst_res.bcx;
+ bcx = copy_ty(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
- ret res(r.bcx, tup_val);
+ ret res(bcx, tup_val);
}
fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
@@ -2807,44 +3816,89 @@ fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
// FIXME: pass tydesc properly.
auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)));
+ bcx = sub.bcx;
auto llty = type_of(bcx.fcx.ccx, t);
- auto vec_val = sub.bcx.build.IntToPtr(sub.val, llty);
+ auto vec_val = vi2p(bcx, sub.val, llty);
find_scope_cx(bcx).cleanups += clean(bind drop_ty(_, vec_val, t));
- auto body = sub.bcx.build.GEP(vec_val, vec(C_int(0),
- C_int(abi.vec_elt_data)));
+ auto body = bcx.build.GEP(vec_val, vec(C_int(0),
+ C_int(abi.vec_elt_data)));
+
+ auto pseudo_tup_ty =
+ plain_ty(ty.ty_tup(_vec.init_elt[@ty.t](unit_ty,
+ _vec.len[@ast.expr](args))));
let int i = 0;
+
for (@ast.expr e in args) {
- auto src_res = trans_expr(sub.bcx, e);
- auto dst_elt = sub.bcx.build.GEP(body, vec(C_int(0), C_int(i)));
- sub = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, unit_ty);
+ auto src_res = trans_expr(bcx, e);
+ bcx = src_res.bcx;
+ auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
+ bcx = dst_res.bcx;
+ bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, unit_ty).bcx;
i += 1;
}
- auto fill = sub.bcx.build.GEP(vec_val,
- vec(C_int(0), C_int(abi.vec_elt_fill)));
- sub.bcx.build.Store(data_sz, fill);
+ auto fill = bcx.build.GEP(vec_val,
+ vec(C_int(0), C_int(abi.vec_elt_fill)));
+ bcx.build.Store(data_sz, fill);
- ret res(sub.bcx, vec_val);
+ ret res(bcx, vec_val);
}
fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
- &ast.ann ann) -> result {
- auto t = node_ann_type(cx.fcx.ccx, ann);
- auto llty = type_of(cx.fcx.ccx, t);
- auto rec_val = cx.build.Alloca(llty);
+ option.t[@ast.expr] base, &ast.ann ann) -> result {
+
+ auto bcx = cx;
+ auto t = node_ann_type(bcx.fcx.ccx, ann);
+ auto llty = type_of(bcx.fcx.ccx, t);
+ auto rec_res = alloc_ty(bcx, t);
+ auto rec_val = rec_res.val;
+ bcx = rec_res.bcx;
+
find_scope_cx(cx).cleanups += clean(bind drop_ty(_, rec_val, t));
let int i = 0;
- auto r = res(cx, C_nil());
- for (ast.field f in fields) {
- auto t = ty.expr_ty(f.expr);
- auto src_res = trans_expr(r.bcx, f.expr);
- auto dst_elt = r.bcx.build.GEP(rec_val, vec(C_int(0), C_int(i)));
- // FIXME: calculate copy init-ness in typestate.
- r = copy_ty(src_res.bcx, INIT, dst_elt, src_res.val, t);
+
+ auto base_val = C_nil();
+
+ alt (base) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?bexp)) {
+ auto base_res = trans_expr(bcx, bexp);
+ bcx = base_res.bcx;
+ base_val = base_res.val;
+ }
+ }
+
+ let vec[ty.field] ty_fields = vec();
+ alt (t.struct) {
+ case (ty.ty_rec(?flds)) { ty_fields = flds; }
+ }
+
+ for (ty.field tf in ty_fields) {
+ auto e_ty = tf.ty;
+ auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
+ bcx = dst_res.bcx;
+
+ auto expr_provided = false;
+ auto src_res = res(bcx, C_nil());
+
+ for (ast.field f in fields) {
+ if (_str.eq(f.ident, tf.ident)) {
+ expr_provided = true;
+ src_res = trans_expr(bcx, f.expr);
+ }
+ }
+ if (!expr_provided) {
+ src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
+ src_res = res(src_res.bcx,
+ load_scalar_or_boxed(bcx, src_res.val, e_ty));
+ }
+
+ bcx = src_res.bcx;
+ bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
- ret res(r.bcx, rec_val);
+ ret res(bcx, rec_val);
}
@@ -2871,6 +3925,10 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
ret trans_for(cx, decl, seq, body);
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
+ ret trans_for_each(cx, decl, seq, body);
+ }
+
case (ast.expr_while(?cond, ?body, _)) {
ret trans_while(cx, cond, body);
}
@@ -2911,10 +3969,11 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
auto lhs_val = load_scalar_or_boxed(lhs_res.res.bcx,
lhs_res.res.val, t);
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
- auto v = trans_eager_binop(rhs_res.bcx, op, lhs_val, rhs_res.val);
+ auto v = trans_eager_binop(rhs_res.bcx, op, t,
+ lhs_val, rhs_res.val);
// FIXME: calculate copy init-ness in typestate.
- ret copy_ty(rhs_res.bcx, DROP_EXISTING,
- lhs_res.res.val, v, t);
+ ret copy_ty(v.bcx, DROP_EXISTING,
+ lhs_res.res.val, v.val, t);
}
case (ast.expr_bind(?f, ?args, ?ann)) {
@@ -2922,7 +3981,7 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
}
case (ast.expr_call(?f, ?args, ?ann)) {
- ret trans_call(cx, f, args, ann);
+ ret trans_call(cx, f, none[ValueRef], args, ann);
}
case (ast.expr_cast(?e, _, ?ann)) {
@@ -2937,8 +3996,36 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
ret trans_tup(cx, args, ann);
}
- case (ast.expr_rec(?args, ?ann)) {
- ret trans_rec(cx, args, ann);
+ case (ast.expr_rec(?args, ?base, ?ann)) {
+ ret trans_rec(cx, args, base, ann);
+ }
+
+ case (ast.expr_ext(_, _, _, ?expanded, _)) {
+ ret trans_expr(cx, expanded);
+ }
+
+ case (ast.expr_fail) {
+ ret trans_fail(cx, e.span, "explicit failure");
+ }
+
+ case (ast.expr_log(?a)) {
+ ret trans_log(cx, a);
+ }
+
+ case (ast.expr_check_expr(?a)) {
+ ret trans_check_expr(cx, a);
+ }
+
+ case (ast.expr_ret(?e)) {
+ ret trans_ret(cx, e);
+ }
+
+ case (ast.expr_put(?e)) {
+ ret trans_put(cx, e);
+ }
+
+ case (ast.expr_be(?e)) {
+ ret trans_be(cx, e);
}
// lval cases fall through to trans_lval and then
@@ -2962,7 +4049,7 @@ fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
fn load_scalar_or_boxed(@block_ctxt cx,
ValueRef v,
@ty.t t) -> ValueRef {
- if (ty.type_is_scalar(t) || ty.type_is_boxed(t)) {
+ if (ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t)) {
ret cx.build.Load(v);
} else {
ret v;
@@ -2975,7 +4062,7 @@ fn trans_log(@block_ctxt cx, @ast.expr e) -> result {
auto e_ty = ty.expr_ty(e);
alt (e_ty.struct) {
case (ty.ty_str) {
- auto v = sub.bcx.build.PtrToInt(sub.val, T_int());
+ auto v = vp2i(sub.bcx, sub.val);
ret trans_upcall(sub.bcx,
"upcall_log_str",
vec(v));
@@ -3014,6 +4101,38 @@ fn trans_fail(@block_ctxt cx, common.span sp, str fail_str) -> result {
ret trans_upcall(cx, "upcall_fail", args);
}
+fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
+ auto llcallee = C_nil();
+ auto llenv = C_nil();
+
+ alt (cx.fcx.lliterbody) {
+ case (some[ValueRef](?lli)) {
+ auto slot = cx.build.Alloca(val_ty(lli));
+ cx.build.Store(lli, slot);
+
+ llcallee = cx.build.GEP(slot, vec(C_int(0),
+ C_int(abi.fn_field_code)));
+ llcallee = cx.build.Load(llcallee);
+
+ llenv = cx.build.GEP(slot, vec(C_int(0),
+ C_int(abi.fn_field_box)));
+ llenv = cx.build.Load(llenv);
+ }
+ }
+ auto bcx = cx;
+ auto dummy_retslot = bcx.build.Alloca(T_nil());
+ let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
+ alt (e) {
+ case (none[@ast.expr]) { }
+ case (some[@ast.expr](?x)) {
+ auto r = trans_expr(bcx, x);
+ llargs += r.val;
+ bcx = r.bcx;
+ }
+ }
+ ret res(bcx, bcx.build.FastCall(llcallee, llargs));
+}
+
fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
auto bcx = cx;
auto val = C_nil();
@@ -3024,18 +4143,7 @@ fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
auto r = trans_expr(cx, x);
bcx = r.bcx;
val = r.val;
-
- // A return is an implicit copy into a newborn anonymous
- // 'return value' in the caller frame.
- bcx = incr_all_refcnts(bcx, val, t).bcx;
-
- if (ty.type_is_structural(t)) {
- // We usually treat structurals by-pointer; in particular,
- // trans_expr will have given us a structure pointer. But in
- // this case we're about to return. LLVM wants a first-class
- // value here (which makes sense; the frame is going away!)
- val = r.bcx.build.Load(val);
- }
+ bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
}
case (_) { /* fall through */ }
}
@@ -3055,38 +4163,18 @@ fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
}
}
- alt (e) {
- case (some[@ast.expr](?ex)) {
- auto t = ty.expr_ty(ex);
-
- if (ty.type_is_nil(t)) {
- bcx.build.RetVoid();
- val = C_nil();
- ret res(bcx, val); // FIXME: early return needed due to
- // typestate bug
- }
-
- alt (cx.fcx.llretptr) {
- case (some[ValueRef](?llptr)) {
- // Generic return via tydesc + retptr.
- bcx = copy_ty(bcx, INIT, llptr, val, t).bcx;
- bcx.build.RetVoid();
- }
- case (none[ValueRef]) {
- val = bcx.build.Ret(val);
- }
- }
- ret res(bcx, val);
- }
- case (_) { /* fall through */ }
- }
-
- // FIXME: until LLVM has a unit type, we are moving around
- // C_nil values rather than their void type.
bcx.build.RetVoid();
ret res(bcx, C_nil());
}
+fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
+ // FIXME: This should be a typestate precondition
+ check (ast.is_call_expr(e));
+ // FIXME: Turn this into a real tail call once
+ // calling convention issues are settled
+ ret trans_ret(cx, some(e));
+}
+
fn init_local(@block_ctxt cx, @ast.local local) -> result {
// Make a note to drop this slot on the way out.
@@ -3121,22 +4209,6 @@ fn init_local(@block_ctxt cx, @ast.local local) -> result {
fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
auto bcx = cx;
alt (s.node) {
- case (ast.stmt_log(?a)) {
- bcx = trans_log(cx, a).bcx;
- }
-
- case (ast.stmt_check_expr(?a)) {
- bcx = trans_check_expr(cx, a).bcx;
- }
-
- case (ast.stmt_fail) {
- bcx = trans_fail(cx, s.span, "explicit failure").bcx;
- }
-
- case (ast.stmt_ret(?e)) {
- bcx = trans_ret(cx, e).bcx;
- }
-
case (ast.stmt_expr(?e)) {
bcx = trans_expr(cx, e).bcx;
}
@@ -3184,7 +4256,13 @@ fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
- ret new_block_ctxt(fcx, parent_none, SCOPE_BLOCK, "function top level");
+ auto cx = new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
+ "function top level");
+
+ // FIXME: hack to give us some spill room to make up for an LLVM
+ // bug where it destroys its own callee-saves.
+ cx.build.Alloca(T_array(T_int(), 10u));
+ ret cx;
}
// Use this when you're at a curly-brace or similar lexical scope.
@@ -3237,8 +4315,7 @@ iter block_locals(&ast.block b) -> @ast.local {
}
}
-fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
- auto t = node_ann_type(cx.fcx.ccx, local.ann);
+fn alloc_ty(@block_ctxt cx, @ty.t t) -> result {
auto val = C_int(0);
auto bcx = cx;
if (ty.type_has_dynamic_size(t)) {
@@ -3248,10 +4325,16 @@ fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
} else {
val = bcx.build.Alloca(type_of(cx.fcx.ccx, t));
}
- bcx.fcx.lllocals.insert(local.id, val);
ret res(bcx, val);
}
+fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
+ auto t = node_ann_type(cx.fcx.ccx, local.ann);
+ auto r = alloc_ty(cx, t);
+ r.bcx.fcx.lllocals.insert(local.id, r.val);
+ ret r;
+}
+
fn trans_block(@block_ctxt cx, &ast.block b) -> result {
auto bcx = cx;
@@ -3287,12 +4370,19 @@ fn trans_block(@block_ctxt cx, &ast.block b) -> result {
ret res(bcx, r.val);
}
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn new_fn_ctxt(@crate_ctxt cx,
- str name,
ValueRef llfndecl) -> @fn_ctxt {
- let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 0u);
- let ValueRef llclosure = llvm.LLVMGetParam(llfndecl, 1u);
+ let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
+ let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);
let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
@@ -3301,9 +4391,10 @@ fn new_fn_ctxt(@crate_ctxt cx,
ret @rec(llfn=llfndecl,
lltaskptr=lltaskptr,
- llclosure=llclosure,
+ llenv=llenv,
+ llretptr=llretptr,
mutable llself=none[ValueRef],
- mutable llretptr=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
llargs=llargs,
llobjfields=llobjfields,
lllocals=lllocals,
@@ -3311,39 +4402,46 @@ fn new_fn_ctxt(@crate_ctxt cx,
ccx=cx);
}
-// NB: this must match trans_args and type_of_fn_full.
+// NB: must keep 4 fns in sync:
+//
+// - type_of_fn_full
+// - create_llargs_for_fn_args.
+// - new_fn_ctxt
+// - trans_args
+
fn create_llargs_for_fn_args(&@fn_ctxt cx,
+ ast.proto proto,
option.t[TypeRef] ty_self,
@ty.t ret_ty,
&vec[ast.arg] args,
&vec[ast.ty_param] ty_params) {
- let uint arg_n = 1u;
-
- for (ast.ty_param tp in ty_params) {
- auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
- check (llarg as int != 0);
- cx.lltydescs.insert(tp.id, llarg);
- arg_n += 1u;
- }
-
- if (ty.type_has_dynamic_size(ret_ty)) {
- cx.llretptr = some[ValueRef](llvm.LLVMGetParam(cx.llfn, arg_n));
- arg_n += 1u;
- }
alt (ty_self) {
case (some[TypeRef](_)) {
- auto llself = llvm.LLVMGetParam(cx.llfn, arg_n);
- check (llself as int != 0);
- cx.llself = some[ValueRef](llself);
- arg_n += 1u;
+ cx.llself = some[ValueRef](cx.llenv);
}
case (_) {
- // llclosure, we don't know what it is.
+ }
+ }
+
+ auto arg_n = 3u;
+
+ if (ty_self == none[TypeRef]) {
+ for (ast.ty_param tp in ty_params) {
+ auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
+ check (llarg as int != 0);
+ cx.lltydescs.insert(tp.id, llarg);
arg_n += 1u;
}
}
+ if (proto == ast.proto_iter) {
+ auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
+ check (llarg as int != 0);
+ cx.lliterbody = some[ValueRef](llarg);
+ arg_n += 1u;
+ }
+
for (ast.arg arg in args) {
auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
check (llarg as int != 0);
@@ -3398,7 +4496,7 @@ fn is_terminated(@block_ctxt cx) -> bool {
fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
alt (ty.ann_to_type(ann).struct) {
- case (ty.ty_fn(?arg_tys, _)) {
+ case (ty.ty_fn(_, ?arg_tys, _)) {
ret arg_tys;
}
}
@@ -3407,7 +4505,7 @@ fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
fn ret_ty_of_fn_ty(@ty.t t) -> @ty.t {
alt (t.struct) {
- case (ty.ty_fn(_, ?ret_ty)) {
+ case (ty.ty_fn(_, _, ?ret_ty)) {
ret ret_ty;
}
}
@@ -3419,42 +4517,73 @@ fn ret_ty_of_fn(ast.ann ann) -> @ty.t {
ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
}
-fn create_llobjfields_for_fields(@block_ctxt cx, ValueRef llself) {
+fn populate_fn_ctxt_from_llself(@block_ctxt cx, ValueRef llself) -> result {
+ auto bcx = cx;
- let vec[TypeRef] llfield_tys = vec();
+ let vec[@ty.t] field_tys = vec();
- for (ast.obj_field f in cx.fcx.ccx.obj_fields) {
- llfield_tys += node_type(cx.fcx.ccx, f.ann);
+ for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
+ field_tys += vec(node_ann_type(bcx.fcx.ccx, f.ann));
}
- let TypeRef llfields_ty = T_struct(llfield_tys);
- let TypeRef lltydesc_ty = T_ptr(T_tydesc());
- let TypeRef llobj_body_ty = T_struct(vec(lltydesc_ty,
- llfields_ty));
- let TypeRef llobj_box_ty = T_ptr(T_box(llobj_body_ty));
+ // Synthesize a tuple type for the fields so that GEP_tup_like() can work
+ // its magic.
+ auto fields_tup_ty = ty.plain_ty(ty.ty_tup(field_tys));
+
+ auto n_typarams = _vec.len[ast.ty_param](bcx.fcx.ccx.obj_typarams);
+ let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.ccx.tn, n_typarams);
auto box_cell =
- cx.build.GEP(llself,
- vec(C_int(0),
- C_int(abi.obj_field_box)));
+ bcx.build.GEP(llself,
+ vec(C_int(0),
+ C_int(abi.obj_field_box)));
+
+ auto box_ptr = bcx.build.Load(box_cell);
+
+ box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);
+
+ auto obj_typarams = bcx.build.GEP(box_ptr,
+ vec(C_int(0),
+ C_int(abi.box_rc_field_body),
+ C_int(abi.obj_body_elt_typarams)));
+
+ // The object fields immediately follow the type parameters, so we skip
+ // over them to get the pointer.
+ auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
+ llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));
+
+ // If we can (i.e. the type is statically sized), then cast the resulting
+ // fields pointer to the appropriate LLVM type. If not, just leave it as
+ // i8 *.
+ if (!ty.type_has_dynamic_size(fields_tup_ty)) {
+ auto llfields_ty = type_of(bcx.fcx.ccx, fields_tup_ty);
+ obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
+ } else {
+ obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
+ }
- auto box_ptr = cx.build.Load(box_cell);
- box_ptr = cx.build.PointerCast(box_ptr, llobj_box_ty);
+ let int i = 0;
- auto obj_fields = cx.build.GEP(box_ptr,
- vec(C_int(0),
- C_int(abi.box_rc_field_body),
- C_int(abi.obj_body_elt_fields)));
+ for (ast.ty_param p in bcx.fcx.ccx.obj_typarams) {
+ let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
+ vec(C_int(0),
+ C_int(i)));
+ lltyparam = bcx.build.Load(lltyparam);
+ bcx.fcx.lltydescs.insert(p.id, lltyparam);
+ i += 1;
+ }
- let int i = 0;
- for (ast.obj_field f in cx.fcx.ccx.obj_fields) {
- let ValueRef llfield = cx.build.GEP(obj_fields,
- vec(C_int(0),
- C_int(i)));
+ i = 0;
+ for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
+ auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
+ bcx = rslt.bcx;
+ auto llfield = rslt.val;
cx.fcx.llobjfields.insert(f.id, llfield);
i += 1;
}
+
+ ret res(bcx, C_nil());
}
fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
@@ -3464,17 +4593,18 @@ fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
auto llfndecl = cx.item_ids.get(fid);
cx.item_names.insert(cx.path, llfndecl);
- auto fcx = new_fn_ctxt(cx, cx.path, llfndecl);
- create_llargs_for_fn_args(fcx, ty_self, ret_ty_of_fn(ann),
- f.inputs, ty_params);
+ auto fcx = new_fn_ctxt(cx, llfndecl);
+ create_llargs_for_fn_args(fcx, f.proto,
+ ty_self, ret_ty_of_fn(ann),
+ f.decl.inputs, ty_params);
auto bcx = new_top_block_ctxt(fcx);
- copy_args_to_allocas(bcx, ty_self, f.inputs,
+ copy_args_to_allocas(bcx, ty_self, f.decl.inputs,
arg_tys_of_fn(ann));
alt (fcx.llself) {
case (some[ValueRef](?llself)) {
- create_llobjfields_for_fields(bcx, llself);
+ bcx = populate_fn_ctxt_from_llself(bcx, llself).bcx;
}
case (_) {
}
@@ -3504,17 +4634,17 @@ fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
auto llfnty = T_nil();
alt (node_ann_type(cx, m.node.ann).struct) {
- case (ty.ty_fn(?inputs, ?output)) {
- llfnty = type_of_fn_full(cx,
+ case (ty.ty_fn(?proto, ?inputs, ?output)) {
+ llfnty = type_of_fn_full(cx, proto,
some[TypeRef](self_ty),
inputs, output);
}
}
- let @crate_ctxt mcx = @rec(path=cx.path + "." + m.node.ident
+ let @crate_ctxt mcx = @rec(path=cx.path + sep() + m.node.ident
with *cx);
- let str s = cx.names.next("_rust_method") + "." + mcx.path;
+ let str s = cx.names.next("_rust_method") + sep() + mcx.path;
let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfnty);
cx.item_ids.insert(m.node.id, llfn);
@@ -3525,7 +4655,7 @@ fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
auto vtbl = C_struct(methods);
auto gvar = llvm.LLVMAddGlobal(cx.llmod,
val_ty(vtbl),
- _str.buf("_rust_vtbl" + "." + cx.path));
+ _str.buf("_rust_vtbl" + sep() + cx.path));
llvm.LLVMSetInitializer(gvar, vtbl);
llvm.LLVMSetGlobalConstant(gvar, True);
llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
@@ -3548,8 +4678,9 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
id=f.id));
}
- auto fcx = new_fn_ctxt(cx, cx.path, llctor_decl);
- create_llargs_for_fn_args(fcx, none[TypeRef], ret_ty_of_fn(ann),
+ auto fcx = new_fn_ctxt(cx, llctor_decl);
+ create_llargs_for_fn_args(fcx, ast.proto_fn,
+ none[TypeRef], ret_ty_of_fn(ann),
fn_args, ty_params);
auto bcx = new_top_block_ctxt(fcx);
@@ -3558,7 +4689,7 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
auto llself_ty = type_of(cx, ret_ty_of_fn(ann));
- auto pair = bcx.build.Alloca(llself_ty);
+ auto pair = bcx.fcx.llretptr;
auto vtbl = trans_vtbl(cx, llself_ty, ob, ty_params);
auto pair_vtbl = bcx.build.GEP(pair,
vec(C_int(0),
@@ -3568,10 +4699,11 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
C_int(abi.obj_field_box)));
bcx.build.Store(vtbl, pair_vtbl);
- let TypeRef llbox_ty = T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
- T_nil()))));
- if (_vec.len[ty.arg](arg_tys) == 0u) {
- // Store null into pair, if no args.
+ let TypeRef llbox_ty = T_opaque_obj_ptr(cx.tn);
+
+ if (_vec.len[ast.ty_param](ty_params) == 0u &&
+ _vec.len[ty.arg](arg_tys) == 0u) {
+ // Store null into pair, if no args or typarams.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
// Malloc a box for the body and copy args in.
@@ -3581,55 +4713,76 @@ fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
}
// Synthesize an obj body type.
- let @ty.t fields_ty = ty.plain_ty(ty.ty_tup(obj_fields));
- let TypeRef llfields_ty = type_of(bcx.fcx.ccx, fields_ty);
- let TypeRef llobj_body_ty =
- T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc()),
- llfields_ty))));
+ auto tydesc_ty = plain_ty(ty.ty_type);
+ let vec[@ty.t] tps = vec();
+ for (ast.ty_param tp in ty_params) {
+ append[@ty.t](tps, tydesc_ty);
+ }
+
+ let @ty.t typarams_ty = plain_ty(ty.ty_tup(tps));
+ let @ty.t fields_ty = plain_ty(ty.ty_tup(obj_fields));
+ let @ty.t body_ty = plain_ty(ty.ty_tup(vec(tydesc_ty,
+ typarams_ty,
+ fields_ty)));
+ let @ty.t boxed_body_ty = plain_ty(ty.ty_box(body_ty));
// Malloc a box for the body.
- auto r = trans_malloc_inner(bcx, llobj_body_ty);
- bcx = r.bcx;
- auto box = r.val;
- auto rc = bcx.build.GEP(box,
- vec(C_int(0),
- C_int(abi.box_rc_field_refcnt)));
- auto body = bcx.build.GEP(box,
- vec(C_int(0),
- C_int(abi.box_rc_field_body)));
- bcx.build.Store(C_int(1), rc);
+ auto box = trans_malloc_boxed(bcx, body_ty);
+ bcx = box.bcx;
+ auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
+ vec(0, abi.box_rc_field_refcnt));
+ bcx = rc.bcx;
+ auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
+ vec(0, abi.box_rc_field_body));
+ bcx = body.bcx;
+ bcx.build.Store(C_int(1), rc.val);
// Store body tydesc.
auto body_tydesc =
- bcx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_tydesc)));
-
- auto fields_tydesc = get_tydesc(r.bcx, fields_ty);
- bcx = fields_tydesc.bcx;
- bcx.build.Store(fields_tydesc.val, body_tydesc);
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_tydesc));
+ bcx = body_tydesc.bcx;
+
+ auto body_td = get_tydesc(bcx, body_ty);
+ bcx = body_td.bcx;
+ bcx.build.Store(body_td.val, body_tydesc.val);
+
+ // Copy typarams into captured typarams.
+ auto body_typarams =
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_typarams));
+ bcx = body_typarams.bcx;
+ let int i = 0;
+ for (ast.ty_param tp in ty_params) {
+ auto typaram = bcx.fcx.lltydescs.get(tp.id);
+ auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
+ vec(0, i));
+ bcx = capture.bcx;
+ bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
+ i += 1;
+ }
// Copy args into body fields.
auto body_fields =
- bcx.build.GEP(body,
- vec(C_int(0),
- C_int(abi.obj_body_elt_fields)));
+ GEP_tup_like(bcx, body_ty, body.val,
+ vec(0, abi.obj_body_elt_fields));
+ bcx = body_fields.bcx;
- let int i = 0;
+ i = 0;
for (ast.obj_field f in ob.fields) {
auto arg = bcx.fcx.llargs.get(f.id);
arg = load_scalar_or_boxed(bcx, arg, arg_tys.(i).ty);
- auto field = bcx.build.GEP(body_fields,
- vec(C_int(0),C_int(i)));
- bcx = copy_ty(bcx, INIT, field, arg, arg_tys.(i).ty).bcx;
+ auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
+ vec(0, i));
+ bcx = field.bcx;
+ bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
i += 1;
}
-
// Store box ptr in outer pair.
- auto p = bcx.build.PointerCast(box, llbox_ty);
+ auto p = bcx.build.PointerCast(box.val, llbox_ty);
bcx.build.Store(p, pair_box);
}
- bcx.build.Ret(bcx.build.Load(pair));
+ bcx.build.RetVoid();
}
fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
@@ -3652,8 +4805,9 @@ fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
check (cx.item_ids.contains_key(variant.id));
let ValueRef llfndecl = cx.item_ids.get(variant.id);
- auto fcx = new_fn_ctxt(cx, cx.path, llfndecl);
- create_llargs_for_fn_args(fcx, none[TypeRef], ret_ty_of_fn(variant.ann),
+ auto fcx = new_fn_ctxt(cx, llfndecl);
+ create_llargs_for_fn_args(fcx, ast.proto_fn,
+ none[TypeRef], ret_ty_of_fn(variant.ann),
fn_args, ty_params);
auto bcx = new_top_block_ctxt(fcx);
@@ -3661,41 +4815,45 @@ fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
auto arg_tys = arg_tys_of_fn(variant.ann);
copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
- auto info = cx.tags.get(tag_id);
+ // Cast the tag to a type we can GEP into.
+ auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
+ T_opaque_tag_ptr(fcx.ccx.tn));
- auto lltagty = T_struct(vec(T_int(), T_array(T_i8(), info.size)));
-
- // FIXME: better name.
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("tag"), lltagty);
-
- auto lltagptr = bcx.build.Alloca(lltagty);
- auto lldiscrimptr = bcx.build.GEP(lltagptr, vec(C_int(0), C_int(0)));
+ auto lldiscrimptr = bcx.build.GEP(lltagptr,
+ vec(C_int(0), C_int(0)));
bcx.build.Store(C_int(index), lldiscrimptr);
- auto llblobptr = bcx.build.GEP(lltagptr, vec(C_int(0), C_int(1)));
-
- // First, generate the union type.
- let vec[TypeRef] llargtys = vec();
- for (ty.arg arg in arg_tys) {
- llargtys += vec(type_of(cx, arg.ty));
- }
-
- auto llunionty = T_struct(llargtys);
- auto llunionptr = bcx.build.TruncOrBitCast(llblobptr, T_ptr(llunionty));
+ auto llblobptr = bcx.build.GEP(lltagptr,
+ vec(C_int(0), C_int(1)));
i = 0u;
for (ast.variant_arg va in variant.args) {
- auto llargval = bcx.build.Load(fcx.llargs.get(va.id));
- auto lldestptr = bcx.build.GEP(llunionptr,
- vec(C_int(0), C_int(i as int)));
+ auto rslt = GEP_tag(bcx, llblobptr, variant, i as int);
+ bcx = rslt.bcx;
+ auto lldestptr = rslt.val;
+
+ // If this argument to this function is a tag, it'll have come in to
+ // this function as an opaque blob due to the way that type_of()
+ // works. So we have to cast to the destination's view of the type.
+ auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
+ val_ty(lldestptr));
+
+ auto arg_ty = arg_tys.(i).ty;
+ auto llargval;
+ if (ty.type_is_structural(arg_ty)) {
+ llargval = llargptr;
+ } else {
+ llargval = bcx.build.Load(llargptr);
+ }
+
+ rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
+ bcx = rslt.bcx;
- bcx.build.Store(llargval, lldestptr);
i += 1u;
}
- auto lltagval = bcx.build.Load(lltagptr);
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
- bcx.build.Ret(lltagval);
+ bcx.build.RetVoid();
}
// FIXME: this should do some structural hash-consing to avoid
@@ -3732,20 +4890,21 @@ fn trans_const(@crate_ctxt cx, @ast.expr e,
fn trans_item(@crate_ctxt cx, &ast.item item) {
alt (item.node) {
case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_fn(sub_cx, f, fid, none[TypeRef], tps, ann);
}
case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name,
+ auto sub_cx = @rec(path=cx.path + sep() + name,
+ obj_typarams=tps,
obj_fields=ob.fields with *cx);
trans_obj(sub_cx, ob, oid, tps, ann);
}
case (ast.item_mod(?name, ?m, _)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_mod(sub_cx, m);
}
case (ast.item_tag(?name, ?variants, ?tps, ?tag_id)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
auto i = 0;
for (ast.variant variant in variants) {
trans_tag_variant(sub_cx, tag_id, variant, i, tps);
@@ -3753,7 +4912,7 @@ fn trans_item(@crate_ctxt cx, &ast.item item) {
}
}
case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
- auto sub_cx = @rec(path=cx.path + "." + name with *cx);
+ auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
trans_const(sub_cx, expr, cid, ann);
}
case (_) { /* fall through */ }
@@ -3784,15 +4943,21 @@ fn decl_fn_and_pair(@crate_ctxt cx,
auto llfty = get_pair_fn_ty(llpairty);
// Declare the function itself.
- let str s = cx.names.next("_rust_" + kind) + "." + name;
+ let str s = cx.names.next("_rust_" + kind) + sep() + name;
let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfty);
// Declare the global constant pair that points to it.
- let str ps = cx.names.next("_rust_" + kind + "_pair") + "." + name;
+ let str ps = cx.names.next("_rust_" + kind + "_pair") + sep() + name;
+
+ register_fn_pair(cx, ps, llpairty, llfn, id);
+}
+
+fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
+ ast.def_id id) {
let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
_str.buf(ps));
auto pair = C_struct(vec(llfn,
- C_null(T_opaque_closure_ptr())));
+ C_null(T_opaque_closure_ptr(cx.tn))));
llvm.LLVMSetInitializer(gvar, pair);
llvm.LLVMSetGlobalConstant(gvar, True);
@@ -3804,12 +4969,86 @@ fn decl_fn_and_pair(@crate_ctxt cx,
cx.fn_pairs.insert(id, gvar);
}
+fn native_fn_wrapper_type(@crate_ctxt cx, &ast.ann ann) -> TypeRef {
+ auto x = node_ann_type(cx, ann);
+ alt (x.struct) {
+ case (ty.ty_native_fn(?abi, ?args, ?out)) {
+ ret type_of_fn(cx, ast.proto_fn, args, out);
+ }
+ }
+ fail;
+}
+
+fn decl_native_fn_and_pair(@crate_ctxt cx,
+ str name,
+ &ast.ann ann,
+ ast.def_id id) {
+ // Declare the wrapper.
+ auto wrapper_type = native_fn_wrapper_type(cx, ann);
+ let str s = cx.names.next("_rust_wrapper") + sep() + name;
+ let ValueRef wrapper_fn = decl_fastcall_fn(cx.llmod, s, wrapper_type);
+
+ // Declare the global constant pair that points to it.
+ auto wrapper_pair_type = T_fn_pair(cx.tn, wrapper_type);
+ let str ps = cx.names.next("_rust_wrapper_pair") + sep() + name;
+
+ register_fn_pair(cx, ps, wrapper_pair_type, wrapper_fn, id);
+
+ // Declare the function itself.
+ auto llfty = get_pair_fn_ty(node_type(cx, ann));
+ auto function = decl_cdecl_fn(cx.llmod, name, llfty);
+
+ // Build the wrapper.
+ auto fcx = new_fn_ctxt(cx, wrapper_fn);
+ auto bcx = new_top_block_ctxt(fcx);
+ auto fn_type = node_ann_type(cx, ann);
+
+ let vec[ValueRef] call_args = vec();
+ auto abi = ty.ty_fn_abi(fn_type);
+ auto arg_n = 3u;
+ alt (abi) {
+ case (ast.native_abi_rust) {
+ call_args += vec(fcx.lltaskptr);
+ auto num_ty_param = ty.count_ty_params(plain_ty(fn_type.struct));
+ for each (uint i in _uint.range(0u, num_ty_param)) {
+ auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
+ check (llarg as int != 0);
+ call_args += vec(llarg);
+ arg_n += 1u;
+ }
+ }
+ case (ast.native_abi_cdecl) {
+ }
+ }
+ auto args = ty.ty_fn_args(fn_type);
+ for (ty.arg arg in args) {
+ auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
+ check (llarg as int != 0);
+ call_args += vec(llarg);
+ arg_n += 1u;
+ }
+ auto r = bcx.build.Call(function, call_args);
+ bcx.build.Store(r, fcx.llretptr);
+ bcx.build.RetVoid();
+}
+
+fn collect_native_item(&@crate_ctxt cx, @ast.native_item i) -> @crate_ctxt {
+ alt (i.node) {
+ case (ast.native_item_fn(?name, _, _, ?fid, ?ann)) {
+ cx.native_items.insert(fid, i);
+ if (! cx.obj_methods.contains_key(fid)) {
+ decl_native_fn_and_pair(cx, name, ann, fid);
+ }
+ }
+ case (_) { /* fall through */ }
+ }
+ ret cx;
+}
fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
alt (i.node) {
case (ast.item_fn(?name, ?f, _, ?fid, ?ann)) {
- // TODO: type-params
cx.items.insert(fid, i);
if (! cx.obj_methods.contains_key(fid)) {
decl_fn_and_pair(cx, "fn", name, ann, fid);
@@ -3817,7 +5056,6 @@ fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
}
case (ast.item_obj(?name, ?ob, _, ?oid, ?ann)) {
- // TODO: type-params
cx.items.insert(oid, i);
decl_fn_and_pair(cx, "obj_ctor", name, ann, oid);
for (@ast.method m in ob.methods) {
@@ -3833,13 +5071,7 @@ fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
cx.items.insert(mid, i);
}
- case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto vi = new_def_hash[uint]();
- auto navi = new_def_hash[uint]();
- let vec[tup(ast.def_id,arity)] variant_info = vec();
- cx.tags.insert(tag_id, @rec(th=mk_type_handle(),
- mutable variants=variant_info,
- mutable size=0u));
+ case (ast.item_tag(_, ?variants, ?tps, ?tag_id)) {
cx.items.insert(tag_id, i);
}
@@ -3854,7 +5086,8 @@ fn collect_items(@crate_ctxt cx, @ast.crate crate) {
let fold.ast_fold[@crate_ctxt] fld =
fold.new_identity_fold[@crate_ctxt]();
- fld = @rec( update_env_for_item = bind collect_item(_,_)
+ fld = @rec( update_env_for_item = bind collect_item(_,_),
+ update_env_for_native_item = bind collect_native_item(_,_)
with *fld );
fold.fold_crate[@crate_ctxt](cx, fld, crate);
@@ -3890,103 +5123,30 @@ fn collect_tag_ctors(@crate_ctxt cx, @ast.crate crate) {
}
-// The tag type resolution pass, which determines all the LLVM types that
-// correspond to each tag type in the crate.
-
-fn resolve_tag_types_for_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
- alt (i.node) {
- case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto max_align = 0u;
- auto max_size = 0u;
-
- auto info = cx.tags.get(tag_id);
- let vec[tup(ast.def_id,arity)] variant_info = vec();
-
- for (ast.variant variant in variants) {
- auto arity_info;
- if (_vec.len[ast.variant_arg](variant.args) > 0u) {
- auto llvariantty = type_of_variant(cx, variant);
- auto align =
- llvm.LLVMPreferredAlignmentOfType(cx.td.lltd,
- llvariantty);
- auto size =
- llvm.LLVMStoreSizeOfType(cx.td.lltd,
- llvariantty) as uint;
- if (max_align < align) { max_align = align; }
- if (max_size < size) { max_size = size; }
-
- arity_info = n_ary;
- } else {
- arity_info = nullary;
- }
-
- variant_info += vec(tup(variant.id, arity_info));
- }
-
- info.variants = variant_info;
- info.size = max_size;
-
- // FIXME: alignment is wrong here, manually insert padding I
- // guess :(
- auto tag_ty = T_struct(vec(T_int(), T_array(T_i8(), max_size)));
- auto th = cx.tags.get(tag_id).th.llth;
- llvm.LLVMRefineType(llvm.LLVMResolveTypeHandle(th), tag_ty);
- }
- case (_) {
- // fall through
- }
- }
-
- ret cx;
-}
-
-fn resolve_tag_types(@crate_ctxt cx, @ast.crate crate) {
- let fold.ast_fold[@crate_ctxt] fld =
- fold.new_identity_fold[@crate_ctxt]();
-
- fld = @rec( update_env_for_item = bind resolve_tag_types_for_item(_,_)
- with *fld );
-
- fold.fold_crate[@crate_ctxt](cx, fld, crate);
-}
-
// The constant translation pass.
fn trans_constant(&@crate_ctxt cx, @ast.item it) -> @crate_ctxt {
alt (it.node) {
case (ast.item_tag(_, ?variants, _, ?tag_id)) {
- auto info = cx.tags.get(tag_id);
+ auto i = 0u;
+ auto n_variants = _vec.len[ast.variant](variants);
+ while (i < n_variants) {
+ auto variant = variants.(i);
- auto tag_ty = llvm.LLVMResolveTypeHandle(info.th.llth);
- check (llvm.LLVMCountStructElementTypes(tag_ty) == 2u);
- auto elts = vec(0 as TypeRef, 0 as TypeRef);
- llvm.LLVMGetStructElementTypes(tag_ty, _vec.buf[TypeRef](elts));
- auto union_ty = elts.(1);
+ auto discrim_val = C_int(i as int);
- auto i = 0u;
- while (i < _vec.len[tup(ast.def_id,arity)](info.variants)) {
- auto variant_info = info.variants.(i);
- alt (variant_info._1) {
- case (nullary) {
- // Nullary tags become constants.
- auto union_val = C_zero_byte_arr(info.size as uint);
- auto val = C_struct(vec(C_int(i as int), union_val));
-
- // FIXME: better name
- auto gvar = llvm.LLVMAddGlobal(cx.llmod, val_ty(val),
- _str.buf("tag"));
- llvm.LLVMSetInitializer(gvar, val);
- llvm.LLVMSetGlobalConstant(gvar, True);
- llvm.LLVMSetLinkage(gvar,
- lib.llvm.LLVMPrivateLinkage
- as llvm.Linkage);
- cx.item_ids.insert(variant_info._0, gvar);
- }
- case (n_ary) {
- // N-ary tags are treated as functions and generated
- // later.
- }
- }
+ // FIXME: better name.
+ auto discrim_gvar = llvm.LLVMAddGlobal(cx.llmod, T_int(),
+ _str.buf("tag_discrim"));
+
+ // FIXME: Eventually we do want to export these, but we need
+ // to figure out what name they get first!
+ llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
+ llvm.LLVMSetGlobalConstant(discrim_gvar, True);
+ llvm.LLVMSetLinkage(discrim_gvar, lib.llvm.LLVMPrivateLinkage
+ as llvm.Linkage);
+
+ cx.discrims.insert(variant.id, discrim_gvar);
i += 1u;
}
@@ -4016,21 +5176,36 @@ fn trans_constants(@crate_ctxt cx, @ast.crate crate) {
fold.fold_crate[@crate_ctxt](cx, fld, crate);
}
+
+fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
+ ret cx.build.PtrToInt(v, T_int());
+}
+
+
+fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
+ ret cx.build.IntToPtr(v, t);
+}
+
fn p2i(ValueRef v) -> ValueRef {
ret llvm.LLVMConstPtrToInt(v, T_int());
}
+fn i2p(ValueRef v, TypeRef t) -> ValueRef {
+ ret llvm.LLVMConstIntToPtr(v, t);
+}
+
fn trans_exit_task_glue(@crate_ctxt cx) {
let vec[TypeRef] T_args = vec();
let vec[ValueRef] V_args = vec();
auto llfn = cx.glues.exit_task_glue;
- let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 3u);
auto fcx = @rec(llfn=llfn,
lltaskptr=lltaskptr,
- llclosure=C_null(T_opaque_closure_ptr()),
+ llenv=C_null(T_opaque_closure_ptr(cx.tn)),
+ llretptr=C_null(T_ptr(T_nil())),
mutable llself=none[ValueRef],
- mutable llretptr=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
llargs=new_def_hash[ValueRef](),
llobjfields=new_def_hash[ValueRef](),
lllocals=new_def_hash[ValueRef](),
@@ -4043,9 +5218,9 @@ fn trans_exit_task_glue(@crate_ctxt cx) {
}
fn create_typedefs(@crate_ctxt cx) {
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_crate"), T_crate());
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_task"), T_task());
- llvm.LLVMAddTypeName(cx.llmod, _str.buf("rust_tydesc"), T_tydesc());
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("crate"), T_crate(cx.tn));
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("task"), T_task(cx.tn));
+ llvm.LLVMAddTypeName(cx.llmod, _str.buf("tydesc"), T_tydesc(cx.tn));
}
fn create_crate_constant(@crate_ctxt cx) {
@@ -4075,12 +5250,37 @@ fn create_crate_constant(@crate_ctxt cx) {
exit_task_glue_off, // size_t main_exit_task_glue_off
C_null(T_int()), // int n_rust_syms
C_null(T_int()), // int n_c_syms
- C_null(T_int()) // int n_libs
+ C_null(T_int()), // int n_libs
+ C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
));
llvm.LLVMSetInitializer(cx.crate_ptr, crate_val);
}
+fn find_main_fn(@crate_ctxt cx) -> ValueRef {
+ auto e = sep() + "main";
+ let ValueRef v = C_nil();
+ let uint n = 0u;
+ for each (tup(str,ValueRef) i in cx.item_names.items()) {
+ if (_str.ends_with(i._0, e)) {
+ n += 1u;
+ v = i._1;
+ }
+ }
+ alt (n) {
+ case (0u) {
+ cx.sess.err("main fn not found");
+ }
+ case (1u) {
+ ret v;
+ }
+ case (_) {
+ cx.sess.err("multiple main fns found");
+ }
+ }
+ fail;
+}
+
fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
auto T_main_args = vec(T_int(), T_int());
auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int());
@@ -4100,8 +5300,7 @@ fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
auto llargc = llvm.LLVMGetParam(llmain, 0u);
auto llargv = llvm.LLVMGetParam(llmain, 1u);
- check (cx.item_names.contains_key("_rust.main"));
- auto llrust_main = cx.item_names.get("_rust.main");
+ auto llrust_main = find_main_fn(cx);
//
// Emit the moral equivalent of:
@@ -4131,6 +5330,24 @@ fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {
ret intrinsics;
}
+
+fn trace_str(@block_ctxt cx, str s) {
+ trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.ccx, s))));
+}
+
+fn trace_word(@block_ctxt cx, ValueRef v) {
+ trans_upcall(cx, "upcall_trace_word", vec(v));
+}
+
+fn trace_ptr(@block_ctxt cx, ValueRef v) {
+ trace_word(cx, cx.build.PtrToInt(v, T_int()));
+}
+
+fn trap(@block_ctxt bcx) {
+ let vec[ValueRef] v = vec();
+ bcx.build.Call(bcx.fcx.ccx.intrinsics.get("llvm.trap"), v);
+}
+
fn check_module(ModuleRef llmod) {
auto pm = mk_pass_manager();
llvm.LLVMAddVerifierPass(pm.llpm);
@@ -4139,8 +5356,8 @@ fn check_module(ModuleRef llmod) {
// TODO: run the linter here also, once there are llvm-c bindings for it.
}
-fn make_no_op_type_glue(ModuleRef llmod) -> ValueRef {
- auto ty = T_fn(vec(T_taskptr(), T_ptr(T_i8())), T_void());
+fn make_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
+ auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
auto fun = decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
auto bb_name = _str.buf("_rust_no_op_type_glue_bb");
auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
@@ -4234,9 +5451,197 @@ fn make_bzero_glue(ModuleRef llmod) -> ValueRef {
ret fun;
}
-fn make_glues(ModuleRef llmod) -> @glue_fns {
- ret @rec(activate_glue = decl_glue(llmod, abi.activate_glue_name()),
- yield_glue = decl_glue(llmod, abi.yield_glue_name()),
+fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
+ /*
+ * Args to vec_append_glue:
+ *
+ * 0. (Implicit) task ptr
+ *
+ * 1. Pointer to the tydesc of the vec, so that we can tell if it's gc
+ * mem, and have a tydesc to pass to malloc if we're allocating anew.
+ *
+ * 2. Pointer to the tydesc of the vec's stored element type, so that
+ * elements can be copied to a newly alloc'ed vec if one must be
+ * created.
+ *
+ * 3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
+ *
+ * 4. Src vec (i.e. ptr to rust_vec).
+ *
+ * 5. Flag indicating whether to skip trailing null on dst.
+ *
+ */
+
+ auto ty = T_fn(vec(T_taskptr(tn),
+ T_ptr(T_tydesc(tn)),
+ T_ptr(T_tydesc(tn)),
+ T_ptr(T_opaque_vec_ptr()),
+ T_opaque_vec_ptr(), T_bool()),
+ T_void());
+
+ auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
+ ret llfn;
+}
+
+fn trans_vec_append_glue(@crate_ctxt cx) {
+
+ auto llfn = cx.glues.vec_append_glue;
+
+ let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
+ let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
+ let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
+ let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
+ let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
+ let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);
+
+ auto fcx = @rec(llfn=llfn,
+ lltaskptr=lltaskptr,
+ llenv=C_null(T_ptr(T_nil())),
+ llretptr=C_null(T_ptr(T_nil())),
+ mutable llself=none[ValueRef],
+ mutable lliterbody=none[ValueRef],
+ llargs=new_def_hash[ValueRef](),
+ llobjfields=new_def_hash[ValueRef](),
+ lllocals=new_def_hash[ValueRef](),
+ lltydescs=new_def_hash[ValueRef](),
+ ccx=cx);
+
+ auto bcx = new_top_block_ctxt(fcx);
+
+ auto lldst_vec = bcx.build.Load(lldst_vec_ptr);
+
+ // First the dst vec needs to grow to accommodate the src vec.
+ // To do this we have to figure out how many bytes to add.
+
+ fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ }
+
+ fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
+ ret bcx.build.Store(fill,
+ bcx.build.GEP(v,
+ vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ }
+
+ fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
+ ValueRef skipnull) -> ValueRef {
+ auto f = bcx.build.Load(bcx.build.GEP(v,
+ vec(C_int(0),
+ C_int(abi.vec_elt_fill))));
+ ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
+ }
+
+ fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ auto p = bcx.build.GEP(v, vec(C_int(0),
+ C_int(abi.vec_elt_data)));
+ ret bcx.build.PointerCast(p, T_ptr(T_i8()));
+ }
+
+
+ fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
+ auto len = vec_fill(bcx, v);
+ ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
+ }
+
+ fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
+ ValueRef skipnull) -> ValueRef {
+ auto len = vec_fill_adjusted(bcx, v, skipnull);
+ ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
+ }
+
+
+ auto llcopy_dst_ptr = bcx.build.Alloca(T_int());
+ auto llnew_vec_res =
+ trans_upcall(bcx, "upcall_vec_grow",
+ vec(vp2i(bcx, lldst_vec),
+ vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
+ vp2i(bcx, llcopy_dst_ptr),
+ vp2i(bcx, llvec_tydesc)));
+
+ bcx = llnew_vec_res.bcx;
+ auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
+ T_opaque_vec_ptr());
+
+ put_vec_fill(bcx, llnew_vec, C_int(0));
+
+ auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
+ auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");
+
+ auto pp0 = bcx.build.Alloca(T_ptr(T_i8()));
+ bcx.build.Store(vec_p0(bcx, llnew_vec), pp0);
+
+ bcx.build.CondBr(bcx.build.TruncOrBitCast
+ (bcx.build.Load(llcopy_dst_ptr),
+ T_i1()),
+ copy_dst_cx.llbb,
+ copy_src_cx.llbb);
+
+
+ fn copy_elts(@block_ctxt cx,
+ ValueRef elt_tydesc,
+ ValueRef dst,
+ ValueRef src,
+ ValueRef n_bytes) -> result {
+
+ auto src_lim = cx.build.GEP(src, vec(n_bytes));
+
+ auto elt_llsz =
+ cx.build.Load(cx.build.GEP(elt_tydesc,
+ vec(C_int(0),
+ C_int(abi.tydesc_field_size))));
+
+ fn take_one(ValueRef elt_tydesc,
+ @block_ctxt cx, ValueRef v) -> result {
+ call_tydesc_glue_full(cx, v,
+ elt_tydesc,
+ abi.tydesc_field_take_glue_off);
+ ret res(cx, v);
+ }
+
+ auto bcx = iter_sequence_raw(cx, src, src_lim,
+ elt_llsz, bind take_one(elt_tydesc,
+ _, _)).bcx;
+
+ ret call_memcpy(bcx, dst, src, n_bytes);
+ }
+
+ // Copy any dst elements in, omitting null if doing str.
+ auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
+ copy_dst_cx = copy_elts(copy_dst_cx,
+ llelt_tydesc,
+ copy_dst_cx.build.Load(pp0),
+ vec_p0(copy_dst_cx, lldst_vec),
+ n_bytes).bcx;
+
+ put_vec_fill(copy_dst_cx, llnew_vec, n_bytes);
+ copy_dst_cx.build.Store(vec_p1(copy_dst_cx, llnew_vec), pp0);
+ copy_dst_cx.build.Br(copy_src_cx.llbb);
+
+
+ // Copy any src elements in, carrying along null if doing str.
+ n_bytes = vec_fill(copy_src_cx, llsrc_vec);
+ copy_src_cx = copy_elts(copy_src_cx,
+ llelt_tydesc,
+ copy_src_cx.build.Load(pp0),
+ vec_p0(copy_src_cx, llsrc_vec),
+ n_bytes).bcx;
+
+ put_vec_fill(copy_src_cx, llnew_vec,
+ copy_src_cx.build.Add(vec_fill(copy_src_cx,
+ llnew_vec),
+ n_bytes));
+
+ // Write new_vec back through the alias we were given.
+ copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
+ copy_src_cx.build.RetVoid();
+}
+
+
+fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
+ ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
+ yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
/*
* Note: the signature passed to decl_cdecl_fn here looks unusual
* because it is. It corresponds neither to an upcall signature
@@ -4248,14 +5653,19 @@ fn make_glues(ModuleRef llmod) -> @glue_fns {
* this is the signature required to retrieve it.
*/
exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
- T_fn(vec(T_taskptr()), T_void())),
+ T_fn(vec(T_int(),
+ T_int(),
+ T_int(),
+ T_taskptr(tn)),
+ T_void())),
upcall_glues =
- _vec.init_fn[ValueRef](bind decl_upcall(llmod, _),
+ _vec.init_fn[ValueRef](bind decl_upcall_glue(llmod, tn, _),
abi.n_upcall_glues as uint),
- no_op_type_glue = make_no_op_type_glue(llmod),
+ no_op_type_glue = make_no_op_type_glue(llmod, tn),
memcpy_glue = make_memcpy_glue(llmod),
- bzero_glue = make_bzero_glue(llmod));
+ bzero_glue = make_bzero_glue(llmod),
+ vec_append_glue = make_vec_append_glue(llmod, tn));
}
fn trans_crate(session.session sess, @ast.crate crate, str output,
@@ -4267,33 +5677,40 @@ fn trans_crate(session.session sess, @ast.crate crate, str output,
llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
auto td = mk_target_data(x86.get_data_layout());
+ auto tn = mk_type_names();
let ValueRef crate_ptr =
- llvm.LLVMAddGlobal(llmod, T_crate(), _str.buf("rust_crate"));
+ llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));
llvm.LLVMSetModuleInlineAsm(llmod, _str.buf(x86.get_module_asm()));
auto intrinsics = declare_intrinsics(llmod);
- auto glues = make_glues(llmod);
+ auto glues = make_glues(llmod, tn);
auto hasher = ty.hash_ty;
auto eqer = ty.eq_ty;
- auto tydescs = map.mk_hashmap[@ty.t,ValueRef](hasher, eqer);
+ auto tag_sizes = map.mk_hashmap[@ty.t,uint](hasher, eqer);
+ auto tydescs = map.mk_hashmap[@ty.t,@tydesc_info](hasher, eqer);
+ let vec[ast.ty_param] obj_typarams = vec();
let vec[ast.obj_field] obj_fields = vec();
auto cx = @rec(sess = sess,
llmod = llmod,
td = td,
+ tn = tn,
crate_ptr = crate_ptr,
upcalls = new_str_hash[ValueRef](),
intrinsics = intrinsics,
item_names = new_str_hash[ValueRef](),
item_ids = new_def_hash[ValueRef](),
items = new_def_hash[@ast.item](),
- tags = new_def_hash[@tag_info](),
+ native_items = new_def_hash[@ast.native_item](),
+ tag_sizes = tag_sizes,
+ discrims = new_def_hash[ValueRef](),
fn_pairs = new_def_hash[ValueRef](),
consts = new_def_hash[ValueRef](),
obj_methods = new_def_hash[()](),
tydescs = tydescs,
+ obj_typarams = obj_typarams,
obj_fields = obj_fields,
glues = glues,
names = namegen(0),
@@ -4302,12 +5719,12 @@ fn trans_crate(session.session sess, @ast.crate crate, str output,
create_typedefs(cx);
collect_items(cx, crate);
- resolve_tag_types(cx, crate);
collect_tag_ctors(cx, crate);
trans_constants(cx, crate);
trans_mod(cx, crate.node.module);
trans_exit_task_glue(cx);
+ trans_vec_append_glue(cx);
create_crate_constant(cx);
if (!shared) {
trans_main_fn(cx, cx.crate_ptr);
diff --git a/src/comp/middle/ty.rs b/src/comp/middle/ty.rs
index f27595a1..5a595db6 100644
--- a/src/comp/middle/ty.rs
+++ b/src/comp/middle/ty.rs
@@ -19,7 +19,10 @@ import util.common.span;
type arg = rec(ast.mode mode, @t ty);
type field = rec(ast.ident ident, @t ty);
-type method = rec(ast.ident ident, vec[arg] inputs, @t output);
+type method = rec(ast.proto proto,
+ ast.ident ident,
+ vec[arg] inputs,
+ @t output);
// NB: If you change this, you'll probably want to change the corresponding
// AST structure in front/ast.rs as well.
@@ -32,16 +35,19 @@ tag sty {
ty_machine(util.common.ty_mach);
ty_char;
ty_str;
- ty_tag(ast.def_id);
+ ty_tag(ast.def_id, vec[@t]);
ty_box(@t);
ty_vec(@t);
ty_tup(vec[@t]);
ty_rec(vec[field]);
- ty_fn(vec[arg], @t); // TODO: effect
+ ty_fn(ast.proto, vec[arg], @t); // TODO: effect
+ ty_native_fn(ast.native_abi, vec[arg], @t); // TODO: effect
ty_obj(vec[method]);
ty_var(int); // ephemeral type var
ty_local(ast.def_id); // type of a local var
- ty_param(ast.def_id); // fn type param
+ ty_param(ast.def_id); // fn/tag type param
+ ty_type;
+ ty_native;
// TODO: ty_fn_arg(@t), for a possibly-aliased function argument
}
@@ -103,6 +109,7 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
case (ast.ty_str) { s = "str"; }
case (ast.ty_box(?t)) { s = "@" + ast_ty_to_str(t); }
case (ast.ty_vec(?t)) { s = "vec[" + ast_ty_to_str(t) + "]"; }
+ case (ast.ty_type) { s = "type"; }
case (ast.ty_tup(?elts)) {
auto f = ast_ty_to_str;
@@ -118,9 +125,13 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
s += ")";
}
- case (ast.ty_fn(?inputs, ?output)) {
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
auto f = ast_fn_input_to_str;
- s = "fn(";
+ if (proto == ast.proto_fn) {
+ s = "fn(";
+ } else {
+ s = "iter(";
+ }
auto is = _vec.map[rec(ast.mode mode, @ast.ty ty),str](f, inputs);
s += _str.connect(is, ", ");
s += ")";
@@ -138,6 +149,7 @@ fn ast_ty_to_str(&@ast.ty ty) -> str {
s = "mutable " + ast_ty_to_str(t);
}
+
case (_) {
fail; // FIXME: typestate bug
}
@@ -157,6 +169,8 @@ fn path_to_str(&ast.path pth) -> str {
ret result;
}
+// FIXME use the pretty-printer for this once it has a concept of an
+// abstract stream
fn ty_to_str(&@t typ) -> str {
fn fn_input_to_str(&rec(ast.mode mode, @t ty) input) -> str {
@@ -170,10 +184,14 @@ fn ty_to_str(&@t typ) -> str {
ret s + ty_to_str(input.ty);
}
- fn fn_to_str(option.t[ast.ident] ident,
+ fn fn_to_str(ast.proto proto,
+ option.t[ast.ident] ident,
vec[arg] inputs, @t output) -> str {
auto f = fn_input_to_str;
auto s = "fn";
+ if (proto == ast.proto_iter) {
+ s = "iter";
+ }
alt (ident) {
case (some[ast.ident](?i)) {
s += " ";
@@ -193,7 +211,8 @@ fn ty_to_str(&@t typ) -> str {
}
fn method_to_str(&method m) -> str {
- ret fn_to_str(some[ast.ident](m.ident), m.inputs, m.output) + ";";
+ ret fn_to_str(m.proto, some[ast.ident](m.ident),
+ m.inputs, m.output) + ";";
}
fn field_to_str(&field f) -> str {
@@ -206,6 +225,7 @@ fn ty_to_str(&@t typ) -> str {
}
alt (typ.struct) {
+ case (ty_native) { s = "native"; }
case (ty_nil) { s = "()"; }
case (ty_bool) { s = "bool"; }
case (ty_int) { s = "int"; }
@@ -215,6 +235,7 @@ fn ty_to_str(&@t typ) -> str {
case (ty_str) { s = "str"; }
case (ty_box(?t)) { s = "@" + ty_to_str(t); }
case (ty_vec(?t)) { s = "vec[" + ty_to_str(t) + "]"; }
+ case (ty_type) { s = "type"; }
case (ty_tup(?elems)) {
auto f = ty_to_str;
@@ -228,13 +249,23 @@ fn ty_to_str(&@t typ) -> str {
s = "rec(" + _str.connect(strs, ",") + ")";
}
- case (ty_tag(_)) {
+ case (ty_tag(?id, ?tps)) {
// The user should never see this if the cname is set properly!
- s = "<tag>";
+ s = "<tag#" + util.common.istr(id._0) + ":" +
+ util.common.istr(id._1) + ">";
+ if (_vec.len[@t](tps) > 0u) {
+ auto f = ty_to_str;
+ auto strs = _vec.map[@t,str](f, tps);
+ s += "[" + _str.connect(strs, ",") + "]";
+ }
+ }
+
+ case (ty_fn(?proto, ?inputs, ?output)) {
+ s = fn_to_str(proto, none[ast.ident], inputs, output);
}
- case (ty_fn(?inputs, ?output)) {
- s = fn_to_str(none[ast.ident], inputs, output);
+ case (ty_native_fn(_, ?inputs, ?output)) {
+ s = fn_to_str(ast.proto_fn, none[ast.ident], inputs, output);
}
case (ty_obj(?meths)) {
@@ -280,13 +311,21 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
case (ty_machine(_)) { ret fld.fold_simple_ty(ty); }
case (ty_char) { ret fld.fold_simple_ty(ty); }
case (ty_str) { ret fld.fold_simple_ty(ty); }
- case (ty_tag(_)) { ret fld.fold_simple_ty(ty); }
+ case (ty_type) { ret fld.fold_simple_ty(ty); }
+ case (ty_native) { ret fld.fold_simple_ty(ty); }
case (ty_box(?subty)) {
ret rewrap(ty, ty_box(fold_ty(fld, subty)));
}
case (ty_vec(?subty)) {
ret rewrap(ty, ty_vec(fold_ty(fld, subty)));
}
+ case (ty_tag(?tid, ?subtys)) {
+ let vec[@t] new_subtys = vec();
+ for (@t subty in subtys) {
+ new_subtys += vec(fold_ty(fld, subty));
+ }
+ ret rewrap(ty, ty_tag(tid, new_subtys));
+ }
case (ty_tup(?subtys)) {
let vec[@t] new_subtys = vec();
for (@t subty in subtys) {
@@ -302,13 +341,21 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
}
ret rewrap(ty, ty_rec(new_fields));
}
- case (ty_fn(?args, ?ret_ty)) {
+ case (ty_fn(?proto, ?args, ?ret_ty)) {
let vec[arg] new_args = vec();
for (arg a in args) {
auto new_ty = fold_ty(fld, a.ty);
new_args += vec(rec(mode=a.mode, ty=new_ty));
}
- ret rewrap(ty, ty_fn(new_args, fold_ty(fld, ret_ty)));
+ ret rewrap(ty, ty_fn(proto, new_args, fold_ty(fld, ret_ty)));
+ }
+ case (ty_native_fn(?abi, ?args, ?ret_ty)) {
+ let vec[arg] new_args = vec();
+ for (arg a in args) {
+ auto new_ty = fold_ty(fld, a.ty);
+ new_args += vec(rec(mode=a.mode, ty=new_ty));
+ }
+ ret rewrap(ty, ty_native_fn(abi, new_args, fold_ty(fld, ret_ty)));
}
case (ty_obj(?methods)) {
let vec[method] new_methods = vec();
@@ -317,7 +364,8 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
for (arg a in m.inputs) {
new_args += vec(rec(mode=a.mode, ty=fold_ty(fld, a.ty)));
}
- new_methods += vec(rec(ident=m.ident, inputs=new_args,
+ new_methods += vec(rec(proto=m.proto, ident=m.ident,
+ inputs=new_args,
output=fold_ty(fld, m.output)));
}
ret rewrap(ty, ty_obj(new_methods));
@@ -327,7 +375,7 @@ fn fold_ty(ty_fold fld, @t ty) -> @t {
case (ty_param(_)) { ret fld.fold_simple_ty(ty); }
}
- ret ty;
+ fail;
}
// Type utilities
@@ -349,24 +397,44 @@ fn type_is_nil(@t ty) -> bool {
fail;
}
+
fn type_is_structural(@t ty) -> bool {
alt (ty.struct) {
- case (ty_tup(_)) { ret true; }
- case (ty_rec(_)) { ret true; }
- case (ty_tag(_)) { ret true; }
- case (ty_fn(_,_)) { ret true; }
- case (ty_obj(_)) { ret true; }
- case (_) { ret false; }
+ case (ty_tup(_)) { ret true; }
+ case (ty_rec(_)) { ret true; }
+ case (ty_tag(_,_)) { ret true; }
+ case (ty_fn(_,_,_)) { ret true; }
+ case (ty_obj(_)) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+fn type_is_sequence(@t ty) -> bool {
+ alt (ty.struct) {
+ case (ty_str) { ret true; }
+ case (ty_vec(_)) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+fn sequence_element_type(@t ty) -> @t {
+ alt (ty.struct) {
+ case (ty_str) { ret plain_ty(ty_machine(common.ty_u8)); }
+ case (ty_vec(?e)) { ret e; }
}
fail;
}
+
fn type_is_tup_like(@t ty) -> bool {
alt (ty.struct) {
- case (ty_tup(_)) { ret true; }
- case (ty_rec(_)) { ret true; }
- case (ty_tag(_)) { ret true; }
- case (_) { ret false; }
+ case (ty_box(_)) { ret true; }
+ case (ty_tup(_)) { ret true; }
+ case (ty_rec(_)) { ret true; }
+ case (ty_tag(_,_)) { ret true; }
+ case (_) { ret false; }
}
fail;
}
@@ -402,6 +470,17 @@ fn type_is_scalar(@t ty) -> bool {
case (ty_uint) { ret true; }
case (ty_machine(_)) { ret true; }
case (ty_char) { ret true; }
+ case (ty_type) { ret true; }
+ case (_) { ret false; }
+ }
+ fail;
+}
+
+// FIXME: should we just return true for native types in
+// type_is_scalar?
+fn type_is_native(@t ty) -> bool {
+ alt (ty.struct) {
+ case (ty_native) { ret true; }
case (_) { ret false; }
}
fail;
@@ -423,6 +502,13 @@ fn type_has_dynamic_size(@t ty) -> bool {
i += 1u;
}
}
+ case (ty_tag(_, ?subtys)) {
+ auto i = 0u;
+ while (i < _vec.len[@t](subtys)) {
+ if (type_has_dynamic_size(subtys.(i))) { ret true; }
+ i += 1u;
+ }
+ }
case (ty_param(_)) { ret true; }
case (_) { /* fall through */ }
}
@@ -547,23 +633,42 @@ fn count_ty_params(@t ty) -> uint {
// Type accessors for substructures of types
fn ty_fn_args(@t fty) -> vec[arg] {
- alt (fty.struct) {
- case (ty.ty_fn(?a, _)) { ret a; }
- }
+ alt (fty.struct) {
+ case (ty.ty_fn(_, ?a, _)) { ret a; }
+ case (ty.ty_native_fn(_, ?a, _)) { ret a; }
+ }
+ fail;
+}
+
+fn ty_fn_proto(@t fty) -> ast.proto {
+ alt (fty.struct) {
+ case (ty.ty_fn(?p, _, _)) { ret p; }
+ }
+ fail;
+}
+
+fn ty_fn_abi(@t fty) -> ast.native_abi {
+ alt (fty.struct) {
+ case (ty.ty_native_fn(?a, _, _)) { ret a; }
+ }
+ fail;
}
fn ty_fn_ret(@t fty) -> @t {
- alt (fty.struct) {
- case (ty.ty_fn(_, ?r)) { ret r; }
- }
+ alt (fty.struct) {
+ case (ty.ty_fn(_, _, ?r)) { ret r; }
+ case (ty.ty_native_fn(_, _, ?r)) { ret r; }
+ }
+ fail;
}
fn is_fn_ty(@t fty) -> bool {
- alt (fty.struct) {
- case (ty.ty_fn(_, _)) { ret true; }
- case (_) { ret false; }
- }
- ret false;
+ alt (fty.struct) {
+ case (ty.ty_fn(_, _, _)) { ret true; }
+ case (ty.ty_native_fn(_, _, _)) { ret true; }
+ case (_) { ret false; }
+ }
+ ret false;
}
@@ -571,7 +676,24 @@ fn is_fn_ty(@t fty) -> bool {
// Given an item, returns the associated type as well as a list of the IDs of
// its type parameters.
-fn item_ty(@ast.item it) -> tup(vec[ast.def_id], @t) {
+type ty_params_and_ty = tup(vec[ast.def_id], @t);
+fn native_item_ty(@ast.native_item it) -> ty_params_and_ty {
+ auto ty_params;
+ auto result_ty;
+ alt (it.node) {
+ case (ast.native_item_fn(_, _, ?tps, _, ?ann)) {
+ ty_params = tps;
+ result_ty = ann_to_type(ann);
+ }
+ }
+ let vec[ast.def_id] ty_param_ids = vec();
+ for (ast.ty_param tp in ty_params) {
+ ty_param_ids += vec(tp.id);
+ }
+ ret tup(ty_param_ids, result_ty);
+}
+
+fn item_ty(@ast.item it) -> ty_params_and_ty {
let vec[ast.ty_param] ty_params;
auto result_ty;
alt (it.node) {
@@ -591,8 +713,13 @@ fn item_ty(@ast.item it) -> tup(vec[ast.def_id], @t) {
result_ty = ann_to_type(ann);
}
case (ast.item_tag(_, _, ?tps, ?did)) {
+ // Create a new generic polytype.
ty_params = tps;
- result_ty = plain_ty(ty_tag(did));
+ let vec[@t] subtys = vec();
+ for (ast.ty_param tp in tps) {
+ subtys += vec(plain_ty(ty_param(tp.id)));
+ }
+ result_ty = plain_ty(ty_tag(did, subtys));
}
case (ast.item_obj(_, _, ?tps, _, ?ann)) {
ty_params = tps;
@@ -628,6 +755,7 @@ fn block_ty(&ast.block b) -> @t {
fn pat_ty(@ast.pat pat) -> @t {
alt (pat.node) {
case (ast.pat_wild(?ann)) { ret ann_to_type(ann); }
+ case (ast.pat_lit(_, ?ann)) { ret ann_to_type(ann); }
case (ast.pat_bind(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.pat_tag(_, _, _, ?ann)) { ret ann_to_type(ann); }
}
@@ -638,7 +766,7 @@ fn expr_ty(@ast.expr expr) -> @t {
alt (expr.node) {
case (ast.expr_vec(_, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_tup(_, ?ann)) { ret ann_to_type(ann); }
- case (ast.expr_rec(_, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_rec(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_bind(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_call(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_binary(_, _, _, ?ann)) { ret ann_to_type(ann); }
@@ -647,6 +775,8 @@ fn expr_ty(@ast.expr expr) -> @t {
case (ast.expr_cast(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_if(_, _, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_for(_, _, _, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_for_each(_, _, _, ?ann))
+ { ret ann_to_type(ann); }
case (ast.expr_while(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_do_while(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_alt(_, _, ?ann)) { ret ann_to_type(ann); }
@@ -657,6 +787,14 @@ fn expr_ty(@ast.expr expr) -> @t {
case (ast.expr_field(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_index(_, _, ?ann)) { ret ann_to_type(ann); }
case (ast.expr_path(_, _, ?ann)) { ret ann_to_type(ann); }
+ case (ast.expr_ext(_, _, _, _, ?ann)) { ret ann_to_type(ann); }
+
+ case (ast.expr_fail) { ret plain_ty(ty_nil); }
+ case (ast.expr_log(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_check_expr(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_ret(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_put(_)) { ret plain_ty(ty_nil); }
+ case (ast.expr_be(_)) { ret plain_ty(ty_nil); }
}
fail;
}
@@ -726,7 +864,10 @@ fn is_lval(@ast.expr expr) -> bool {
}
}
-// Type unification
+// Type unification via Robinson's algorithm (Robinson 1965). Implemented as
+// described in Hoder and Voronkov:
+//
+// http://www.cs.man.ac.uk/~hoderk/ubench/unification_full.pdf
fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
-> unify_result {
@@ -746,81 +887,137 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_mismatch, expected, actual);
}
- fn unify_fn(&hashmap[int,@ty.t] bindings,
- @ty.t expected,
- @ty.t actual,
- &unify_handler handler,
- vec[arg] expected_inputs, @t expected_output,
- vec[arg] actual_inputs, @t actual_output)
- -> unify_result {
- auto expected_len = _vec.len[arg](expected_inputs);
- auto actual_len = _vec.len[arg](actual_inputs);
- if (expected_len != actual_len) {
- ret ures_err(terr_arg_count, expected, actual);
- }
+ tag fn_common_res {
+ fn_common_res_err(unify_result);
+ fn_common_res_ok(vec[arg], @t);
+ }
- // TODO: as above, we should have an iter2 iterator.
- let vec[arg] result_ins = vec();
- auto i = 0u;
- while (i < expected_len) {
- auto expected_input = expected_inputs.(i);
- auto actual_input = actual_inputs.(i);
-
- // This should be safe, I think?
- auto result_mode;
- if (mode_is_alias(expected_input.mode) ||
- mode_is_alias(actual_input.mode)) {
- result_mode = ast.alias;
- } else {
- result_mode = ast.val;
+ fn unify_fn_common(@hashmap[int,@ty.t] bindings,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> fn_common_res {
+ auto expected_len = _vec.len[arg](expected_inputs);
+ auto actual_len = _vec.len[arg](actual_inputs);
+ if (expected_len != actual_len) {
+ ret fn_common_res_err(ures_err(terr_arg_count,
+ expected, actual));
+ }
+
+ // TODO: as above, we should have an iter2 iterator.
+ let vec[arg] result_ins = vec();
+ auto i = 0u;
+ while (i < expected_len) {
+ auto expected_input = expected_inputs.(i);
+ auto actual_input = actual_inputs.(i);
+
+ // This should be safe, I think?
+ auto result_mode;
+ if (mode_is_alias(expected_input.mode) ||
+ mode_is_alias(actual_input.mode)) {
+ result_mode = ast.alias;
+ } else {
+ result_mode = ast.val;
+ }
+
+ auto result = unify_step(bindings,
+ actual_input.ty,
+ expected_input.ty,
+ handler);
+
+ alt (result) {
+ case (ures_ok(?rty)) {
+ result_ins += vec(rec(mode=result_mode,
+ ty=rty));
+ }
+
+ case (_) {
+ ret fn_common_res_err(result);
+ }
+ }
+
+ i += 1u;
}
+ // Check the output.
auto result = unify_step(bindings,
- actual_input.ty,
- expected_input.ty,
+ expected_output,
+ actual_output,
handler);
-
alt (result) {
- case (ures_ok(?rty)) {
- result_ins += vec(rec(mode=result_mode,
- ty=rty));
- }
+ case (ures_ok(?rty)) {
+ ret fn_common_res_ok(result_ins, rty);
+ }
- case (_) {
- ret result;
- }
+ case (_) {
+ ret fn_common_res_err(result);
+ }
}
+ }
- i += 1u;
- }
+ fn unify_fn(@hashmap[int,@ty.t] bindings,
+ ast.proto e_proto,
+ ast.proto a_proto,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> unify_result {
- // Check the output.
- auto result_out;
- auto result = unify_step(bindings,
- expected_output,
- actual_output,
- handler);
- alt (result) {
- case (ures_ok(?rty)) {
- result_out = rty;
+ if (e_proto != a_proto) {
+ ret ures_err(terr_mismatch, expected, actual);
}
-
- case (_) {
- ret result;
+ auto t = unify_fn_common(bindings, expected, actual,
+ handler, expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ alt (t) {
+ case (fn_common_res_err(?r)) {
+ ret r;
+ }
+ case (fn_common_res_ok(?result_ins, ?result_out)) {
+ auto t2 = plain_ty(ty.ty_fn(e_proto, result_ins, result_out));
+ ret ures_ok(t2);
+ }
}
- }
+ }
- auto t = plain_ty(ty.ty_fn(result_ins, result_out));
- ret ures_ok(t);
+ fn unify_native_fn(@hashmap[int,@ty.t] bindings,
+ ast.native_abi e_abi,
+ ast.native_abi a_abi,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[arg] expected_inputs, @t expected_output,
+ vec[arg] actual_inputs, @t actual_output)
+ -> unify_result {
+ if (e_abi != a_abi) {
+ ret ures_err(terr_mismatch, expected, actual);
+ }
+ auto t = unify_fn_common(bindings, expected, actual,
+ handler, expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ alt (t) {
+ case (fn_common_res_err(?r)) {
+ ret r;
+ }
+ case (fn_common_res_ok(?result_ins, ?result_out)) {
+ auto t2 = plain_ty(ty.ty_native_fn(e_abi, result_ins,
+ result_out));
+ ret ures_ok(t2);
+ }
+ }
}
- fn unify_obj(&hashmap[int,@ty.t] bindings,
- @ty.t expected,
- @ty.t actual,
- &unify_handler handler,
- vec[method] expected_meths,
- vec[method] actual_meths) -> unify_result {
+ fn unify_obj(@hashmap[int,@ty.t] bindings,
+ @ty.t expected,
+ @ty.t actual,
+ &unify_handler handler,
+ vec[method] expected_meths,
+ vec[method] actual_meths) -> unify_result {
let vec[method] result_meths = vec();
let uint i = 0u;
let uint expected_len = _vec.len[method](expected_meths);
@@ -830,32 +1027,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_meth_count, expected, actual);
}
- // FIXME: work around buggy typestate logic for 'alt', sigh.
- fn is_ok(&unify_result r) -> bool {
- alt (r) {
- case (ures_ok(?tfn)) {
- ret true;
- }
- case (_) {}
- }
- ret false;
- }
-
- fn append_if_ok(&method e_meth,
- &unify_result r, &mutable vec[method] result_meths) {
- alt (r) {
- case (ures_ok(?tfn)) {
- alt (tfn.struct) {
- case (ty_fn(?ins, ?out)) {
- result_meths += vec(rec(inputs = ins,
- output = out
- with e_meth));
- }
- }
- }
- }
- }
-
while (i < expected_len) {
auto e_meth = expected_meths.(i);
auto a_meth = actual_meths.(i);
@@ -863,40 +1034,69 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_err(terr_obj_meths(e_meth.ident, a_meth.ident),
expected, actual);
}
- auto r = unify_fn(bindings, expected, actual, handler,
+ auto r = unify_fn(bindings,
+ e_meth.proto, a_meth.proto,
+ expected, actual, handler,
e_meth.inputs, e_meth.output,
a_meth.inputs, a_meth.output);
- if (!is_ok(r)) {
- ret r;
+ alt (r) {
+ case (ures_ok(?tfn)) {
+ alt (tfn.struct) {
+ case (ty_fn(?proto, ?ins, ?out)) {
+ result_meths += vec(rec(inputs = ins,
+ output = out
+ with e_meth));
+ }
+ }
+ }
+ case (_) {
+ ret r;
+ }
}
- append_if_ok(e_meth, r, result_meths);
i += 1u;
}
auto t = plain_ty(ty_obj(result_meths));
ret ures_ok(t);
}
- fn unify_step(&hashmap[int,@ty.t] bindings, @ty.t expected, @ty.t actual,
- &unify_handler handler) -> unify_result {
+ fn resolve(@hashmap[int,@t] bindings, @t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_var(?id)) {
+ alt (bindings.find(id)) {
+ case (some[@t](?typ2)) {
+ ret resolve(bindings, typ2);
+ }
+ case (none[@t]) {
+ // fall through
+ }
+ }
+ }
+ case (_) {
+ // fall through
+ }
+ }
+ ret typ;
+ }
+
+ fn unify_step(@hashmap[int,@ty.t] bindings, @ty.t in_expected,
+ @ty.t in_actual, &unify_handler handler) -> unify_result {
+
+ // Resolve any bindings.
+ auto expected = resolve(bindings, in_expected);
+ auto actual = resolve(bindings, in_actual);
+
// TODO: rewrite this using tuple pattern matching when available, to
// avoid all this rightward drift and spikiness.
+ // TODO: occurs check, to make sure we don't loop forever when
+ // unifying e.g. 'a and option['a]
+
alt (actual.struct) {
// If the RHS is a variable type, then just do the appropriate
// binding.
case (ty.ty_var(?actual_id)) {
- alt (bindings.find(actual_id)) {
- case (some[@ty.t](?actual_ty)) {
- // FIXME: change the binding here?
- // FIXME: "be"
- ret unify_step(bindings, expected, actual_ty,
- handler);
- }
- case (none[@ty.t]) {
- bindings.insert(actual_id, expected);
- ret ures_ok(expected);
- }
- }
+ bindings.insert(actual_id, expected);
+ ret ures_ok(expected);
}
case (ty.ty_local(?actual_id)) {
auto actual_ty = handler.resolve_local(actual_id);
@@ -938,14 +1138,45 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
case (ty.ty_machine(_)) { ret struct_cmp(expected, actual); }
case (ty.ty_char) { ret struct_cmp(expected, actual); }
case (ty.ty_str) { ret struct_cmp(expected, actual); }
+ case (ty.ty_type) { ret struct_cmp(expected, actual); }
+ case (ty.ty_native) { ret struct_cmp(expected, actual); }
- case (ty.ty_tag(?expected_id)) {
+ case (ty.ty_tag(?expected_id, ?expected_tps)) {
alt (actual.struct) {
- case (ty.ty_tag(?actual_id)) {
- if (expected_id._0 == actual_id._0 &&
- expected_id._1 == actual_id._1) {
- ret ures_ok(expected);
+ case (ty.ty_tag(?actual_id, ?actual_tps)) {
+ if (expected_id._0 != actual_id._0 ||
+ expected_id._1 != actual_id._1) {
+ ret ures_err(terr_mismatch, expected, actual);
}
+
+ // TODO: factor this cruft out, see the TODO in the
+ // ty.ty_tup case
+ let vec[@ty.t] result_tps = vec();
+ auto i = 0u;
+ auto expected_len = _vec.len[@ty.t](expected_tps);
+ while (i < expected_len) {
+ auto expected_tp = expected_tps.(i);
+ auto actual_tp = actual_tps.(i);
+
+ auto result = unify_step(bindings,
+ expected_tp,
+ actual_tp,
+ handler);
+
+ alt (result) {
+ case (ures_ok(?rty)) {
+ append[@ty.t](result_tps, rty);
+ }
+ case (_) {
+ ret result;
+ }
+ }
+
+ i += 1u;
+ }
+
+ ret ures_ok(plain_ty(ty.ty_tag(expected_id,
+ result_tps)));
}
case (_) { /* fall through */ }
}
@@ -970,8 +1201,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -995,8 +1224,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -1045,8 +1272,6 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_ok(plain_ty(ty.ty_tup(result_elems)));
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
@@ -1106,20 +1331,19 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
ret ures_ok(plain_ty(ty.ty_rec(result_fields)));
}
- // TODO: ty_var
-
case (_) {
ret ures_err(terr_mismatch, expected, actual);
}
}
}
- case (ty.ty_fn(?expected_inputs, ?expected_output)) {
+ case (ty.ty_fn(?ep, ?expected_inputs, ?expected_output)) {
alt (actual.struct) {
- case (ty.ty_fn(?actual_inputs, ?actual_output)) {
- ret unify_fn(bindings, expected, actual, handler,
- expected_inputs, expected_output,
- actual_inputs, actual_output);
+ case (ty.ty_fn(?ap, ?actual_inputs, ?actual_output)) {
+ ret unify_fn(bindings, ep, ap,
+ expected, actual, handler,
+ expected_inputs, expected_output,
+ actual_inputs, actual_output);
}
case (_) {
@@ -1128,35 +1352,40 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
}
}
- case (ty.ty_obj(?expected_meths)) {
- alt (actual.struct) {
- case (ty.ty_obj(?actual_meths)) {
- ret unify_obj(bindings, expected, actual, handler,
- expected_meths, actual_meths);
- }
- case (_) {
- ret ures_err(terr_mismatch, expected, actual);
+ case (ty.ty_native_fn(?e_abi, ?expected_inputs,
+ ?expected_output)) {
+ alt (actual.struct) {
+ case (ty.ty_native_fn(?a_abi, ?actual_inputs,
+ ?actual_output)) {
+ ret unify_native_fn(bindings, e_abi, a_abi,
+ expected, actual, handler,
+ expected_inputs, expected_output,
+ actual_inputs, actual_output);
+ }
+ case (_) {
+ ret ures_err(terr_mismatch, expected, actual);
+ }
}
- }
}
- case (ty.ty_var(?expected_id)) {
- alt (bindings.find(expected_id)) {
- case (some[@ty.t](?expected_ty)) {
- // FIXME: change the binding here?
- // FIXME: "be"
- ret unify_step(bindings,
- expected_ty,
- actual,
- handler);
+ case (ty.ty_obj(?expected_meths)) {
+ alt (actual.struct) {
+ case (ty.ty_obj(?actual_meths)) {
+ ret unify_obj(bindings, expected, actual, handler,
+ expected_meths, actual_meths);
}
- case (none[@ty.t]) {
- bindings.insert(expected_id, actual);
- ret ures_ok(actual);
+ case (_) {
+ ret ures_err(terr_mismatch, expected, actual);
}
}
}
+ case (ty.ty_var(?expected_id)) {
+ // Add a binding.
+ bindings.insert(expected_id, actual);
+ ret ures_ok(actual);
+ }
+
case (ty.ty_local(?expected_id)) {
auto expected_ty = handler.resolve_local(expected_id);
auto result = unify_step(bindings,
@@ -1182,13 +1411,43 @@ fn unify(@ty.t expected, @ty.t actual, &unify_handler handler)
fail;
}
+ // Performs type binding substitution.
+ fn substitute(@hashmap[int,@t] bindings, @t typ) -> @t {
+ state obj folder(@hashmap[int,@t] bindings) {
+ fn fold_simple_ty(@t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_var(?id)) {
+ alt (bindings.find(id)) {
+ case (some[@t](?typ2)) {
+ ret substitute(bindings, typ2);
+ }
+ case (none[@t]) {
+ ret typ;
+ }
+ }
+ }
+ case (_) {
+ ret typ;
+ }
+ }
+ }
+ }
+
+ ret ty.fold_ty(folder(bindings), typ);
+ }
+
fn hash_int(&int x) -> uint { ret x as uint; }
fn eq_int(&int a, &int b) -> bool { ret a == b; }
auto hasher = hash_int;
auto eqer = eq_int;
- auto bindings = map.mk_hashmap[int,@ty.t](hasher, eqer);
+ auto bindings = @map.mk_hashmap[int,@ty.t](hasher, eqer);
- ret unify_step(bindings, expected, actual, handler);
+ auto ures = unify_step(bindings, expected, actual, handler);
+ alt (ures) {
+ case (ures_ok(?t)) { ret ures_ok(substitute(bindings, t)); }
+ case (_) { ret ures; }
+ }
+ fail; // not reached
}
fn type_err_to_str(&ty.type_err err) -> str {
@@ -1231,9 +1490,10 @@ fn type_err_to_str(&ty.type_err err) -> str {
}
}
-// Type parameter resolution, used in translation
+// Type parameter resolution, used in translation and typechecking
-fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
+fn resolve_ty_params(ty_params_and_ty ty_params_and_polyty,
+ @t monoty) -> vec[@t] {
obj resolve_ty_params_handler(@hashmap[ast.def_id,@t] bindings) {
fn resolve_local(ast.def_id id) -> @t { log "resolve local"; fail; }
fn record_local(ast.def_id id, @t ty) { log "record local"; fail; }
@@ -1249,8 +1509,6 @@ fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
}
}
- auto ty_params_and_polyty = item_ty(item);
-
auto bindings = @new_def_hash[@t]();
auto handler = resolve_ty_params_handler(bindings);
@@ -1274,6 +1532,47 @@ fn resolve_ty_params(@ast.item item, @t monoty) -> vec[@t] {
ret result_tys;
}
+// Performs type parameter replacement using the supplied mapping from
+// parameter IDs to types.
+fn replace_type_params(@t typ, hashmap[ast.def_id,@t] param_map) -> @t {
+ state obj param_replacer(hashmap[ast.def_id,@t] param_map) {
+ fn fold_simple_ty(@t typ) -> @t {
+ alt (typ.struct) {
+ case (ty_param(?param_def)) {
+ if (param_map.contains_key(param_def)) {
+ ret param_map.get(param_def);
+ } else {
+ ret typ;
+ }
+ }
+ case (_) {
+ ret typ;
+ }
+ }
+ }
+ }
+ auto replacer = param_replacer(param_map);
+ ret fold_ty(replacer, typ);
+}
+
+// Substitutes the type parameters specified by @ty_params with the
+// corresponding types in @bound in the given type. The two vectors must have
+// the same length.
+fn substitute_ty_params(vec[ast.ty_param] ty_params, vec[@t] bound, @t ty)
+ -> @t {
+ auto ty_param_len = _vec.len[ast.ty_param](ty_params);
+ check (ty_param_len == _vec.len[@t](bound));
+
+ auto bindings = common.new_def_hash[@t]();
+ auto i = 0u;
+ while (i < ty_param_len) {
+ bindings.insert(ty_params.(i).id, bound.(i));
+ i += 1u;
+ }
+
+ ret replace_type_params(ty, bindings);
+}
+
// Local Variables:
// mode: rust
// fill-column: 78;
diff --git a/src/comp/middle/typeck.rs b/src/comp/middle/typeck.rs
index d778ffa9..5c7f963c 100644
--- a/src/comp/middle/typeck.rs
+++ b/src/comp/middle/typeck.rs
@@ -25,13 +25,20 @@ import middle.ty.type_is_scalar;
import std._str;
import std._uint;
import std._vec;
+import std.map;
import std.map.hashmap;
import std.option;
import std.option.none;
import std.option.some;
type ty_table = hashmap[ast.def_id, @ty.t];
-type ty_item_table = hashmap[ast.def_id,@ast.item];
+
+tag any_item {
+ any_item_rust(@ast.item);
+ any_item_native(@ast.native_item, ast.native_abi);
+}
+
+type ty_item_table = hashmap[ast.def_id,any_item];
type crate_ctxt = rec(session.session sess,
@ty_table item_types,
@@ -72,6 +79,65 @@ fn generalize_ty(@crate_ctxt cx, @ty.t t) -> @ty.t {
ret ty.fold_ty(generalizer, t);
}
+// Substitutes the user's explicit types for the parameters in a path
+// expression.
+fn substitute_ty_params(&@crate_ctxt ccx,
+ @ty.t typ,
+ vec[@ast.ty] supplied,
+ &span sp) -> @ty.t {
+ state obj ty_substituter(@crate_ctxt ccx,
+ @mutable uint i,
+ vec[@ast.ty] supplied,
+ @hashmap[int,@ty.t] substs) {
+ fn fold_simple_ty(@ty.t typ) -> @ty.t {
+ alt (typ.struct) {
+ case (ty.ty_var(?vid)) {
+ alt (substs.find(vid)) {
+ case (some[@ty.t](?resolved_ty)) {
+ ret resolved_ty;
+ }
+ case (none[@ty.t]) {
+ if (i >= _vec.len[@ast.ty](supplied)) {
+ // Just leave it as an unresolved parameter
+ // for now. (We will error out later.)
+ ret typ;
+ }
+
+ auto result = ast_ty_to_ty_crate(ccx,
+ supplied.(*i));
+ *i += 1u;
+ substs.insert(vid, result);
+ ret result;
+ }
+ }
+ }
+ case (_) { ret typ; }
+ }
+ }
+ }
+
+ fn hash_int(&int x) -> uint { ret x as uint; }
+ fn eq_int(&int a, &int b) -> bool { ret a == b; }
+ auto hasher = hash_int;
+ auto eqer = eq_int;
+ auto substs = @map.mk_hashmap[int,@ty.t](hasher, eqer);
+
+ auto subst_count = @mutable 0u;
+ auto substituter = ty_substituter(ccx, subst_count, supplied, substs);
+
+ auto result = ty.fold_ty(substituter, typ);
+
+ auto supplied_len = _vec.len[@ast.ty](supplied);
+ if ((*subst_count) != supplied_len) {
+ ccx.sess.span_err(sp, "expected " + _uint.to_str(*subst_count, 10u) +
+ " type parameter(s) but found " +
+ _uint.to_str(supplied_len, 10u) + " parameter(s)");
+ fail;
+ }
+
+ ret result;
+}
+
// Parses the programmer's textual representation of a type into our internal
// notion of a type. `getter` is a function that returns the type
// corresponding to a definition ID.
@@ -81,23 +147,6 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
ret rec(mode=arg.mode, ty=ast_ty_to_ty(getter, arg.ty));
}
- fn replace_type_params(@ty.t t, ty_table param_map) -> @ty.t {
- state obj param_replacer(ty_table param_map) {
- fn fold_simple_ty(@ty.t t) -> @ty.t {
- alt (t.struct) {
- case (ty.ty_param(?param_def)) {
- ret param_map.get(param_def);
- }
- case (_) {
- ret t;
- }
- }
- }
- }
- auto replacer = param_replacer(param_map);
- ret ty.fold_ty(replacer, t);
- }
-
fn instantiate(ty_getter getter, ast.def_id id,
vec[@ast.ty] args) -> @ty.t {
// TODO: maybe record cname chains so we can do
@@ -113,7 +162,7 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
auto param = params.(i);
param_map.insert(param.id, ast_ty_to_ty(getter, arg));
}
- ret replace_type_params(ty_and_params.ty, param_map);
+ ret ty.replace_type_params(ty_and_params.ty, param_map);
}
auto mut = ast.imm;
@@ -145,10 +194,10 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
sty = ty.ty_rec(flds);
}
- case (ast.ty_fn(?inputs, ?output)) {
+ case (ast.ty_fn(?proto, ?inputs, ?output)) {
auto f = bind ast_arg_to_arg(getter, _);
auto i = _vec.map[ast.ty_arg, arg](f, inputs);
- sty = ty.ty_fn(i, ast_ty_to_ty(getter, output));
+ sty = ty.ty_fn(proto, i, ast_ty_to_ty(getter, output));
}
case (ast.ty_path(?path, ?def)) {
@@ -157,6 +206,9 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
case (ast.def_ty(?id)) {
sty = instantiate(getter, id, path.node.types).struct;
}
+ case (ast.def_native_ty(?id)) {
+ sty = instantiate(getter, id, path.node.types).struct;
+ }
case (ast.def_obj(?id)) {
sty = instantiate(getter, id, path.node.types).struct;
}
@@ -181,7 +233,8 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
auto ins = _vec.map[ast.ty_arg, arg](f, m.inputs);
auto out = ast_ty_to_ty(getter, m.output);
append[ty.method](tmeths,
- rec(ident=m.ident,
+ rec(proto=m.proto,
+ ident=m.ident,
inputs=ins,
output=out));
}
@@ -192,23 +245,36 @@ fn ast_ty_to_ty(ty_getter getter, &@ast.ty ast_ty) -> @ty.t {
ret @rec(struct=sty, mut=mut, cname=cname);
}
+fn actual_type(@ty.t t, @ast.item item) -> @ty.t {
+ alt (item.node) {
+ case (ast.item_obj(_,_,_,_,_)) {
+ // An obj used as a type name refers to the output type of the
+ // item (constructor).
+ ret middle.ty.ty_fn_ret(t);
+ }
+ case (_) { }
+ }
+
+ ret t;
+}
+
// A convenience function to use a crate_ctxt to resolve names for
// ast_ty_to_ty.
fn ast_ty_to_ty_crate(@crate_ctxt ccx, &@ast.ty ast_ty) -> @ty.t {
fn getter(@crate_ctxt ccx, ast.def_id id) -> ty_and_params {
check (ccx.item_items.contains_key(id));
check (ccx.item_types.contains_key(id));
- auto item = ccx.item_items.get(id);
+ auto it = ccx.item_items.get(id);
auto ty = ccx.item_types.get(id);
- auto params = ty_params_of_item(item);
-
- alt (item.node) {
- case (ast.item_obj(_,_,_,_,_)) {
- // An obj used as a type name refers to the output type of the
- // item (constructor).
- ty = middle.ty.ty_fn_ret(ty);
+ auto params;
+ alt (it) {
+ case (any_item_rust(?item)) {
+ ty = actual_type(ty, item);
+ params = ty_params_of_item(item);
}
- case (_) { }
+ case (any_item_native(?native_item, _)) {
+ params = ty_params_of_native_item(native_item);
+ }
}
ret rec(params = params, ty = ty);
@@ -238,6 +304,18 @@ fn ty_params_of_item(@ast.item item) -> vec[ast.ty_param] {
}
}
+fn ty_params_of_native_item(@ast.native_item item) -> vec[ast.ty_param] {
+ alt (item.node) {
+ case (ast.native_item_fn(_, _, ?p, _, _)) {
+ ret p;
+ }
+ case (_) {
+ let vec[ast.ty_param] r = vec();
+ ret r;
+ }
+ }
+}
+
// Item collection - a pair of bootstrap passes:
//
// 1. Collect the IDs of all type items (typedefs) and store them in a table.
@@ -249,6 +327,34 @@ fn ty_params_of_item(@ast.item item) -> vec[ast.ty_param] {
// We then annotate the AST with the resulting types and return the annotated
// AST, along with a table mapping item IDs to their types.
+fn ty_of_fn_decl(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ fn(&@ast.ty ast_ty) -> @ty.t convert,
+ fn(&ast.arg a) -> arg ty_of_arg,
+ &ast.fn_decl decl,
+ ast.proto proto,
+ ast.def_id def_id) -> @ty.t {
+ auto input_tys = _vec.map[ast.arg,arg](ty_of_arg, decl.inputs);
+ auto output_ty = convert(decl.output);
+ auto t_fn = plain_ty(ty.ty_fn(proto, input_tys, output_ty));
+ item_to_ty.insert(def_id, t_fn);
+ ret t_fn;
+}
+
+fn ty_of_native_fn_decl(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ fn(&@ast.ty ast_ty) -> @ty.t convert,
+ fn(&ast.arg a) -> arg ty_of_arg,
+ &ast.fn_decl decl,
+ ast.native_abi abi,
+ ast.def_id def_id) -> @ty.t {
+ auto input_tys = _vec.map[ast.arg,arg](ty_of_arg, decl.inputs);
+ auto output_ty = convert(decl.output);
+ auto t_fn = plain_ty(ty.ty_native_fn(abi, input_tys, output_ty));
+ item_to_ty.insert(def_id, t_fn);
+ ret t_fn;
+}
+
fn collect_item_types(session.session sess, @ast.crate crate)
-> tup(@ast.crate, @ty_table, @ty_item_table) {
@@ -256,17 +362,20 @@ fn collect_item_types(session.session sess, @ast.crate crate)
@ty_table item_to_ty,
ast.def_id id) -> ty_and_params {
check (id_to_ty_item.contains_key(id));
- auto item = id_to_ty_item.get(id);
- auto ty = ty_of_item(id_to_ty_item, item_to_ty, item);
- auto params = ty_params_of_item(item);
-
- alt (item.node) {
- case (ast.item_obj(_,_,_,_,_)) {
- // An obj used as a type name refers to the output type of the
- // item (constructor).
- ty = middle.ty.ty_fn_ret(ty);
+ auto it = id_to_ty_item.get(id);
+ auto ty;
+ auto params;
+ alt (it) {
+ case (any_item_rust(?item)) {
+ ty = ty_of_item(id_to_ty_item, item_to_ty, item);
+ ty = actual_type(ty, item);
+ params = ty_params_of_item(item);
+ }
+ case (any_item_native(?native_item, ?abi)) {
+ ty = ty_of_native_item(id_to_ty_item, item_to_ty,
+ native_item, abi);
+ params = ty_params_of_native_item(native_item);
}
- case (_) { }
}
ret rec(params = params, ty = ty);
@@ -285,9 +394,10 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto get = bind getter(id_to_ty_item, item_to_ty, _);
auto convert = bind ast_ty_to_ty(get, _);
auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
- auto inputs = _vec.map[ast.arg,arg](f, m.node.meth.inputs);
- auto output = convert(m.node.meth.output);
- ret rec(ident=m.node.ident, inputs=inputs, output=output);
+ auto inputs = _vec.map[ast.arg,arg](f, m.node.meth.decl.inputs);
+ auto output = convert(m.node.meth.decl.output);
+ ret rec(proto=m.node.meth.proto, ident=m.node.ident,
+ inputs=inputs, output=output);
}
fn ty_of_obj(@ty_item_table id_to_ty_item,
@@ -318,7 +428,7 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto t_field = ast_ty_to_ty(g, f.ty);
append[arg](t_inputs, rec(mode=ast.alias, ty=t_field));
}
- auto t_fn = plain_ty(ty.ty_fn(t_inputs, t_obj));
+ auto t_fn = plain_ty(ty.ty_fn(ast.proto_fn, t_inputs, t_obj));
ret t_fn;
}
@@ -336,15 +446,9 @@ fn collect_item_types(session.session sess, @ast.crate crate)
}
case (ast.item_fn(?ident, ?fn_info, _, ?def_id, _)) {
- // TODO: handle ty-params
-
auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
- auto input_tys = _vec.map[ast.arg,arg](f, fn_info.inputs);
- auto output_ty = convert(fn_info.output);
-
- auto t_fn = plain_ty(ty.ty_fn(input_tys, output_ty));
- item_to_ty.insert(def_id, t_fn);
- ret t_fn;
+ ret ty_of_fn_decl(id_to_ty_item, item_to_ty, convert, f,
+ fn_info.decl, fn_info.proto, def_id);
}
case (ast.item_obj(?ident, ?obj_info, _, ?def_id, _)) {
@@ -369,28 +473,67 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ret ty_;
}
- case (ast.item_tag(_, _, _, ?def_id)) {
- auto t = plain_ty(ty.ty_tag(def_id));
+ case (ast.item_tag(_, _, ?tps, ?def_id)) {
+ // Create a new generic polytype.
+ let vec[@ty.t] subtys = vec();
+ for (ast.ty_param tp in tps) {
+ subtys += vec(plain_ty(ty.ty_param(tp.id)));
+ }
+ auto t = plain_ty(ty.ty_tag(def_id, subtys));
item_to_ty.insert(def_id, t);
ret t;
}
case (ast.item_mod(_, _, _)) { fail; }
+ case (ast.item_native_mod(_, _, _)) { fail; }
+ }
+ }
+
+ fn ty_of_native_item(@ty_item_table id_to_ty_item,
+ @ty_table item_to_ty,
+ @ast.native_item it,
+ ast.native_abi abi) -> @ty.t {
+ alt (it.node) {
+ case (ast.native_item_fn(?ident, ?fn_decl, ?params, ?def_id, _)) {
+ auto get = bind getter(id_to_ty_item, item_to_ty, _);
+ auto convert = bind ast_ty_to_ty(get, _);
+ auto f = bind ty_of_arg(id_to_ty_item, item_to_ty, _);
+ ret ty_of_native_fn_decl(id_to_ty_item, item_to_ty, convert,
+ f, fn_decl, abi, def_id);
+ }
+ case (ast.native_item_ty(_, ?def_id)) {
+ if (item_to_ty.contains_key(def_id)) {
+ // Avoid repeating work.
+ ret item_to_ty.get(def_id);
+ }
+ auto x =
+ @rec(struct=ty.ty_native, mut=ast.imm, cname=none[str]);
+ item_to_ty.insert(def_id, x);
+ ret x;
+ }
}
}
fn get_tag_variant_types(@ty_item_table id_to_ty_item,
@ty_table item_to_ty,
&ast.def_id tag_id,
- &vec[ast.variant] variants) -> vec[ast.variant] {
+ &vec[ast.variant] variants,
+ &vec[ast.ty_param] ty_params)
+ -> vec[ast.variant] {
let vec[ast.variant] result = vec();
+ // Create a set of parameter types shared among all the variants.
+ let vec[@ty.t] ty_param_tys = vec();
+ for (ast.ty_param tp in ty_params) {
+ ty_param_tys += vec(plain_ty(ty.ty_param(tp.id)));
+ }
+
for (ast.variant variant in variants) {
- // Nullary tag constructors get truned into constants; n-ary tag
+ // Nullary tag constructors get turned into constants; n-ary tag
// constructors get turned into functions.
auto result_ty;
if (_vec.len[ast.variant_arg](variant.args) == 0u) {
- result_ty = plain_ty(ty.ty_tag(tag_id));
+ result_ty = plain_ty(ty.ty_tag(tag_id, ty_param_tys));
} else {
// As above, tell ast_ty_to_ty() that trans_ty_item_to_ty()
// should be called to resolve named types.
@@ -401,8 +544,8 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto arg_ty = ast_ty_to_ty(f, va.ty);
args += vec(rec(mode=ast.alias, ty=arg_ty));
}
- auto tag_t = plain_ty(ty.ty_tag(tag_id));
- result_ty = plain_ty(ty.ty_fn(args, tag_t));
+ auto tag_t = plain_ty(ty.ty_tag(tag_id, ty_param_tys));
+ result_ty = plain_ty(ty.ty_fn(ast.proto_fn, args, tag_t));
}
item_to_ty.insert(variant.id, result_ty);
@@ -416,25 +559,40 @@ fn collect_item_types(session.session sess, @ast.crate crate)
// First pass: collect all type item IDs.
auto module = crate.node.module;
- auto id_to_ty_item = @common.new_def_hash[@ast.item]();
+ auto id_to_ty_item = @common.new_def_hash[any_item]();
fn collect(&@ty_item_table id_to_ty_item, @ast.item i)
-> @ty_item_table {
alt (i.node) {
case (ast.item_ty(_, _, _, ?def_id, _)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (ast.item_tag(_, _, _, ?def_id)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (ast.item_obj(_, _, _, ?def_id, _)) {
- id_to_ty_item.insert(def_id, i);
+ id_to_ty_item.insert(def_id, any_item_rust(i));
}
case (_) { /* empty */ }
}
ret id_to_ty_item;
}
+ fn collect_native(&@ty_item_table id_to_ty_item, @ast.native_item i)
+ -> @ty_item_table {
+ alt (i.node) {
+ case (ast.native_item_ty(_, ?def_id)) {
+ // The abi of types is not used.
+ id_to_ty_item.insert(def_id,
+ any_item_native(i,
+ ast.native_abi_cdecl));
+ }
+ case (_) {
+ }
+ }
+ ret id_to_ty_item;
+ }
auto fld_1 = fold.new_identity_fold[@ty_item_table]();
- fld_1 = @rec(update_env_for_item = bind collect(_, _)
+ fld_1 = @rec(update_env_for_item = bind collect(_, _),
+ update_env_for_native_item = bind collect_native(_, _)
with *fld_1);
fold.fold_crate[@ty_item_table](id_to_ty_item, fld_1, crate);
@@ -445,22 +603,34 @@ fn collect_item_types(session.session sess, @ast.crate crate)
type env = rec(session.session sess,
@ty_item_table id_to_ty_item,
- @ty_table item_to_ty);
+ @ty_table item_to_ty,
+ ast.native_abi abi);
let @env e = @rec(sess=sess,
id_to_ty_item=id_to_ty_item,
- item_to_ty=item_to_ty);
+ item_to_ty=item_to_ty,
+ abi=ast.native_abi_cdecl);
fn convert(&@env e, @ast.item i) -> @env {
+ auto abi = e.abi;
alt (i.node) {
case (ast.item_mod(_, _, _)) {
// ignore item_mod, it has no type.
}
+ case (ast.item_native_mod(_, ?native_mod, _)) {
+ // ignore item_native_mod, it has no type.
+ abi = native_mod.abi;
+ }
case (_) {
// This call populates the ty_table with the converted type of
// the item in passing; we don't need to do anything else.
ty_of_item(e.id_to_ty_item, e.item_to_ty, i);
}
}
+ ret @rec(abi=abi with *e);
+ }
+
+ fn convert_native(&@env e, @ast.native_item i) -> @env {
+ ty_of_native_item(e.id_to_ty_item, e.item_to_ty, i, e.abi);
ret e;
}
@@ -484,9 +654,19 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ret @fold.respan[ast.item_](sp, item);
}
+ fn fold_native_item_fn(&@env e, &span sp, ast.ident i,
+ &ast.fn_decl d, vec[ast.ty_param] ty_params,
+ ast.def_id id, ast.ann a) -> @ast.native_item {
+ check (e.item_to_ty.contains_key(id));
+ auto ty = e.item_to_ty.get(id);
+ auto item = ast.native_item_fn(i, d, ty_params, id,
+ ast.ann_type(ty));
+ ret @fold.respan[ast.native_item_](sp, item);
+ }
+
fn get_ctor_obj_methods(@ty.t t) -> vec[method] {
alt (t.struct) {
- case (ty.ty_fn(_,?tobj)) {
+ case (ty.ty_fn(_,_,?tobj)) {
alt (tobj.struct) {
case (ty.ty_obj(?tm)) {
ret tm;
@@ -521,7 +701,8 @@ fn collect_item_types(session.session sess, @ast.crate crate)
let method meth_ty = meth_tys.(ix);
let ast.method_ m_;
let @ast.method m;
- auto meth_tfn = plain_ty(ty.ty_fn(meth_ty.inputs,
+ auto meth_tfn = plain_ty(ty.ty_fn(meth_ty.proto,
+ meth_ty.inputs,
meth_ty.output));
m_ = rec(ann=ast.ann_type(meth_tfn) with meth.node);
m = @rec(node=m_ with *meth);
@@ -558,7 +739,9 @@ fn collect_item_types(session.session sess, @ast.crate crate)
ast.def_id id) -> @ast.item {
auto variants_t = get_tag_variant_types(e.id_to_ty_item,
e.item_to_ty,
- id, variants);
+ id,
+ variants,
+ ty_params);
auto item = ast.item_tag(i, variants_t, ty_params, id);
ret @fold.respan[ast.item_](sp, item);
}
@@ -566,8 +749,10 @@ fn collect_item_types(session.session sess, @ast.crate crate)
auto fld_2 = fold.new_identity_fold[@env]();
fld_2 =
@rec(update_env_for_item = bind convert(_,_),
+ update_env_for_native_item = bind convert_native(_,_),
fold_item_const = bind fold_item_const(_,_,_,_,_,_,_),
fold_item_fn = bind fold_item_fn(_,_,_,_,_,_,_),
+ fold_native_item_fn = bind fold_native_item_fn(_,_,_,_,_,_,_),
fold_item_obj = bind fold_item_obj(_,_,_,_,_,_,_),
fold_item_ty = bind fold_item_ty(_,_,_,_,_,_,_),
fold_item_tag = bind fold_item_tag(_,_,_,_,_,_)
@@ -705,13 +890,17 @@ fn are_compatible(&@fn_ctxt fcx, @ty.t expected, @ty.t actual) -> bool {
// TODO: enforce this via a predicate.
fn demand_pat(&@fn_ctxt fcx, @ty.t expected, @ast.pat pat) -> @ast.pat {
- auto p_1 = ast.pat_wild(ast.ann_none); // FIXME: typestate botch
+ auto p_1;
alt (pat.node) {
case (ast.pat_wild(?ann)) {
auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
p_1 = ast.pat_wild(ast.ann_type(t));
}
+ case (ast.pat_lit(?lit, ?ann)) {
+ auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
+ p_1 = ast.pat_lit(lit, ast.ann_type(t));
+ }
case (ast.pat_bind(?id, ?did, ?ann)) {
auto t = demand(fcx, pat.span, expected, ann_to_type(ann));
fcx.locals.insert(did, t);
@@ -735,12 +924,12 @@ fn demand_pat(&@fn_ctxt fcx, @ty.t expected, @ast.pat pat) -> @ast.pat {
auto subpats_len = _vec.len[@ast.pat](subpats);
alt (variant_ty.struct) {
- case (ty.ty_tag(_)) {
+ case (ty.ty_tag(_, _)) {
// Nullary tag variant.
check (subpats_len == 0u);
p_1 = ast.pat_tag(id, subpats, vdef_opt, ast.ann_type(t));
}
- case (ty.ty_fn(?args, ?tag_ty)) {
+ case (ty.ty_fn(_, ?args, ?tag_ty)) {
let vec[@ast.pat] new_subpats = vec();
auto i = 0u;
for (arg a in args) {
@@ -771,9 +960,7 @@ fn demand_expr(&@fn_ctxt fcx, @ty.t expected, @ast.expr e) -> @ast.expr {
fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
autoderef_kind adk) -> @ast.expr {
- // FIXME: botch to work around typestate bug in rustboot
- let vec[@ast.expr] v = vec();
- auto e_1 = ast.expr_vec(v, ast.ann_none);
+ auto e_1;
alt (e.node) {
case (ast.expr_vec(?es_0, ?ann)) {
@@ -811,20 +998,50 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
}
e_1 = ast.expr_tup(elts_1, ast.ann_type(t));
}
- case (ast.expr_rec(?fields_0, ?ann)) {
+ case (ast.expr_rec(?fields_0, ?base_0, ?ann)) {
+
+ auto base_1 = base_0;
+
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
let vec[ast.field] fields_1 = vec();
alt (t.struct) {
case (ty.ty_rec(?field_tys)) {
- auto i = 0u;
- for (ast.field field_0 in fields_0) {
- check (_str.eq(field_0.ident, field_tys.(i).ident));
- auto e_1 = demand_expr(fcx, field_tys.(i).ty,
- field_0.expr);
- fields_1 += vec(rec(mut=field_0.mut,
- ident=field_0.ident,
- expr=e_1));
- i += 1u;
+ alt (base_0) {
+ case (none[@ast.expr]) {
+ auto i = 0u;
+ for (ast.field field_0 in fields_0) {
+ check (_str.eq(field_0.ident,
+ field_tys.(i).ident));
+ auto e_1 = demand_expr(fcx,
+ field_tys.(i).ty,
+ field_0.expr);
+ fields_1 += vec(rec(mut=field_0.mut,
+ ident=field_0.ident,
+ expr=e_1));
+ i += 1u;
+ }
+ }
+ case (some[@ast.expr](?bx)) {
+
+ base_1 =
+ some[@ast.expr](demand_expr(fcx, t, bx));
+
+ let vec[field] base_fields = vec();
+
+ for (ast.field field_0 in fields_0) {
+
+ for (ty.field ft in field_tys) {
+ if (_str.eq(field_0.ident, ft.ident)) {
+ auto e_1 = demand_expr(fcx, ft.ty,
+ field_0.expr);
+ fields_1 +=
+ vec(rec(mut=field_0.mut,
+ ident=field_0.ident,
+ expr=e_1));
+ }
+ }
+ }
+ }
}
}
case (_) {
@@ -832,7 +1049,7 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
fail;
}
}
- e_1 = ast.expr_rec(fields_1, ast.ann_type(t));
+ e_1 = ast.expr_rec(fields_1, base_1, ast.ann_type(t));
}
case (ast.expr_bind(?sube, ?es, ?ann)) {
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
@@ -868,6 +1085,7 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
auto t = demand_full(fcx, e.span, expected,
ann_to_type(ann), adk);
auto then_1 = demand_block(fcx, expected, then_0);
+
auto else_1;
alt (else_0) {
case (none[@ast.expr]) { else_1 = none[@ast.expr]; }
@@ -882,6 +1100,10 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
e_1 = ast.expr_for(decl, seq, bloc, ast.ann_type(t));
}
+ case (ast.expr_for_each(?decl, ?seq, ?bloc, ?ann)) {
+ auto t = demand(fcx, e.span, expected, ann_to_type(ann));
+ e_1 = ast.expr_for_each(decl, seq, bloc, ast.ann_type(t));
+ }
case (ast.expr_while(?cond, ?bloc, ?ann)) {
auto t = demand(fcx, e.span, expected, ann_to_type(ann));
e_1 = ast.expr_while(cond, bloc, ast.ann_type(t));
@@ -924,6 +1146,21 @@ fn demand_expr_full(&@fn_ctxt fcx, @ty.t expected, @ast.expr e,
ann_to_type(ann), adk);
e_1 = ast.expr_path(pth, d, ast.ann_type(t));
}
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, ?ann)) {
+ auto t = demand_full(fcx, e.span, expected,
+ ann_to_type(ann), adk);
+ e_1 = ast.expr_ext(p, args, body, expanded, ast.ann_type(t));
+ }
+ case (ast.expr_fail) { e_1 = e.node; }
+ case (ast.expr_log(_)) { e_1 = e.node; }
+ case (ast.expr_ret(_)) { e_1 = e.node; }
+ case (ast.expr_put(_)) { e_1 = e.node; }
+ case (ast.expr_be(_)) { e_1 = e.node; }
+ case (ast.expr_check_expr(_)) { e_1 = e.node; }
+ case (_) {
+ fcx.ccx.sess.unimpl("type unification for expression variant");
+ fail;
+ }
}
ret @fold.respan[ast.expr_](e.span, e_1);
@@ -989,6 +1226,9 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
case (ast.pat_wild(_)) {
new_pat = ast.pat_wild(ast.ann_type(next_ty_var(fcx.ccx)));
}
+ case (ast.pat_lit(?lt, _)) {
+ new_pat = ast.pat_lit(lt, ast.ann_type(check_lit(lt)));
+ }
case (ast.pat_bind(?id, ?def_id, _)) {
auto ann = ast.ann_type(next_ty_var(fcx.ccx));
new_pat = ast.pat_bind(id, def_id, ann);
@@ -1000,7 +1240,7 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
auto last_id = p.node.idents.(len - 1u);
alt (t.struct) {
// N-ary variants have function types.
- case (ty.ty_fn(?args, ?tag_ty)) {
+ case (ty.ty_fn(_, ?args, ?tag_ty)) {
auto arg_len = _vec.len[arg](args);
auto subpats_len = _vec.len[@ast.pat](subpats);
if (arg_len != subpats_len) {
@@ -1024,7 +1264,9 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
}
// Nullary variants have tag types.
- case (ty.ty_tag(?tid)) {
+ case (ty.ty_tag(?tid, _)) {
+ // TODO: ty params
+
auto subpats_len = _vec.len[@ast.pat](subpats);
if (subpats_len > 0u) {
// TODO: pluralize properly
@@ -1038,7 +1280,8 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
fail; // TODO: recover
}
- auto ann = ast.ann_type(plain_ty(ty.ty_tag(tid)));
+ let vec[@ty.t] tys = vec(); // FIXME
+ auto ann = ast.ann_type(plain_ty(ty.ty_tag(tid, tys)));
new_pat = ast.pat_tag(p, subpats, vdef_opt, ann);
}
}
@@ -1049,6 +1292,90 @@ fn check_pat(&@fn_ctxt fcx, @ast.pat pat) -> @ast.pat {
}
fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
+ // A generic function to factor out common logic from call and bind
+ // expressions.
+ fn check_call_or_bind(&@fn_ctxt fcx, &@ast.expr f,
+ &vec[option.t[@ast.expr]] args)
+ -> tup(@ast.expr, vec[option.t[@ast.expr]]) {
+
+ // Check the function.
+ auto f_0 = check_expr(fcx, f);
+
+ // Check the arguments and generate the argument signature.
+ let vec[option.t[@ast.expr]] args_0 = vec();
+ let vec[arg] arg_tys_0 = vec();
+ for (option.t[@ast.expr] a_opt in args) {
+ alt (a_opt) {
+ case (some[@ast.expr](?a)) {
+ auto a_0 = check_expr(fcx, a);
+ args_0 += vec(some[@ast.expr](a_0));
+
+ // FIXME: this breaks aliases. We need a ty_fn_arg.
+ auto arg_ty = rec(mode=ast.val, ty=expr_ty(a_0));
+ append[arg](arg_tys_0, arg_ty);
+ }
+ case (none[@ast.expr]) {
+ args_0 += vec(none[@ast.expr]);
+
+ // FIXME: breaks aliases too?
+ auto typ = next_ty_var(fcx.ccx);
+ append[arg](arg_tys_0, rec(mode=ast.val, ty=typ));
+ }
+ }
+ }
+
+ auto rt_0 = next_ty_var(fcx.ccx);
+ auto t_0;
+ alt (expr_ty(f_0).struct) {
+ case (ty.ty_fn(?proto, _, _)) {
+ t_0 = plain_ty(ty.ty_fn(proto, arg_tys_0, rt_0));
+ }
+ case (ty.ty_native_fn(?abi, _, _)) {
+ t_0 = plain_ty(ty.ty_native_fn(abi, arg_tys_0, rt_0));
+ }
+ case (_) {
+ log "check_call_or_bind(): fn expr doesn't have fn type";
+ fail;
+ }
+ }
+
+ // Unify and write back to the function.
+ auto f_1 = demand_expr(fcx, t_0, f_0);
+
+ // Take the argument types out of the resulting function type.
+ auto t_1 = expr_ty(f_1);
+
+ if (!ty.is_fn_ty(t_1)) {
+ fcx.ccx.sess.span_err(f_1.span,
+ "mismatched types: callee has " +
+ "non-function type: " +
+ ty_to_str(t_1));
+ }
+
+ let vec[arg] arg_tys_1 = ty.ty_fn_args(t_1);
+ let @ty.t rt_1 = ty.ty_fn_ret(t_1);
+
+ // Unify and write back to the arguments.
+ auto i = 0u;
+ let vec[option.t[@ast.expr]] args_1 = vec();
+ while (i < _vec.len[option.t[@ast.expr]](args_0)) {
+ alt (args_0.(i)) {
+ case (some[@ast.expr](?e_0)) {
+ auto arg_ty_1 = arg_tys_1.(i);
+ auto e_1 = demand_expr(fcx, arg_ty_1.ty, e_0);
+ append[option.t[@ast.expr]](args_1, some[@ast.expr](e_1));
+ }
+ case (none[@ast.expr]) {
+ append[option.t[@ast.expr]](args_1, none[@ast.expr]);
+ }
+ }
+
+ i += 1u;
+ }
+
+ ret tup(f_1, args_1);
+ }
+
alt (expr.node) {
case (ast.expr_lit(?lit, _)) {
auto ty = check_lit(lit);
@@ -1103,6 +1430,9 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
}
}
+ case (ast._mutable) {
+ oper_t = @rec(mut=ast.mut with *oper_t);
+ }
case (_) { oper_t = strip_boxes(oper_t); }
}
ret @fold.respan[ast.expr_](expr.span,
@@ -1132,13 +1462,18 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
check (fcx.ccx.item_types.contains_key(id));
t = generalize_ty(fcx.ccx, fcx.ccx.item_types.get(id));
}
+ case (ast.def_native_fn(?id)) {
+ check (fcx.ccx.item_types.contains_key(id));
+ t = generalize_ty(fcx.ccx, fcx.ccx.item_types.get(id));
+ }
case (ast.def_const(?id)) {
check (fcx.ccx.item_types.contains_key(id));
t = fcx.ccx.item_types.get(id);
}
case (ast.def_variant(_, ?variant_id)) {
check (fcx.ccx.item_types.contains_key(variant_id));
- t = fcx.ccx.item_types.get(variant_id);
+ t = generalize_ty(fcx.ccx,
+ fcx.ccx.item_types.get(variant_id));
}
case (ast.def_binding(?id)) {
check (fcx.locals.contains_key(id));
@@ -1161,11 +1496,92 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
}
+ // Substitute type parameters if the user provided some.
+ if (_vec.len[@ast.ty](pth.node.types) > 0u) {
+ t = substitute_ty_params(fcx.ccx, t, pth.node.types,
+ expr.span);
+ }
+
ret @fold.respan[ast.expr_](expr.span,
ast.expr_path(pth, defopt,
ast.ann_type(t)));
}
+ case (ast.expr_ext(?p, ?args, ?body, ?expanded, _)) {
+ auto exp_ = check_expr(fcx, expanded);
+ auto t = expr_ty(exp_);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_ext(p, args, body, exp_,
+ ast.ann_type(t)));
+ }
+
+ case (ast.expr_fail) {
+ ret expr;
+ }
+
+ case (ast.expr_ret(?expr_opt)) {
+ alt (expr_opt) {
+ case (none[@ast.expr]) {
+ auto nil = plain_ty(ty.ty_nil);
+ if (!are_compatible(fcx, fcx.ret_ty, nil)) {
+ fcx.ccx.sess.err("ret; in function "
+ + "returning non-nil");
+ }
+
+ ret expr;
+ }
+
+ case (some[@ast.expr](?e)) {
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_ret(some(expr_1)));
+ }
+ }
+ }
+
+ case (ast.expr_put(?expr_opt)) {
+ alt (expr_opt) {
+ case (none[@ast.expr]) {
+ auto nil = plain_ty(ty.ty_nil);
+ if (!are_compatible(fcx, fcx.ret_ty, nil)) {
+ fcx.ccx.sess.err("put; in function "
+ + "putting non-nil");
+ }
+
+ ret expr;
+ }
+
+ case (some[@ast.expr](?e)) {
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_put(some(expr_1)));
+ }
+ }
+ }
+
+ case (ast.expr_be(?e)) {
+ /* FIXME: prove instead of check */
+ check (ast.is_call_expr(e));
+ auto expr_0 = check_expr(fcx, e);
+ auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_be(expr_1));
+ }
+
+ case (ast.expr_log(?e)) {
+ auto expr_t = check_expr(fcx, e);
+ ret @fold.respan[ast.expr_](expr.span, ast.expr_log(expr_t));
+ }
+
+ case (ast.expr_check_expr(?e)) {
+ auto expr_t = check_expr(fcx, e);
+ demand(fcx, expr.span, plain_ty(ty.ty_bool), expr_ty(expr_t));
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_check_expr(expr_t));
+ }
+
case (ast.expr_assign(?lhs, ?rhs, _)) {
auto lhs_0 = check_expr(fcx, lhs);
auto rhs_0 = check_expr(fcx, rhs);
@@ -1238,6 +1654,17 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
body_1, ann));
}
+ case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
+ auto decl_1 = check_decl_local(fcx, decl);
+ auto seq_1 = check_expr(fcx, seq);
+ auto body_1 = check_block(fcx, body);
+
+ auto ann = ast.ann_type(plain_ty(ty.ty_nil));
+ ret @fold.respan[ast.expr_](expr.span,
+ ast.expr_for_each(decl_1, seq_1,
+ body_1, ann));
+ }
+
case (ast.expr_while(?cond, ?body, _)) {
auto cond_0 = check_expr(fcx, cond);
auto cond_1 = demand_expr(fcx, plain_ty(ty.ty_bool), cond_0);
@@ -1324,96 +1751,71 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
}
case (ast.expr_bind(?f, ?args, _)) {
- auto f_0 = check_expr(fcx, f);
- auto t_0 = expr_ty(f_0);
-
- if (!ty.is_fn_ty(t_0)) {
- fcx.ccx.sess.span_err(f_0.span,
- "mismatched types: bind callee has " +
- "non-function type: " +
- ty_to_str(t_0));
- }
-
- let vec[arg] arg_tys_0 = ty.ty_fn_args(t_0);
- let @ty.t rt_0 = ty.ty_fn_ret(t_0);
- let vec[option.t[@ast.expr]] args_1 = vec();
-
- let uint i = 0u;
-
- let vec[arg] residual_args = vec();
- for (option.t[@ast.expr] a in args) {
- alt (a) {
- case (none[@ast.expr]) {
- append[arg](residual_args,
- arg_tys_0.(i));
- append[option.t[@ast.expr]](args_1,
- none[@ast.expr]);
- }
- case (some[@ast.expr](?sa)) {
- auto arg_1 = check_expr(fcx, sa);
- auto arg_t = expr_ty(arg_1);
- demand_expr(fcx, arg_tys_0.(i).ty, arg_1);
- append[option.t[@ast.expr]](args_1,
- some[@ast.expr](arg_1));
+ // Call the generic checker.
+ auto result = check_call_or_bind(fcx, f, args);
+
+ // Pull the argument and return types out.
+ auto proto_1;
+ let vec[ty.arg] arg_tys_1 = vec();
+ auto rt_1;
+ alt (expr_ty(result._0).struct) {
+ case (ty.ty_fn(?proto, ?arg_tys, ?rt)) {
+ proto_1 = proto;
+ rt_1 = rt;
+
+ // For each blank argument, add the type of that argument
+ // to the resulting function type.
+ auto i = 0u;
+ while (i < _vec.len[option.t[@ast.expr]](args)) {
+ alt (args.(i)) {
+ case (some[@ast.expr](_)) { /* no-op */ }
+ case (none[@ast.expr]) {
+ arg_tys_1 += vec(arg_tys.(i));
+ }
+ }
+ i += 1u;
}
}
- i += 1u;
+ case (_) {
+ log "LHS of bind expr didn't have a function type?!";
+ fail;
+ }
}
- let @ty.t t_1 = plain_ty(ty.ty_fn(residual_args, rt_0));
+ auto t_1 = plain_ty(ty.ty_fn(proto_1, arg_tys_1, rt_1));
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_bind(f_0, args_1,
+ ast.expr_bind(result._0, result._1,
ast.ann_type(t_1)));
-
}
case (ast.expr_call(?f, ?args, _)) {
-
- // Check the function.
- auto f_0 = check_expr(fcx, f);
-
- // Check the arguments and generate the argument signature.
- let vec[@ast.expr] args_0 = vec();
- let vec[arg] arg_tys_0 = vec();
- for (@ast.expr a in args) {
- auto a_0 = check_expr(fcx, a);
- append[@ast.expr](args_0, a_0);
-
- // FIXME: this breaks aliases. We need a ty_fn_arg.
- append[arg](arg_tys_0, rec(mode=ast.val, ty=expr_ty(a_0)));
+ let vec[option.t[@ast.expr]] args_opt_0 = vec();
+ for (@ast.expr arg in args) {
+ args_opt_0 += vec(some[@ast.expr](arg));
}
- auto rt_0 = next_ty_var(fcx.ccx);
- auto t_0 = plain_ty(ty.ty_fn(arg_tys_0, rt_0));
-
- // Unify and write back to the function.
- auto f_1 = demand_expr(fcx, t_0, f_0);
- // Take the argument types out of the resulting function type.
- auto t_1 = expr_ty(f_1);
+ // Call the generic checker.
+ auto result = check_call_or_bind(fcx, f, args_opt_0);
- if (!ty.is_fn_ty(t_1)) {
- fcx.ccx.sess.span_err(f_1.span,
- "mismatched types: callee has " +
- "non-function type: " +
- ty_to_str(t_1));
- }
-
- let vec[arg] arg_tys_1 = ty.ty_fn_args(t_1);
- let @ty.t rt_1 = ty.ty_fn_ret(t_1);
-
- // Unify and write back to the arguments.
- auto i = 0u;
+ // Pull out the arguments.
let vec[@ast.expr] args_1 = vec();
- while (i < _vec.len[@ast.expr](args_0)) {
- auto arg_ty_1 = arg_tys_1.(i);
- auto e = demand_expr(fcx, arg_ty_1.ty, args_0.(i));
- append[@ast.expr](args_1, e);
+ for (option.t[@ast.expr] arg in result._1) {
+ args_1 += vec(option.get[@ast.expr](arg));
+ }
- i += 1u;
+ // Pull the return type out of the type of the function.
+ auto rt_1 = plain_ty(ty.ty_nil); // FIXME: typestate botch
+ alt (expr_ty(result._0).struct) {
+ case (ty.ty_fn(_,_,?rt)) { rt_1 = rt; }
+ case (ty.ty_native_fn(_, _, ?rt)) { rt_1 = rt; }
+ case (_) {
+ log "LHS of call expr didn't have a function type?!";
+ fail;
+ }
}
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_call(f_1, args_1,
+ ast.expr_call(result._0, args_1,
ast.ann_type(rt_1)));
}
@@ -1478,7 +1880,10 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
ast.expr_tup(elts_1, ann));
}
- case (ast.expr_rec(?fields, _)) {
+ case (ast.expr_rec(?fields, ?base, _)) {
+
+ auto base_1 = base;
+
let vec[ast.field] fields_1 = vec();
let vec[field] fields_t = vec();
@@ -1492,9 +1897,52 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
append[field](fields_t, rec(ident=f.ident, ty=expr_t));
}
- auto ann = ast.ann_type(plain_ty(ty.ty_rec(fields_t)));
+ auto ann = ast.ann_none;
+
+ alt (base) {
+ case (none[@ast.expr]) {
+ ann = ast.ann_type(plain_ty(ty.ty_rec(fields_t)));
+ }
+
+ case (some[@ast.expr](?bexpr)) {
+ auto bexpr_1 = check_expr(fcx, bexpr);
+ auto bexpr_t = expr_ty(bexpr_1);
+
+ let vec[field] base_fields = vec();
+
+ alt (bexpr_t.struct) {
+ case (ty.ty_rec(?flds)) {
+ base_fields = flds;
+ }
+ case (_) {
+ fcx.ccx.sess.span_err
+ (expr.span,
+ "record update non-record base");
+ }
+ }
+
+ ann = ast.ann_type(bexpr_t);
+
+ for (ty.field f in fields_t) {
+ auto found = false;
+ for (ty.field bf in base_fields) {
+ if (_str.eq(f.ident, bf.ident)) {
+ demand(fcx, expr.span, f.ty, bf.ty);
+ found = true;
+ }
+ }
+ if (!found) {
+ fcx.ccx.sess.span_err
+ (expr.span,
+ "unknown field in record update: "
+ + f.ident);
+ }
+ }
+ }
+ }
+
ret @fold.respan[ast.expr_](expr.span,
- ast.expr_rec(fields_1, ann));
+ ast.expr_rec(fields_1, base_1, ann));
}
case (ast.expr_field(?base, ?field, _)) {
@@ -1537,7 +1985,8 @@ fn check_expr(&@fn_ctxt fcx, @ast.expr expr) -> @ast.expr {
"bad index on obj");
}
auto meth = methods.(ix);
- auto t = plain_ty(ty.ty_fn(meth.inputs, meth.output));
+ auto t = plain_ty(ty.ty_fn(meth.proto,
+ meth.inputs, meth.output));
auto ann = ast.ann_type(t);
ret @fold.respan[ast.expr_](expr.span,
ast.expr_field(base_1,
@@ -1664,43 +2113,6 @@ fn check_stmt(&@fn_ctxt fcx, &@ast.stmt stmt) -> @ast.stmt {
ret stmt;
}
- case (ast.stmt_ret(?expr_opt)) {
- alt (expr_opt) {
- case (none[@ast.expr]) {
- auto nil = plain_ty(ty.ty_nil);
- if (!are_compatible(fcx, fcx.ret_ty, nil)) {
- fcx.ccx.sess.err("ret; in function "
- + "returning non-nil");
- }
-
- ret stmt;
- }
-
- case (some[@ast.expr](?expr)) {
- auto expr_0 = check_expr(fcx, expr);
- auto expr_1 = demand_expr(fcx, fcx.ret_ty, expr_0);
- ret @fold.respan[ast.stmt_](stmt.span,
- ast.stmt_ret(some(expr_1)));
- }
- }
- }
-
- case (ast.stmt_log(?expr)) {
- auto expr_t = check_expr(fcx, expr);
- ret @fold.respan[ast.stmt_](stmt.span, ast.stmt_log(expr_t));
- }
-
- case (ast.stmt_check_expr(?expr)) {
- auto expr_t = check_expr(fcx, expr);
- demand(fcx, expr.span, plain_ty(ty.ty_bool), expr_ty(expr_t));
- ret @fold.respan[ast.stmt_](stmt.span,
- ast.stmt_check_expr(expr_t));
- }
-
- case (ast.stmt_fail) {
- ret stmt;
- }
-
case (ast.stmt_expr(?expr)) {
auto expr_t = check_expr(fcx, expr);
ret @fold.respan[ast.stmt_](stmt.span, ast.stmt_expr(expr_t));
@@ -1744,9 +2156,8 @@ fn check_const(&@crate_ctxt ccx, &span sp, ast.ident ident, @ast.ty t,
ret @fold.respan[ast.item_](sp, item);
}
-fn check_fn(&@crate_ctxt ccx, ast.effect effect,
- bool is_iter, vec[ast.arg] inputs,
- @ast.ty output, &ast.block body) -> ast._fn {
+fn check_fn(&@crate_ctxt ccx, &ast.fn_decl decl, ast.proto proto,
+ &ast.block body) -> ast._fn {
auto local_ty_table = @common.new_def_hash[@ty.t]();
// FIXME: duplicate work: the item annotation already has the arg types
@@ -1760,12 +2171,12 @@ fn check_fn(&@crate_ctxt ccx, ast.effect effect,
}
// Store the type of each argument in the table.
- for (ast.arg arg in inputs) {
+ for (ast.arg arg in decl.inputs) {
auto input_ty = ast_ty_to_ty_crate(ccx, arg.ty);
local_ty_table.insert(arg.id, input_ty);
}
- let @fn_ctxt fcx = @rec(ret_ty = ast_ty_to_ty_crate(ccx, output),
+ let @fn_ctxt fcx = @rec(ret_ty = ast_ty_to_ty_crate(ccx, decl.output),
locals = local_ty_table,
ccx = ccx);
@@ -1773,8 +2184,9 @@ fn check_fn(&@crate_ctxt ccx, ast.effect effect,
auto block_t = check_block(fcx, body);
auto block_wb = writeback(fcx, block_t);
- auto fn_t = rec(effect=effect, is_iter=is_iter,
- inputs=inputs, output=output, body=block_wb);
+ auto fn_t = rec(decl=decl,
+ proto=proto,
+ body=block_wb);
ret fn_t;
}
@@ -1787,13 +2199,13 @@ fn check_item_fn(&@crate_ctxt ccx, &span sp, ast.ident ident, &ast._fn f,
// again here, we can extract them.
let vec[arg] inputs = vec();
- for (ast.arg arg in f.inputs) {
+ for (ast.arg arg in f.decl.inputs) {
auto input_ty = ast_ty_to_ty_crate(ccx, arg.ty);
inputs += vec(rec(mode=arg.mode, ty=input_ty));
}
- auto output_ty = ast_ty_to_ty_crate(ccx, f.output);
- auto fn_sty = ty.ty_fn(inputs, output_ty);
+ auto output_ty = ast_ty_to_ty_crate(ccx, f.decl.output);
+ auto fn_sty = ty.ty_fn(f.proto, inputs, output_ty);
auto fn_ann = ast.ann_type(plain_ty(fn_sty));
auto item = ast.item_fn(ident, f, ty_params, id, fn_ann);
@@ -1825,7 +2237,7 @@ fn check_crate(session.session sess, @ast.crate crate) -> @ast.crate {
auto fld = fold.new_identity_fold[@crate_ctxt]();
fld = @rec(update_env_for_item = bind update_obj_fields(_, _),
- fold_fn = bind check_fn(_,_,_,_,_,_),
+ fold_fn = bind check_fn(_,_,_,_),
fold_item_fn = bind check_item_fn(_,_,_,_,_,_,_)
with *fld);
ret fold.fold_crate[@crate_ctxt](ccx, fld, result._0);
diff --git a/src/comp/pretty/pp.rs b/src/comp/pretty/pp.rs
new file mode 100644
index 00000000..43a9220f
--- /dev/null
+++ b/src/comp/pretty/pp.rs
@@ -0,0 +1,207 @@
+import std.io;
+import std._vec;
+import std._str;
+
+tag boxtype {box_h; box_v; box_hv; box_align;}
+tag contexttype {cx_h; cx_v;}
+
+tag token {
+ brk(uint);
+ word(str);
+ cword(str); // closing token
+ open(boxtype, uint);
+ close;
+}
+
+type context = rec(contexttype tp, uint indent);
+
+type ps = @rec(mutable vec[context] context,
+ uint width,
+ mutable vec[token] buffered,
+ mutable uint scandepth,
+ mutable uint bufferedcol,
+ mutable uint col,
+ mutable bool start_of_line);
+
+fn mkstate(uint width) -> ps {
+ let vec[context] stack = vec(rec(tp=cx_v, indent=0u));
+ let vec[token] buff = vec();
+ ret @rec(mutable context=stack,
+ width=width,
+ mutable buffered=buff,
+ mutable scandepth=0u,
+ mutable bufferedcol=0u,
+ mutable col=0u,
+ mutable start_of_line=true);
+}
+
+impure fn push_context(ps p, contexttype tp, uint indent) {
+ before_print(p, false);
+ p.context = _vec.push[context](p.context, rec(tp=tp, indent=base_indent(p)
+ + indent));
+}
+
+impure fn pop_context(ps p) {
+ p.context = _vec.pop[context](p.context);
+}
+
+impure fn add_token(ps p, token tok) {
+ if (p.scandepth == 0u) {do_token(p, tok);}
+ else {buffer_token(p, tok);}
+}
+
+impure fn buffer_token(ps p, token tok) {
+ p.buffered += vec(tok);
+ p.bufferedcol += token_size(tok);
+ alt (p.buffered.(0)) {
+ case (brk(_)) {
+ alt (tok) {
+ case (brk(_)) {
+ if (p.scandepth == 1u) {finish_break_scan(p);}
+ }
+ case (open(box_h,_)) {p.scandepth += 1u;}
+ case (open(_,_)) {finish_break_scan(p);}
+ case (close) {
+ p.scandepth -= 1u;
+ if (p.scandepth == 0u) {finish_break_scan(p);}
+ }
+ case (_) {}
+ }
+ }
+ case (open(_,_)) {
+ if (p.bufferedcol > p.width) {finish_block_scan(p, cx_v);}
+ else {
+ alt (tok) {
+ case (open(_,_)) {p.scandepth += 1u;}
+ case (close) {
+ p.scandepth -= 1u;
+ if (p.scandepth == 0u) {finish_block_scan(p, cx_h);}
+ }
+ case (_) {}
+ }
+ }
+ }
+ }
+}
+
+impure fn finish_block_scan(ps p, contexttype tp) {
+ auto indent;
+ alt (p.buffered.(0)){
+ case (open(box_hv,?ind)) {
+ indent = ind;
+ }
+ case (open(box_align, _)) {
+ indent = p.col - base_indent(p);
+ }
+ }
+ p.scandepth = 0u;
+ push_context(p, tp, indent);
+ for (token t in _vec.shift[token](p.buffered)) {add_token(p, t);}
+}
+
+impure fn finish_break_scan(ps p) {
+ if (p.bufferedcol > p.width) {
+ write_str("\n");
+ p.col = 0u;
+ }
+ else {
+ auto width;
+ alt (p.buffered.(0)) {case(brk(?w)) {width = w;}}
+ auto i = 0u;
+ while (i < width) {write_str(" "); i+=1u;}
+ p.col += width;
+ }
+ p.scandepth = 0u;
+ for (token t in _vec.shift[token](p.buffered)) {add_token(p, t);}
+}
+
+impure fn start_scan(ps p, token tok) {
+ p.buffered = vec(tok);
+ p.scandepth = 1u;
+ p.bufferedcol = p.col;
+}
+
+fn cur_context(ps p) -> context {
+ ret p.context.(_vec.len[context](p.context)-1u);
+}
+fn base_indent(ps p) -> uint {
+ auto i = _vec.len[context](p.context);
+ while (i > 0u) {
+ i -= 1u;
+ auto cx = p.context.(i);
+ if (cx.tp == cx_v) {ret cx.indent;}
+ }
+}
+
+impure fn do_token(ps p, token tok) {
+ alt (tok) {
+ case (brk(?sz)) {
+ alt (cur_context(p).tp) {
+ case (cx_h) {
+ before_print(p, false);
+ start_scan(p, tok);
+ }
+ case (cx_v) {
+ write_str("\n");
+ p.col = 0u;
+ p.start_of_line = true;
+ }
+ }
+ }
+ case (word(?w)) {
+ before_print(p, false);
+ write_str(w);
+ p.col += _str.byte_len(w); // TODO char_len
+ }
+ case (cword(?w)) {
+ before_print(p, true);
+ write_str(w);
+ p.col += _str.byte_len(w); // TODO char_len
+ }
+ case (open(?tp, ?indent)) {
+ alt (tp) {
+ case (box_hv) {start_scan(p, tok);}
+ case (box_align) {start_scan(p, tok);}
+ case (box_h) {push_context(p, cx_h, indent);}
+ case (box_v) {push_context(p, cx_v, indent);}
+ }
+ }
+ case (close) {pop_context(p);}
+ }
+}
+
+impure fn before_print(ps p, bool closing) {
+ if (p.start_of_line) {
+ p.start_of_line = false;
+ auto ind;
+ if (closing) {ind = base_indent(p);}
+ else {ind = cur_context(p).indent;}
+ p.col = ind;
+ while (ind > 0u) {write_str(" "); ind -= 1u;}
+ }
+}
+
+fn write_str(str s) {
+ io.writefd(1, _str.bytes(s));
+}
+
+fn token_size(token tok) -> uint {
+ alt (tok) {
+ case (brk(?sz)) {ret sz;}
+ case (word(?w)) {ret _str.byte_len(w);}
+ case (cword(?w)) {ret _str.byte_len(w);}
+ case (open(_, _)) {ret 0u;} // TODO exception for V blocks?
+ case (close) {ret 0u;}
+ }
+}
+
+impure fn box(ps p, uint indent) {add_token(p, open(box_hv, indent));}
+impure fn abox(ps p) {add_token(p, open(box_align, 0u));}
+impure fn vbox(ps p, uint indent) {add_token(p, open(box_v, indent));}
+impure fn hbox(ps p, uint indent) {add_token(p, open(box_h, indent));}
+impure fn end(ps p) {add_token(p, close);}
+impure fn wrd(ps p, str wrd) {add_token(p, word(wrd));}
+impure fn cwrd(ps p, str wrd) {add_token(p, cword(wrd));}
+impure fn space(ps p) {add_token(p, brk(1u));}
+impure fn spaces(ps p, uint n) {add_token(p, brk(n));}
+impure fn line(ps p) {add_token(p, brk(0u));}
diff --git a/src/comp/pretty/pprust.rs b/src/comp/pretty/pprust.rs
new file mode 100644
index 00000000..cab778f1
--- /dev/null
+++ b/src/comp/pretty/pprust.rs
@@ -0,0 +1,708 @@
+import std._vec;
+import std._str;
+import std.option;
+import front.ast;
+import pp.box; import pp.abox; import pp.vbox;
+import pp.end; import pp.wrd; import pp.space; import pp.line;
+import pp.ps;
+
+import foo = std.io;
+
+const uint indent_unit = 2u;
+const int as_prec = 5;
+
+impure fn print_ast(ast._mod _mod) {
+ auto s = pp.mkstate(80u);
+ for (@ast.view_item vitem in _mod.view_items) {print_view_item(s, vitem);}
+ line(s);
+ for (@ast.item item in _mod.items) {print_item(s, item);}
+}
+
+impure fn hbox(ps s) {
+ pp.hbox(s, indent_unit);
+}
+impure fn wrd1(ps s, str word) {
+ wrd(s, word);
+ space(s);
+}
+impure fn popen(ps s) {
+ wrd(s, "(");
+ abox(s);
+}
+impure fn pclose(ps s) {
+ end(s);
+ wrd(s, ")");
+}
+impure fn bopen(ps s) {
+ wrd1(s, "{");
+ vbox(s, indent_unit);
+ line(s);
+}
+impure fn bclose(ps s) {
+ end(s);
+ pp.cwrd(s, "}");
+}
+impure fn commasep[IN](ps s, vec[IN] elts, impure fn (ps, IN) op) {
+ auto first = true;
+ for (IN elt in elts) {
+ if (first) {first = false;}
+ else {wrd1(s, ",");}
+ op(s, elt);
+ }
+}
+
+impure fn print_type(ps s, @ast.ty ty) {
+ hbox(s);
+ alt (ty.node) {
+ case (ast.ty_nil) {wrd(s, "()");}
+ case (ast.ty_bool) {wrd(s, "bool");}
+ case (ast.ty_int) {wrd(s, "int");}
+ case (ast.ty_uint) {wrd(s, "uint");}
+ case (ast.ty_machine(?tm)) {wrd(s, util.common.ty_mach_to_str(tm));}
+ case (ast.ty_char) {wrd(s, "char");}
+ case (ast.ty_str) {wrd(s, "str");}
+ case (ast.ty_box(?t)) {wrd(s, "@"); print_type(s, t);}
+ case (ast.ty_vec(?t)) {wrd(s, "vec["); print_type(s, t); wrd(s, "]");}
+ case (ast.ty_type) {wrd(s, "type");}
+ case (ast.ty_tup(?elts)) {
+ wrd(s, "tup");
+ popen(s);
+ auto f = print_type;
+ commasep[@ast.ty](s, elts, f);
+ pclose(s);
+ }
+ case (ast.ty_rec(?fields)) {
+ wrd(s, "rec");
+ popen(s);
+ impure fn print_field(ps s, ast.ty_field f) {
+ hbox(s);
+ print_type(s, f.ty);
+ space(s);
+ wrd(s, f.ident);
+ end(s);
+ }
+ auto f = print_field;
+ commasep[ast.ty_field](s, fields, f);
+ pclose(s);
+ }
+ case (ast.ty_fn(?proto,?inputs,?output)) {
+ if (proto == ast.proto_fn) {wrd(s, "fn");}
+ else {wrd(s, "iter");}
+ popen(s);
+ impure fn print_arg(ps s, ast.ty_arg input) {
+ if (middle.ty.mode_is_alias(input.mode)) {wrd(s, "&");}
+ print_type(s, input.ty);
+ }
+ auto f = print_arg;
+ commasep[ast.ty_arg](s, inputs, f);
+ pclose(s);
+ if (output.node != ast.ty_nil) {
+ space(s);
+ hbox(s);
+ wrd1(s, "->");
+ print_type(s, output);
+ end(s);
+ }
+ }
+ case (ast.ty_path(?path,_)) {
+ print_path(s, path);
+ }
+ case (ast.ty_mutable(?t)) {
+ wrd1(s, "mutable");
+ print_type(s, t);
+ }
+ }
+ end(s);
+}
+
+impure fn print_item(ps s, @ast.item item) {
+ hbox(s);
+ alt (item.node) {
+ case (ast.item_const(?id, ?ty, ?expr, _, _)) {
+ wrd1(s, "const");
+ print_type(s, ty);
+ space(s);
+ wrd1(s, id);
+ wrd1(s, "=");
+ print_expr(s, expr);
+ wrd(s, ";");
+ }
+ case (ast.item_fn(?name,?_fn,?typarams,_,_)) {
+ print_fn(s, _fn.decl, name, typarams);
+ space(s);
+ print_block(s, _fn.body);
+ }
+ case (ast.item_mod(?id,?_mod,_)) {
+ wrd1(s, "mod");
+ wrd1(s, id);
+ bopen(s);
+ for (@ast.item itm in _mod.items) {print_item(s, itm);}
+ bclose(s);
+ }
+ case (ast.item_native_mod(?id,?nmod,_)) {
+ wrd1(s, "native");
+ alt (nmod.abi) {
+ case (ast.native_abi_rust) {wrd1(s, "\"rust\"");}
+ case (ast.native_abi_cdecl) {wrd1(s, "\"cdecl\"");}
+ }
+ wrd1(s, "mod");
+ wrd1(s, id);
+ bopen(s);
+ for (@ast.native_item item in nmod.items) {
+ hbox(s);
+ alt (item.node) {
+ case (ast.native_item_ty(?id,_)) {
+ wrd1(s, "type");
+ wrd(s, id);
+ }
+ case (ast.native_item_fn(?id,?decl,?typarams,_,_)) {
+ print_fn(s, decl, id, typarams);
+ }
+ }
+ wrd(s, ";");
+ end(s);
+ }
+ bclose(s);
+ }
+ case (ast.item_ty(?id,?ty,?params,_,_)) {
+ wrd1(s, "type");
+ wrd(s, id);
+ print_type_params(s, params);
+ space(s);
+ wrd1(s, "=");
+ print_type(s, ty);
+ wrd(s, ";");
+ }
+ case (ast.item_tag(?id,?variants,?params,_)) {
+ wrd1(s, "tag");
+ wrd(s, id);
+ print_type_params(s, params);
+ space(s);
+ bopen(s);
+ for (ast.variant v in variants) {
+ wrd(s, v.name);
+ if (_vec.len[ast.variant_arg](v.args) > 0u) {
+ popen(s);
+ impure fn print_variant_arg(ps s, ast.variant_arg arg) {
+ print_type(s, arg.ty);
+ }
+ auto f = print_variant_arg;
+ commasep[ast.variant_arg](s, v.args, f);
+ pclose(s);
+ }
+ wrd(s, ";");
+ line(s);
+ }
+ bclose(s);
+ }
+ case (ast.item_obj(?id,?_obj,?params,_,_)) {
+ wrd1(s, "obj");
+ wrd(s, id);
+ print_type_params(s, params);
+ popen(s);
+ impure fn print_field(ps s, ast.obj_field field) {
+ hbox(s);
+ print_type(s, field.ty);
+ space(s);
+ wrd(s, field.ident);
+ end(s);
+ }
+ auto f = print_field;
+ commasep[ast.obj_field](s, _obj.fields, f);
+ pclose(s);
+ space(s);
+ bopen(s);
+ for (@ast.method meth in _obj.methods) {
+ hbox(s);
+ let vec[ast.ty_param] typarams = vec();
+ print_fn(s, meth.node.meth.decl, meth.node.ident, typarams);
+ space(s);
+ print_block(s, meth.node.meth.body);
+ end(s);
+ line(s);
+ }
+ alt (_obj.dtor) {
+ case (option.some[ast.block](?dtor)) {
+ hbox(s);
+ wrd1(s, "close");
+ print_block(s, dtor);
+ end(s);
+ line(s);
+ }
+ case (_) {}
+ }
+ bclose(s);
+ }
+ }
+ end(s);
+ line(s);
+ line(s);
+}
+
+impure fn print_block(ps s, ast.block blk) {
+ bopen(s);
+ for (@ast.stmt st in blk.node.stmts) {
+ alt (st.node) {
+ case (ast.stmt_decl(?decl)) {print_decl(s, decl);}
+ case (ast.stmt_expr(?expr)) {print_expr(s, expr);}
+ }
+ if (front.parser.stmt_ends_with_semi(st)) {wrd(s, ";");}
+ line(s);
+ }
+ alt (blk.node.expr) {
+ case (option.some[@ast.expr](?expr)) {
+ print_expr(s, expr);
+ line(s);
+ }
+ case (_) {}
+ }
+ bclose(s);
+}
+
+impure fn print_literal(ps s, @ast.lit lit) {
+ alt (lit.node) {
+ case (ast.lit_str(?st)) {print_string(s, st);}
+ case (ast.lit_char(?ch)) {
+ wrd(s, "'" + escape_str(_str.from_bytes(vec(ch as u8)), '\'') + "'");
+ }
+ case (ast.lit_int(?val)) {
+ wrd(s, util.common.istr(val));
+ }
+ case (ast.lit_uint(?val)) { // TODO clipping? uistr?
+ wrd(s, util.common.istr(val as int) + "u");
+ }
+ case (ast.lit_mach_int(?mach,?val)) {
+ wrd(s, util.common.istr(val as int));
+ wrd(s, util.common.ty_mach_to_str(mach));
+ }
+ case (ast.lit_nil) {wrd(s, "()");}
+ case (ast.lit_bool(?val)) {
+ if (val) {wrd(s, "true");} else {wrd(s, "false");}
+ }
+ }
+}
+
+impure fn print_expr(ps s, @ast.expr expr) {
+ auto pe = print_expr;
+ hbox(s);
+ alt (expr.node) {
+ case (ast.expr_vec(?exprs,_)) {
+ wrd(s, "vec");
+ popen(s);
+ commasep[@ast.expr](s, exprs, pe);
+ pclose(s);
+ }
+ case (ast.expr_tup(?exprs,_)) {
+ impure fn printElt(ps s, ast.elt elt) {
+ hbox(s);
+ if (elt.mut == ast.mut) {wrd1(s, "mutable");}
+ print_expr(s, elt.expr);
+ end(s);
+ }
+ wrd(s, "tup");
+ popen(s);
+ auto f = printElt;
+ commasep[ast.elt](s, exprs, f);
+ pclose(s);
+ }
+ case (ast.expr_rec(?fields,_,_)) {
+ impure fn print_field(ps s, ast.field field) {
+ hbox(s);
+ if (field.mut == ast.mut) {wrd1(s, "mutable");}
+ wrd(s, field.ident);
+ wrd(s, "=");
+ print_expr(s, field.expr);
+ end(s);
+ }
+ wrd(s, "rec");
+ popen(s);
+ auto f = print_field;
+ commasep[ast.field](s, fields, f);
+ pclose(s);
+ }
+ case (ast.expr_call(?func,?args,_)) {
+ print_expr(s, func);
+ popen(s);
+ commasep[@ast.expr](s, args, pe);
+ pclose(s);
+ }
+ case (ast.expr_bind(?func,?args,_)) {
+ impure fn print_opt(ps s, option.t[@ast.expr] expr) {
+ alt (expr) {
+ case (option.some[@ast.expr](?expr)) {
+ print_expr(s, expr);
+ }
+ case (_) {wrd(s, "_");}
+ }
+ }
+ wrd1(s, "bind");
+ print_expr(s, func);
+ popen(s);
+ auto f = print_opt;
+ commasep[option.t[@ast.expr]](s, args, f);
+ pclose(s);
+ }
+ case (ast.expr_binary(?op,?lhs,?rhs,_)) {
+ auto prec = operator_prec(op);
+ print_maybe_parens(s, lhs, prec);
+ space(s);
+ wrd1(s, ast.binop_to_str(op));
+ print_maybe_parens(s, rhs, prec + 1);
+ }
+ case (ast.expr_unary(?op,?expr,_)) {
+ wrd(s, ast.unop_to_str(op));
+ if (op == ast._mutable) {space(s);}
+ print_expr(s, expr);
+ }
+ case (ast.expr_lit(?lit,_)) {
+ print_literal(s, lit);
+ }
+ case (ast.expr_cast(?expr,?ty,_)) {
+ print_maybe_parens(s, expr, as_prec);
+ space(s);
+ wrd1(s, "as");
+ print_type(s, ty);
+ }
+ case (ast.expr_if(?test,?block,?elseopt,_)) {
+ wrd1(s, "if");
+ popen(s);
+ print_expr(s, test);
+ pclose(s);
+ space(s);
+ print_block(s, block);
+ alt (elseopt) {
+ case (option.some[@ast.expr](?_else)) {
+ space(s);
+ wrd1(s, "else");
+ print_expr(s, _else);
+ }
+ }
+ }
+ case (ast.expr_while(?test,?block,_)) {
+ wrd1(s, "while");
+ popen(s);
+ print_expr(s, test);
+ pclose(s);
+ space(s);
+ print_block(s, block);
+ }
+ case (ast.expr_for(?decl,?expr,?block,_)) {
+ wrd1(s, "for");
+ popen(s);
+ print_decl(s, decl);
+ space(s);
+ wrd1(s, "in");
+ print_expr(s, expr);
+ pclose(s);
+ space(s);
+ print_block(s, block);
+ }
+ case (ast.expr_for_each(?decl,?expr,?block,_)) {
+ wrd1(s, "for each");
+ popen(s);
+ print_decl(s, decl);
+ space(s);
+ wrd1(s, "in");
+ print_expr(s, expr);
+ space(s);
+ print_block(s, block);
+ }
+ case (ast.expr_do_while(?block,?expr,_)) {
+ wrd1(s, "do");
+ space(s);
+ print_block(s, block);
+ space(s);
+ wrd1(s, "while");
+ popen(s);
+ print_expr(s, expr);
+ pclose(s);
+ }
+ case (ast.expr_alt(?expr,?arms,_)) {
+ wrd1(s, "alt");
+ popen(s);
+ print_expr(s, expr);
+ pclose(s);
+ space(s);
+ bopen(s);
+ for (ast.arm arm in arms) {
+ hbox(s);
+ wrd1(s, "case");
+ popen(s);
+ print_pat(s, arm.pat);
+ pclose(s);
+ space(s);
+ print_block(s, arm.block);
+ end(s);
+ line(s);
+ }
+ bclose(s);
+ }
+ case (ast.expr_block(?block,_)) {
+ print_block(s, block);
+ }
+ case (ast.expr_assign(?lhs,?rhs,_)) {
+ print_expr(s, lhs);
+ space(s);
+ wrd1(s, "=");
+ print_expr(s, rhs);
+ }
+ case (ast.expr_assign_op(?op,?lhs,?rhs,_)) {
+ print_expr(s, lhs);
+ space(s);
+ wrd(s, ast.binop_to_str(op));
+ wrd1(s, "=");
+ print_expr(s, rhs);
+ }
+ case (ast.expr_field(?expr,?id,_)) {
+ print_expr(s, expr);
+ wrd(s, ".");
+ wrd(s, id);
+ }
+ case (ast.expr_index(?expr,?index,_)) {
+ print_expr(s, expr);
+ wrd(s, ".");
+ popen(s);
+ print_expr(s, index);
+ pclose(s);
+ }
+ case (ast.expr_path(?path,_,_)) {
+ print_path(s, path);
+ }
+ case (ast.expr_fail) {
+ wrd(s, "fail");
+ }
+ case (ast.expr_ret(?result)) {
+ wrd(s, "ret");
+ alt (result) {
+ case (option.some[@ast.expr](?expr)) {
+ space(s);
+ print_expr(s, expr);
+ }
+ case (_) {}
+ }
+ }
+ case (ast.expr_put(?result)) {
+ wrd(s, "put");
+ alt (result) {
+ case (option.some[@ast.expr](?expr)) {
+ space(s);
+ print_expr(s, expr);
+ }
+ case (_) {}
+ }
+ }
+ case (ast.expr_be(?result)) {
+ wrd1(s, "be");
+ print_expr(s, result);
+ }
+ case (ast.expr_log(?expr)) {
+ wrd1(s, "log");
+ print_expr(s, expr);
+ }
+ case (ast.expr_check_expr(?expr)) {
+ wrd1(s, "check");
+ print_expr(s, expr);
+ }
+ case (_) {wrd(s, "X");}
+ // TODO expr_ext(path, vec[@expr], option.t[@expr], @expr, ann);
+ }
+ end(s);
+}
+
+impure fn print_decl(ps s, @ast.decl decl) {
+ hbox(s);
+ alt (decl.node) {
+ case (ast.decl_local(?loc)) {
+ alt (loc.ty) {
+ case (option.some[@ast.ty](?ty)) {
+ wrd1(s, "let");
+ print_type(s, ty);
+ space(s);
+ }
+ case (_) {
+ wrd1(s, "auto");
+ }
+ }
+ wrd(s, loc.ident);
+ alt (loc.init) {
+ case (option.some[@ast.expr](?init)) {
+ space(s);
+ wrd1(s, "=");
+ print_expr(s, init);
+ }
+ case (_) {}
+ }
+ }
+ case (ast.decl_item(?item)) {
+ print_item(s, item);
+ }
+ }
+ end(s);
+}
+
+impure fn print_path(ps s, ast.path path) {
+ auto first = true;
+ for (str id in path.node.idents) {
+ if (first) {first = false;}
+ else {wrd(s, ".");}
+ wrd(s, id);
+ }
+ if (_vec.len[@ast.ty](path.node.types) > 0u) {
+ wrd(s, "[");
+ auto f = print_type;
+ commasep[@ast.ty](s, path.node.types, f);
+ wrd(s, "]");
+ }
+}
+
+impure fn print_pat(ps s, @ast.pat pat) {
+ alt (pat.node) {
+ case (ast.pat_wild(_)) {wrd(s, "_");}
+ case (ast.pat_bind(?id,_,_)) {wrd(s, "?" + id);}
+ case (ast.pat_lit(?lit,_)) {print_literal(s, lit);}
+ case (ast.pat_tag(?path,?args,_,_)) {
+ print_path(s, path);
+ if (_vec.len[@ast.pat](args) > 0u) {
+ popen(s);
+ auto f = print_pat;
+ commasep[@ast.pat](s, args, f);
+ pclose(s);
+ }
+ }
+ }
+}
+
+impure fn print_fn(ps s, ast.fn_decl decl, str name,
+ vec[ast.ty_param] typarams) {
+ alt (decl.effect) {
+ case (ast.eff_impure) {wrd1(s, "impure");}
+ case (ast.eff_unsafe) {wrd1(s, "unsafe");}
+ case (_) {}
+ }
+ wrd1(s, "fn");
+ wrd(s, name);
+ print_type_params(s, typarams);
+ popen(s);
+ impure fn print_arg(ps s, ast.arg x) {
+ hbox(s);
+ print_type(s, x.ty);
+ space(s);
+ wrd(s, x.ident);
+ end(s);
+ }
+ auto f = print_arg;
+ commasep[ast.arg](s, decl.inputs, f);
+ pclose(s);
+ if (decl.output.node != ast.ty_nil) {
+ space(s);
+ hbox(s);
+ wrd1(s, "->");
+ print_type(s, decl.output);
+ end(s);
+ }
+}
+
+impure fn print_type_params(ps s, vec[ast.ty_param] params) {
+ if (_vec.len[ast.ty_param](params) > 0u) {
+ wrd(s, "[");
+ impure fn printParam(ps s, ast.ty_param param) {wrd(s, param.ident);}
+ auto f = printParam;
+ commasep[ast.ty_param](s, params, f);
+ wrd(s, "]");
+ }
+}
+
+impure fn print_view_item(ps s, @ast.view_item item) {
+ hbox(s);
+ alt (item.node) {
+ case (ast.view_item_use(?id,?mta,_)) {
+ wrd1(s, "use");
+ wrd(s, id);
+ if (_vec.len[@ast.meta_item](mta) > 0u) {
+ popen(s);
+ impure fn print_meta(ps s, @ast.meta_item item) {
+ hbox(s);
+ wrd1(s, item.node.name);
+ wrd1(s, "=");
+ print_string(s, item.node.value);
+ end(s);
+ }
+ auto f = print_meta;
+ commasep[@ast.meta_item](s, mta, f);
+ pclose(s);
+ }
+ }
+ case (ast.view_item_import(?id,?ids,_,_)) {
+ wrd1(s, "import");
+ if (!_str.eq(id, ids.(_vec.len[str](ids)-1u))) {
+ wrd1(s, id);
+ wrd1(s, "=");
+ }
+ auto first = true;
+ for (str elt in ids) {
+ if (first) {first = false;}
+ else {wrd(s, ".");}
+ wrd(s, elt);
+ }
+ }
+ case (ast.view_item_export(?id)) {
+ wrd1(s, "export");
+ wrd(s, id);
+ }
+ }
+ end(s);
+ wrd(s, ";");
+ line(s);
+}
+
+// FIXME: The fact that this builds up the table anew for every call is
+// not good. Eventually, table should be a const.
+fn operator_prec(ast.binop op) -> int {
+ for (front.parser.op_spec spec in front.parser.prec_table()) {
+ if (spec.op == op) {ret spec.prec;}
+ }
+ fail;
+}
+
+impure fn print_maybe_parens(ps s, @ast.expr expr, int outer_prec) {
+ auto add_them;
+ alt (expr.node) {
+ case (ast.expr_binary(?op,_,_,_)) {
+ add_them = operator_prec(op) < outer_prec;
+ }
+ case (ast.expr_cast(_,_,_)) {
+ add_them = as_prec < outer_prec;
+ }
+ case (_) {
+ add_them = false;
+ }
+ }
+ if (add_them) {popen(s);}
+ print_expr(s, expr);
+ if (add_them) {pclose(s);}
+}
+
+// TODO non-ascii
+fn escape_str(str st, char to_escape) -> str {
+ let str out = "";
+ auto len = _str.byte_len(st);
+ auto i = 0u;
+ while (i < len) {
+ alt (st.(i) as char) {
+ case ('\n') {out += "\\n";}
+ case ('\t') {out += "\\t";}
+ case ('\r') {out += "\\r";}
+ case ('\\') {out += "\\\\";}
+ case (?cur) {
+ if (cur == to_escape) {out += "\\";}
+ out += cur as u8;
+ }
+ }
+ i += 1u;
+ }
+ ret out;
+}
+
+impure fn print_string(ps s, str st) {
+ wrd(s, "\""); wrd(s, escape_str(st, '"')); wrd(s, "\"");
+}
diff --git a/src/comp/rustc.rc b/src/comp/rustc.rc
index b439632c..e4833409 100644
--- a/src/comp/rustc.rc
+++ b/src/comp/rustc.rc
@@ -5,9 +5,12 @@ use std;
mod front {
mod ast;
+ mod extfmt;
mod lexer;
mod parser;
+ mod pretty;
mod token;
+ mod eval;
}
mod middle {
@@ -28,6 +31,11 @@ mod driver {
mod session;
}
+mod pretty {
+ mod pp;
+ mod pprust;
+}
+
mod util {
mod common;
}
@@ -38,7 +46,6 @@ auth middle.trans.copy_args_to_allocas = impure;
auth middle.trans.trans_block = impure;
auth lib.llvm = unsafe;
-
mod lib {
alt (target_os) {
case ("win32") {
diff --git a/src/comp/util/common.rs b/src/comp/util/common.rs
index 56f30e07..071acea2 100644
--- a/src/comp/util/common.rs
+++ b/src/comp/util/common.rs
@@ -2,8 +2,10 @@ import std._uint;
import std._int;
import front.ast;
+
+type filename = str;
type pos = rec(uint line, uint col);
-type span = rec(str filename, pos lo, pos hi);
+type span = rec(filename filename, pos lo, pos hi);
type spanned[T] = rec(T node, span span);
tag ty_mach {
diff --git a/src/lib/_str.rs b/src/lib/_str.rs
index 6b7ac018..0e0e7650 100644
--- a/src/lib/_str.rs
+++ b/src/lib/_str.rs
@@ -96,25 +96,10 @@ fn buf(str s) -> sbuf {
}
fn bytes(str s) -> vec[u8] {
- /* FIXME (issue #58):
- * Should be...
- *
- * fn ith(str s, uint i) -> u8 {
- * ret s.(i);
- * }
- * ret _vec.init_fn[u8](bind ith(s, _), byte_len(s));
- *
- * but we do not correctly decrement refcount of s when
- * the binding dies, so we have to do this manually.
- */
- let uint n = _str.byte_len(s);
- let vec[u8] v = _vec.alloc[u8](n);
- let uint i = 0u;
- while (i < n) {
- v += vec(s.(i));
- i += 1u;
+ fn ith(str s, uint i) -> u8 {
+ ret s.(i);
}
- ret v;
+ ret _vec.init_fn[u8](bind ith(s, _), byte_len(s));
}
fn from_bytes(vec[u8] v) : is_utf8(v) -> str {
diff --git a/src/lib/_io.rs b/src/lib/io.rs
index f285f6c8..0c4eb39e 100644
--- a/src/lib/_io.rs
+++ b/src/lib/io.rs
@@ -85,37 +85,34 @@ fn new_buf_reader(str path) -> buf_reader {
ret fd_buf_reader(fd, new_buf());
}
-/**
- * FIXME (issue #150): This should be
- *
- * type fileflag = tag(append(), create(), truncate());
- *
- * but then the tag value ctors are not found from crate-importers of std, so
- * we manually simulate the enum below.
- */
-type fileflag = uint;
-fn append() -> uint { ret 0u; }
-fn create() -> uint { ret 1u; }
-fn truncate() -> uint { ret 2u; }
+tag fileflag {
+ append;
+ create;
+ truncate;
+}
+
+fn writefd(int fd, vec[u8] v) {
+ auto len = _vec.len[u8](v);
+ auto count = 0u;
+ auto vbuf;
+ while (count < len) {
+ vbuf = _vec.buf_off[u8](v, count);
+ auto nout = os.libc.write(fd, vbuf, len);
+ if (nout < 0) {
+ log "error dumping buffer";
+ log sys.rustrt.last_os_error();
+ fail;
+ }
+ count += nout as uint;
+ }
+}
fn new_buf_writer(str path, vec[fileflag] flags) -> buf_writer {
state obj fd_buf_writer(int fd) {
fn write(vec[u8] v) {
- auto len = _vec.len[u8](v);
- auto count = 0u;
- auto vbuf;
- while (count < len) {
- vbuf = _vec.buf_off[u8](v, count);
- auto nout = os.libc.write(fd, vbuf, len);
- if (nout < 0) {
- log "error dumping buffer";
- log sys.rustrt.last_os_error();
- fail;
- }
- count += nout as uint;
- }
+ writefd(fd, v);
}
drop {
@@ -129,13 +126,9 @@ fn new_buf_writer(str path, vec[fileflag] flags) -> buf_writer {
for (fileflag f in flags) {
alt (f) {
- // FIXME (issue #150): cf comment above defn of fileflag type
- //case (append()) { fflags |= os.libc_constants.O_APPEND(); }
- //case (create()) { fflags |= os.libc_constants.O_CREAT(); }
- //case (truncate()) { fflags |= os.libc_constants.O_TRUNC(); }
- case (0u) { fflags |= os.libc_constants.O_APPEND(); }
- case (1u) { fflags |= os.libc_constants.O_CREAT(); }
- case (2u) { fflags |= os.libc_constants.O_TRUNC(); }
+ case (append) { fflags |= os.libc_constants.O_APPEND(); }
+ case (create) { fflags |= os.libc_constants.O_CREAT(); }
+ case (truncate) { fflags |= os.libc_constants.O_TRUNC(); }
}
}
diff --git a/src/lib/sha1.rs b/src/lib/sha1.rs
new file mode 100644
index 00000000..2a6b74d4
--- /dev/null
+++ b/src/lib/sha1.rs
@@ -0,0 +1,284 @@
+/*
+ * A SHA-1 implementation derived from Paul E. Jones's reference
+ * implementation, which is written for clarity, not speed. At some
+ * point this will want to be rewritten.
+ */
+
+import std._vec;
+import std._str;
+
+export sha1;
+export mk_sha1;
+
+state type sha1 = state obj {
+ // Provide message input as bytes
+ fn input(&vec[u8]);
+
+ // Provide message input as string
+ fn input_str(&str);
+
+ // Read the digest as a vector of 20 bytes. After
+ // calling this no further input may provided
+ // until reset is called
+ fn result() -> vec[u8];
+
+ // Reset the sha1 state for reuse. This is called
+ // automatically during construction
+ fn reset();
+};
+
+// Some unexported constants
+const uint digest_buf_len = 5;
+const uint msg_block_len = 64;
+
+// Builds a sha1 object
+fn mk_sha1() -> sha1 {
+
+ state type sha1state = rec(vec[mutable u32] h,
+ mutable u32 len_low,
+ mutable u32 len_high,
+ vec[mutable u8] msg_block,
+ mutable uint msg_block_idx,
+ mutable bool computed);
+
+ impure fn add_input(&sha1state st, &vec[u8] msg) {
+ // FIXME: Should be typestate precondition
+ check (!st.computed);
+
+ for (u8 element in msg) {
+ st.msg_block.(st.msg_block_idx) = element;
+ st.msg_block_idx += 1u;
+
+ st.len_low += 8u32;
+ if (st.len_low == 0u32) {
+ st.len_high += 1u32;
+ if (st.len_high == 0u32) {
+ // FIXME: Need better failure mode
+ fail;
+ }
+ }
+
+ if (st.msg_block_idx == msg_block_len) {
+ process_msg_block(st);
+ }
+ }
+ }
+
+ impure fn process_msg_block(&sha1state st) {
+
+ // FIXME: Make precondition
+ check (_vec.len[mutable u32](st.h) == digest_buf_len);
+
+ // Constants
+ auto k = vec(0x5A827999u32,
+ 0x6ED9EBA1u32,
+ 0x8F1BBCDCu32,
+ 0xCA62C1D6u32);
+
+ let int t; // Loop counter
+ let vec[mutable u32] w = _vec.init_elt[mutable u32](0u32, 80u);
+
+ // Initialize the first 16 words of the vector w
+ t = 0;
+ while (t < 16) {
+ w.(t) = (st.msg_block.(t * 4) as u32) << 24u32;
+ w.(t) = w.(t) | ((st.msg_block.(t * 4 + 1) as u32) << 16u32);
+ w.(t) = w.(t) | ((st.msg_block.(t * 4 + 2) as u32) << 8u32);
+ w.(t) = w.(t) | (st.msg_block.(t * 4 + 3) as u32);
+ t += 1;
+ }
+
+ // Initialize the rest of vector w
+ while (t < 80) {
+ auto val = w.(t-3) ^ w.(t-8) ^ w.(t-14) ^ w.(t-16);
+ w.(t) = circular_shift(1u32, val);
+ t += 1;
+ }
+
+ auto a = st.h.(0);
+ auto b = st.h.(1);
+ auto c = st.h.(2);
+ auto d = st.h.(3);
+ auto e = st.h.(4);
+
+ let u32 temp;
+
+ t = 0;
+ while (t < 20) {
+ temp = circular_shift(5u32, a)
+ + ((b & c) | ((~b) & d)) + e + w.(t) + k.(0);
+ e = d;
+ d = c;
+ c = circular_shift(30u32, b);
+ b = a;
+ a = temp;
+ t += 1;
+ }
+
+ while (t < 40) {
+ temp = circular_shift(5u32, a)
+ + (b ^ c ^ d) + e + w.(t) + k.(1);
+ e = d;
+ d = c;
+ c = circular_shift(30u32, b);
+ b = a;
+ a = temp;
+ t += 1;
+ }
+
+ while (t < 60) {
+ temp = circular_shift(5u32, a)
+ + ((b & c) | (b & d) | (c & d)) + e + w.(t) + k.(2);
+ e = d;
+ d = c;
+ c = circular_shift(30u32, b);
+ b = a;
+ a = temp;
+ t += 1;
+ }
+
+ while (t < 80) {
+ temp = circular_shift(5u32, a)
+ + (b ^ c ^ d) + e + w.(t) + k.(3);
+ e = d;
+ d = c;
+ c = circular_shift(30u32, b);
+ b = a;
+ a = temp;
+ t += 1;
+ }
+
+ st.h.(0) = st.h.(0) + a;
+ st.h.(1) = st.h.(1) + b;
+ st.h.(2) = st.h.(2) + c;
+ st.h.(3) = st.h.(3) + d;
+ st.h.(4) = st.h.(4) + e;
+
+ st.msg_block_idx = 0u;
+ }
+
+ fn circular_shift(u32 bits, u32 word) -> u32 {
+ // FIXME: This is a workaround for a rustboot
+ // "unrecognized quads" codegen bug
+ auto bits_hack = bits;
+ ret (word << bits_hack) | (word >> (32u32 - bits));
+ }
+
+ impure fn mk_result(&sha1state st) -> vec[u8] {
+ if (!st.computed) {
+ pad_msg(st);
+ st.computed = true;
+ }
+
+ let vec[u8] res = vec();
+ for (u32 hpart in st.h) {
+ res += (hpart >> 24u32) & 0xFFu32 as u8;
+ res += (hpart >> 16u32) & 0xFFu32 as u8;
+ res += (hpart >> 8u32) & 0xFFu32 as u8;
+ res += hpart & 0xFFu32 as u8;
+ }
+ ret res;
+ }
+
+ /*
+ * According to the standard, the message must be padded to an even
+ * 512 bits. The first padding bit must be a '1'. The last 64 bits
+ * represent the length of the original message. All bits in between
+ * should be 0. This function will pad the message according to those
+ * rules by filling the msg_block vector accordingly. It will also
+ * call process_msg_block() appropriately. When it returns, it
+ * can be assumed that the message digest has been computed.
+ */
+ impure fn pad_msg(&sha1state st) {
+ // FIXME: Should be a precondition
+ check (_vec.len[mutable u8](st.msg_block) == msg_block_len);
+
+ /*
+ * Check to see if the current message block is too small to hold
+ * the initial padding bits and length. If so, we will pad the
+ * block, process it, and then continue padding into a second block.
+ */
+ if (st.msg_block_idx > 55u) {
+ st.msg_block.(st.msg_block_idx) = 0x80u8;
+ st.msg_block_idx += 1u;
+
+ while (st.msg_block_idx < msg_block_len) {
+ st.msg_block.(st.msg_block_idx) = 0u8;
+ st.msg_block_idx += 1u;
+ }
+
+ process_msg_block(st);
+ } else {
+ st.msg_block.(st.msg_block_idx) = 0x80u8;
+ st.msg_block_idx += 1u;
+ }
+
+ while (st.msg_block_idx < 56u) {
+ st.msg_block.(st.msg_block_idx) = 0u8;
+ st.msg_block_idx += 1u;
+ }
+
+ // Store the message length as the last 8 octets
+ st.msg_block.(56) = (st.len_high >> 24u32) & 0xFFu32 as u8;
+ st.msg_block.(57) = (st.len_high >> 16u32) & 0xFFu32 as u8;
+ st.msg_block.(58) = (st.len_high >> 8u32) & 0xFFu32 as u8;
+ st.msg_block.(59) = st.len_high & 0xFFu32 as u8;
+ st.msg_block.(60) = (st.len_low >> 24u32) & 0xFFu32 as u8;
+ st.msg_block.(61) = (st.len_low >> 16u32) & 0xFFu32 as u8;
+ st.msg_block.(62) = (st.len_low >> 8u32) & 0xFFu32 as u8;
+ st.msg_block.(63) = st.len_low & 0xFFu32 as u8;
+
+ process_msg_block(st);
+ }
+
+ state obj sha1(sha1state st) {
+
+ fn reset() {
+ // FIXME: Should be typestate precondition
+ check (_vec.len[mutable u32](st.h) == digest_buf_len);
+
+ st.len_low = 0u32;
+ st.len_high = 0u32;
+ st.msg_block_idx = 0u;
+
+ st.h.(0) = 0x67452301u32;
+ st.h.(1) = 0xEFCDAB89u32;
+ st.h.(2) = 0x98BADCFEu32;
+ st.h.(3) = 0x10325476u32;
+ st.h.(4) = 0xC3D2E1F0u32;
+
+ st.computed = false;
+ }
+
+ fn input(&vec[u8] msg) {
+ add_input(st, msg);
+ }
+
+ fn input_str(&str msg) {
+ add_input(st, _str.bytes(msg));
+ }
+
+ fn result() -> vec[u8] {
+ ret mk_result(st);
+ }
+ }
+
+ auto st = rec(h = _vec.init_elt[mutable u32](0u32, digest_buf_len),
+ mutable len_low = 0u32,
+ mutable len_high = 0u32,
+ msg_block = _vec.init_elt[mutable u8](0u8, msg_block_len),
+ mutable msg_block_idx = 0u,
+ mutable computed = false);
+ auto sh = sha1(st);
+ sh.reset();
+ ret sh;
+}
+
+// Local Variables:
+// mode: rust;
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
diff --git a/src/lib/std.rc b/src/lib/std.rc
index 102aa4d1..4ad422a3 100644
--- a/src/lib/std.rc
+++ b/src/lib/std.rc
@@ -14,7 +14,7 @@ mod _str;
// General IO and system-services modules.
-mod _io;
+mod io;
mod sys;
mod _task;
@@ -25,7 +25,7 @@ mod util;
// Authorize various rule-bendings.
-auth _io = unsafe;
+auth io = unsafe;
auth _str = unsafe;
auth _vec = unsafe;
auth _task = unsafe;
@@ -57,6 +57,7 @@ mod dbg;
mod bitv;
mod sort;
mod path;
+mod sha1;
// Local Variables:
// mode: rust;
diff --git a/src/rt/memory_region.cpp b/src/rt/memory_region.cpp
index fb19620f..48220290 100644
--- a/src/rt/memory_region.cpp
+++ b/src/rt/memory_region.cpp
@@ -1,7 +1,10 @@
#include "rust_internal.h"
#include "memory_region.h"
-#define TRACK_ALLOCATIONS
+// NB: please do not commit code with this uncommented. It's
+// hugely expensive and should only be used as a last resort.
+//
+// #define TRACK_ALLOCATIONS
memory_region::memory_region(rust_srv *srv, bool synchronized) :
_srv(srv), _parent(NULL), _live_allocations(0),
diff --git a/src/rt/rust.cpp b/src/rt/rust.cpp
index 0ea167a4..46fcb22e 100644
--- a/src/rt/rust.cpp
+++ b/src/rt/rust.cpp
@@ -78,7 +78,7 @@ command_line_args : public dom_owned<command_line_args>
extern "C" CDECL int
rust_start(uintptr_t main_fn, rust_crate const *crate, int argc,
- char **argv) {
+ char **argv) {
rust_srv *srv = new rust_srv();
rust_kernel *kernel = new rust_kernel(srv);
@@ -87,7 +87,8 @@ rust_start(uintptr_t main_fn, rust_crate const *crate, int argc,
rust_dom *dom = handle->referent();
command_line_args *args = new (dom) command_line_args(dom, argc, argv);
- dom->log(rust_log::DOM, "startup: %d args", args->argc);
+ dom->log(rust_log::DOM, "startup: %d args in 0x%" PRIxPTR,
+ args->argc, (uintptr_t)args->args);
for (int i = 0; i < args->argc; i++) {
dom->log(rust_log::DOM,
"startup: arg[%d] = '%s'", i, args->argv[i]);
@@ -99,7 +100,8 @@ rust_start(uintptr_t main_fn, rust_crate const *crate, int argc,
uintptr_t main_args[4] = {0, 0, 0, (uintptr_t)args->args};
dom->root_task->start(crate->get_exit_task_glue(),
- main_fn, (uintptr_t)&main_args, sizeof(main_args));
+ crate->abi_tag, main_fn,
+ (uintptr_t)&main_args, sizeof(main_args));
int ret = dom->start_main_loop();
delete args;
kernel->destroy_domain(dom);
diff --git a/src/rt/rust_crate_cache.cpp b/src/rt/rust_crate_cache.cpp
index adf1bbfc..62fd7c01 100644
--- a/src/rt/rust_crate_cache.cpp
+++ b/src/rt/rust_crate_cache.cpp
@@ -49,7 +49,8 @@ rust_crate_cache::c_sym::c_sym(rust_dom *dom, lib *library, char const *name)
dom->log(rust_log::CACHE, "resolved symbol '%s' to 0x%" PRIxPTR,
name, val);
} else {
- dom->log(rust_log::CACHE, "unresolved symbol '%s', null lib handle",
+ dom->log(rust_log::CACHE | rust_log::ERR,
+ "unresolved symbol '%s', null lib handle",
name);
}
}
@@ -79,7 +80,7 @@ rust_crate_cache::rust_sym::rust_sym(rust_dom *dom,
typedef rust_crate_reader::die die;
rust_crate const *crate = (rust_crate*)crate_sym->get_val();
if (!crate) {
- dom->log(rust_log::CACHE,
+ dom->log(rust_log::CACHE | rust_log::ERR,
"failed to resolve symbol, null crate symbol");
return;
}
diff --git a/src/rt/rust_internal.h b/src/rt/rust_internal.h
index 61716703..42b61801 100644
--- a/src/rt/rust_internal.h
+++ b/src/rt/rust_internal.h
@@ -88,6 +88,10 @@ static size_t const TIME_SLICE_IN_MS = 10;
static intptr_t const CONST_REFCOUNT = 0x7badface;
+// ABI tags for rust_start, rust_task::start and friends.
+static uintptr_t const ABI_X86_RUSTBOOT_CDECL = 1;
+static uintptr_t const ABI_X86_RUSTC_FASTCALL = 2;
+
// This accounts for logging buffers.
static size_t const BUF_BYTES = 2048;
@@ -241,6 +245,8 @@ public:
size_t n_c_syms;
size_t n_libs;
+ uintptr_t abi_tag;
+
// Crates are immutable, constructed by the compiler.
uintptr_t get_image_base() const;
diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp
index 68882b21..1afbfdd6 100644
--- a/src/rt/rust_task.cpp
+++ b/src/rt/rust_task.cpp
@@ -123,6 +123,7 @@ rust_task::~rust_task()
void
rust_task::start(uintptr_t exit_task_glue,
+ uintptr_t spawnee_abi,
uintptr_t spawnee_fn,
uintptr_t args,
size_t callsz)
@@ -147,26 +148,29 @@ rust_task::start(uintptr_t exit_task_glue,
// The exit_task_glue frame we synthesize above the frame we activate:
*spp-- = (uintptr_t) 0; // closure-or-obj
*spp-- = (uintptr_t) this; // task
- *spp-- = (uintptr_t) 0; // output
- *spp-- = (uintptr_t) 0; // retpc
+ *spp-- = (uintptr_t) 0x0; // output
+ *spp-- = (uintptr_t) 0x0; // retpc
uintptr_t exit_task_frame_base;
- for (size_t j = 0; j < n_callee_saves; ++j) {
+ if (spawnee_abi == ABI_X86_RUSTBOOT_CDECL) {
+ for (size_t j = 0; j < n_callee_saves; ++j) {
- // We want 'frame_base' to point to the old fp in this (exit-task)
- // frame, because we're going to inject this frame-pointer into the
- // callee-save frame pointer value in the *next* (spawnee) frame. A
- // cheap trick, but this means the spawnee frame will restore the
- // proper frame pointer of the glue frame as it runs its epilogue.
- if (j == callee_save_fp)
- exit_task_frame_base = (uintptr_t)spp;
+ // We want 'frame_base' to point to the old fp in this (exit-task)
+ // frame, because we're going to inject this frame-pointer into
+ // the callee-save frame pointer value in the *next* (spawnee)
+ // frame. A cheap trick, but this means the spawnee frame will
+ // restore the proper frame pointer of the glue frame as it runs
+ // its epilogue.
+ if (j == callee_save_fp)
+ exit_task_frame_base = (uintptr_t)spp;
- *spp-- = 0;
- }
+ *spp-- = 0;
+ }
- *spp-- = (uintptr_t) dom->root_crate; // crate ptr
- *spp-- = (uintptr_t) 0; // frame_glue_fns
+ *spp-- = (uintptr_t) dom->root_crate; // crate ptr
+ *spp-- = (uintptr_t) 0; // frame_glue_fns
+ }
// Copy args from spawner to spawnee.
if (args) {
@@ -174,12 +178,16 @@ rust_task::start(uintptr_t exit_task_glue,
src += 1; // spawn-call output slot
src += 1; // spawn-call task slot
src += 1; // spawn-call closure-or-obj slot
- // Memcpy all but the task and output pointers
- callsz -= (2 * sizeof(uintptr_t));
+
+ // Undo previous sp-- so we're pointing at the last word pushed.
+ ++spp;
+
+ // Memcpy all but the task, output and env pointers
+ callsz -= (3 * sizeof(uintptr_t));
spp = (uintptr_t*) (((uintptr_t)spp) - callsz);
memcpy(spp, src, callsz);
- // Move sp down to point to task cell.
+ // Move sp down to point to last implicit-arg cell (env).
spp--;
} else {
// We're at root, starting up.
@@ -188,10 +196,18 @@ rust_task::start(uintptr_t exit_task_glue,
// The *implicit* incoming args to the spawnee frame we're
// activating:
+ *spp-- = (uintptr_t) 0x0; // closure-or-obj
+
+ if (spawnee_abi == ABI_X86_RUSTBOOT_CDECL) {
+ // in CDECL mode we write the task + outptr to the spawnee stack.
+ *spp-- = (uintptr_t) this; // task
+ *spp-- = (uintptr_t) 0; // output addr
+ } else {
+ // in FASTCALL mode we don't, the outptr will be in ecx and the task
+ // in edx, and the activate_glue will make sure to set that up.
+ I(dom, spawnee_abi == ABI_X86_RUSTC_FASTCALL);
+ }
- *spp-- = (uintptr_t) 0; // closure-or-obj
- *spp-- = (uintptr_t) this; // task
- *spp-- = (uintptr_t) 0; // output addr
*spp-- = (uintptr_t) exit_task_glue; // retpc
// The context the activate_glue needs to switch stack.
diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h
index 9fbc67ac..5318ab71 100644
--- a/src/rt/rust_task.h
+++ b/src/rt/rust_task.h
@@ -56,6 +56,7 @@ rust_task : public maybe_proxy<rust_task>,
~rust_task();
void start(uintptr_t exit_task_glue,
+ uintptr_t spawnee_abi,
uintptr_t spawnee_fn,
uintptr_t args,
size_t callsz);
diff --git a/src/rt/rust_upcall.cpp b/src/rt/rust_upcall.cpp
index 7e2fac10..1dba1102 100644
--- a/src/rt/rust_upcall.cpp
+++ b/src/rt/rust_upcall.cpp
@@ -253,6 +253,10 @@ upcall_fail(rust_task *task,
task->log(rust_log::UPCALL | rust_log::ERR,
"upcall fail '%s', %s:%" PRIdPTR, expr, file, line);
task->fail(4);
+ if (getenv("RUST_TRAP_FAILURE")) {
+ // FIXME: x86-ism.
+ __asm__("int3");
+ }
}
/**
@@ -555,6 +559,7 @@ extern "C" CDECL rust_task *
upcall_start_task(rust_task *spawner,
rust_task *task,
uintptr_t exit_task_glue,
+ uintptr_t spawnee_abi,
uintptr_t spawnee_fn,
size_t callsz) {
LOG_UPCALL_ENTRY(spawner);
@@ -566,7 +571,8 @@ upcall_start_task(rust_task *spawner,
", spawnee 0x%" PRIxPTR
", callsz %" PRIdPTR ")", task->name, task, exit_task_glue,
spawnee_fn, callsz);
- task->start(exit_task_glue, spawnee_fn, spawner->rust_sp, callsz);
+ task->start(exit_task_glue, spawnee_abi, spawnee_fn,
+ spawner->rust_sp, callsz);
return task;
}
@@ -619,6 +625,7 @@ extern "C" CDECL maybe_proxy<rust_task> *
upcall_start_thread(rust_task *task,
rust_proxy<rust_task> *child_task_proxy,
uintptr_t exit_task_glue,
+ uintptr_t spawnee_abi,
uintptr_t spawnee_fn,
size_t callsz) {
LOG_UPCALL_ENTRY(task);
@@ -626,9 +633,11 @@ upcall_start_thread(rust_task *task,
rust_handle<rust_task> *child_task_handle = child_task_proxy->handle();
task->log(rust_log::UPCALL | rust_log::MEM | rust_log::TASK,
"exit_task_glue: " PTR ", spawnee_fn " PTR
- ", callsz %" PRIdPTR ")", exit_task_glue, spawnee_fn, callsz);
+ ", callsz %" PRIdPTR ")",
+ exit_task_glue, spawnee_fn, callsz);
rust_task *child_task = child_task_handle->referent();
- child_task->start(exit_task_glue, spawnee_fn, task->rust_sp, callsz);
+ child_task->start(exit_task_glue, spawnee_abi, spawnee_fn,
+ task->rust_sp, callsz);
#if defined(__WIN32__)
HANDLE thread;
thread = CreateThread(NULL, 0, rust_thread_start, child_task->dom, 0,
diff --git a/src/rt/test/rust_test_runtime.cpp b/src/rt/test/rust_test_runtime.cpp
index 1cde532e..e0e24156 100644
--- a/src/rt/test/rust_test_runtime.cpp
+++ b/src/rt/test/rust_test_runtime.cpp
@@ -54,6 +54,7 @@ rust_task_test::worker::run() {
kernel->create_domain(crate, "test");
rust_dom *domain = handle->referent();
domain->root_task->start(crate->get_exit_task_glue(),
+ ABI_X86_RUSTBOOT_CDECL,
(uintptr_t)&task_entry, (uintptr_t)NULL, 0);
domain->start_main_loop();
kernel->destroy_domain(domain);
diff --git a/src/test/compile-fail/reserved-dec.rs b/src/test/compile-fail/reserved-dec.rs
new file mode 100644
index 00000000..d8c204d9
--- /dev/null
+++ b/src/test/compile-fail/reserved-dec.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int dec = 0;
+}
diff --git a/src/test/compile-fail/reserved-f128.rs b/src/test/compile-fail/reserved-f128.rs
new file mode 100644
index 00000000..63d00f70
--- /dev/null
+++ b/src/test/compile-fail/reserved-f128.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int f128 = 0;
+}
diff --git a/src/test/compile-fail/reserved-f16.rs b/src/test/compile-fail/reserved-f16.rs
new file mode 100644
index 00000000..bfb14cd8
--- /dev/null
+++ b/src/test/compile-fail/reserved-f16.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int f16 = 0;
+}
diff --git a/src/test/compile-fail/reserved-f80.rs b/src/test/compile-fail/reserved-f80.rs
new file mode 100644
index 00000000..33e8bd5e
--- /dev/null
+++ b/src/test/compile-fail/reserved-f80.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int f80 = 0;
+}
diff --git a/src/test/compile-fail/reserved-m128.rs b/src/test/compile-fail/reserved-m128.rs
new file mode 100644
index 00000000..c4d36bf7
--- /dev/null
+++ b/src/test/compile-fail/reserved-m128.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int m128 = 0;
+}
diff --git a/src/test/compile-fail/reserved-m32.rs b/src/test/compile-fail/reserved-m32.rs
new file mode 100644
index 00000000..bdb3a427
--- /dev/null
+++ b/src/test/compile-fail/reserved-m32.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int m32 = 0;
+}
diff --git a/src/test/compile-fail/reserved-m64.rs b/src/test/compile-fail/reserved-m64.rs
new file mode 100644
index 00000000..034884a6
--- /dev/null
+++ b/src/test/compile-fail/reserved-m64.rs
@@ -0,0 +1,5 @@
+// error-pattern:reserved keyword
+
+fn main() {
+ let int m64 = 0;
+}
diff --git a/src/test/compile-fail/tail-non-call.rs b/src/test/compile-fail/tail-non-call.rs
new file mode 100644
index 00000000..00a451f6
--- /dev/null
+++ b/src/test/compile-fail/tail-non-call.rs
@@ -0,0 +1,10 @@
+// error-pattern: Non-call expression in tail call
+
+fn f() -> int {
+ auto x = 1;
+ be x;
+}
+
+fn main() {
+ auto y = f();
+}
diff --git a/src/test/compile-fail/tail-typeck.rs b/src/test/compile-fail/tail-typeck.rs
new file mode 100644
index 00000000..64beedb9
--- /dev/null
+++ b/src/test/compile-fail/tail-typeck.rs
@@ -0,0 +1,13 @@
+// error-pattern: mismatched types
+
+fn f() -> int {
+ be g();
+}
+
+fn g() -> uint {
+ ret 0u;
+}
+
+fn main() {
+ auto y = f();
+}
diff --git a/src/test/run-pass/alt-pattern-lit.rs b/src/test/run-pass/alt-pattern-lit.rs
new file mode 100644
index 00000000..91190260
--- /dev/null
+++ b/src/test/run-pass/alt-pattern-lit.rs
@@ -0,0 +1,17 @@
+fn altlit(int f) -> int {
+ alt (f) {
+ case (10) {
+ log "case 10";
+ ret 20;
+ }
+ case (11) {
+ log "case 11";
+ ret 22;
+ }
+ }
+}
+
+fn main() {
+ check (altlit(10) == 20);
+ check (altlit(11) == 22);
+}
diff --git a/src/test/run-pass/arith-unsigned.rs b/src/test/run-pass/arith-unsigned.rs
new file mode 100644
index 00000000..3fac3714
--- /dev/null
+++ b/src/test/run-pass/arith-unsigned.rs
@@ -0,0 +1,24 @@
+// Unsigned integer operations
+
+fn main() {
+ check (0u8 < 255u8);
+ check (0u8 <= 255u8);
+ check (255u8 > 0u8);
+ check (255u8 >= 0u8);
+ check (250u8 / 10u8 == 25u8);
+ check (255u8 % 10u8 == 5u8);
+ check (0u16 < 60000u16);
+ check (0u16 <= 60000u16);
+ check (60000u16 > 0u16);
+ check (60000u16 >= 0u16);
+ check (60000u16 / 10u16 == 6000u16);
+ check (60005u16 % 10u16 == 5u16);
+ check (0u32 < 4000000000u32);
+ check (0u32 <= 4000000000u32);
+ check (4000000000u32 > 0u32);
+ check (4000000000u32 >= 0u32);
+ check (4000000000u32 / 10u32 == 400000000u32);
+ check (4000000005u32 % 10u32 == 5u32);
+
+ // 64-bit numbers have some flakiness yet. Not tested
+}
diff --git a/src/test/run-pass/generic-box.rs b/src/test/run-pass/generic-box.rs
new file mode 100644
index 00000000..856f3aff
--- /dev/null
+++ b/src/test/run-pass/generic-box.rs
@@ -0,0 +1,8 @@
+fn box[T](&tup(T,T,T) x) -> @tup(T,T,T) {
+ ret @x;
+}
+
+fn main() {
+ let @tup(int,int,int) x = box[int](tup(1,2,3));
+ check (x._1 == 2);
+} \ No newline at end of file
diff --git a/src/test/run-pass/generic-fn-box.rs b/src/test/run-pass/generic-fn-box.rs
new file mode 100644
index 00000000..e821a784
--- /dev/null
+++ b/src/test/run-pass/generic-fn-box.rs
@@ -0,0 +1,9 @@
+fn f[T](@T x) -> @T {
+ ret x;
+}
+
+fn main() {
+ auto x = f(@3);
+ log *x;
+}
+
diff --git a/src/test/run-pass/generic-recursive-tag.rs b/src/test/run-pass/generic-recursive-tag.rs
index ad06345b..b9596b0d 100644
--- a/src/test/run-pass/generic-recursive-tag.rs
+++ b/src/test/run-pass/generic-recursive-tag.rs
@@ -1,8 +1,9 @@
tag list[T] {
cons(@T, @list[T]);
- nil();
+ nil;
}
fn main() {
- let list[int] a = cons[int](10, cons[int](12, cons[int](13, nil[int]())));
-} \ No newline at end of file
+ let list[int] a = cons[int](@10, @cons[int](@12, @cons[int](@13,
+ @nil[int])));
+}
diff --git a/src/test/run-pass/generic-tag.rs b/src/test/run-pass/generic-tag.rs
index 1fd88255..68d7c18f 100644
--- a/src/test/run-pass/generic-tag.rs
+++ b/src/test/run-pass/generic-tag.rs
@@ -6,4 +6,4 @@ tag option[T] {
fn main() {
let option[int] a = some[int](@10);
a = none[int];
-} \ No newline at end of file
+}
diff --git a/src/test/run-pass/generic-type-synonym.rs b/src/test/run-pass/generic-type-synonym.rs
index 4ddc8946..c3d2a9d5 100644
--- a/src/test/run-pass/generic-type-synonym.rs
+++ b/src/test/run-pass/generic-type-synonym.rs
@@ -1,4 +1,4 @@
type foo[T] = tup(T);
type bar[T] = foo[T];
-fn takebar[T](bar[T] b) {}
+fn takebar[T](&bar[T] b) {}
fn main() {} \ No newline at end of file
diff --git a/src/test/run-pass/lib-io.rs b/src/test/run-pass/lib-io.rs
index 66394435..0c0bcdcd 100644
--- a/src/test/run-pass/lib-io.rs
+++ b/src/test/run-pass/lib-io.rs
@@ -1,7 +1,7 @@
// -*- rust -*-
use std;
-import std._io;
+import std.io;
import std._str;
fn test_simple(str tmpfilebase) {
@@ -11,11 +11,11 @@ fn test_simple(str tmpfilebase) {
log frood;
{
- let _io.buf_writer out = _io.new_buf_writer(tmpfile, vec(_io.create()));
+ let io.buf_writer out = io.new_buf_writer(tmpfile, vec(io.create));
out.write(_str.bytes(frood));
}
- let _io.buf_reader inp = _io.new_buf_reader(tmpfile);
+ let io.buf_reader inp = io.new_buf_reader(tmpfile);
let str frood2 = _str.from_bytes(inp.read());
log frood2;
check (_str.eq(frood, frood2));
diff --git a/src/test/run-pass/lib-sha1.rs b/src/test/run-pass/lib-sha1.rs
new file mode 100644
index 00000000..57e3cdc8
--- /dev/null
+++ b/src/test/run-pass/lib-sha1.rs
@@ -0,0 +1,115 @@
+// -*- rust -*-
+
+use std;
+
+import std.sha1;
+import std._vec;
+import std._str;
+
+fn main() {
+
+ type test = rec(str input, vec[u8] output);
+
+ fn a_million_letter_a() -> str {
+ auto i = 0;
+ auto res = "";
+ while (i < 100000) {
+ res += "aaaaaaaaaa";
+ i += 1;
+ }
+ ret res;
+ }
+
+ // Test messages from FIPS 180-1
+ let vec[test] fips_180_1_tests =
+ vec(
+ rec(input = "abc",
+ output = vec(0xA9u8, 0x99u8, 0x3Eu8, 0x36u8, 0x47u8,
+ 0x06u8, 0x81u8, 0x6Au8, 0xBAu8, 0x3Eu8,
+ 0x25u8, 0x71u8, 0x78u8, 0x50u8, 0xC2u8,
+ 0x6Cu8, 0x9Cu8, 0xD0u8, 0xD8u8, 0x9Du8)
+ ),
+ rec(input = "abcdbcdecdefdefgefghfghighij"
+ + "hijkijkljklmklmnlmnomnopnopq",
+ output = vec(0x84u8, 0x98u8, 0x3Eu8, 0x44u8, 0x1Cu8,
+ 0x3Bu8, 0xD2u8, 0x6Eu8, 0xBAu8, 0xAEu8,
+ 0x4Au8, 0xA1u8, 0xF9u8, 0x51u8, 0x29u8,
+ 0xE5u8, 0xE5u8, 0x46u8, 0x70u8, 0xF1u8)
+ )
+ // FIXME: This test is disabled because it takes some
+ // minutes to run under rustboot+valgrind. It may be
+ // possible to reenable once things are more optimized.
+ /*,
+ rec(input = a_million_letter_a(),
+ output = vec(0x34u8, 0xAAu8, 0x97u8, 0x3Cu8, 0xD4u8,
+ 0xC4u8, 0xDAu8, 0xA4u8, 0xF6u8, 0x1Eu8,
+ 0xEBu8, 0x2Bu8, 0xDBu8, 0xADu8, 0x27u8,
+ 0x31u8, 0x65u8, 0x34u8, 0x01u8, 0x6Fu8)
+ )
+ */
+ );
+
+ // Examples from wikipedia
+ let vec[test] wikipedia_tests =
+ vec(
+ rec(input = "The quick brown fox jumps over the lazy dog",
+ output = vec(0x2fu8, 0xd4u8, 0xe1u8, 0xc6u8, 0x7au8,
+ 0x2du8, 0x28u8, 0xfcu8, 0xedu8, 0x84u8,
+ 0x9eu8, 0xe1u8, 0xbbu8, 0x76u8, 0xe7u8,
+ 0x39u8, 0x1bu8, 0x93u8, 0xebu8, 0x12u8)
+ ),
+ rec(input = "The quick brown fox jumps over the lazy cog",
+ output = vec(0xdeu8, 0x9fu8, 0x2cu8, 0x7fu8, 0xd2u8,
+ 0x5eu8, 0x1bu8, 0x3au8, 0xfau8, 0xd3u8,
+ 0xe8u8, 0x5au8, 0x0bu8, 0xd1u8, 0x7du8,
+ 0x9bu8, 0x10u8, 0x0du8, 0xb4u8, 0xb3u8)
+ )
+ );
+
+ auto tests = fips_180_1_tests + wikipedia_tests;
+
+ fn check_vec_eq(vec[u8] v0, vec[u8] v1) {
+ check (_vec.len[u8](v0) == _vec.len[u8](v1));
+ auto len = _vec.len[u8](v0);
+ auto i = 0u;
+ while (i < len) {
+ auto a = v0.(i);
+ auto b = v1.(i);
+ check (a == b);
+ i += 1u;
+ }
+ }
+
+ // Test that it works when accepting the message all at once
+ auto sh = sha1.mk_sha1();
+ for (test t in tests) {
+ sh.input_str(t.input);
+ auto out = sh.result();
+ check_vec_eq(t.output, out);
+ sh.reset();
+ }
+
+ // Test that it works when accepting the message in pieces
+ for (test t in tests) {
+ auto len = _str.byte_len(t.input);
+ auto left = len;
+ while (left > 0u) {
+ auto take = (left + 1u) / 2u;
+ sh.input_str(_str.substr(t.input, len - left, take));
+ left = left - take;
+ }
+ auto out = sh.result();
+ check_vec_eq(t.output, out);
+ sh.reset();
+ }
+}
+
+
+// Local Variables:
+// mode: rust;
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// compile-command: "make -k -C .. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
+// End:
diff --git a/src/test/run-pass/native2.rs b/src/test/run-pass/native2.rs
new file mode 100644
index 00000000..4815345a
--- /dev/null
+++ b/src/test/run-pass/native2.rs
@@ -0,0 +1,20 @@
+native "rust" mod rustrt {
+ type vbuf;
+ fn vec_buf[T](vec[T] v, uint offset) -> vbuf;
+}
+
+native "rust" mod bar = "foo" {
+}
+
+native mod zed {
+}
+
+native mod libc = "libc.dylib" {
+ fn write(int fd, rustrt.vbuf buf, uint count) -> int;
+}
+
+native "cdecl" mod baz {
+}
+
+fn main(vec[str] args) {
+}
diff --git a/src/test/run-pass/path.rs b/src/test/run-pass/path.rs
new file mode 100644
index 00000000..e94d32eb
--- /dev/null
+++ b/src/test/run-pass/path.rs
@@ -0,0 +1,8 @@
+mod foo {
+ fn bar(uint offset) {
+ }
+}
+
+fn main(vec[str] args) {
+ foo.bar(0u);
+}
diff --git a/src/test/run-pass/syntax-extension-fmt.rs b/src/test/run-pass/syntax-extension-fmt.rs
index 65e7647e..ebb09f96 100644
--- a/src/test/run-pass/syntax-extension-fmt.rs
+++ b/src/test/run-pass/syntax-extension-fmt.rs
@@ -1,5 +1,16 @@
use std;
+import std._str;
+
+fn test(str actual, str expected) {
+ log actual;
+ log expected;
+ check (_str.eq(actual, expected));
+}
+
fn main() {
- auto s = #fmt("hello %d friends and %s things", 10, "formatted");
- log s;
+ test(#fmt("hello %d friends and %s things", 10, "formatted"),
+ "hello 10 friends and formatted things");
+ test(#fmt("d: %d", 1), "d: 1");
+ test(#fmt("i: %i", 2), "i: 2");
+ test(#fmt("s: %s", "test"), "s: test");
}
diff --git a/src/test/run-pass/typestate-cfg-nesting.rs b/src/test/run-pass/typestate-cfg-nesting.rs
new file mode 100644
index 00000000..8f050646
--- /dev/null
+++ b/src/test/run-pass/typestate-cfg-nesting.rs
@@ -0,0 +1,26 @@
+
+fn f() {
+
+ auto x = 10;
+ auto y = 11;
+ if (true) {
+ alt (x) {
+ case (_) {
+ y = x;
+ }
+ }
+ } else {
+ }
+}
+
+fn main() {
+
+ auto x = 10;
+ auto y = 11;
+ if (true) {
+ while (false) {
+ y = x;
+ }
+ } else {
+ }
+}