diff options
| author | Mustafa Quraish <[email protected]> | 2022-01-31 02:55:50 -0500 |
|---|---|---|
| committer | Mustafa Quraish <[email protected]> | 2022-01-31 02:55:50 -0500 |
| commit | 85bcdbbd053ebc75f1e752fd723fab92a99910ce (patch) | |
| tree | d4fc198339925081ab7f9e6bfb3a59ca092dd2c8 /src/parser.c | |
| parent | Minor fixes to code generation (diff) | |
| download | cup-85bcdbbd053ebc75f1e752fd723fab92a99910ce.tar.xz cup-85bcdbbd053ebc75f1e752fd723fab92a99910ce.zip | |
Add ability to import other files
This still requires a lot of work to avoid duplicate imports, and
handle cyclic imports, but it is good enough for small examples
which just want to include some common definitions from std/
Diffstat (limited to 'src/parser.c')
| -rw-r--r-- | src/parser.c | 42 |
1 file changed, 40 insertions, 2 deletions
diff --git a/src/parser.c b/src/parser.c index 128f18a..4dc891d 100644 --- a/src/parser.c +++ b/src/parser.c @@ -15,6 +15,10 @@ static Node *block_stack[BLOCK_STACK_SIZE]; static i64 block_stack_count = 0; static i64 cur_stack_offset = 0; +#define LEXER_STACK_SIZE 64 +static Lexer *lexer_stack[LEXER_STACK_SIZE]; +static i64 lexer_stack_count = 0; + Token do_assert_token(Token token, TokenType type, char *filename, int line) { @@ -576,20 +580,54 @@ Node *parse_func(Lexer *lexer) return func; } +void push_new_lexer(Lexer *lexer) +{ + assert(lexer_stack_count < LEXER_STACK_SIZE); + lexer_stack[lexer_stack_count++] = lexer; +} + +Lexer *remove_lexer() +{ + assert(lexer_stack_count > 0); + free(lexer_stack[--lexer_stack_count]); + if (lexer_stack_count == 0) + return NULL; + return lexer_stack[lexer_stack_count - 1]; +} + Node *parse_program(Lexer *lexer) { initialize_builtins(); Node *program = Node_new(AST_PROGRAM); - Token token; - while ((token = Lexer_peek(lexer)).type != TOKEN_EOF) { + + push_new_lexer(lexer); + + Token token = Lexer_peek(lexer); + while (token.type != TOKEN_EOF) { if (token.type == TOKEN_FN) { Node *func = parse_func(lexer); Node_add_child(program, func); + } else if (token.type == TOKEN_IMPORT) { + // TODO: Handle circular imports + // TODO: Handle complex import graphs (#pragma once) + // TODO: Validation of imports + // TODO: Have default directories to search for imports + Lexer_next(lexer); + token = assert_token(Lexer_next(lexer), TOKEN_STRINGLIT); + char *filename = token.value.as_string; + lexer = Lexer_new_open_file(filename); + push_new_lexer(lexer); } else { die_location(token.loc, "Unexpected token in parse_program: `%s`\n", token_type_to_str(token.type)); exit(1); break; } + + token = Lexer_peek(lexer); + while (token.type == TOKEN_EOF && lexer_stack_count > 1) { + lexer = remove_lexer(); + token = Lexer_peek(lexer); + } } return program; }
\ No newline at end of file |