From dace12310a059726490a181bd63d2bdae7f55d85 Mon Sep 17 00:00:00 2001
From: Josh Holtrop
Date: Sun, 20 Aug 2023 16:53:54 -0400
Subject: [PATCH] wip

---
 assets/parser.c.erb      | 54 ++++++++++++++++++++--------------------
 lib/propane/generator.rb | 35 ++++++++++++++++++++------
 2 files changed, 55 insertions(+), 34 deletions(-)

diff --git a/assets/parser.c.erb b/assets/parser.c.erb
index 41283de..603ef74 100644
--- a/assets/parser.c.erb
+++ b/assets/parser.c.erb
@@ -1,4 +1,4 @@
-#include ".h"
+#include "<%= File.basename(output_file).sub(%r{\.[a-z]+$}, "") %>.h"
 #include 
 #include 
 #include 
@@ -329,12 +329,12 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
     lexer_match_info_t longest_match;
     lexer_match_info_t attempt_match;
     *out_match_info = longest_match;
-    uint32_t current_state = lexer_mode_table[context.mode].state_table_offset;
+    uint32_t current_state = lexer_mode_table[context->mode].state_table_offset;
     for (;;)
     {
-        size_t const input_index = context.input_index + attempt_match.length;
-        uint8_t const * input = &context.input[input_index];
-        size_t input_length = context.input_length - input_index;
+        size_t const input_index = context->input_index + attempt_match.length;
+        uint8_t const * input = &context->input[input_index];
+        size_t input_length = context->input_length - input_index;
         <%= @grammar.prefix %>code_point_t code_point;
         uint8_t code_point_length;
         size_t result = <%= @grammar.prefix %>decode_code_point(input, input_length, &code_point, &code_point_length);
@@ -428,7 +428,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
     <%= @grammar.prefix %>token_info_t token_info;
-    token_info.position = context.text_position;
+    token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
     *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
@@ -440,7 +440,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
         <%= @grammar.prefix %>token_t token_to_accept = match_info.accepting_state.token;
         if (match_info.accepting_state.code_id != INVALID_USER_CODE_ID)
         {
-            uint8_t const * match = &context.input[context.input_index];
+            uint8_t const * match = &context->input[context->input_index];
             <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                 match_info.accepting_state.code_id, match, match_info.length, &token_info);
             /* An invalid token returned from lexer_user_code() means that the
@@ -454,15 +454,15 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
         }

         /* Update the input position tracking. */
-        context.input_index += match_info.length;
-        context.text_position.row += match_info.delta_position.row;
+        context->input_index += match_info.length;
+        context->text_position.row += match_info.delta_position.row;
         if (match_info.delta_position.row != 0u)
         {
-            context.text_position.col = match_info.delta_position.col;
+            context->text_position.col = match_info.delta_position.col;
         }
         else
         {
-            context.text_position.col += match_info.delta_position.col;
+            context->text_position.col += match_info.delta_position.col;
         }

         if (token_to_accept == INVALID_TOKEN_ID)
@@ -481,15 +481,15 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @

     case P_DECODE_ERROR:
         /* Update the input position tracking. */
-        context.input_index += match_info.length;
-        context.text_position.row += match_info.delta_position.row;
+        context->input_index += match_info.length;
+        context->text_position.row += match_info.delta_position.row;
         if (match_info.delta_position.row != 0u)
         {
-            context.text_position.col = match_info.delta_position.col;
+            context->text_position.col = match_info.delta_position.col;
         }
         else
         {
-            context.text_position.col += match_info.delta_position.col;
+            context->text_position.col += match_info.delta_position.col;
         }

         return result;
@@ -742,7 +742,7 @@ void state_values_stack_free(state_values_stack_t * stack)
  *
  * @return Parse value.
  */
-static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_value_stack_t * statevalues, uint32_t n_states)
+static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states)
 {
     <%= @grammar.prefix %>value_t _pvalue;

@@ -852,15 +852,15 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         size_t shift_state = INVALID_ID;
         if (reduced_rule_set != INVALID_ID)
         {
-            shift_state = check_shift(state_value_stack_index(&statevalues, -1)->state_id, reduced_rule_set);
+            shift_state = check_shift(state_values_stack_index(&statevalues, -1)->state_id, reduced_rule_set);
         }
         if (shift_state == INVALID_ID)
         {
-            shift_state = check_shift(state_value_stack_index(&statevalues, -1)->state_id, token);
+            shift_state = check_shift(state_values_stack_index(&statevalues, -1)->state_id, token);
             if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
             {
                 /* Successful parse. */
-                context.parse_result = state_value_stack_index(&statevalues, -1)->pvalue;
+                context->parse_result = state_values_stack_index(&statevalues, -1)->pvalue;
                 result = P_SUCCESS;
                 break;
             }
@@ -868,18 +868,18 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if (shift_state != INVALID_ID)
         {
             /* We have something to shift. */
-            state_value_stack_push(&statevalues);
-            state_value_stack_index(&statevalues, -1)->state_id = shift_state;
+            state_values_stack_push(&statevalues);
+            state_values_stack_index(&statevalues, -1)->state_id = shift_state;
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
                 token = INVALID_TOKEN_ID;
-                state_value_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
+                state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
             }
             else
             {
                 /* We shifted a RuleSet. */
-                state_value_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
+                state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result;
                 reduced_parser_value = new_parse_result;
                 reduced_rule_set = INVALID_ID;
@@ -887,13 +887,13 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             continue;
         }

-        size_t reduce_index = check_reduce(state_value_stack_index(&statevalues, -1)->state_id, token);
+        size_t reduce_index = check_reduce(state_values_stack_index(&statevalues, -1)->state_id, token);
        if (reduce_index != INVALID_ID)
        {
            /* We have something to reduce. */
            reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states);
            reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
-            state_value_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
+            state_values_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
             continue;
         }

@@ -902,8 +902,8 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
          * the context text position to point to the token rather than the text
          * after it, so that if the caller wants to report the error position,
          * it will point to the correct position of the unexpected token. */
-        context.text_position = token_info.position;
-        context.token = token;
+        context->text_position = token_info.position;
+        context->token = token;
         result = P_UNEXPECTED_TOKEN;
         break;
     }
diff --git a/lib/propane/generator.rb b/lib/propane/generator.rb
index 4a6fac4..87c8f02 100644
--- a/lib/propane/generator.rb
+++ b/lib/propane/generator.rb
@@ -27,8 +27,9 @@ class Propane
       end
       extensions.each do |extension|
         erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.#{extension}.erb")), trim_mode: "<>")
+        output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
         result = erb.result(binding.clone)
-        File.open(@output_file.sub(%r{\.[a-z]+$}, ".#{extension}"), "wb") do |fh|
+        File.open(output_file, "wb") do |fh|
           fh.write(result)
         end
       end
@@ -204,10 +205,10 @@ class Propane
         code = code.gsub(/\$(\d+)/) do |match|
           index = $1.to_i
           case @language
-          when "d"
-            "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
           when "c"
             "state_value_stack_index(statevalues, -1 - (int)n_states + #{index})->pvalue.v_#{rule.components[index - 1].ptypename}"
+          when "d"
+            "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
           end
         end
       else
@@ -220,7 +221,12 @@ class Propane
           unless mode_id
             raise Error.new("Lexer mode '#{mode_name}' not found")
           end
-          "context.mode = #{mode_id}u"
+          case @language
+          when "c"
+            "context->mode = #{mode_id}u"
+          when "d"
+            "context.mode = #{mode_id}u"
+          end
         end
       end
       code
@@ -246,11 +252,26 @@ class Propane
     # Type.
     def get_type_for(max)
       if max <= 0xFF
-        "ubyte"
+        case @language
+        when "c"
+          "uint8_t"
+        when "d"
+          "ubyte"
+        end
       elsif max <= 0xFFFF
-        "ushort"
+        case @language
+        when "c"
+          "uint16_t"
+        when "d"
+          "ushort"
+        end
       else
-        "uint"
+        case @language
+        when "c"
+          "uint32_t"
+        else
+          "uint"
+        end
       end
     end
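
Note (not part of the patch): the state_values_stack_* helpers renamed above are defined elsewhere in assets/parser.c.erb and are not shown in these hunks. The following is a minimal, hypothetical sketch of the indexing convention the call sites rely on, where a negative index counts back from the top of the stack, so -1 is the top entry. The struct layout and field names here are assumptions for illustration only.

/* Hypothetical sketch only -- the real stack type and helpers live in
 * assets/parser.c.erb outside the hunks shown in this patch. */
#include <stddef.h>
#include <stdint.h>

typedef struct
{
    uint32_t state_id;  /* parser state associated with this stack entry */
    int pvalue;         /* stand-in for the generated <prefix>value_t */
} state_value_t;

typedef struct
{
    state_value_t * entries;
    size_t length;      /* number of entries currently on the stack */
} state_values_stack_t;

/* A non-negative index counts from the bottom of the stack; a negative
 * index counts back from the top, so index -1 refers to the top entry. */
static state_value_t * state_values_stack_index(state_values_stack_t * stack, int index)
{
    if (index >= 0)
    {
        return &stack->entries[index];
    }
    return &stack->entries[stack->length - (size_t)(-index)];
}

Under that convention, state_values_stack_index(&statevalues, -1)->state_id reads the state on top of the stack, and the substitution generator.rb emits for $N, state_value_stack_index(statevalues, -1 - (int)n_states + #{index})->pvalue, reaches back to the value for the Nth component of the rule being reduced.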