Josh Holtrop 2023-08-20 16:53:54 -04:00
parent c7185edef0
commit dace12310a
2 changed files with 55 additions and 34 deletions

File 1 of 2 (generated C parser template, ERB):

@@ -1,4 +1,4 @@
-#include "<TBD>.h"
+#include "<%= File.basename(output_file).sub(%r{\.[a-z]+$}, "") %>.h"
 #include <stdbool.h>
 #include <stdlib.h>
 #include <string.h>
@@ -329,12 +329,12 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
     lexer_match_info_t longest_match;
     lexer_match_info_t attempt_match;
     *out_match_info = longest_match;
-    uint32_t current_state = lexer_mode_table[context.mode].state_table_offset;
+    uint32_t current_state = lexer_mode_table[context->mode].state_table_offset;
     for (;;)
     {
-        size_t const input_index = context.input_index + attempt_match.length;
-        uint8_t const * input = &context.input[input_index];
-        size_t input_length = context.input_length - input_index;
+        size_t const input_index = context->input_index + attempt_match.length;
+        uint8_t const * input = &context->input[input_index];
+        size_t input_length = context->input_length - input_index;
         <%= @grammar.prefix %>code_point_t code_point;
         uint8_t code_point_length;
         size_t result = <%= @grammar.prefix %>decode_code_point(input, input_length, &code_point, &code_point_length);
@@ -428,7 +428,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
     <%= @grammar.prefix %>token_info_t token_info;
-    token_info.position = context.text_position;
+    token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
     *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
@@ -440,7 +440,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
             <%= @grammar.prefix %>token_t token_to_accept = match_info.accepting_state.token;
             if (match_info.accepting_state.code_id != INVALID_USER_CODE_ID)
             {
-                uint8_t const * match = &context.input[context.input_index];
+                uint8_t const * match = &context->input[context->input_index];
                 <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                     match_info.accepting_state.code_id, match, match_info.length, &token_info);
                 /* An invalid token returned from lexer_user_code() means that the
@@ -454,15 +454,15 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
             }
             /* Update the input position tracking. */
-            context.input_index += match_info.length;
-            context.text_position.row += match_info.delta_position.row;
+            context->input_index += match_info.length;
+            context->text_position.row += match_info.delta_position.row;
             if (match_info.delta_position.row != 0u)
             {
-                context.text_position.col = match_info.delta_position.col;
+                context->text_position.col = match_info.delta_position.col;
             }
             else
             {
-                context.text_position.col += match_info.delta_position.col;
+                context->text_position.col += match_info.delta_position.col;
             }
             if (token_to_accept == INVALID_TOKEN_ID)
@@ -481,15 +481,15 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
         case P_DECODE_ERROR:
             /* Update the input position tracking. */
-            context.input_index += match_info.length;
-            context.text_position.row += match_info.delta_position.row;
+            context->input_index += match_info.length;
+            context->text_position.row += match_info.delta_position.row;
             if (match_info.delta_position.row != 0u)
             {
-                context.text_position.col = match_info.delta_position.col;
+                context->text_position.col = match_info.delta_position.col;
             }
             else
             {
-                context.text_position.col += match_info.delta_position.col;
+                context->text_position.col += match_info.delta_position.col;
             }
             return result;
@@ -742,7 +742,7 @@ void state_values_stack_free(state_values_stack_t * stack)
  *
  * @return Parse value.
  */
-static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_value_stack_t * statevalues, uint32_t n_states)
+static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states)
 {
     <%= @grammar.prefix %>value_t _pvalue;
@@ -852,15 +852,15 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         size_t shift_state = INVALID_ID;
         if (reduced_rule_set != INVALID_ID)
         {
-            shift_state = check_shift(state_value_stack_index(&statevalues, -1)->state_id, reduced_rule_set);
+            shift_state = check_shift(state_values_stack_index(&statevalues, -1)->state_id, reduced_rule_set);
         }
         if (shift_state == INVALID_ID)
         {
-            shift_state = check_shift(state_value_stack_index(&statevalues, -1)->state_id, token);
+            shift_state = check_shift(state_values_stack_index(&statevalues, -1)->state_id, token);
             if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
             {
                 /* Successful parse. */
-                context.parse_result = state_value_stack_index(&statevalues, -1)->pvalue;
+                context->parse_result = state_values_stack_index(&statevalues, -1)->pvalue;
                 result = P_SUCCESS;
                 break;
             }
@@ -868,18 +868,18 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if (shift_state != INVALID_ID)
        {
             /* We have something to shift. */
-            state_value_stack_push(&statevalues);
-            state_value_stack_index(&statevalues, -1)->state_id = shift_state;
+            state_values_stack_push(&statevalues);
+            state_values_stack_index(&statevalues, -1)->state_id = shift_state;
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
                 token = INVALID_TOKEN_ID;
-                state_value_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
+                state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
             }
             else
             {
                 /* We shifted a RuleSet. */
-                state_value_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
+                state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result;
                 reduced_parser_value = new_parse_result;
                 reduced_rule_set = INVALID_ID;
@@ -887,13 +887,13 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             continue;
         }
-        size_t reduce_index = check_reduce(state_value_stack_index(&statevalues, -1)->state_id, token);
+        size_t reduce_index = check_reduce(state_values_stack_index(&statevalues, -1)->state_id, token);
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
             reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states);
             reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
-            state_value_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
+            state_values_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
             continue;
         }
@@ -902,8 +902,8 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
          * the context text position to point to the token rather than the text
          * after it, so that if the caller wants to report the error position,
          * it will point to the correct position of the unexpected token. */
-        context.text_position = token_info.position;
-        context.token = token;
+        context->text_position = token_info.position;
+        context->token = token;
         result = P_UNEXPECTED_TOKEN;
         break;
     }
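
Note: the bulk of this file's edits replace D-style member access (context.mode, context.input_index) with C's arrow operator. The template was originally written for D, where `.` performs member access through both structs and pointers to structs; C requires `->` when the left-hand side is a pointer, as the `context` parameter is throughout this template. A minimal sketch of the distinction, using a hypothetical struct rather than the generated one:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the generated parser context. */
    typedef struct
    {
        uint32_t mode;
        size_t input_index;
    } context_t;

    static void set_mode(context_t * context, uint32_t mode)
    {
        /* D accepts `context.mode` whether context is a struct or a
         * pointer to one; C needs `->` (or an explicit dereference)
         * here, which is exactly the change made above. */
        context->mode = mode; /* same as (*context).mode = mode; */
    }

    int main(void)
    {
        context_t context = { 0u, 0u };
        set_mode(&context, 1u);
        printf("mode=%u\n", (unsigned)context.mode); /* prints mode=1 */
        return 0;
    }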

File 2 of 2 (Ruby generator):

@@ -27,8 +27,9 @@ class Propane
       end
       extensions.each do |extension|
         erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.#{extension}.erb")), trim_mode: "<>")
+        output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
         result = erb.result(binding.clone)
-        File.open(@output_file.sub(%r{\.[a-z]+$}, ".#{extension}"), "wb") do |fh|
+        File.open(output_file, "wb") do |fh|
           fh.write(result)
         end
       end
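
Note: hoisting `output_file` above the `erb.result(binding.clone)` call makes the name visible to the template while it renders, which is what the new `#include "<%= File.basename(output_file)... %>.h"` line in file 1 relies on. For example, if the C parser were written to `parser.c` (an illustrative name, not fixed by this diff), the sub() strips the `.c` extension and the generated source would begin with:

    #include "parser.h"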
@@ -204,10 +205,10 @@ class Propane
           code = code.gsub(/\$(\d+)/) do |match|
             index = $1.to_i
             case @language
-            when "d"
-              "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
             when "c"
               "state_value_stack_index(statevalues, -1 - (int)n_states + #{index})->pvalue.v_#{rule.components[index - 1].ptypename}"
+            when "d"
+              "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
             end
           end
         else
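
Note: this hunk only reorders the cases so "c" comes first; both expansions are unchanged. For the C target, a positional reference like `$2` in a rule action with `n_states == 3` expands to an access at offset -1 - 3 + 2 = -2, the second entry from the top of the state/value stack. A simplified sketch of that negative-index convention, with stand-in types rather than the generated ones:

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins for the generated stack types. */
    typedef union { int v_int; } value_t;
    typedef struct { uint32_t state_id; value_t pvalue; } state_value_t;
    typedef struct { state_value_t * entries; size_t length; } sv_stack_t;

    /* Negative indices count back from the top: -1 is the most recently
     * pushed entry. When reducing a rule that spans n_states stack
     * entries, symbol number `index` (1-based) of that rule sits at
     * -1 - n_states + index, which is what the generated expression
     * computes. */
    static state_value_t * sv_stack_index(sv_stack_t * stack, int index)
    {
        if (index < 0)
        {
            return &stack->entries[(int)stack->length + index];
        }
        return &stack->entries[index];
    }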
@@ -220,9 +221,14 @@ class Propane
           unless mode_id
             raise Error.new("Lexer mode '#{mode_name}' not found")
           end
+          case @language
+          when "c"
+            "context->mode = #{mode_id}u"
+          when "d"
             "context.mode = #{mode_id}u"
           end
         end
+        end
       code
     end
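
Note: the `$mode(name)` lexer directive gets the same treatment as the positional references above: one statement shape per target language. Assuming the lexer assigned mode id 1 to a mode named `string` (illustrative values), the C expansion behaves like this sketch:

    #include <stdint.h>

    /* Hypothetical context struct and mode id for illustration. */
    typedef struct { uint32_t mode; } context_t;
    #define MODE_STRING 1u

    /* $mode(string) would expand to `context->mode = 1u` in generated
     * C user code (and `context.mode = 1u` in D). */
    static void enter_string_mode(context_t * context)
    {
        context->mode = MODE_STRING;
    }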
@@ -246,13 +252,28 @@ class Propane
     # Type.
     def get_type_for(max)
       if max <= 0xFF
+        case @language
+        when "c"
+          "uint8_t"
+        when "d"
           "ubyte"
+        end
       elsif max <= 0xFFFF
+        case @language
+        when "c"
+          "uint16_t"
+        when "d"
           "ushort"
+        end
+      else
+        case @language
+        when "c"
+          "uint32_t"
         else
           "uint"
         end
       end
+    end
   end
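
Note: `get_type_for` now returns C's fixed-width types when `@language` is "c", keeping the same size thresholds the D output already used. A short illustration of the three widths with hypothetical tables (the names and values are not from this diff):

    #include <stdint.h>

    /* get_type_for picks the narrowest type that can hold the table's
     * largest value: uint8_t up to 0xFF, uint16_t up to 0xFFFF, and
     * uint32_t otherwise (ubyte/ushort/uint for the D target). */
    static const uint8_t  tiny_table[]  = { 0u, 0xFFu };
    static const uint16_t small_table[] = { 42u, 300u, 0xFFFFu };
    static const uint32_t large_table[] = { 0x10000u };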