propane/assets/parser.d.erb
Commit f17efe8c82 by Josh Holtrop (2022-06-21 20:07:27 -04:00)

Add RuleSet#id to use when reducing

The parser will know which state to go to after reducing a Rule, based on
the RuleSet ID.
Start on Parser class.
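The Reduce entries in this template carry a rule_set_id for exactly that purpose. A minimal, hypothetical sketch of the post-reduce lookup (the parse loop is not part of this commit, and the Goto table and names below are illustrative assumptions, not code from the template):

struct Goto
{
uint rule_set_id;
uint state_id;
}
// After reducing by a rule, look up the reduced rule set in the current
// state's goto entries; the matching state_id is the state to enter next.
uint goto_state(const(Goto)[] gotos, uint rule_set_id)
{
foreach (g; gotos)
{
if (g.rule_set_id == rule_set_id)
{
return g.state_id;
}
}
return cast(uint)-1;
}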


<% if @grammar.modulename %>
module <%= @grammar.modulename %>;
<% end %>
class <%= @classname %>
{
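// Token identifiers. One TOKEN_<name> value is generated per grammar token,
// followed by sentinel values: end of input, UTF-8 decode error, a token the
// lexer should drop (skip), and "no token".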
enum
{
<% @grammar.tokens.each_with_index do |token, index| %>
TOKEN_<%= token.c_name %> = <%= index %>,
<% end %>
TOKEN_EOF = <%= TOKEN_EOF %>,
TOKEN_DECODE_ERROR = <%= TOKEN_DECODE_ERROR %>,
TOKEN_DROP = <%= TOKEN_DROP %>,
TOKEN_NONE = <%= TOKEN_NONE %>,
}
static immutable string[] TokenNames = [
<% @grammar.tokens.each_with_index do |token, index| %>
"<%= token.name %>",
<% end %>
];
static class Decoder
{
enum
{
CODE_POINT_INVALID = 0xFFFFFFFE,
CODE_POINT_EOF = 0xFFFFFFFF,
}
struct DecodedCodePoint
{
uint code_point;
uint code_point_length;
}
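// Decode a single UTF-8 code point from input. Returns the code point and
// the number of bytes it occupied; CODE_POINT_EOF with length 0 for empty
// input, and CODE_POINT_INVALID with length 0 for a malformed or truncated
// sequence.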
static DecodedCodePoint decode_code_point(const(ubyte) * input, size_t input_length)
{
if (input_length == 0u)
{
return DecodedCodePoint(CODE_POINT_EOF, 0u);
}
ubyte c = *input;
uint code_point;
uint code_point_length;
if ((c & 0x80u) == 0u)
{
code_point = c;
code_point_length = 1u;
}
else
{
ubyte following_bytes;
if ((c & 0xE0u) == 0xC0u)
{
code_point = c & 0x1Fu;
following_bytes = 1u;
}
else if ((c & 0xF0u) == 0xE0u)
{
code_point = c & 0x0Fu;
following_bytes = 2u;
}
else if ((c & 0xF8u) == 0xF0u)
{
code_point = c & 0x07u;
following_bytes = 3u;
}
else if ((c & 0xFCu) == 0xF8u)
{
code_point = c & 0x03u;
following_bytes = 4u;
}
else if ((c & 0xFEu) == 0xFCu)
{
code_point = c & 0x01u;
following_bytes = 5u;
}
else
{
return DecodedCodePoint(CODE_POINT_INVALID, 0u);
}
if (input_length <= following_bytes)
{
return DecodedCodePoint(CODE_POINT_INVALID, 0u);
}
code_point_length = following_bytes + 1u;
while (following_bytes-- > 0u)
{
input++;
ubyte b = *input;
// Continuation bytes must have the form 10xxxxxx.
if ((b & 0xC0u) != 0x80u)
{
return DecodedCodePoint(CODE_POINT_INVALID, 0u);
}
code_point = (code_point << 6u) | (b & 0x3Fu);
}
}
return DecodedCodePoint(code_point, code_point_length);
}
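// Illustrative sketch: expected decoder results for a single ASCII byte and
// for empty input (compiled only with -unittest).
unittest
{
immutable(ubyte)[] text = cast(immutable(ubyte)[]) "A";
auto decoded = decode_code_point(text.ptr, text.length);
assert(decoded.code_point == 'A');
assert(decoded.code_point_length == 1u);
assert(decode_code_point(text.ptr, 0u).code_point == CODE_POINT_EOF);
}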
}
static class Lexer
{
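// Table-driven DFA lexer. Each State refers to a slice of the transitions
// table (code-point ranges to destination states) and records the token it
// accepts, or TOKEN_NONE for a non-accepting state. Both tables are built by
// the generator.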
private struct Transition
{
uint first;
uint last;
uint destination;
}
private struct State
{
uint transition_table_index;
uint n_transitions;
uint accepts;
}
<% transition_table, state_table = @lexer.build_tables %>
private static immutable Transition[] transitions = [
<% transition_table.each do |transition_table_entry| %>
Transition(<%= transition_table_entry[:first] %>u, <%= transition_table_entry[:last] %>u, <%= transition_table_entry[:destination] %>u),
<% end %>
];
private static const State[] states = [
<% state_table.each do |state_table_entry| %>
State(<%= state_table_entry[:transition_table_index] %>u, <%= state_table_entry[:n_transitions] %>u, <%= state_table_entry[:accepts] %>u),
<% end %>
];
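// A lexed token: the row and column of its first character, its length in
// bytes, and its token ID.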
struct LexedToken
{
size_t row;
size_t col;
size_t length;
uint token;
}
private const(ubyte) * m_input;
private size_t m_input_length;
private size_t m_input_position;
private size_t m_input_row;
private size_t m_input_col;
this(const(ubyte) * input, size_t input_length)
{
m_input = input;
m_input_length = input_length;
}
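// Lex input until a token that is not a drop (skipped) token is found.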
LexedToken lex_token()
{
for (;;)
{
LexedToken lt = attempt_lex_token();
if (lt.token != TOKEN_DROP)
{
return lt;
}
}
}
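// Attempt to lex one token using maximal munch: advance the DFA one code
// point at a time, remember the most recent accepting position, and when no
// further transition exists emit the longest accepted token. Row and column
// deltas are accumulated per attempt; a newline bumps the row and resets the
// column.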
private LexedToken attempt_lex_token()
{
LexedToken lt = LexedToken(m_input_row, m_input_col, 0, TOKEN_NONE);
struct LexedTokenState
{
size_t length;
size_t delta_row;
size_t delta_col;
uint token;
}
LexedTokenState last_accepts_info;
last_accepts_info.token = TOKEN_NONE;
LexedTokenState attempt_info;
uint current_state;
for (;;)
{
auto decoded = Decoder.decode_code_point(&m_input[m_input_position + attempt_info.length], m_input_length - m_input_position - attempt_info.length);
if (decoded.code_point == Decoder.CODE_POINT_INVALID)
{
lt.token = TOKEN_DECODE_ERROR;
return lt;
}
bool lex_continue = false;
if (decoded.code_point != Decoder.CODE_POINT_EOF)
{
uint dest = transition(current_state, decoded.code_point);
if (dest != cast(uint)-1)
{
lex_continue = true;
attempt_info.length += decoded.code_point_length;
if (decoded.code_point == '\n')
{
attempt_info.delta_row++;
attempt_info.delta_col = 0u;
}
else
{
attempt_info.delta_col++;
}
current_state = dest;
if (states[current_state].accepts != TOKEN_NONE)
{
attempt_info.token = states[current_state].accepts;
last_accepts_info = attempt_info;
}
}
}
else if (attempt_info.length == 0u)
{
lt.token = TOKEN_EOF;
break;
}
if (!lex_continue)
{
if (last_accepts_info.token != TOKEN_NONE)
{
lt.token = last_accepts_info.token;
lt.length = last_accepts_info.length;
m_input_position += last_accepts_info.length;
m_input_row += last_accepts_info.delta_row;
if (last_accepts_info.delta_row != 0u)
{
m_input_col = last_accepts_info.delta_col;
}
else
{
m_input_col += last_accepts_info.delta_col;
}
}
break;
}
}
return lt;
}
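// Find the destination state for code_point from current_state by scanning
// that state's slice of the transitions table. Returns cast(uint)-1 if no
// transition matches.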
private uint transition(uint current_state, uint code_point)
{
uint transition_table_index = states[current_state].transition_table_index;
for (uint i = 0u; i < states[current_state].n_transitions; i++)
{
if ((transitions[transition_table_index + i].first <= code_point) &&
(code_point <= transitions[transition_table_index + i].last))
{
return transitions[transition_table_index + i].destination;
}
}
return cast(uint)-1;
}
}
static class Parser
{
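// Parse tables. Each State refers to a slice of the shifts table (token ID
// to next state) and a slice of the reduces table. A Reduce entry records
// the rule to reduce by and the rule_set_id used to determine which state to
// go to after the reduction. Only the constructor exists so far; the parse
// loop is not implemented yet.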
private struct Shift
{
uint token_id;
uint state_id;
}
private struct Reduce
{
uint token_id;
uint rule_id;
uint rule_set_id;
}
private struct State
{
uint shift_table_index;
uint n_shift_entries;
uint reduce_table_index;
uint n_reduce_entries;
}
<% state_table, shift_table, reduce_table = @parser.build_tables %>
private static immutable Shift[] shifts = [
<% shift_table.each do |shift| %>
Shift(<%= shift[:token_id] %>u, <%= shift[:state_id] %>u),
<% end %>
];
private static immutable Reduce[] reduces = [
<% reduce_table.each do |reduce| %>
Reduce(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u),
<% end %>
];
private static immutable State[] states = [
<% state_table.each do |state| %>
State(<%= state[:shift_index] %>u, <%= state[:n_shifts] %>u, <%= state[:reduce_index] %>u, <%= state[:n_reduces] %>u),
<% end %>
];
private Lexer m_lexer;
this(const(ubyte) * input, size_t input_length)
{
m_lexer = new Lexer(input, input_length);
}
}
}