<% if @grammar.modulename %> module <%= @grammar.modulename %>; <% end %> import std.stdio; <% @grammar.code_blocks.each do |code| %> <%= code %> <% end %> class <%= @classname %> { enum { <% @grammar.tokens.each_with_index do |token, index| %> TOKEN_<%= token.code_name %> = <%= index %>, <% end %> _TOKEN_COUNT = <%= @grammar.tokens.size %>, _TOKEN_DECODE_ERROR = <%= TOKEN_DECODE_ERROR %>, _TOKEN_DROP = <%= TOKEN_DROP %>, } static immutable string token_names[] = [ <% @grammar.tokens.each_with_index do |token, index| %> "<%= token.name %>", <% end %> ]; static class Decoder { enum { CODE_POINT_INVALID = 0xFFFFFFFE, CODE_POINT_EOF = 0xFFFFFFFF, } struct DecodedCodePoint { uint code_point; uint code_point_length; } static DecodedCodePoint decode_code_point(string input) { if (input.length == 0u) { return DecodedCodePoint(CODE_POINT_EOF, 0u); } char c = input[0]; uint code_point; uint code_point_length; if ((c & 0x80u) == 0u) { code_point = c; code_point_length = 1u; } else { ubyte following_bytes; if ((c & 0xE0u) == 0xC0u) { code_point = c & 0x1Fu; following_bytes = 1u; } else if ((c & 0xF0u) == 0xE0u) { code_point = c & 0x0Fu; following_bytes = 2u; } else if ((c & 0xF8u) == 0xF0u) { code_point = c & 0x07u; following_bytes = 3u; } else if ((c & 0xFCu) == 0xF8u) { code_point = c & 0x03u; following_bytes = 4u; } else if ((c & 0xFEu) == 0xFCu) { code_point = c & 0x01u; following_bytes = 5u; } else { return DecodedCodePoint(CODE_POINT_INVALID, 0u); } if (input.length <= following_bytes) { return DecodedCodePoint(CODE_POINT_INVALID, 0u); } code_point_length = following_bytes + 1u; for (size_t i = 0u; i < following_bytes; i++) { char b = input[i + 1u]; if ((b & 0xC0u) != 0x80u) { return DecodedCodePoint(CODE_POINT_INVALID, 0u); } code_point = (code_point << 6u) | (b & 0x3Fu); } } return DecodedCodePoint(code_point, code_point_length); } } static class Lexer { private struct Transition { uint first; uint last; uint destination; } private struct State { uint transition_table_index; uint n_transitions; uint token; uint code_id; } private struct Mode { uint state_table_offset; } <% transition_table, state_table, mode_table = @lexer.build_tables %> private static immutable Transition transitions[] = [ <% transition_table.each do |transition_table_entry| %> Transition(<%= transition_table_entry[:first] %>u, <%= transition_table_entry[:last] %>u, <%= transition_table_entry[:destination] %>u), <% end %> ]; private static immutable State states[] = [ <% state_table.each do |state_table_entry| %> State(<%= state_table_entry[:transition_table_index] %>u, <%= state_table_entry[:n_transitions] %>u, <%= state_table_entry[:token] %>u, <%= state_table_entry[:code_id] %>u), <% end %> ]; private static immutable Mode modes[] = [ <% mode_table.each do |mode_table_entry| %> Mode(<%= mode_table_entry[:state_table_offset] %>), <% end %> ]; struct LexedToken { size_t row; size_t col; size_t length; uint token; <%= @grammar.result_type %> result; } private string m_input; private size_t m_input_position; private size_t m_input_row; private size_t m_input_col; private size_t m_mode; this(string input) { m_input = input; m_mode = <%= @lexer.mode_id("default") %>; } LexedToken lex_token() { for (;;) { LexedToken lt = attempt_lex_token(); if (lt.token < _TOKEN_COUNT) { return lt; } } } /** * Execute user code associated with a lexer pattern. * * @param code_id The ID of the user code block to execute. * @param match Matched text for this pattern. * @param lt LexedToken lexer result in progress. * * @return Token ID to accept, or _TOKEN_COUNT if the user code does * not explicitly return a token. */ private uint user_code(uint code_id, string match, LexedToken * lt) { switch (code_id) { <% @grammar.patterns.each do |pattern| %> <% if pattern.code_id %> case <%= pattern.code_id %>u: { <%= expand_code(pattern.code, false) %> } break; <% end %> <% end %> default: break; } return _TOKEN_COUNT; } private LexedToken attempt_lex_token() { LexedToken lt; lt.row = m_input_row; lt.col = m_input_col; lt.token = _TOKEN_COUNT; struct MatchInfo { size_t length; size_t delta_row; size_t delta_col; uint token; uint code_id; } MatchInfo longest_match_info; longest_match_info.token = _TOKEN_COUNT; MatchInfo attempt_match_info; uint current_state = modes[m_mode].state_table_offset; for (;;) { auto decoded = Decoder.decode_code_point(m_input[(m_input_position + attempt_match_info.length)..(m_input.length)]); if (decoded.code_point == Decoder.CODE_POINT_INVALID) { lt.token = _TOKEN_DECODE_ERROR; return lt; } bool lex_continue = false; if (decoded.code_point != Decoder.CODE_POINT_EOF) { uint dest = transition(current_state, decoded.code_point); if (dest != cast(uint)-1) { lex_continue = true; attempt_match_info.length += decoded.code_point_length; if (decoded.code_point == '\n') { attempt_match_info.delta_row++; attempt_match_info.delta_col = 0u; } else { attempt_match_info.delta_col++; } current_state = dest; if ((states[current_state].token != _TOKEN_COUNT) || (states[current_state].code_id != 0xFFFF_FFFFu)) { attempt_match_info.token = states[current_state].token; attempt_match_info.code_id = states[current_state].code_id; longest_match_info = attempt_match_info; } } } else if (attempt_match_info.length == 0u) { lt.token = TOKEN_0EOF; break; } if (!lex_continue) { bool pattern_accepted = false; uint token_to_accept = longest_match_info.token; if (longest_match_info.code_id != 0xFFFF_FFFFu) { uint user_code_token = user_code(longest_match_info.code_id, m_input[m_input_position..(m_input_position + longest_match_info.length)], <); /* A return of _TOKEN_COUNT from user_code() means * that the user code did not explicitly return a * token. So only override the token to return if the * user code does explicitly return a token. */ if (user_code_token != _TOKEN_COUNT) { token_to_accept = user_code_token; } pattern_accepted = true; } if (pattern_accepted || (token_to_accept != _TOKEN_COUNT)) { /* Update the input position tracking. */ m_input_position += longest_match_info.length; m_input_row += longest_match_info.delta_row; if (longest_match_info.delta_row != 0u) { m_input_col = longest_match_info.delta_col; } else { m_input_col += longest_match_info.delta_col; } lt.token = token_to_accept; lt.length = longest_match_info.length; break; } } } return lt; } private uint transition(uint current_state, uint code_point) { uint transition_table_index = states[current_state].transition_table_index; for (uint i = 0u; i < states[current_state].n_transitions; i++) { if ((transitions[transition_table_index + i].first <= code_point) && (code_point <= transitions[transition_table_index + i].last)) { return transitions[transition_table_index + i].destination; } } return cast(uint)-1; } } static class Parser { private struct Shift { uint symbol; uint state; } private struct Reduce { uint token; uint rule; uint rule_set; uint n_states; } private struct State { uint shift_table_index; uint n_shift_entries; uint reduce_table_index; uint n_reduce_entries; } private struct StateResult { uint state; <%= @grammar.result_type %> result; this(uint state) { this.state = state; } } <% state_table, shift_table, reduce_table = @parser.build_tables %> private static immutable Shift shifts[] = [ <% shift_table.each do |shift| %> Shift(<%= shift[:token_id] %>u, <%= shift[:state_id] %>u), <% end %> ]; private static immutable Reduce reduces[] = [ <% reduce_table.each do |reduce| %> Reduce(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u), <% end %> ]; private static immutable State states[] = [ <% state_table.each do |state| %> State(<%= state[:shift_index] %>u, <%= state[:n_shifts] %>u, <%= state[:reduce_index] %>u, <%= state[:n_reduces] %>u), <% end %> ]; private Lexer m_lexer; private <%= @grammar.result_type %> parse_result; this(string input) { m_lexer = new Lexer(input); } bool parse() { Lexer.LexedToken lexed_token; uint token = _TOKEN_COUNT; StateResult[] stateresults = new StateResult[](1); uint reduced_rule_set = 0xFFFFFFFFu; <%= @grammar.result_type %> reduced_parse_result; for (;;) { if (token == _TOKEN_COUNT) { lexed_token = m_lexer.lex_token(); token = lexed_token.token; } uint shift_state = 0xFFFFFFFFu; if (reduced_rule_set != 0xFFFFFFFFu) { shift_state = check_shift(stateresults[$-1].state, reduced_rule_set); } if (shift_state == 0xFFFFFFFFu) { shift_state = check_shift(stateresults[$-1].state, token); if ((shift_state != 0xFFFFFFFFu) && (token == TOKEN_0EOF)) { /* Successful parse. */ parse_result = stateresults[$-1].result; return true; } } if (shift_state != 0xFFFFFFFFu) { /* We have something to shift. */ stateresults ~= StateResult(shift_state); if (reduced_rule_set == 0xFFFFFFFFu) { /* We shifted a token, mark it consumed. */ token = _TOKEN_COUNT; stateresults[$-1].result = lexed_token.result; } else { /* We shifted a RuleSet. */ stateresults[$-1].result = reduced_parse_result; <%= @grammar.result_type %> new_parse_result; reduced_parse_result = new_parse_result; reduced_rule_set = 0xFFFFFFFFu; } continue; } uint reduce_index = check_reduce(stateresults[$-1].state, token); if (reduce_index != 0xFFFFFFFFu) { /* We have something to reduce. */ reduced_parse_result = user_code(reduces[reduce_index].rule, stateresults, reduces[reduce_index].n_states); reduced_rule_set = reduces[reduce_index].rule_set; stateresults.length -= reduces[reduce_index].n_states; continue; } /* Error, unexpected token. */ write("Unexpected token "); if (token < _TOKEN_COUNT) { writeln(token_names[token]); } else { writeln("{other}"); } return false; } } @property <%= @grammar.result_type %> result() { return parse_result; } private uint check_shift(uint state, uint symbol) { uint start = states[state].shift_table_index; uint end = start + states[state].n_shift_entries; for (uint i = start; i < end; i++) { if (shifts[i].symbol == symbol) { // if (symbol < _TOKEN_COUNT) // { // writeln("Shifting ", token_names[symbol]); // } // else // { // writeln("Shifting rule set ", symbol); // } return shifts[i].state; } } return 0xFFFFFFFFu; } private uint check_reduce(uint state, uint token) { uint start = states[state].reduce_table_index; uint end = start + states[state].n_reduce_entries; for (uint i = start; i < end; i++) { if ((reduces[i].token == token) || (reduces[i].token == _TOKEN_COUNT)) { // write("Reducing rule ", reduces[i].rule, ", rule set ", reduces[i].rule_set, " lookahead "); // if (token < _TOKEN_COUNT) // { // writeln(token_names[token]); // } // else // { // writeln("{other}"); // } return i; } } return 0xFFFFFFFFu; } /** * Execute user code associated with a parser rule. * * @param rule The ID of the rule. * * @return Parse result. */ private <%= @grammar.result_type %> user_code(uint rule, StateResult[] stateresults, uint n_states) { <%= @grammar.result_type %> _result; switch (rule) { <% @grammar.rules.each do |rule| %> <% if rule.code %> case <%= rule.id %>u: { <%= expand_code(rule.code, true) %> } break; <% end %> <% end %> default: break; } return _result; } } }