<% if @grammar.modulename %> module <%= @grammar.modulename %>; <% end %> import std.stdio; <% @grammar.code_blocks.each do |code| %> <%= code %> <% end %> class <%= @classname %> { alias TokenID = uint; enum : TokenID { <% @grammar.tokens.each_with_index do |token, index| %> TOKEN_<%= token.code_name %> = <%= index %>, <% unless token.id == index %> <% raise "Token ID (#{token.id}) does not match index (#{index}) for token #{token.name}!" %> <% end %> <% end %> _TOKEN_COUNT = <%= @grammar.tokens.size %>, } struct Token { /* Number of tokens in this parser. */ enum count = <%= @grammar.tokens.size %>; TokenID token; alias token this; @disable this(); this(TokenID token) { this.token = token; } static Token invalid() { return Token(count); } bool is_valid() const { return token < count; } bool is_invalid() const { return !is_valid(); } } alias CodePoint = uint; static immutable string[] token_names = [ <% @grammar.tokens.each_with_index do |token, index| %> "<%= token.name %>", <% end %> ]; static union ParserValue { <% @grammar.ptypes.each do |name, typestring| %> <%= typestring %> v_<%= name %>; <% end %> } static class Decoder { enum Result { SUCCESS, EOF, DECODE_ERROR, } static Result decode_code_point(string input, ref CodePoint out_code_point, ref ubyte out_code_point_length) { if (input.length == 0u) { return Result.EOF; } char c = input[0]; CodePoint code_point; ubyte code_point_length; if ((c & 0x80u) == 0u) { code_point = c; code_point_length = 1u; } else { ubyte following_bytes; if ((c & 0xE0u) == 0xC0u) { code_point = c & 0x1Fu; following_bytes = 1u; } else if ((c & 0xF0u) == 0xE0u) { code_point = c & 0x0Fu; following_bytes = 2u; } else if ((c & 0xF8u) == 0xF0u) { code_point = c & 0x07u; following_bytes = 3u; } else if ((c & 0xFCu) == 0xF8u) { code_point = c & 0x03u; following_bytes = 4u; } else if ((c & 0xFEu) == 0xFCu) { code_point = c & 0x01u; following_bytes = 5u; } else { return Result.DECODE_ERROR; } if (input.length <= following_bytes) { return Result.DECODE_ERROR; } code_point_length = cast(ubyte)(following_bytes + 1u); for (size_t i = 0u; i < following_bytes; i++) { char b = input[i + 1u]; if ((b & 0xC0u) != 0x80u) { return Result.DECODE_ERROR; } code_point = (code_point << 6u) | (b & 0x3Fu); } } out_code_point = code_point; out_code_point_length = code_point_length; return Result.SUCCESS; } } static class Lexer { private struct Transition { CodePoint first; CodePoint last; uint destination; } private struct UserCodeID { enum count = <%= (@grammar.patterns.map(&:code_id).compact.max || -1) + 1%>; uint user_code_id; alias user_code_id this; @disable this(); this(uint user_code_id) { this.user_code_id = user_code_id; } static UserCodeID invalid() { return UserCodeID(count); } bool is_valid() const { return user_code_id < count; } bool is_invalid() const { return !is_valid(); } } private struct State { uint transition_table_index; uint n_transitions; Token token; UserCodeID code_id; bool drop; bool accepts() const { return drop || token.is_valid() || code_id.is_valid(); } } private struct Mode { uint state_table_offset; } <% transition_table, state_table, mode_table = @lexer.build_tables %> private static immutable Transition[] transitions = [ <% transition_table.each do |transition_table_entry| %> Transition(<%= transition_table_entry[:first] %>u, <%= transition_table_entry[:last] %>u, <%= transition_table_entry[:destination] %>u), <% end %> ]; private static immutable State[] states = [ <% state_table.each do |state_table_entry| %> State(<%= state_table_entry[:transition_table_index] %>u, <%= state_table_entry[:n_transitions] %>u, <% if state_table_entry[:token] %> Token(<%= state_table_entry[:token] %>u), <% else %> Token.invalid(), <% end %> <% if state_table_entry[:code_id] %> UserCodeID(<%= state_table_entry[:code_id] %>u), <% else %> UserCodeID.invalid(), <% end %> <%= state_table_entry[:drop] %>), <% end %> ]; private static immutable Mode[] modes = [ <% mode_table.each do |mode_table_entry| %> Mode(<%= mode_table_entry[:state_table_offset] %>), <% end %> ]; struct Result { enum Type { DECODE_ERROR, DROP, TOKEN, UNEXPECTED_INPUT, } Type type; size_t row; size_t col; size_t length; uint token; ParserValue pvalue; } private string m_input; private size_t m_input_position; private size_t m_input_row; private size_t m_input_col; private size_t m_mode; this(string input) { m_input = input; m_mode = <%= @lexer.mode_id("default") %>; } Result lex_token() { for (;;) { Result result = attempt_lex_token(); if (result.token < _TOKEN_COUNT) { return result; } } } /** * Execute user code associated with a lexer pattern. * * @param code_id The ID of the user code block to execute. * @param match Matched text for this pattern. * @param result Result lexer result in progress. * * @return Token to accept, or invalid token if the user code does * not explicitly return a token. */ private Token user_code(UserCodeID code_id, string match, Result * result) { switch (code_id) { <% @grammar.patterns.each do |pattern| %> <% if pattern.code_id %> case <%= pattern.code_id %>u: { <%= expand_code(pattern.code, false, nil, pattern) %> } break; <% end %> <% end %> default: break; } return Token.invalid(); } private Result attempt_lex_token() { Result result; result.row = m_input_row; result.col = m_input_col; result.token = _TOKEN_COUNT; auto match_result = find_longest_match(); if (match_result.is_eof()) { result.type = Result.Type.TOKEN; result.token = TOKEN___EOF; return result; } else if (match_result.is_decode_error()) { result.type = Result.Type.DECODE_ERROR; return result; } else if (match_result.is_unexpected_input()) { result.type = Result.Type.UNEXPECTED_INPUT; return result; } uint token_to_accept = match_result.accepting_state.token; if (match_result.accepting_state.code_id.is_valid()) { Token user_code_token = user_code(match_result.accepting_state.code_id, m_input[m_input_position..(m_input_position + match_result.length)], &result); /* An invalid Token from user_code() means that the user * code did not explicitly return a token. So only override * the token to return if the user code does explicitly * return a token. */ if (user_code_token.is_valid()) { token_to_accept = user_code_token.token; } } /* Update the input position tracking. */ m_input_position += match_result.length; m_input_row += match_result.delta_row; if (match_result.delta_row != 0u) { m_input_col = match_result.delta_col; } else { m_input_col += match_result.delta_col; } result.token = token_to_accept; result.length = match_result.length; if (match_result.accepting_state.drop) { result.type = Result.Type.DROP; } else { result.type = Result.Type.TOKEN; } return result; } /** * Result type for find_longest_match(). * * Alternatives: * - decode_error * - eof * - found_match(length, delta_row, delta_col, accepting_state) * - unexpected_input(unexpected_input_length) */ struct FindLongestMatchResult { enum : ubyte { FOUND_MATCH, DECODE_ERROR, EOF, UNEXPECTED_INPUT, } ubyte type; alias type this; union { struct { size_t length; size_t delta_row; size_t delta_col; const(State) * accepting_state; } size_t unexpected_input_length; } this(ubyte type) { this.type = type; } this(ubyte type, size_t unexpected_input_length) { this.type = type; this.unexpected_input_length = unexpected_input_length; } this(ubyte type, size_t length, size_t delta_row, size_t delta_col, const(State) * accepting_state) { this.type = type; this.length = length; this.delta_row = delta_row; this.delta_col = delta_col; this.accepting_state = accepting_state; } static FindLongestMatchResult found_match(size_t length, size_t delta_row, size_t delta_col, const(State) * accepting_state) { return FindLongestMatchResult(FOUND_MATCH, length, delta_row, delta_col, accepting_state); } static FindLongestMatchResult decode_error() { return FindLongestMatchResult(DECODE_ERROR); } static FindLongestMatchResult eof() { return FindLongestMatchResult(EOF); } static FindLongestMatchResult unexpected_input(size_t unexpected_input_length) { return FindLongestMatchResult(UNEXPECTED_INPUT, unexpected_input_length); } bool is_found_match() { return type == FOUND_MATCH; } bool is_decode_error() { return type == DECODE_ERROR; } bool is_eof() { return type == EOF; } bool is_unexpected_input() { return type == UNEXPECTED_INPUT; } } private FindLongestMatchResult find_longest_match() { FindLongestMatchResult longest_match = FindLongestMatchResult.found_match(0, 0, 0, null); FindLongestMatchResult attempt_match = longest_match; uint current_state = modes[m_mode].state_table_offset; for (;;) { string input = m_input[(m_input_position + attempt_match.length)..(m_input.length)]; CodePoint code_point; ubyte code_point_length; switch (Decoder.decode_code_point(input, code_point, code_point_length)) { case Decoder.Result.SUCCESS: auto transition_result = transition(current_state, code_point); if (transition_result.found()) { attempt_match.length += code_point_length; if (code_point == '\n') { attempt_match.delta_row++; attempt_match.delta_col = 0u; } else { attempt_match.delta_col++; } current_state = transition_result.destination(); if (states[current_state].accepts()) { attempt_match.accepting_state = &states[current_state]; longest_match = attempt_match; } } else if (longest_match.length > 0) { return longest_match; } else { return FindLongestMatchResult.unexpected_input(attempt_match.length + code_point_length); } break; case Decoder.Result.EOF: /* We hit EOF. */ if (longest_match.length > 0) { /* We have a match, so use it. */ return longest_match; } else if (attempt_match.length != 0) { /* There is a partial match - error! */ return FindLongestMatchResult.unexpected_input(attempt_match.length); } else { /* Valid EOF return. */ return FindLongestMatchResult.eof(); } break; case Decoder.Result.DECODE_ERROR: return FindLongestMatchResult.decode_error(); default: assert(false); } } } /** * Result type for transition(). * * Alternatives: * - found(destination) * - not_found */ struct TransitionResult { private uint m_destination; static TransitionResult found(uint destination) { return TransitionResult(destination); } static TransitionResult not_found() { return TransitionResult(cast(uint)-1); } bool found() { return m_destination != cast(uint)-1; } @property uint destination() { return m_destination; } } private TransitionResult transition(uint current_state, uint code_point) { uint transition_table_index = states[current_state].transition_table_index; for (uint i = 0u; i < states[current_state].n_transitions; i++) { if ((transitions[transition_table_index + i].first <= code_point) && (code_point <= transitions[transition_table_index + i].last)) { uint destination = transitions[transition_table_index + i].destination; return TransitionResult.found(destination); } } return TransitionResult.not_found(); } } static class Parser { private struct Shift { uint symbol; uint state; } private struct Reduce { uint token; uint rule; uint rule_set; uint n_states; } private struct State { uint shift_table_index; uint n_shift_entries; uint reduce_table_index; uint n_reduce_entries; } private struct StateValue { uint state; ParserValue pvalue; this(uint state) { this.state = state; } } <% state_table, shift_table, reduce_table = @parser.build_tables %> private static immutable Shift[] shifts = [ <% shift_table.each do |shift| %> Shift(<%= shift[:token_id] %>u, <%= shift[:state_id] %>u), <% end %> ]; private static immutable Reduce[] reduces = [ <% reduce_table.each do |reduce| %> Reduce(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u), <% end %> ]; private static immutable State[] states = [ <% state_table.each do |state| %> State(<%= state[:shift_index] %>u, <%= state[:n_shifts] %>u, <%= state[:reduce_index] %>u, <%= state[:n_reduces] %>u), <% end %> ]; private Lexer m_lexer; private ParserValue parse_result; this(string input) { m_lexer = new Lexer(input); } bool parse() { Lexer.Result lexed_token; uint token = _TOKEN_COUNT; StateValue[] statevalues = new StateValue[](1); uint reduced_rule_set = 0xFFFFFFFFu; ParserValue reduced_parser_value; for (;;) { if (token == _TOKEN_COUNT) { lexed_token = m_lexer.lex_token(); token = lexed_token.token; } uint shift_state = 0xFFFFFFFFu; if (reduced_rule_set != 0xFFFFFFFFu) { shift_state = check_shift(statevalues[$-1].state, reduced_rule_set); } if (shift_state == 0xFFFFFFFFu) { shift_state = check_shift(statevalues[$-1].state, token); if ((shift_state != 0xFFFFFFFFu) && (token == TOKEN___EOF)) { /* Successful parse. */ parse_result = statevalues[$-1].pvalue; return true; } } if (shift_state != 0xFFFFFFFFu) { /* We have something to shift. */ statevalues ~= StateValue(shift_state); if (reduced_rule_set == 0xFFFFFFFFu) { /* We shifted a token, mark it consumed. */ token = _TOKEN_COUNT; statevalues[$-1].pvalue = lexed_token.pvalue; } else { /* We shifted a RuleSet. */ statevalues[$-1].pvalue = reduced_parser_value; ParserValue new_parse_result; reduced_parser_value = new_parse_result; reduced_rule_set = 0xFFFFFFFFu; } continue; } uint reduce_index = check_reduce(statevalues[$-1].state, token); if (reduce_index != 0xFFFFFFFFu) { /* We have something to reduce. */ reduced_parser_value = user_code(reduces[reduce_index].rule, statevalues, reduces[reduce_index].n_states); reduced_rule_set = reduces[reduce_index].rule_set; statevalues.length -= reduces[reduce_index].n_states; continue; } /* Error, unexpected token. */ write("Unexpected token "); if (token < _TOKEN_COUNT) { writeln(token_names[token]); } else { writeln("{other}"); } return false; } } @property <%= start_rule_type[1] %> result() { return parse_result.v_<%= start_rule_type[0] %>; } private uint check_shift(uint state, uint symbol) { uint start = states[state].shift_table_index; uint end = start + states[state].n_shift_entries; for (uint i = start; i < end; i++) { if (shifts[i].symbol == symbol) { // if (symbol < _TOKEN_COUNT) // { // writeln("Shifting ", token_names[symbol]); // } // else // { // writeln("Shifting rule set ", symbol); // } return shifts[i].state; } } return 0xFFFFFFFFu; } private uint check_reduce(uint state, uint token) { uint start = states[state].reduce_table_index; uint end = start + states[state].n_reduce_entries; for (uint i = start; i < end; i++) { if ((reduces[i].token == token) || (reduces[i].token == _TOKEN_COUNT)) { // write("Reducing rule ", reduces[i].rule, ", rule set ", reduces[i].rule_set, " lookahead "); // if (token < _TOKEN_COUNT) // { // writeln(token_names[token]); // } // else // { // writeln("{other}"); // } return i; } } return 0xFFFFFFFFu; } /** * Execute user code associated with a parser rule. * * @param rule The ID of the rule. * * @return Parse value. */ private ParserValue user_code(uint rule, StateValue[] statevalues, uint n_states) { ParserValue _pvalue; switch (rule) { <% @grammar.rules.each do |rule| %> <% if rule.code %> case <%= rule.id %>u: { <%= expand_code(rule.code, true, rule, nil) %> } break; <% end %> <% end %> default: break; } return _pvalue; } } }