Do not consume lookahead token when reducing

Josh Holtrop 2022-06-25 21:35:54 -04:00
parent f2cc5b112e
commit 2fbe13e071
3 changed files with 41 additions and 3 deletions
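
Before this change, the generated parse loop cleared the lookahead token unconditionally after every shift, so shifting a freshly reduced nonterminal also discarded the lookahead before it had ever been matched. With this change, the lookahead is consumed only when the lookahead itself is the symbol being shifted; after shifting a reduced nonterminal, the token is kept for the next iteration. The following is a minimal, self-contained D sketch of that bookkeeping; the names afterShift, NONE, and the file name sketch.d are illustrative and are not part of the generated parser.

    import std.stdio;

    enum uint NONE = 0xFFFFFFFFu; // sentinel, analogous to _TOKEN_NONE / "no rule set"

    // Hypothetical helper mirroring the post-shift bookkeeping in the diff below.
    // Exactly one of the two state variables is cleared per shift.
    void afterShift(ref uint token, ref uint reducedRuleSet)
    {
        if (reducedRuleSet == NONE)
        {
            // The lookahead token itself was shifted: consume it so the
            // next loop iteration lexes a fresh one.
            token = NONE;
        }
        else
        {
            // A reduced nonterminal was shifted: keep the lookahead token
            // so it can still drive the next shift/reduce decision.
            reducedRuleSet = NONE;
        }
    }

    unittest
    {
        uint token = 7u;           // some lookahead token id
        uint reducedRuleSet = 3u;  // a reduce just produced rule set 3
        afterShift(token, reducedRuleSet);
        assert(token == 7u && reducedRuleSet == NONE); // lookahead preserved

        afterShift(token, reducedRuleSet);             // now the token itself shifts
        assert(token == NONE);                         // lookahead consumed
    }

    void main() {} // compile and run the sketch with: dmd -unittest -run sketch.d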

@@ -323,7 +323,6 @@ class <%= @classname %>
             if (reduced_rule_set != 0xFFFFFFFFu)
             {
                 shift_state = check_shift(states[$-1], reduced_rule_set);
-                reduced_rule_set = 0xFFFFFFFFu;
             }
             if (shift_state == 0xFFFFFFFFu)
             {
@@ -337,7 +336,14 @@ class <%= @classname %>
                 return true;
             }
             states ~= shift_state;
-            token = _TOKEN_NONE;
+            if (reduced_rule_set == 0xFFFFFFFFu)
+            {
+                token = _TOKEN_NONE;
+            }
+            else
+            {
+                reduced_rule_set = 0xFFFFFFFFu;
+            }
             continue;
         }
@@ -398,7 +404,19 @@ class <%= @classname %>
             if ((reduces[i].token == token) ||
                 (reduces[i].token == _TOKEN_NONE))
             {
-                writeln("Reducing rule ", reduces[i].rule, ", rule set ", reduces[i].rule_set);
+                write("Reducing rule ", reduces[i].rule, ", rule set ", reduces[i].rule_set, " lookahead ");
+                if (token < _TOKEN_COUNT)
+                {
+                    writeln(token_names[token]);
+                }
+                else if (token == _TOKEN_EOF)
+                {
+                    writeln("{EOF}");
+                }
+                else
+                {
+                    writeln("{other}");
+                }
                 return i;
             }
         }

@@ -68,6 +68,8 @@ R1 -> a b;
 R2 -> a b;
 EOF
     build_parser
+    compile("spec/test_d_lexer3.d")
+    run
   end

   it "handles reducing a rule that could be arrived at from multiple states" do

spec/test_d_lexer3.d (new file, 18 additions)

@@ -0,0 +1,18 @@
+import testparser;
+import std.stdio;
+
+int main()
+{
+    return 0;
+}
+
+unittest
+{
+    string input = "aba";
+    auto parser = new Testparser.Parser(cast(const(ubyte) *)input.ptr, input.length);
+    assert(parser.parse() == true);
+
+    input = "abb";
+    parser = new Testparser.Parser(cast(const(ubyte) *)input.ptr, input.length);
+    assert(parser.parse() == true);
+}