Compare commits

73 Commits

| SHA1 |
|---|
| c24f323ff0 |
| fec2c28693 |
| 61339aeae9 |
| 95b3dc6550 |
| 74d94fef72 |
| 588c5e21c7 |
| 5f1c306273 |
| 343e8a7f9e |
| b3a134bf8d |
| 4a71dc74fb |
| a7348be95d |
| 9746b3f2bf |
| c5b8fc28bd |
| 092fce61eb |
| e647248e34 |
| f4ae1b8601 |
| eae2e17f41 |
| 87d6d29d60 |
| 3aced70356 |
| 2dd89445fc |
| 4ae5ab79b3 |
| 69cc8fa67d |
| 7f3eb8f315 |
| d76e12fea1 |
| 911e9505b7 |
| aaeb0c4db1 |
| fd89c5c6b3 |
| 1468946735 |
| 2bccf3303e |
| 0d1ee74ca6 |
| 985b180f62 |
| f3e4941ad8 |
| 494afb7307 |
| 508dabe760 |
| 153f9d28f8 |
| d0f542cbd7 |
| 786c78b635 |
| f0bd8d8663 |
| c7a18ef821 |
| cb06a56f81 |
| 2b28ef622d |
| 19c32b58dc |
| 3a8dcac55f |
| 632ab2fe6f |
| 3eaf0d3d49 |
| 918dc7b2bb |
| 5b2cbe53e6 |
| 1d1590dfda |
| 1c91dcd298 |
| 5dfd62b756 |
| fad7f4fb36 |
| d55c5e0080 |
| 6c847c05b1 |
| a5800575c8 |
| 24af3590d1 |
| 92c76b74c8 |
| a032ac027c |
| af5edaa762 |
| 81f15245f2 |
| d8aa72d516 |
| aabc8a5af5 |
| 705e5d8ba9 |
| f152cd9da1 |
| 9a9315f7f9 |
| 197f126109 |
| db6dc0e099 |
| 547dbd3850 |
| aff0102536 |
| 1328a718ac |
| 562c24ce9e |
| c824ae9e5c |
| 140b2d8350 |
| 3c8794058f |
CHANGELOG.md | 59
@@ -1,3 +1,62 @@
## v1.5.1

### Improvements

- Improve performance (#28)

## v1.5.0

### New Features

- Track start and end text positions for tokens and rules in AST node structures (#27)
- Add warnings for shift/reduce conflicts to log file (#25)
- Add -w command line switch to treat warnings as errors and output to stderr (#26)
- Add rule field aliases (#24)

### Improvements

- Show line numbers of rules on conflict (#23)

## v1.4.0

### New Features

- Allow user to specify AST node name prefix or suffix
- Allow specifying the start rule name
- Allow rule terms to be marked as optional

### Improvements

- Give a better error message when a referenced ptype has not been declared

## v1.3.0

### New Features

- Add AST generation (#22)

## v1.2.0

### New Features

- Allow one line user code blocks (#21)
- Add backslash escape codes (#19)
- Add API to access unexpected token found (#18)
- Add token_names API (#17)
- Add D example to user guide for p_context_init() (#16)
- Allow user termination from lexer code blocks (#15)

### Fixes

- Fix generator hang when state transition cycle is present (#20)

## v1.1.0

### New Features

- Add user parser terminations (#13)
- Document generated parser API in user guide (#14)

## v1.0.0

- Initial release
Gemfile | 1

@@ -5,3 +5,4 @@ gem "rspec"
gem "rdoc"
gem "redcarpet"
gem "syntax"
gem "simplecov"
Gemfile.lock

@@ -2,6 +2,7 @@ GEM
  remote: https://rubygems.org/
  specs:
    diff-lcs (1.5.0)
    docile (1.4.0)
    psych (5.1.0)
      stringio
    rake (13.0.6)
@@ -21,6 +22,12 @@ GEM
      diff-lcs (>= 1.2.0, < 2.0)
      rspec-support (~> 3.12.0)
    rspec-support (3.12.1)
    simplecov (0.22.0)
      docile (~> 1.1)
      simplecov-html (~> 0.11)
      simplecov_json_formatter (~> 0.1)
    simplecov-html (0.12.3)
    simplecov_json_formatter (0.1.4)
    stringio (3.0.7)
    syntax (1.2.2)
@@ -32,6 +39,7 @@ DEPENDENCIES
  rdoc
  redcarpet
  rspec
  simplecov
  syntax

BUNDLED WITH
LICENSE

@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2010-2023 Josh Holtrop
Copyright (c) 2010-2024 Josh Holtrop

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
README.md | 91

@@ -1,21 +1,104 @@
# The Propane Parser Generator

Propane is an LR Parser Generator (LPG) which:
Propane is a LALR Parser Generator (LPG) which:

* accepts LR(0), SLR, and LALR grammars
* generates a built-in lexer to tokenize input
* supports UTF-8 lexer inputs
* generates a table-driven parser to parse input in linear time
* generates a table-driven shift/reduce parser to parse input in linear time
* targets C or D language outputs
* optionally supports automatic full AST generation
* is MIT-licensed
* is distributable as a standalone Ruby script

## Installation

TODO
Propane is designed to be distributed as a stand-alone single file script that
can be copied into and versioned in a project's source tree.
The only requirement to run Propane is that the system has a Ruby interpreter
installed.
The latest release can be downloaded from [https://github.com/holtrop/propane/releases](https://github.com/holtrop/propane/releases).

Simply copy the `propane` executable script into the desired location within
the project to be built (typically the root of the repository) and mark it
executable.

## Usage

TODO: Write usage instructions here
### Command Line Interface

Propane is typically invoked from the command-line as `./propane`.

    Usage: ./propane [options] <input-file> <output-file>
    Options:
      -h, --help  Show this usage and exit.
      --log LOG   Write log file. This will show all parser states and their
                  associated shifts and reduces. It can be helpful when
                  debugging a grammar.
      --version   Show program version and exit.
      -w          Treat warnings as errors. This option will treat shift/reduce
                  conflicts as fatal errors and will print them to stderr in
                  addition to the log file.

The user must specify the path to a Propane input grammar file and a path to an
output file.
The generated source code will be written to the output file.
If a log file path is specified, Propane will write a log file containing
detailed information about the parser states and transitions.
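
For example, an invocation that generates a D parser and writes a state log might look like the following (file names here are illustrative):

    ./propane --log parser.log grammar.propane parser.d

The generator infers the target language from the output file extension and defaults to D, as `lib/propane/generator.rb` later in this diff shows.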

### Propane Grammar File

A Propane grammar file provides Propane with the patterns, tokens, grammar
rules, and user code blocks from which to build the generated lexer and parser.

Example grammar file:

```
<<
import std.math;
>>

# Parser values are unsigned integers.
ptype ulong;

# A few basic arithmetic operators.
token plus /\+/;
token times /\*/;
token power /\*\*/;
token integer /\d+/ <<
  ulong v;
  foreach (c; match)
  {
    v *= 10;
    v += (c - '0');
  }
  $$ = v;
>>
token lparen /\(/;
token rparen /\)/;
# Drop whitespace.
drop /\s+/;

Start -> E1 << $$ = $1; >>
E1 -> E2 << $$ = $1; >>
E1 -> E1 plus E2 << $$ = $1 + $3; >>
E2 -> E3 << $$ = $1; >>
E2 -> E2 times E3 << $$ = $1 * $3; >>
E3 -> E4 << $$ = $1; >>
E3 -> E3 power E4 <<
  $$ = pow($1, $3);
>>
E4 -> integer << $$ = $1; >>
E4 -> lparen E1 rparen << $$ = $2; >>
```

Grammar files can contain comment lines beginning with `#` which are ignored.
White space in the grammar file is also ignored.

It is convention to use the extension `.propane` for the Propane grammar file,
however any file name is accepted by Propane.
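
As a rough sketch of how a parser generated from this grammar might be driven from D code: the generated API uses the default `p_` prefix shown in `assets/parser.d.erb` below, but the exact D-side signature of `p_context_init` is not part of this diff and is assumed here, and the `parser` module name is hypothetical.

```
import std.stdio;
import parser; // hypothetical module name for the generated parser.d

void main()
{
    string input = "(1 + 2) * 3 ** 2";
    p_context_t context;
    // Assumed to mirror the C prototype:
    // void p_context_init(p_context_t *, uint8_t const *, size_t);
    p_context_init(&context, input);
    if (p_parse(&context) == P_SUCCESS)
    {
        // With `ptype ulong;`, p_result() returns the start rule's ulong value.
        writeln(p_result(&context));
    }
    else
    {
        auto pos = p_position(&context); // 0-based row/col
        writefln("parse error at row %s, col %s", pos.row, pos.col);
    }
}
```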

See [https://holtrop.github.io/propane/index.html](https://holtrop.github.io/propane/index.html) for the full User Guide.

## Development
Rakefile | 14

@@ -1,5 +1,8 @@
require "rake/clean"
require "rspec/core/rake_task"

CLEAN.include %w[spec/run gen .yardoc yard coverage dist]

task :build_dist do
  sh "ruby rb/build_dist.rb"
end
@@ -10,9 +13,20 @@ RSpec::Core::RakeTask.new(:spec, :example_pattern) do |task, args|
  end
end

# dspec task is useful to test the distributable release script, but is not
# useful for coverage information.
desc "Dist Specs"
task :dspec, [:example_string] => :build_dist do |task, args|
  ENV["dist_specs"] = "1"
  Rake::Task["spec"].execute(args)
  ENV.delete("dist_specs")
end

task :default => :spec

desc "Build user guide"
task :user_guide do
  system("ruby", "-Ilib", "rb/gen_user_guide.rb")
end

task :all => [:spec, :dspec, :user_guide]
assets/parser.c.erb | 1138 (new file)

File diff suppressed because it is too large.
assets/parser.d.erb

@@ -8,13 +8,13 @@
module <%= @grammar.modulename %>;
<% end %>

import core.stdc.stdlib : malloc;

/**************************************************************************
 * User code blocks
 *************************************************************************/

<% @grammar.code_blocks.each do |code| %>
<%= code %>
<% end %>
<%= @grammar.code_blocks.fetch("", "") %>

/**************************************************************************
 * Public types
@@ -29,10 +29,11 @@ public enum : size_t
<%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN,
<%= @grammar.prefix.upcase %>DROP,
<%= @grammar.prefix.upcase %>EOF,
<%= @grammar.prefix.upcase %>USER_TERMINATED,
}

/** Token type. */
public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.invalid_token_id) %>;
public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.terminate_token_id) %>;

/** Token IDs. */
public enum : <%= @grammar.prefix %>token_t
@@ -44,21 +45,14 @@ public enum : <%= @grammar.prefix %>token_t
<% end %>
<% end %>
INVALID_TOKEN_ID = <%= @grammar.invalid_token_id %>,
TERMINATE_TOKEN_ID = <%= @grammar.terminate_token_id %>,
}

/** Code point type. */
public alias <%= @grammar.prefix %>code_point_t = uint;

/** Parser values type(s). */
public union <%= @grammar.prefix %>value_t
{
<% @grammar.ptypes.each do |name, typestring| %>
<%= typestring %> v_<%= name %>;
<% end %>
}

/**
 * A structure to keep track of parser position.
 * A structure to keep track of input position.
 *
 * This is useful for reporting errors, etc...
 */
@@ -69,14 +63,79 @@ public struct <%= @grammar.prefix %>position_t

/** Input text column (0-based). */
uint col;

/** Invalid position value. */
enum INVALID = <%= @grammar.prefix %>position_t(0xFFFF_FFFF, 0xFFFF_FFFF);

/** Return whether the position is valid. */
public @property bool valid()
{
return row != 0xFFFF_FFFFu;
}
}

<% if @grammar.ast %>
/** Parser values type. */
public alias <%= @grammar.prefix %>value_t = <%= @grammar.ptype %>;
<% else %>
/** Parser values type(s). */
public union <%= @grammar.prefix %>value_t
{
<% @grammar.ptypes.each do |name, typestring| %>
<%= typestring %> v_<%= name %>;
<% end %>
}
<% end %>

<% if @grammar.ast %>
/** Common AST node structure. */
private struct ASTNode
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
void *[0] fields;
}

/** AST node types. @{ */
public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
{
/* ASTNode fields must be present in the same order here. */
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<%= @grammar.prefix %>token_t token;
<%= @grammar.prefix %>value_t pvalue;
}

<% @parser.rule_sets.each do |name, rule_set| %>
<% next if name.start_with?("$") %>
<% next if rule_set.optional? %>
public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<% rule_set.ast_fields.each do |fields| %>
union
{
<% fields.each do |field_name, type| %>
<%= type %> * <%= field_name %>;
<% end %>
}
<% end %>
}

<% end %>
/** @} */
<% end %>

/** Lexed token information. */
public struct <%= @grammar.prefix %>token_info_t
{
/** Text position where the token was found. */
/** Text position of first code point in token. */
<%= @grammar.prefix %>position_t position;

/** Text position of last code point in token. */
<%= @grammar.prefix %>position_t end_position;

/** Number of input bytes used by the token. */
size_t length;

@@ -112,10 +171,17 @@ public struct <%= @grammar.prefix %>context_t
/* Parser context data. */

/** Parse result value. */
<% if @grammar.ast %>
<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
<% else %>
<%= @grammar.prefix %>value_t parse_result;
<% end %>

/** Unexpected token received. */
<%= @grammar.prefix %>token_t token;

/** User terminate code. */
size_t user_terminate_code;
}

/**************************************************************************
@@ -143,6 +209,7 @@ private enum : size_t
P_UNEXPECTED_TOKEN,
P_DROP,
P_EOF,
P_USER_TERMINATED,
}
<% end %>

@@ -332,7 +399,10 @@ private struct lexer_match_info_t
/** Number of bytes of input text used to match. */
size_t length;

/** Input text position delta. */
/** Input text position delta to end of token. */
<%= @grammar.prefix %>position_t end_delta_position;

/** Input text position delta to next code point after token end. */
<%= @grammar.prefix %>position_t delta_position;

/** Accepting lexer state from the match. */
@@ -424,9 +494,12 @@ private lexer_state_id_t check_lexer_transition(uint current_state, uint code_po
 *
 * @param context
 * Lexer/parser context structure.
 * @param[out] out_token_info
 * The lexed token information is stored here if the return value is
 * P_SUCCESS.
 * @param[out] out_match_info
 * The longest match information is stored here if the return value is
 * P_SUCCESS or P_DECODE_ERROR.
 * @param[out] out_unexpected_input_length
 * The unexpected input length is stored here if the return value is
 * P_UNEXPECTED_INPUT.
 *
 * @reval P_SUCCESS
 * A token was successfully lexed.
@@ -457,6 +530,7 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
if (transition_state != INVALID_LEXER_STATE_ID)
{
attempt_match.length += code_point_length;
attempt_match.end_delta_position = attempt_match.delta_position;
if (code_point == '\n')
{
attempt_match.delta_position.row++;
@@ -504,7 +578,6 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
/* Valid EOF return. */
return P_EOF;
}
break;

case P_DECODE_ERROR:
/* If we see a decode error, we may be partially in the middle of
@@ -536,13 +609,14 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
 * Input text does not match any lexer pattern.
 * @retval P_DROP
 * A drop pattern was matched so the lexer should continue.
 * @retval P_USER_TERMINATED
 * User code has requested to terminate the lexer.
 */
private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
{
<%= @grammar.prefix %>token_info_t token_info;
token_info.position = context.text_position;
token_info.token = INVALID_TOKEN_ID;
*out_token_info = token_info; // TODO: remove
lexer_match_info_t match_info;
size_t unexpected_input_length;
size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@@ -555,6 +629,12 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
string match = context.input[context.input_index..(context.input_index + match_info.length)];
<%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
match_info.accepting_state.code_id, match, &token_info);
/* A TERMINATE_TOKEN_ID return code from lexer_user_code() means
 * that the user code is requesting to terminate the lexer. */
if (user_code_token == TERMINATE_TOKEN_ID)
{
return P_USER_TERMINATED;
}
/* An invalid token returned from lexer_user_code() means that the
 * user code did not explicitly return a token. So only override
 * the token to return if the user code does explicitly return a
@@ -583,11 +663,22 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
}
token_info.token = token_to_accept;
token_info.length = match_info.length;
if (match_info.end_delta_position.row != 0u)
{
token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
token_info.end_position.col = match_info.end_delta_position.col;
}
else
{
token_info.end_position.row = token_info.position.row;
token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
}
*out_token_info = token_info;
return P_SUCCESS;

case P_EOF:
token_info.token = TOKEN___EOF;
token_info.end_position = token_info.position;
*out_token_info = token_info;
return P_SUCCESS;

@@ -625,6 +716,8 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
 * The decoder encountered invalid text encoding.
 * @reval P_UNEXPECTED_INPUT
 * Input text does not match any lexer pattern.
 * @retval P_USER_TERMINATED
 * User code has requested to terminate the lexer.
 */
public size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
{
@@ -701,6 +794,25 @@ private struct reduce_t
 * reduce action.
 */
parser_state_id_t n_states;
<% if @grammar.ast %>

/**
 * Map of rule components to rule set child fields.
 */
immutable(ushort) * rule_set_node_field_index_map;

/**
 * Number of rule set AST node fields.
 */
ushort rule_set_node_field_array_size;

/**
 * Whether this rule was a generated optional rule that matched the
 * optional target. In this case, propagate the matched target node up
 * instead of making a new node for this rule.
 */
bool propagate_optional_target;
<% end %>
}

/** Parser state entry. */
@@ -732,6 +844,11 @@ private struct state_value_t
/** Parser value from this state. */
<%= @grammar.prefix %>value_t pvalue;

<% if @grammar.ast %>
/** AST node. */
void * ast_node;
<% end %>

this(size_t state_id)
{
this.state_id = state_id;
@@ -741,14 +858,32 @@ private struct state_value_t
/** Parser shift table. */
private immutable shift_t[] parser_shift_table = [
<% @parser.shift_table.each do |shift| %>
shift_t(<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u),
shift_t(<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u),
<% end %>
];

<% if @grammar.ast %>
<% @grammar.rules.each do |rule| %>
<% unless rule.flat_rule_set_node_field_index_map? %>
immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map = [<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>];
<% end %>
<% end %>
<% end %>

/** Parser reduce table. */
private immutable reduce_t[] parser_reduce_table = [
<% @parser.reduce_table.each do |reduce| %>
reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u),
reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
<% if @grammar.ast %>
<% if reduce[:rule].flat_rule_set_node_field_index_map? %>
, null
<% else %>
, &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
<% end %>
, <%= reduce[:rule].rule_set.ast_fields.size %>
, <%= reduce[:propagate_optional_target] %>
<% end %>
),
<% end %>
];

@@ -759,17 +894,19 @@ private immutable parser_state_t[] parser_state_table = [
<% end %>
];

<% unless @grammar.ast %>
/**
 * Execute user code associated with a parser rule.
 *
 * @param rule The ID of the rule.
 *
 * @return Parse value.
 * @retval P_SUCCESS
 * Continue parsing.
 * @retval P_USER_TERMINATED
 * User requested to terminate parsing.
 */
private <%= @grammar.prefix %>value_t parser_user_code(uint rule, state_value_t[] statevalues, uint n_states)
private size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint rule, state_value_t[] statevalues, uint n_states, <%= @grammar.prefix %>context_t * context)
{
<%= @grammar.prefix %>value_t _pvalue;

switch (rule)
{
<% @grammar.rules.each do |rule| %>
@@ -782,8 +919,9 @@ private <%= @grammar.prefix %>value_t parser_user_code(uint rule, state_value_t[
default: break;
}

return _pvalue;
return P_SUCCESS;
}
<% end %>

/**
 * Check if the parser should shift to a new state.
@@ -845,7 +983,7 @@ private size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token
 * can be accessed with <%= @grammar.prefix %>result().
 * @retval P_UNEXPECTED_TOKEN
 * An unexpected token was encountered that does not match any grammar rule.
 * The value context.token holds the unexpected token.
 * The function p_token(&context) can be used to get the unexpected token.
 * @reval P_DECODE_ERROR
 * The decoder encountered invalid text encoding.
 * @reval P_UNEXPECTED_INPUT
@@ -857,7 +995,11 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
<%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
state_value_t[] statevalues = new state_value_t[](1);
size_t reduced_rule_set = INVALID_ID;
<% if @grammar.ast %>
void * reduced_parser_node;
<% else %>
<%= @grammar.prefix %>value_t reduced_parser_value;
<% end %>
for (;;)
{
if (token == INVALID_TOKEN_ID)
@@ -880,7 +1022,11 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
{
/* Successful parse. */
<% if @grammar.ast %>
context.parse_result = cast(<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)statevalues[$-1].ast_node;
<% else %>
context.parse_result = statevalues[$-1].pvalue;
<% end %>
return P_SUCCESS;
}
}
@@ -891,15 +1037,24 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
if (reduced_rule_set == INVALID_ID)
{
/* We shifted a token, mark it consumed. */
token = INVALID_TOKEN_ID;
<% if @grammar.ast %>
<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token_info.position, token_info.end_position, token, token_info.pvalue);
statevalues[$-1].ast_node = token_ast_node;
<% else %>
statevalues[$-1].pvalue = token_info.pvalue;
<% end %>
token = INVALID_TOKEN_ID;
}
else
{
/* We shifted a RuleSet. */
<% if @grammar.ast %>
statevalues[$-1].ast_node = reduced_parser_node;
<% else %>
statevalues[$-1].pvalue = reduced_parser_value;
<%= @grammar.prefix %>value_t new_parse_result;
reduced_parser_value = new_parse_result;
<% end %>
reduced_rule_set = INVALID_ID;
}
continue;
@@ -909,7 +1064,63 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
if (reduce_index != INVALID_ID)
{
/* We have something to reduce. */
reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states);
<% if @grammar.ast %>
if (parser_reduce_table[reduce_index].propagate_optional_target)
{
reduced_parser_node = statevalues[$ - 1].ast_node;
}
else if (parser_reduce_table[reduce_index].n_states > 0)
{
size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
ASTNode * node = cast(ASTNode *)malloc(ASTNode.sizeof + n_fields * (void *).sizeof);
node.position = <%= @grammar.prefix %>position_t.INVALID;
node.end_position = <%= @grammar.prefix %>position_t.INVALID;
foreach (i; 0..n_fields)
{
node.fields[i] = null;
}
if (parser_reduce_table[reduce_index].rule_set_node_field_index_map is null)
{
foreach (i; 0..parser_reduce_table[reduce_index].n_states)
{
node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
}
}
else
{
foreach (i; 0..parser_reduce_table[reduce_index].n_states)
{
node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
}
}
bool position_found = false;
foreach (i; 0..n_fields)
{
ASTNode * child = cast(ASTNode *)node.fields[i];
if (child && child.position.valid)
{
if (!position_found)
{
node.position = child.position;
position_found = true;
}
node.end_position = child.end_position;
}
}
reduced_parser_node = node;
}
else
{
reduced_parser_node = null;
}
<% else %>
<%= @grammar.prefix %>value_t reduced_parser_value2;
if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
{
return P_USER_TERMINATED;
}
reduced_parser_value = reduced_parser_value2;
<% end %>
reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
statevalues.length -= parser_reduce_table[reduce_index].n_states;
continue;
@@ -934,9 +1145,17 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
 *
 * @return Parse result value.
 */
<% if @grammar.ast %>
public <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
<% else %>
public <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
<% end %>
{
<% if @grammar.ast %>
return context.parse_result;
<% else %>
return context.parse_result.v_<%= start_rule_type[0] %>;
<% end %>
}

/**
@@ -951,3 +1170,26 @@ public <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @gram
{
return context.text_position;
}

/**
 * Get the user terminate code.
 *
 * @param context
 * Lexer/parser context structure.
 *
 * @return User terminate code.
 */
public size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context)
{
return context.user_terminate_code;
}

/**
 * Get the parse token.
 *
 * @return Parse token.
 */
public <%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context)
{
return context.token;
}
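
The AST reduce path above performs a single `malloc` per node: an `ASTNode` header followed by `n_fields` child pointers addressed through the zero-length `fields` array. A self-contained sketch of that layout pattern (not generated output; it uses `.ptr` to sidestep D's bounds checking on the length-0 array):

```
import core.stdc.stdlib : malloc, free;
import std.stdio;

struct Position { uint row; uint col; }

// Header plus a trailing, allocation-sized array of child pointers,
// mirroring the ASTNode pattern in parser.d.erb.
struct Node
{
    Position position;
    Position end_position;
    void*[0] fields;
}

Node * make_node(size_t n_fields)
{
    // One allocation holds both the header and the child slots.
    auto node = cast(Node *) malloc(Node.sizeof + n_fields * (void *).sizeof);
    node.position = Position(0, 0);
    node.end_position = Position(0, 0);
    foreach (i; 0 .. n_fields)
        node.fields.ptr[i] = null; // .ptr avoids the length-0 bounds check
    return node;
}

void main()
{
    auto node = make_node(3);
    writeln("child 0 initialized to null: ", node.fields.ptr[0] is null);
    free(node);
}
```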
assets/parser.h.erb | 198 (new file)
@@ -0,0 +1,198 @@
/**
 * @file
 *
 * This file is generated by Propane.
 */

#pragma once

#include <stdint.h>
#include <stddef.h>

/**************************************************************************
 * Public types
 *************************************************************************/

/* Result codes. */
#define <%= @grammar.prefix.upcase %>SUCCESS 0u
#define <%= @grammar.prefix.upcase %>DECODE_ERROR 1u
#define <%= @grammar.prefix.upcase %>UNEXPECTED_INPUT 2u
#define <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN 3u
#define <%= @grammar.prefix.upcase %>DROP 4u
#define <%= @grammar.prefix.upcase %>EOF 5u
#define <%= @grammar.prefix.upcase %>USER_TERMINATED 6u

/** Token type. */
typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>token_t;

/** Token IDs. */
<% @grammar.tokens.each_with_index do |token, index| %>
#define TOKEN_<%= token.code_name %> <%= index %>u
<% unless token.id == index %>
<% raise "Token ID (#{token.id}) does not match index (#{index}) for token #{token.name}!" %>
<% end %>
<% end %>
#define INVALID_TOKEN_ID <%= @grammar.invalid_token_id %>u
#define TERMINATE_TOKEN_ID <%= @grammar.terminate_token_id %>u

/** Code point type. */
typedef uint32_t <%= @grammar.prefix %>code_point_t;

/**
 * A structure to keep track of input position.
 *
 * This is useful for reporting errors, etc...
 */
typedef struct
{
/** Input text row (0-based). */
uint32_t row;

/** Input text column (0-based). */
uint32_t col;
} <%= @grammar.prefix %>position_t;

/** Return whether the position is valid. */
#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0xFFFFFFFFu)

/** User header code blocks. */
<%= @grammar.code_blocks.fetch("header", "") %>

<% if @grammar.ast %>
/** Parser values type. */
typedef <%= @grammar.ptype %> <%= @grammar.prefix %>value_t;
<% else %>
/** Parser values type(s). */
typedef union
{
<% @grammar.ptypes.each do |name, typestring| %>
<%= typestring %> v_<%= name %>;
<% end %>
} <%= @grammar.prefix %>value_t;
<% end %>

<% if @grammar.ast %>
/** AST node types. @{ */
typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
{
/* ASTNode fields must be present in the same order here. */
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<%= @grammar.prefix %>token_t token;
<%= @grammar.prefix %>value_t pvalue;
} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;

<% @parser.rule_sets.each do |name, rule_set| %>
<% next if name.start_with?("$") %>
<% next if rule_set.optional? %>
struct <%= name %>;
<% end %>

<% @parser.rule_sets.each do |name, rule_set| %>
<% next if name.start_with?("$") %>
<% next if rule_set.optional? %>
typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<% rule_set.ast_fields.each do |fields| %>
union
{
<% fields.each do |field_name, type| %>
struct <%= type %> * <%= field_name %>;
<% end %>
};
<% end %>
} <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>;

<% end %>
/** @} */
<% end %>

/** Lexed token information. */
typedef struct
{
/** Text position of first code point in token. */
<%= @grammar.prefix %>position_t position;

/** Text position of last code point in token. */
<%= @grammar.prefix %>position_t end_position;

/** Number of input bytes used by the token. */
size_t length;

/** Token that was lexed. */
<%= @grammar.prefix %>token_t token;

/** Parser value associated with the token. */
<%= @grammar.prefix %>value_t pvalue;
} <%= @grammar.prefix %>token_info_t;

/**
 * Lexer and parser context.
 *
 * The user must allocate an instance of this structure and pass it to any
 * public API function.
 */
typedef struct
{
/* Lexer context data. */

/** Input text. */
uint8_t const * input;

/** Input text length. */
size_t input_length;

/** Input text index (byte offset). */
size_t input_index;

/** Input text position (row/column). */
<%= @grammar.prefix %>position_t text_position;

/** Current lexer mode. */
size_t mode;

/* Parser context data. */

/** Parse result value. */
<% if @grammar.ast %>
<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
<% else %>
<%= @grammar.prefix %>value_t parse_result;
<% end %>

/** Unexpected token received. */
<%= @grammar.prefix %>token_t token;

/** User terminate code. */
size_t user_terminate_code;
} <%= @grammar.prefix %>context_t;

/**************************************************************************
 * Public data
 *************************************************************************/

/** Token names. */
extern const char * <%= @grammar.prefix %>token_names[];

void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length);

size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t input_length,
<%= @grammar.prefix %>code_point_t * out_code_point, uint8_t * out_code_point_length);

size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info);

size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context);

<% if @grammar.ast %>
<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% else %>
<%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% end %>

<%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context);

size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context);

<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context);
File diff suppressed because it is too large.
extra/vim/ftdetect/propane.vim | 1 (new file)
@@ -0,0 +1 @@
au BufNewFile,BufRead *.propane set filetype=propane
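
For illustration, these Vim support files would typically be copied into a user's runtime path, e.g. ~/.vim/ftdetect/ and ~/.vim/syntax/ (paths assumed; the diff itself does not document installation).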
extra/vim/syntax/propane.vim | 33 (new file)
@@ -0,0 +1,33 @@
" Vim syntax file for Propane
" Language: propane
" Maintainer: Josh Holtrop
" URL: https://github.com/holtrop/propane

if exists("b:current_syntax")
finish
endif

if !exists("b:propane_subtype")
let b:propane_subtype = "d"
endif

exe "syn include @propaneTarget syntax/".b:propane_subtype.".vim"

syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contains=@propaneTarget keepend

syn match propaneComment "#.*"
syn match propaneOperator "->"
syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
syn match propaneFieldOperator ":" contained
syn match propaneOperator "?"
syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid

syn region propaneRegex start="/" end="/" skip="\\/"

hi def link propaneComment Comment
hi def link propaneKeyword Keyword
hi def link propaneRegex String
hi def link propaneOperator Operator
hi def link propaneFieldOperator Operator
hi def link propaneDelimiter Delimiter
hi def link propaneFieldAlias Identifier
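
The `propaneFieldAlias` and `?` matches above correspond to two features introduced in this change set: rule field aliases (#24) and optional rule terms. An illustrative grammar fragment using both, based on the component syntax accepted by lib/propane/grammar.rb below (the `make_assignment` helper is hypothetical):

```
# `:l` and `:r` name the components for user code; `?` makes a term optional.
Assignment -> name:l equals Expression:r? <<
  $$ = make_assignment(${l}, ${r});
>>
```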
lib/propane.rb

@@ -1,6 +1,7 @@
require "erb"
require "set"
require "stringio"
require_relative "propane/assets"
require_relative "propane/cli"
require_relative "propane/code_point_range"
require_relative "propane/fa"
@@ -30,10 +31,10 @@ class Propane

class << self

def run(input_file, output_file, log_file)
def run(input_file, output_file, log_file, options)
begin
grammar = Grammar.new(File.read(input_file))
generator = Generator.new(grammar, output_file, log_file)
generator = Generator.new(grammar, output_file, log_file, options)
generator.generate
rescue Error => e
$stderr.puts e.message
lib/propane/assets.rb | 10 (new file)
@@ -0,0 +1,10 @@
class Propane
  module Assets
    class << self
      def get(name)
        path = File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/#{name}")
        File.binread(path)
      end
    end
  end
end
lib/propane/cli.rb

@@ -4,15 +4,21 @@ class Propane
USAGE = <<EOF
Usage: #{$0} [options] <input-file> <output-file>
Options:
  --log LOG   Write log file
  --version   Show program version and exit
  -h, --help  Show this usage and exit
  -h, --help  Show this usage and exit.
  --log LOG   Write log file. This will show all parser states and their
              associated shifts and reduces. It can be helpful when
              debugging a grammar.
  --version   Show program version and exit.
  -w          Treat warnings as errors. This option will treat shift/reduce
              conflicts as fatal errors and will print them to stderr in
              addition to the log file.
EOF

class << self

def run(args)
params = []
options = {}
log_file = nil
i = 0
while i < args.size
@@ -24,11 +30,13 @@ EOF
log_file = args[i]
end
when "--version"
puts "propane v#{VERSION}"
puts "propane version #{VERSION}"
return 0
when "-h", "--help"
puts USAGE
return 0
when "-w"
options[:warnings_as_errors] = true
when /^-/
$stderr.puts "Error: unknown option #{arg}"
return 1
@@ -45,7 +53,7 @@ EOF
$stderr.puts "Error: cannot read #{params[0]}"
return 2
end
Propane.run(*params, log_file)
Propane.run(*params, log_file, options)
end

end
lib/propane/generator.rb

@@ -2,7 +2,7 @@ class Propane

class Generator

def initialize(grammar, output_file, log_file)
def initialize(grammar, output_file, log_file, options)
@grammar = grammar
@output_file = output_file
if log_file
@@ -10,16 +10,30 @@ class Propane
else
@log = StringIO.new
end
@classname = @grammar.classname || File.basename(output_file).sub(%r{[^a-zA-Z0-9].*}, "").capitalize
@language =
if output_file =~ /\.([a-z]+)$/
$1
else
"d"
end
@options = options
process_grammar!
end

def generate
erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.d.erb")), trim_mode: "<>")
extensions = [@language]
if @language == "c"
extensions += %w[h]
end
extensions.each do |extension|
template = Assets.get("parser.#{extension}.erb")
erb = ERB.new(template, trim_mode: "<>")
output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
result = erb.result(binding.clone)
File.open(@output_file, "wb") do |fh|
File.open(output_file, "wb") do |fh|
fh.write(result)
end
end
@log.close
end

@@ -38,6 +52,7 @@ class Propane
unless found_default
raise Error.new("No patterns found for default mode")
end
check_ptypes!
# Add EOF token.
@grammar.tokens << Token.new("$EOF", nil, nil)
tokens_by_name = {}
@@ -53,11 +68,14 @@ class Propane
tokens_by_name[token.name] = token
end
# Check for user start rule.
unless @grammar.rules.find {|rule| rule.name == "Start"}
raise Error.new("Start rule not found")
unless @grammar.rules.find {|rule| rule.name == @grammar.start_rule}
raise Error.new("Start rule `#{@grammar.start_rule}` not found")
end
# Add "real" start rule.
@grammar.rules.unshift(Rule.new("$Start", ["Start", "$EOF"], nil, nil, nil))
@grammar.rules.unshift(Rule.new("$Start", [@grammar.start_rule, "$EOF"], nil, nil, nil))
# Generate and add rules for optional components.
generate_optional_component_rules!(tokens_by_name)
# Build rule sets.
rule_sets = {}
rule_set_id = @grammar.tokens.size
@grammar.rules.each_with_index do |rule, rule_id|
@@ -106,10 +124,55 @@ class Propane
end
end
determine_possibly_empty_rulesets!(rule_sets)
rule_sets.each do |name, rule_set|
rule_set.finalize(@grammar)
end
# Generate the lexer.
@lexer = Lexer.new(@grammar)
# Generate the parser.
@parser = Parser.new(@grammar, rule_sets, @log)
@parser = Parser.new(@grammar, rule_sets, @log, @options)
end

# Check that any referenced ptypes have been defined.
def check_ptypes!
(@grammar.patterns + @grammar.tokens + @grammar.rules).each do |potor|
if potor.ptypename
unless @grammar.ptypes.include?(potor.ptypename)
raise Error.new("Error: Line #{potor.line_number}: ptype #{potor.ptypename} not declared. Declare with `ptype` statement.")
end
end
end
end

# Generate and add rules for any optional components.
def generate_optional_component_rules!(tokens_by_name)
optional_rules_added = Set.new
@grammar.rules.each do |rule|
rule.components.each do |component|
if component =~ /^(.*)\?$/
c = $1
unless optional_rules_added.include?(component)
# Create two rules for the optional component: one empty and
# one just matching the component.
# We need to find the ptypename for the optional component in
# order to copy it to the generated rules.
if tokens_by_name[c]
# The optional component is a token.
ptypename = tokens_by_name[c].ptypename
else
# The optional component must be a rule, so find any instance
# of that rule that specifies a ptypename.
ptypename = @grammar.rules.reduce(nil) do |result, rule|
rule.name == c && rule.ptypename ? rule.ptypename : result
end
end
@grammar.rules << Rule.new(component, [], nil, ptypename, rule.line_number)
@grammar.rules << Rule.new(component, [c], "$$ = $1;\n", ptypename, rule.line_number)
optional_rules_added << component
end
end
end
end
end

# Determine which grammar rules could expand to empty sequences.
@@ -185,27 +248,79 @@ class Propane
code = code.gsub(/\$token\(([$\w]+)\)/) do |match|
"TOKEN_#{Token.code_name($1)}"
end
code = code.gsub(/\$terminate\((.*)\);/) do |match|
user_terminate_code = $1
retval = rule ? "P_USER_TERMINATED" : "TERMINATE_TOKEN_ID"
case @language
when "c"
"context->user_terminate_code = (#{user_terminate_code}); return #{retval};"
when "d"
"context.user_terminate_code = (#{user_terminate_code}); return #{retval};"
end
end
if parser
code = code.gsub(/\$\$/) do |match|
case @language
when "c"
"_pvalue->v_#{rule.ptypename}"
when "d"
"_pvalue.v_#{rule.ptypename}"
end
end
code = code.gsub(/\$(\d+)/) do |match|
index = $1.to_i
case @language
when "c"
"state_values_stack_index(statevalues, -1 - (int)n_states + #{index})->pvalue.v_#{rule.components[index - 1].ptypename}"
when "d"
"statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
end
end
code = code.gsub(/\$\{(\w+)\}/) do |match|
aliasname = $1
if index = rule.aliases[aliasname]
case @language
when "c"
"state_values_stack_index(statevalues, -(int)n_states + #{index})->pvalue.v_#{rule.components[index].ptypename}"
when "d"
"statevalues[$-n_states+#{index}].pvalue.v_#{rule.components[index].ptypename}"
end
else
raise Error.new("Field alias '#{aliasname}' not found")
end
end
else
code = code.gsub(/\$\$/) do |match|
if @grammar.ast
case @language
when "c"
"out_token_info->pvalue"
when "d"
"out_token_info.pvalue"
end
else
case @language
when "c"
"out_token_info->pvalue.v_#{pattern.ptypename}"
when "d"
"out_token_info.pvalue.v_#{pattern.ptypename}"
end
end
end
code = code.gsub(/\$mode\(([a-zA-Z_][a-zA-Z_0-9]*)\)/) do |match|
mode_name = $1
mode_id = @lexer.mode_id(mode_name)
unless mode_id
raise Error.new("Lexer mode '#{mode_name}' not found")
end
case @language
when "c"
"context->mode = #{mode_id}u"
when "d"
"context.mode = #{mode_id}u"
end
end
end
code
end

@@ -215,7 +330,7 @@ class Propane
# Start rule parser value type name and type string.
def start_rule_type
start_rule = @grammar.rules.find do |rule|
rule.name == "Start"
rule.name == @grammar.start_rule
end
[start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]]
end
@@ -229,13 +344,28 @@ class Propane
# Type.
def get_type_for(max)
if max <= 0xFF
case @language
when "c"
"uint8_t"
when "d"
"ubyte"
end
elsif max <= 0xFFFF
case @language
when "c"
"uint16_t"
when "d"
"ushort"
end
else
case @language
when "c"
"uint32_t"
else
"uint"
end
end
end

end

lib/propane/grammar.rb

@@ -5,10 +5,13 @@ class Propane
# Reserve identifiers beginning with a double-underscore for internal use.
IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/

attr_reader :classname
attr_reader :ast
attr_reader :ast_prefix
attr_reader :ast_suffix
attr_reader :modulename
attr_reader :patterns
attr_reader :rules
attr_reader :start_rule
attr_reader :tokens
attr_reader :code_blocks
attr_reader :ptypes
@@ -16,15 +19,19 @@ class Propane

def initialize(input)
@patterns = []
@start_rule = "Start"
@tokens = []
@rules = []
@code_blocks = []
@code_blocks = {}
@line_number = 1
@next_line_number = @line_number
@mode = nil
@input = input.gsub("\r\n", "\n")
@ptypes = {"default" => "void *"}
@prefix = "p_"
@ast = false
@ast_prefix = ""
@ast_suffix = ""
parse_grammar!
end

@@ -36,6 +43,10 @@ class Propane
@tokens.size
end

def terminate_token_id
@tokens.size + 1
end

private

def parse_grammar!
@@ -48,10 +59,13 @@ class Propane
if parse_white_space!
elsif parse_comment_line!
elsif @mode.nil? && parse_mode_label!
elsif parse_ast_statement!
elsif parse_ast_prefix_statement!
elsif parse_ast_suffix_statement!
elsif parse_module_statement!
elsif parse_class_statement!
elsif parse_ptype_statement!
elsif parse_pattern_statement!
elsif parse_start_statement!
elsif parse_token_statement!
elsif parse_tokenid_statement!
elsif parse_drop_statement!
@@ -80,6 +94,24 @@ class Propane
consume!(/#.*\n/)
end

def parse_ast_statement!
if consume!(/ast\s*;/)
@ast = true
end
end

def parse_ast_prefix_statement!
if md = consume!(/ast_prefix\s+(\w+)\s*;/)
@ast_prefix = md[1]
end
end

def parse_ast_suffix_statement!
if md = consume!(/ast_suffix\s+(\w+)\s*;/)
@ast_suffix = md[1]
end
end

def parse_module_statement!
if consume!(/module\s+/)
md = consume!(/([\w.]+)\s*/, "expected module name")
@@ -90,20 +122,13 @@ class Propane
end
end

def parse_class_statement!
if consume!(/class\s+/)
md = consume!(/([\w.]+)\s*/, "expected class name")
@classname = md[1]
consume!(/;/, "expected `;'")
@mode = nil
true
end
end

def parse_ptype_statement!
if consume!(/ptype\s+/)
name = "default"
if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
name = md[1]
end
md = consume!(/([^;]+);/, "expected parser result type expression")
@@ -116,12 +141,15 @@ class Propane
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
name = md[1]
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1]
end
pattern = parse_pattern! || name
consume!(/\s+/)
unless code = parse_code_block!
consume!(/;/, "expected pattern or `;' or code block")
consume!(/;/, "expected `;' or code block")
end
token = Token.new(name, ptypename, @line_number)
@tokens << token
@@ -137,6 +165,9 @@ class Propane
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
name = md[1]
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1]
end
consume!(/;/, "expected `;'");
@@ -164,10 +195,17 @@ class Propane
def parse_rule_statement!
if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/)
rule_name, ptypename = *md[1, 2]
md = consume!(/((?:#{IDENTIFIER_REGEX}\s*)*)\s*/, "expected rule component list")
if @ast && ptypename
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
md = consume!(/((?:#{IDENTIFIER_REGEX}(?::#{IDENTIFIER_REGEX})?\??\s*)*)\s*/, "expected rule component list")
components = md[1].strip.split(/\s+/)
if @ast
consume!(/;/, "expected `;'")
else
unless code = parse_code_block!
consume!(/;/, "expected pattern or `;' or code block")
consume!(/;/, "expected `;' or code block")
end
end
@rules << Rule.new(rule_name, components, code, ptypename, @line_number)
@mode = nil
@@ -179,6 +217,9 @@ class Propane
if pattern = parse_pattern!
consume!(/\s+/)
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1]
end
unless code = parse_code_block!
@@ -190,9 +231,22 @@ class Propane
end
end

def parse_start_statement!
if md = consume!(/start\s+(\w+)\s*;/)
@start_rule = md[1]
end
end

def parse_code_block_statement!
if code = parse_code_block!
@code_blocks << code
if md = consume!(/<<([a-z]*)(.*?)>>\n/m)
name, code = md[1..2]
code.sub!(/\A\n/, "")
code += "\n" unless code.end_with?("\n")
if @code_blocks[name]
@code_blocks[name] += code
else
@code_blocks[name] = code
end
@mode = nil
true
end
@@ -225,8 +279,11 @@ class Propane
end

def parse_code_block!
if md = consume!(/<<\n(.*?)^>>\n/m)
md[1]
if md = consume!(/<<(.*?)>>\n/m)
code = md[1]
code.sub!(/\A\n/, "")
code += "\n" unless code.end_with?("\n")
code
end
end

@ -7,12 +7,14 @@ class Propane
|
||||
attr_reader :reduce_table
|
||||
attr_reader :rule_sets
|
||||
|
||||
def initialize(grammar, rule_sets, log)
|
||||
def initialize(grammar, rule_sets, log, options)
|
||||
@grammar = grammar
|
||||
@rule_sets = rule_sets
|
||||
@log = log
|
||||
@item_sets = []
|
||||
@item_sets_set = {}
|
||||
@warnings = Set.new
|
||||
@options = options
|
||||
start_item = Item.new(grammar.rules.first, 0)
|
||||
eval_item_sets = Set[ItemSet.new([start_item])]
|
||||
|
||||
@ -23,10 +25,10 @@ class Propane
|
||||
item_set.id = @item_sets.size
|
||||
@item_sets << item_set
|
||||
@item_sets_set[item_set] = item_set
|
||||
item_set.following_symbols.each do |following_symbol|
|
||||
unless following_symbol.name == "$EOF"
|
||||
following_set = item_set.build_following_item_set(following_symbol)
|
||||
eval_item_sets << following_set
|
||||
item_set.next_symbols.each do |next_symbol|
|
||||
unless next_symbol.name == "$EOF"
|
||||
next_item_set = item_set.build_next_item_set(next_symbol)
|
||||
eval_item_sets << next_item_set
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -37,8 +39,11 @@ class Propane
|
||||
end
|
||||
|
||||
build_reduce_actions!
|
||||
write_log!
|
||||
build_tables!
|
||||
write_log!
|
||||
if @warnings.size > 0 && @options[:warnings_as_errors]
|
||||
raise Error.new("Fatal errors (-w):\n" + @warnings.join("\n"))
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
@ -48,27 +53,37 @@ class Propane
|
||||
@shift_table = []
|
||||
@reduce_table = []
|
||||
@item_sets.each do |item_set|
|
||||
shift_entries = item_set.following_symbols.map do |following_symbol|
|
||||
shift_entries = item_set.next_symbols.map do |next_symbol|
|
||||
state_id =
|
||||
if following_symbol.name == "$EOF"
|
||||
if next_symbol.name == "$EOF"
|
||||
0
|
||||
else
|
||||
item_set.following_item_set[following_symbol].id
|
||||
item_set.next_item_set[next_symbol].id
|
||||
end
|
||||
{
|
||||
symbol_id: following_symbol.id,
|
||||
symbol: next_symbol,
|
||||
state_id: state_id,
|
||||
}
|
||||
end
|
||||
unless item_set.reduce_rules.empty?
|
||||
shift_entries.each do |shift_entry|
|
||||
token = shift_entry[:symbol]
|
||||
if get_lookahead_reduce_actions_for_item_set(item_set).include?(token)
|
||||
rule = item_set.reduce_actions[token]
|
||||
@warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
|
||||
end
|
||||
end
|
||||
end
|
||||
reduce_entries =
|
||||
case ra = item_set.reduce_actions
|
||||
when Rule
|
||||
[{token_id: @grammar.invalid_token_id, rule_id: ra.id,
|
||||
rule_set_id: ra.rule_set.id, n_states: ra.components.size}]
|
||||
when Hash
|
||||
ra.map do |token, rule|
|
||||
{token_id: token.id, rule_id: rule.id,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size}
|
||||
if rule = item_set.reduce_rule
|
||||
[{token_id: @grammar.invalid_token_id, rule_id: rule.id, rule: rule,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
|
||||
propagate_optional_target: rule.optional? && rule.components.size == 1}]
|
||||
elsif reduce_actions = item_set.reduce_actions
|
||||
reduce_actions.map do |token, rule|
|
||||
{token_id: token.id, rule_id: rule.id, rule: rule,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
|
||||
propagate_optional_target: rule.optional? && rule.components.size == 1}
|
||||
end
|
||||
else
|
||||
[]
|
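The warning pass above backs the shift/reduce diagnostics added in v1.5.0 (#25/#26): a state that can both shift a token and reduce on that same lookahead token gets flagged. A standalone sketch of that overlap check, with assumed shapes for the entries (not the project's exact API):

def shift_reduce_conflicts(shift_entries, reduce_actions)
  # shift_entries: [{symbol: <token>, state_id: Integer}, ...]
  # reduce_actions: {<token> => <rule>, ...}
  shift_entries.filter_map do |entry|
    token = entry[:symbol]
    if (rule = reduce_actions[token])
      "Shift/Reduce conflict between token #{token} and rule #{rule}"
    end
  end
end

shift_reduce_conflicts([{symbol: :plus, state_id: 4}],
                       {plus: "E -> E plus E"})
# => ["Shift/Reduce conflict between token plus and rule E -> E plus E"]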
||||
@ -85,11 +100,11 @@ class Propane
|
||||
end
|
||||
|
||||
def process_item_set(item_set)
|
||||
item_set.following_symbols.each do |following_symbol|
|
||||
unless following_symbol.name == "$EOF"
|
||||
following_set = @item_sets_set[item_set.build_following_item_set(following_symbol)]
|
||||
item_set.following_item_set[following_symbol] = following_set
|
||||
following_set.in_sets << item_set
|
||||
item_set.next_symbols.each do |next_symbol|
|
||||
unless next_symbol.name == "$EOF"
|
||||
next_item_set = @item_sets_set[item_set.build_next_item_set(next_symbol)]
|
||||
item_set.next_item_set[next_symbol] = next_item_set
|
||||
next_item_set.in_sets << item_set
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -99,7 +114,7 @@ class Propane
|
||||
# @return [void]
|
||||
def build_reduce_actions!
|
||||
@item_sets.each do |item_set|
|
||||
item_set.reduce_actions = build_reduce_actions_for_item_set(item_set)
|
||||
build_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
end
|
||||
|
||||
@ -108,38 +123,55 @@ class Propane
|
||||
# @param item_set [ItemSet]
|
||||
# ItemSet (parser state)
|
||||
#
|
||||
# @return [nil, Rule, Hash]
|
||||
# If no reduce actions are possible for the given item set, nil.
|
||||
# If only one reduce action is possible for the given item set, the Rule
|
||||
# to reduce.
|
||||
# Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
|
||||
# @return [void]
|
||||
def build_reduce_actions_for_item_set(item_set)
|
||||
# To build the reduce actions, we start by looking at any
|
||||
# "complete" items, i.e., items where the parse position is at the
|
||||
# end of a rule. These are the only rules that are candidates for
|
||||
# reduction in the current ItemSet.
|
||||
reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
|
||||
item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
|
||||
|
||||
# If there are no rules to reduce for this ItemSet, we're done here.
|
||||
return nil if reduce_rules.size == 0
|
||||
if item_set.reduce_rules.size == 1
|
||||
item_set.reduce_rule = item_set.reduce_rules.first
|
||||
end
|
||||
|
||||
# If there is exactly one rule to reduce for this ItemSet, then do not
|
||||
# figure out the lookaheads; just reduce it.
|
||||
return reduce_rules.first if reduce_rules.size == 1
|
||||
if item_set.reduce_rules.size > 1
|
||||
# Force item_set.reduce_actions to be built to store the lookahead
|
||||
# tokens for the possible reduce rules if there is more than one.
|
||||
get_lookahead_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
end
|
||||
|
||||
# Otherwise, we have more than one possible rule to reduce.
|
||||
# Get the reduce actions for a single item set (parser state).
|
||||
#
|
||||
# @param item_set [ItemSet]
|
||||
# ItemSet (parser state)
|
||||
#
|
||||
# @return [Hash]
|
||||
# Mapping of lookahead Tokens to the Rules to reduce.
|
||||
def get_lookahead_reduce_actions_for_item_set(item_set)
|
||||
item_set.reduce_actions ||= build_lookahead_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
|
||||
# Build the reduce actions for a single item set (parser state).
|
||||
#
|
||||
# @param item_set [ItemSet]
|
||||
# ItemSet (parser state)
|
||||
#
|
||||
# @return [Hash]
|
||||
# Mapping of lookahead Tokens to the Rules to reduce.
|
||||
def build_lookahead_reduce_actions_for_item_set(item_set)
|
||||
# We will be looking for all possible tokens that can follow instances of
|
||||
# these rules. Rather than looking through the entire grammar for the
|
||||
# possible following tokens, we will only look in the item sets leading
|
||||
# up to this one. This restriction gives us a more precise lookahead set,
|
||||
# and allows us to parse LALR grammars.
|
||||
item_sets = item_set.leading_item_sets
|
||||
reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
|
||||
item_sets = Set[item_set] + item_set.leading_item_sets
|
||||
item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
|
||||
lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
|
||||
lookahead_tokens_for_rule.each do |lookahead_token|
|
||||
if existing_reduce_rule = reduce_actions[lookahead_token]
|
||||
raise Error.new("Error: reduce/reduce conflict between rule #{existing_reduce_rule.id} (#{existing_reduce_rule.name}) and rule #{reduce_rule.id} (#{reduce_rule.name})")
|
||||
raise Error.new("Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number})")
|
||||
end
|
||||
reduce_actions[lookahead_token] = reduce_rule
|
||||
end
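The fold above merges each reduce rule's lookahead token set into one token-to-rule map, raising on overlap; restricting the search to the leading item sets is what keeps the lookaheads precise enough for LALR grammars. A compact sketch of the merge-and-detect step, under assumed shapes:

def merge_lookaheads(lookaheads_by_rule)
  # lookaheads_by_rule: {<rule name> => [<lookahead tokens>]}
  lookaheads_by_rule.each_with_object({}) do |(rule, tokens), actions|
    tokens.each do |token|
      if (other = actions[token])
        raise "reduce/reduce conflict on #{token}: #{other} vs #{rule}"
      end
      actions[token] = rule
    end
  end
end

merge_lookaheads("A" => [:x, :y], "B" => [:z])
# => {:x=>"A", :y=>"A", :z=>"B"}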
|
||||
@ -181,9 +213,9 @@ class Propane
|
||||
# tokens to form the lookahead token set.
|
||||
item_sets.each do |item_set|
|
||||
item_set.items.each do |item|
|
||||
if item.following_symbol == rule_set
|
||||
if item.next_symbol == rule_set
|
||||
(1..).each do |offset|
|
||||
case symbol = item.following_symbol(offset)
|
||||
case symbol = item.next_symbol(offset)
|
||||
when nil
|
||||
rule_set = item.rule.rule_set
|
||||
unless checked_rule_sets.include?(rule_set)
|
||||
@ -240,20 +272,26 @@ class Propane
|
||||
@log.puts
|
||||
@log.puts " Incoming states: #{incoming_ids.join(", ")}"
|
||||
@log.puts " Outgoing states:"
|
||||
item_set.following_item_set.each do |following_symbol, following_item_set|
|
||||
@log.puts " #{following_symbol.name} => #{following_item_set.id}"
|
||||
item_set.next_item_set.each do |next_symbol, next_item_set|
|
||||
@log.puts " #{next_symbol.name} => #{next_item_set.id}"
|
||||
end
|
||||
@log.puts
|
||||
@log.puts " Reduce actions:"
|
||||
case item_set.reduce_actions
|
||||
when Rule
|
||||
@log.puts " * => rule #{item_set.reduce_actions.id}, rule set #{@rule_sets[item_set.reduce_actions.name].id} (#{item_set.reduce_actions.name})"
|
||||
when Hash
|
||||
if item_set.reduce_rule
|
||||
@log.puts " * => rule #{item_set.reduce_rule.id}, rule set #{@rule_sets[item_set.reduce_rule.name].id} (#{item_set.reduce_rule.name})"
|
||||
elsif item_set.reduce_actions
|
||||
item_set.reduce_actions.each do |token, rule|
|
||||
@log.puts " lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}"
|
||||
end
|
||||
end
|
||||
end
|
||||
if @warnings.size > 0
|
||||
@log.puts
|
||||
@log.puts "Warnings:"
|
||||
@warnings.each do |warning|
|
||||
@log.puts " #{warning}"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
@ -56,7 +56,7 @@ class Propane
|
||||
|
||||
# Return the set of Items obtained by "closing" the current item.
|
||||
#
|
||||
# If the following symbol for the current item is another Rule name, then
|
||||
# If the next symbol for the current item is another Rule name, then
|
||||
# this method will return all Items for that Rule with a position of 0.
|
||||
# Otherwise, an empty Array is returned.
|
||||
#
|
||||
@ -81,17 +81,17 @@ class Propane
|
||||
@position == @rule.components.size
|
||||
end
|
||||
|
||||
# Get the following symbol for the Item.
|
||||
# Get the next symbol for the Item.
|
||||
#
|
||||
# That is, the symbol which follows the parse position marker in the
|
||||
# That is, the symbol which is after the parse position marker in the
|
||||
# current Item.
|
||||
#
|
||||
# @param offset [Integer]
|
||||
# Offset from current parse position to examine.
|
||||
#
|
||||
# @return [Token, RuleSet, nil]
|
||||
# Following symbol for the Item.
|
||||
def following_symbol(offset = 0)
|
||||
# Next symbol for the Item.
|
||||
def next_symbol(offset = 0)
|
||||
@rule.components[@position + offset]
|
||||
end
|
||||
|
||||
@ -108,25 +108,25 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
# Get whether this Item is followed by the provided symbol.
|
||||
# Get whether this Item's next symbol is the given symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Symbol to query.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this Item is followed by the provided symbol.
|
||||
def followed_by?(symbol)
|
||||
following_symbol == symbol
|
||||
# Whether this Item's next symbol is the given symbol.
|
||||
def next_symbol?(symbol)
|
||||
next_symbol == symbol
|
||||
end
|
||||
|
||||
# Get the following item for this Item.
|
||||
# Get the next item for this Item.
|
||||
#
|
||||
# That is, the Item formed by moving the parse position marker one place
|
||||
# forward from its position in this Item.
|
||||
#
|
||||
# @return [Item]
|
||||
# The following item for this Item.
|
||||
def following_item
|
||||
# The next item for this Item.
|
||||
def next_item
|
||||
Item.new(@rule, @position + 1)
|
||||
end
|
||||
|
||||
|
@ -2,7 +2,7 @@ class Propane
|
||||
class Parser
|
||||
|
||||
# Represent a parser "item set", which is a set of possible items that the
|
||||
# parser could currently be parsing.
|
||||
# parser could currently be parsing. This is equivalent to a parser state.
|
||||
class ItemSet
|
||||
|
||||
# @return [Set<Item>]
|
||||
@ -14,15 +14,24 @@ class Propane
|
||||
attr_accessor :id
|
||||
|
||||
# @return [Hash]
|
||||
# Maps a following symbol to its ItemSet.
|
||||
attr_reader :following_item_set
|
||||
# Maps a next symbol to its ItemSet.
|
||||
attr_reader :next_item_set
|
||||
|
||||
# @return [Set<ItemSet>]
|
||||
# ItemSets leading to this item set.
|
||||
attr_reader :in_sets
|
||||
|
||||
# @return [nil, Rule, Hash]
|
||||
# Reduce actions, mapping lookahead tokens to rules.
|
||||
# @return [nil, Rule]
|
||||
# Rule to reduce if there is only one possibility.
|
||||
attr_accessor :reduce_rule
|
||||
|
||||
# @return [Set<Rule>]
|
||||
# Set of rules that could be reduced in this parser state.
|
||||
attr_accessor :reduce_rules
|
||||
|
||||
# @return [nil, Hash]
|
||||
# Reduce actions, mapping lookahead tokens to rules, if there is
|
||||
# more than one rule that could be reduced.
|
||||
attr_accessor :reduce_actions
|
||||
|
||||
# Build an ItemSet.
|
||||
@ -31,28 +40,28 @@ class Propane
|
||||
# Items in this ItemSet.
|
||||
def initialize(items)
|
||||
@items = Set.new(items)
|
||||
@following_item_set = {}
|
||||
@next_item_set = {}
|
||||
@in_sets = Set.new
|
||||
close!
|
||||
end
|
||||
|
||||
# Get the set of following symbols for all Items in this ItemSet.
|
||||
# Get the set of next symbols for all Items in this ItemSet.
|
||||
#
|
||||
# @return [Set<Token, RuleSet>]
|
||||
# Set of following symbols for all Items in this ItemSet.
|
||||
def following_symbols
|
||||
Set.new(@items.map(&:following_symbol).compact)
|
||||
# Set of next symbols for all Items in this ItemSet.
|
||||
def next_symbols
|
||||
@_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
|
||||
end
|
||||
|
||||
# Build a following ItemSet for the given following symbol.
|
||||
# Build a next ItemSet for the given next symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Following symbol to build the following ItemSet for.
|
||||
# Next symbol to build the next ItemSet for.
|
||||
#
|
||||
# @return [ItemSet]
|
||||
# Following ItemSet for the given following symbol.
|
||||
def build_following_item_set(symbol)
|
||||
ItemSet.new(items_followed_by(symbol).map(&:following_item))
|
||||
# Next ItemSet for the given next symbol.
|
||||
def build_next_item_set(symbol)
|
||||
ItemSet.new(items_with_next(symbol).map(&:next_item))
|
||||
end
|
||||
|
||||
# Hash function.
|
||||
@ -87,13 +96,26 @@ class Propane
|
||||
|
||||
# Set of ItemSets that lead to this ItemSet.
|
||||
#
|
||||
# This set includes this ItemSet.
|
||||
#
|
||||
# @return [Set<ItemSet>]
|
||||
# Set of all ItemSets that lead up to this ItemSet.
|
||||
def leading_item_sets
|
||||
@in_sets.reduce(Set[self]) do |result, item_set|
|
||||
result + item_set.leading_item_sets
|
||||
@_leading_item_sets ||=
|
||||
begin
|
||||
result = Set.new
|
||||
eval_sets = Set[self]
|
||||
evaled = Set.new
|
||||
while eval_sets.size > 0
|
||||
eval_set = eval_sets.first
|
||||
eval_sets.delete(eval_set)
|
||||
evaled << eval_set
|
||||
eval_set.in_sets.each do |in_set|
|
||||
result << in_set
|
||||
unless evaled.include?(in_set)
|
||||
eval_sets << in_set
|
||||
end
|
||||
end
|
||||
end
|
||||
result
|
||||
end
|
||||
end
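The rewritten leading_item_sets replaces the earlier recursive set union with an explicit worklist, so mutually-reachable states (cycles through in_sets) cannot recurse forever. The same traversal in isolation, over a stand-in node type:

require "set"

Node = Struct.new(:name, :in_sets)

def leading(node)
  result, seen, work = Set.new, Set[node], [node]
  until work.empty?
    work.pop.in_sets.each do |pred|
      result << pred
      work << pred if seen.add?(pred)  # add? returns nil when already visited
    end
  end
  result
end

a = Node.new("a", Set.new)
b = Node.new("b", Set[a])
a.in_sets << b                 # a <-> b cycle
leading(b).map(&:name).sort    # => ["a", "b"] -- terminates despite the cycle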
|
||||
|
||||
@ -127,16 +149,16 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
# Get the Items followed by the given following symbol.
|
||||
# Get the Items with the given next symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Following symbol.
|
||||
# Next symbol.
|
||||
#
|
||||
# @return [Array<Item>]
|
||||
# Items followed by the given following symbol.
|
||||
def items_followed_by(symbol)
|
||||
# Items with the given next symbol.
|
||||
def items_with_next(symbol)
|
||||
@items.select do |item|
|
||||
item.followed_by?(symbol)
|
||||
item.next_symbol?(symbol)
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -134,8 +134,18 @@ class Propane
|
||||
else
|
||||
c = @pattern.slice!(0)
|
||||
case c
|
||||
when "a"
|
||||
CharacterRangeUnit.new("\a", "\a")
|
||||
when "b"
|
||||
CharacterRangeUnit.new("\b", "\b")
|
||||
when "d"
|
||||
CharacterRangeUnit.new("0", "9")
|
||||
when "f"
|
||||
CharacterRangeUnit.new("\f", "\f")
|
||||
when "n"
|
||||
CharacterRangeUnit.new("\n", "\n")
|
||||
when "r"
|
||||
CharacterRangeUnit.new("\r", "\r")
|
||||
when "s"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new(" ")
|
||||
@ -145,6 +155,10 @@ class Propane
|
||||
ccu << CharacterRangeUnit.new("\f")
|
||||
ccu << CharacterRangeUnit.new("\v")
|
||||
ccu
|
||||
when "t"
|
||||
CharacterRangeUnit.new("\t", "\t")
|
||||
when "v"
|
||||
CharacterRangeUnit.new("\v", "\v")
|
||||
else
|
||||
CharacterRangeUnit.new(c)
|
||||
end
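For reference, the escapes handled by the case above (the backslash codes from #19), noting that \d and \s expand to a range and a character class rather than single characters:

ESCAPES = {
  "a" => "\a", "b" => "\b", "f" => "\f", "n" => "\n",
  "r" => "\r", "t" => "\t", "v" => "\v",
  # "d" => character range 0-9
  # "s" => character class [ \t\r\n\f\v]
}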
|
||||
|
@ -6,6 +6,10 @@ class Propane
|
||||
# Rule components.
|
||||
attr_reader :components
|
||||
|
||||
# @return [Hash]
|
||||
# Field aliases.
|
||||
attr_reader :aliases
|
||||
|
||||
# @return [String]
|
||||
# User code associated with the rule.
|
||||
attr_reader :code
|
||||
@ -30,6 +34,11 @@ class Propane
|
||||
# The RuleSet that this Rule is a part of.
|
||||
attr_accessor :rule_set
|
||||
|
||||
# @return [Array<Integer>]
|
||||
# Map this rule's components to their positions in the parent RuleSet's
|
||||
# node field pointer array. This is used for AST construction.
|
||||
attr_accessor :rule_set_node_field_index_map
|
||||
|
||||
# Construct a Rule.
|
||||
#
|
||||
# @param name [String]
|
||||
@ -44,7 +53,20 @@ class Propane
|
||||
# Line number where the rule was defined in the input grammar.
|
||||
def initialize(name, components, code, ptypename, line_number)
|
||||
@name = name
|
||||
@components = components
|
||||
@aliases = {}
|
||||
@components = components.each_with_index.map do |component, i|
|
||||
if component =~ /(\S+):(\S+)/
|
||||
c, aliasname = $1, $2
|
||||
if @aliases[aliasname]
|
||||
raise Error.new("Error: duplicate field alias `#{aliasname}` for rule #{name} defined on line #{line_number}")
|
||||
end
|
||||
@aliases[aliasname] = i
|
||||
c
|
||||
else
|
||||
component
|
||||
end
|
||||
end
|
||||
@rule_set_node_field_index_map = components.map {0}
|
||||
@code = code
|
||||
@ptypename = ptypename
|
||||
@line_number = line_number
|
||||
@ -60,6 +82,14 @@ class Propane
|
||||
@components.empty?
|
||||
end
|
||||
|
||||
# Return whether this is an optional Rule.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this is an optional Rule.
|
||||
def optional?
|
||||
@name.end_with?("?")
|
||||
end
|
||||
|
||||
# Represent the Rule as a String.
|
||||
#
|
||||
# @return [String]
|
||||
@ -68,6 +98,17 @@ class Propane
|
||||
"#{@name} -> #{@components.map(&:name).join(" ")}"
|
||||
end
|
||||
|
||||
# Check whether the rule set node field index map is just a 1:1 mapping.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Boolean indicating whether the rule set node field index map is just a
|
||||
# 1:1 mapping.
|
||||
def flat_rule_set_node_field_index_map?
|
||||
@rule_set_node_field_index_map.each_with_index.all? do |v, i|
|
||||
v == i
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
@ -1,7 +1,12 @@
|
||||
class Propane
|
||||
|
||||
# A RuleSet collects all grammar rules of the same name.
|
||||
class RuleSet
|
||||
|
||||
# @return [Array<Hash>]
|
||||
# AST fields.
|
||||
attr_reader :ast_fields
|
||||
|
||||
# @return [Integer]
|
||||
# ID of the RuleSet.
|
||||
attr_reader :id
|
||||
@ -51,6 +56,24 @@ class Propane
|
||||
@could_be_empty
|
||||
end
|
||||
|
||||
# Return whether this is an optional RuleSet.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this is an optional RuleSet.
|
||||
def optional?
|
||||
@name.end_with?("?")
|
||||
end
|
||||
|
||||
# For optional rule sets, return the underlying component that is optional.
|
||||
def option_target
|
||||
@rules.each do |rule|
|
||||
if rule.components.size > 0
|
||||
return rule.components[0]
|
||||
end
|
||||
end
|
||||
raise "Optional rule target not found"
|
||||
end
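option_target relies on the shape of generated optional rule sets: a set such as `Item?` always holds one empty rule and one single-component rule wrapping the optional target. A small sketch of that invariant (hypothetical expansion shown in the comment):

# Hypothetical expansion for a component written as `Item?`:
#   Item? ->          (empty rule; reduces to a null AST field)
#   Item? -> Item     (single-component rule; option_target == Item)
OptRule = Struct.new(:components)

def option_target(rules)
  target = rules.find { |r| !r.components.empty? }
  raise "Optional rule target not found" unless target
  target.components.first
end

option_target([OptRule.new([]), OptRule.new(["Item"])])  # => "Item"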
|
||||
|
||||
# Build the start token set for the RuleSet.
|
||||
#
|
||||
# @return [Set<Token>]
|
||||
@ -75,6 +98,72 @@ class Propane
|
||||
@_start_token_set
|
||||
end
|
||||
|
||||
# Finalize a RuleSet after adding all Rules to it.
|
||||
def finalize(grammar)
|
||||
if grammar.ast
|
||||
build_ast_fields(grammar)
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# Build the set of AST fields for this RuleSet.
|
||||
#
|
||||
# This is an Array of Hashes. Each entry in the Array corresponds to a
|
||||
# field location in the AST node. The entry is a Hash. It could have one or
|
||||
# two keys. It will always have the field name with a positional suffix as
|
||||
# a key. It may also have the field name without the positional suffix if
|
||||
# that field only exists in one position across all Rules in the RuleSet.
|
||||
#
|
||||
# @return [void]
|
||||
def build_ast_fields(grammar)
|
||||
field_ast_node_indexes = {}
|
||||
field_indexes_across_all_rules = {}
|
||||
@ast_fields = []
|
||||
@rules.each do |rule|
|
||||
rule.components.each_with_index do |component, i|
|
||||
if component.is_a?(RuleSet) && component.optional?
|
||||
component = component.option_target
|
||||
end
|
||||
if component.is_a?(Token)
|
||||
node_name = "Token"
|
||||
else
|
||||
node_name = component.name
|
||||
end
|
||||
struct_name = "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
|
||||
field_name = "p#{node_name}#{i + 1}"
|
||||
unless field_ast_node_indexes[field_name]
|
||||
field_ast_node_indexes[field_name] = @ast_fields.size
|
||||
@ast_fields << {field_name => struct_name}
|
||||
end
|
||||
field_indexes_across_all_rules[node_name] ||= Set.new
|
||||
field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
|
||||
rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
|
||||
end
|
||||
end
|
||||
field_indexes_across_all_rules.each do |node_name, indexes_across_all_rules|
|
||||
if indexes_across_all_rules.size == 1
|
||||
# If this field was only seen in one position across all rules,
|
||||
# then add an alias to the positional field name that does not
|
||||
# include the position.
|
||||
@ast_fields[indexes_across_all_rules.first]["p#{node_name}"] =
|
||||
"#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
|
||||
end
|
||||
end
|
||||
# Now merge in the field aliases as given by the user in the
|
||||
# grammar.
|
||||
field_aliases = {}
|
||||
@rules.each do |rule|
|
||||
rule.aliases.each do |alias_name, index|
|
||||
if field_aliases[alias_name] && field_aliases[alias_name] != index
|
||||
raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}`")
|
||||
end
|
||||
field_aliases[alias_name] = index
|
||||
@ast_fields[index][alias_name] = @ast_fields[index].first[1]
|
||||
end
|
||||
end
|
||||
end
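To make the structure described in the comment above concrete, here is a hypothetical @ast_fields value for a rule set with the single rule `Pair -> string colon Value` (no AST prefix or suffix configured; names illustrative):

ast_fields = [
  {"pToken1" => "Token"},                       # string at position 0
  {"pToken2" => "Token"},                       # colon at position 1; Token occurs at
                                                #   two positions, so no bare "pToken"
  {"pValue3" => "Value", "pValue" => "Value"},  # Value occurs at one position only,
                                                #   so the unsuffixed alias is added
]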
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
@ -1,3 +1,3 @@
class Propane
  VERSION = "0.1.0"
  VERSION = "1.5.1"
end
@ -1,2 +0,0 @@
#!/bin/sh
exec bundle exec ruby -Ilib bin/propane "$@"
@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
require "erb"
|
||||
require "fileutils"
|
||||
require "digest/md5"
|
||||
|
||||
@ -13,6 +14,24 @@ START_FILE = "bin/#{PROG_NAME}"
|
||||
LIB_DIR = "lib"
|
||||
DIST = "dist"
|
||||
|
||||
ASSETS_TEMPLATE = <<EOF
|
||||
class Propane
|
||||
module Assets
|
||||
class << self
|
||||
def get(name)
|
||||
case name
|
||||
<% Dir.glob("assets/*").each do |asset_file| %>
|
||||
when <%= File.basename(asset_file).inspect %>
|
||||
<%= File.binread(asset_file).inspect %>
|
||||
<% end %>
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
EOF
|
||||
|
||||
assets_module = ERB.new(ASSETS_TEMPLATE, trim_mode: "<>").result
|
||||
files_processed = {}
|
||||
combined_file = []
|
||||
|
||||
@ -25,8 +44,12 @@ combine_files = lambda do |file|
|
||||
if File.exist?(path)
|
||||
unless files_processed[path]
|
||||
files_processed[path] = true
|
||||
if require_name == "propane/assets"
|
||||
combined_file << assets_module
|
||||
else
|
||||
combine_files[path]
|
||||
end
|
||||
end
|
||||
else
|
||||
raise "require path #{path.inspect} not found"
|
||||
end
|
||||
|
spec/json_parser.c.propane (new file, 183 lines)
@ -0,0 +1,183 @@
|
||||
<<header
|
||||
#include "json_types.h"
|
||||
#include "testutils.h"
|
||||
>>
|
||||
<<
|
||||
#include "math.h"
|
||||
#include <stdbool.h>
|
||||
static str_t string_value;
|
||||
>>
|
||||
|
||||
ptype JSONValue *;
|
||||
|
||||
drop /\s+/;
|
||||
token lbrace /\{/;
|
||||
token rbrace /\}/;
|
||||
token lbracket /\[/;
|
||||
token rbracket /\]/;
|
||||
token comma /,/;
|
||||
token colon /:/;
|
||||
token number /-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?/ <<
|
||||
double n = 0.0;
|
||||
bool negative = false;
|
||||
size_t i = 0u;
|
||||
if (match[i] == '-')
|
||||
{
|
||||
negative = true;
|
||||
i++;
|
||||
}
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
n *= 10.0;
|
||||
n += (match[i] - '0');
|
||||
i++;
|
||||
}
|
||||
if (match[i] == '.')
|
||||
{
|
||||
i++;
|
||||
double mult = 0.1;
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
n += mult * (match[i] - '0');
|
||||
mult /= 10.0;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
if (match[i] == 'e' || match[i] == 'E')
|
||||
{
|
||||
bool exp_negative = false;
|
||||
i++;
|
||||
if (match[i] == '-')
|
||||
{
|
||||
exp_negative = true;
|
||||
i++;
|
||||
}
|
||||
else if (match[i] == '+')
|
||||
{
|
||||
i++;
|
||||
}
|
||||
long exp = 0;
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
exp *= 10;
|
||||
exp += (match[i] - '0');
|
||||
i++;
|
||||
}
|
||||
if (exp_negative)
|
||||
{
|
||||
exp = -exp;
|
||||
}
|
||||
n *= pow(10.0, exp);
|
||||
}
|
||||
if (negative)
|
||||
{
|
||||
n = -n;
|
||||
}
|
||||
$$ = JSONValue_new(JSON_NUMBER);
|
||||
$$->number = n;
|
||||
>>
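As a cross-check of the hand-rolled scan above, the same number semantics in Ruby, applying the exponent as a power of ten (illustrative only; the generated lexer uses the C version):

def json_number(text)
  m = text.match(/\A(-?)(\d+)(?:\.(\d+))?(?:[eE]([-+]?\d+))?\z/) or raise "bad number"
  n = m[2].to_f
  m[3].to_s.each_char.with_index(1) { |c, i| n += c.to_i * 10.0**-i }
  n *= 10.0**m[4].to_i if m[4]
  m[1] == "-" ? -n : n
end

json_number("-12.5e2")  # => -1250.0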
|
||||
token true <<
|
||||
$$ = JSONValue_new(JSON_TRUE);
|
||||
>>
|
||||
token false <<
|
||||
$$ = JSONValue_new(JSON_FALSE);
|
||||
>>
|
||||
token null <<
|
||||
$$ = JSONValue_new(JSON_NULL);
|
||||
>>
|
||||
/"/ <<
|
||||
$mode(string);
|
||||
str_init(&string_value, "");
|
||||
>>
|
||||
string: token string /"/ <<
|
||||
$$ = JSONValue_new(JSON_STRING);
|
||||
$$->string = string_value;
|
||||
$mode(default);
|
||||
>>
|
||||
string: /\\"/ <<
|
||||
str_append(&string_value, "\"");
|
||||
>>
|
||||
string: /\\\\/ <<
|
||||
str_append(&string_value, "\\");
|
||||
>>
|
||||
string: /\\\// <<
|
||||
str_append(&string_value, "/");
|
||||
>>
|
||||
string: /\\b/ <<
|
||||
str_append(&string_value, "\b");
|
||||
>>
|
||||
string: /\\f/ <<
|
||||
str_append(&string_value, "\f");
|
||||
>>
|
||||
string: /\\n/ <<
|
||||
str_append(&string_value, "\n");
|
||||
>>
|
||||
string: /\\r/ <<
|
||||
str_append(&string_value, "\r");
|
||||
>>
|
||||
string: /\\t/ <<
|
||||
str_append(&string_value, "\t");
|
||||
>>
|
||||
string: /\\u[0-9a-fA-F]{4}/ <<
|
||||
/* Not actually going to encode the code point for this example... */
|
||||
char s[] = {'{', match[2], match[3], match[4], match[5], '}', 0};
|
||||
str_append(&string_value, s);
|
||||
>>
|
||||
string: /[^\\]/ <<
|
||||
char s[] = {match[0], 0};
|
||||
str_append(&string_value, s);
|
||||
>>
|
||||
Start -> Value <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> string <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> number <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> Object <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> Array <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> true <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> false <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> null <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Object -> lbrace rbrace <<
|
||||
$$ = JSONObject_new();
|
||||
>>
|
||||
Object -> lbrace KeyValues rbrace <<
|
||||
$$ = $2;
|
||||
>>
|
||||
KeyValues -> KeyValue <<
|
||||
$$ = $1;
|
||||
>>
|
||||
KeyValues -> KeyValues comma KeyValue <<
|
||||
JSONObject_append($1, $3->object.entries[0].name, $3->object.entries[0].value);
|
||||
$$ = $1;
|
||||
>>
|
||||
KeyValue -> string colon Value <<
|
||||
$$ = JSONObject_new();
|
||||
JSONObject_append($$, str_cstr(&$1->string), $3);
|
||||
>>
|
||||
Array -> lbracket rbracket <<
|
||||
$$ = JSONArray_new();
|
||||
>>
|
||||
Array -> lbracket Values rbracket <<
|
||||
$$ = $2;
|
||||
>>
|
||||
Values -> Value <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Values -> Values comma Value <<
|
||||
JSONArray_append($1, $3);
|
||||
$$ = $1;
|
||||
>>
|
spec/json_types.c (new file, 64 lines)
@ -0,0 +1,64 @@
|
||||
#include "json_types.h"
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "testutils.h"
|
||||
|
||||
JSONValue * JSONValue_new(size_t id)
|
||||
{
|
||||
JSONValue * jv = calloc(1, sizeof(JSONValue));
|
||||
jv->id = id;
|
||||
return jv;
|
||||
}
|
||||
|
||||
JSONValue * JSONObject_new(void)
|
||||
{
|
||||
JSONValue * jv = JSONValue_new(JSON_OBJECT);
|
||||
jv->object.size = 0u;
|
||||
return jv;
|
||||
}
|
||||
|
||||
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value)
|
||||
{
|
||||
size_t const size = object->object.size;
|
||||
for (size_t i = 0u; i < size; i++)
|
||||
{
|
||||
if (strcmp(name, object->object.entries[i].name) == 0)
|
||||
{
|
||||
object->object.entries[i].value = value;
|
||||
return;
|
||||
}
|
||||
}
|
||||
size_t const new_size = size + 1;
|
||||
void * new_entries = malloc(sizeof(object->object.entries[0]) * new_size);
|
||||
if (size > 0)
|
||||
{
|
||||
memcpy(new_entries, object->object.entries, size * sizeof(object->object.entries[0]));
|
||||
free(object->object.entries);
|
||||
}
|
||||
object->object.entries = new_entries;
|
||||
object->object.entries[size].name = name;
|
||||
object->object.entries[size].value = value;
|
||||
object->object.size = new_size;
|
||||
}
|
||||
|
||||
JSONValue * JSONArray_new(void)
|
||||
{
|
||||
JSONValue * jv = JSONValue_new(JSON_ARRAY);
|
||||
jv->array.size = 0u;
|
||||
return jv;
|
||||
}
|
||||
|
||||
void JSONArray_append(JSONValue * array, JSONValue * value)
|
||||
{
|
||||
size_t const size = array->array.size;
|
||||
size_t const new_size = size + 1;
|
||||
JSONValue ** new_entries = malloc(sizeof(JSONValue *) * new_size);
|
||||
if (array->array.size > 0)
|
||||
{
|
||||
memcpy(new_entries, array->array.entries, sizeof(JSONValue *) * size);
|
||||
free(array->array.entries);
|
||||
}
|
||||
array->array.entries = new_entries;
|
||||
array->array.entries[size] = value;
|
||||
array->array.size = new_size;
|
||||
}
|
spec/json_types.h (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
#pragma once
|
||||
|
||||
#include <stddef.h>
|
||||
#include "testutils.h"
|
||||
|
||||
#define JSON_OBJECT 0u
|
||||
#define JSON_ARRAY 1u
|
||||
#define JSON_NUMBER 2u
|
||||
#define JSON_STRING 3u
|
||||
#define JSON_TRUE 4u
|
||||
#define JSON_FALSE 5u
|
||||
#define JSON_NULL 6u
|
||||
|
||||
typedef struct JSONValue_s
|
||||
{
|
||||
size_t id;
|
||||
union
|
||||
{
|
||||
struct
|
||||
{
|
||||
size_t size;
|
||||
struct
|
||||
{
|
||||
char const * name;
|
||||
struct JSONValue_s * value;
|
||||
} * entries;
|
||||
} object;
|
||||
struct
|
||||
{
|
||||
size_t size;
|
||||
struct JSONValue_s ** entries;
|
||||
} array;
|
||||
double number;
|
||||
str_t string;
|
||||
};
|
||||
} JSONValue;
|
||||
|
||||
JSONValue * JSONValue_new(size_t id);
|
||||
|
||||
JSONValue * JSONObject_new(void);
|
||||
|
||||
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value);
|
||||
|
||||
JSONValue * JSONArray_new(void);
|
||||
|
||||
void JSONArray_append(JSONValue * array, JSONValue * value);
|
@ -5,7 +5,6 @@ class Propane
|
||||
# Comment line
|
||||
|
||||
module a.b;
|
||||
class Foobar;
|
||||
ptype XYZ * ;
|
||||
|
||||
token while;
|
||||
@ -30,7 +29,6 @@ B -> <<
|
||||
>>
|
||||
EOF
|
||||
grammar = Grammar.new(input)
|
||||
expect(grammar.classname).to eq "Foobar"
|
||||
expect(grammar.modulename).to eq "a.b"
|
||||
expect(grammar.ptype).to eq "XYZ *"
|
||||
expect(grammar.ptypes).to eq("default" => "XYZ *")
|
||||
@ -38,44 +36,44 @@ EOF
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "while"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 7
|
||||
expect(o.line_number).to eq 6
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "while"
|
||||
expect(o.line_number).to eq 7
|
||||
expect(o.line_number).to eq 6
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "id"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 10
|
||||
expect(o.line_number).to eq 9
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "[a-zA-Z_][a-zA-Z_0-9]*"
|
||||
expect(o.line_number).to eq 10
|
||||
expect(o.line_number).to eq 9
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "token_with_code"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 12
|
||||
expect(o.line_number).to eq 11
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "token_with_code"
|
||||
expect(o.line_number).to eq 12
|
||||
expect(o.line_number).to eq 11
|
||||
expect(o.code).to eq "Code for the token\n"
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "token_with_no_pattern"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 16
|
||||
expect(o.line_number).to eq 15
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to be_nil
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "\\s+"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 18
|
||||
expect(o.line_number).to eq 17
|
||||
expect(o.token).to be_nil
|
||||
expect(o.code).to be_nil
|
||||
|
||||
@ -84,19 +82,19 @@ EOF
|
||||
o = grammar.rules[0]
|
||||
expect(o.name).to eq "A"
|
||||
expect(o.components).to eq %w[B]
|
||||
expect(o.line_number).to eq 20
|
||||
expect(o.line_number).to eq 19
|
||||
expect(o.code).to eq " a = 42;\n"
|
||||
|
||||
o = grammar.rules[1]
|
||||
expect(o.name).to eq "B"
|
||||
expect(o.components).to eq %w[C while id]
|
||||
expect(o.line_number).to eq 23
|
||||
expect(o.line_number).to eq 22
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.rules[2]
|
||||
expect(o.name).to eq "B"
|
||||
expect(o.components).to eq []
|
||||
expect(o.line_number).to eq 24
|
||||
expect(o.line_number).to eq 23
|
||||
expect(o.code).to eq " b = 0;\n"
|
||||
end
|
||||
|
||||
|
File diff suppressed because it is too large
@ -1,5 +1,18 @@
|
||||
unless ENV["dist_specs"]
|
||||
require "bundler/setup"
|
||||
require "propane"
|
||||
require "simplecov"
|
||||
|
||||
SimpleCov.start do
|
||||
add_filter "/spec/"
|
||||
add_filter "/.bundle/"
|
||||
if ENV["partial_specs"]
|
||||
command_name "RSpec-partial"
|
||||
else
|
||||
command_name "RSpec"
|
||||
end
|
||||
project_name "Propane"
|
||||
merge_timeout 3600
|
||||
end
|
||||
|
||||
RSpec.configure do |config|
|
||||
# Enable flags like --only-failures and --next-failure
|
||||
@ -9,3 +22,6 @@ RSpec.configure do |config|
|
||||
c.syntax = :expect
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
require "propane"
|
||||
|
spec/test_ast.c (new file, 55 lines)
@ -0,0 +1,55 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
Start * start = p_result(&context);
|
||||
assert(start->pItems1 != NULL);
|
||||
assert(start->pItems != NULL);
|
||||
Items * items = start->pItems;
|
||||
assert(items->pItem != NULL);
|
||||
assert(items->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_a, items->pItem->pToken1->token);
|
||||
assert_eq(11, items->pItem->pToken1->pvalue);
|
||||
assert(items->pItemsMore != NULL);
|
||||
ItemsMore * itemsmore = items->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore != NULL);
|
||||
itemsmore = itemsmore->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore == NULL);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems == NULL);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems != NULL);
|
||||
assert(start->pItems->pItem != NULL);
|
||||
assert(start->pItems->pItem->pDual != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne2 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne1 == NULL);
|
||||
|
||||
return 0;
|
||||
}
|
spec/test_ast.d (new file, 57 lines)
@ -0,0 +1,57 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
Start * start = p_result(&context);
|
||||
assert(start.pItems1 !is null);
|
||||
assert(start.pItems !is null);
|
||||
Items * items = start.pItems;
|
||||
assert(items.pItem !is null);
|
||||
assert(items.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_a, items.pItem.pToken1.token);
|
||||
assert_eq(11, items.pItem.pToken1.pvalue);
|
||||
assert(items.pItemsMore !is null);
|
||||
ItemsMore * itemsmore = items.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore !is null);
|
||||
itemsmore = itemsmore.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore is null);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems is null);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems !is null);
|
||||
assert(start.pItems.pItem !is null);
|
||||
assert(start.pItems.pItem.pDual !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo1 !is null);
|
||||
assert(start.pItems.pItem.pDual.pOne2 !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo2 is null);
|
||||
assert(start.pItems.pItem.pDual.pOne1 is null);
|
||||
}
|
spec/test_ast_field_aliases.c (new file, 19 lines)
@ -0,0 +1,19 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "\na\nb\nc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(TOKEN_a, start->first->pToken->token);
|
||||
assert_eq(TOKEN_b, start->second->pToken->token);
|
||||
assert_eq(TOKEN_c, start->third->pToken->token);
|
||||
|
||||
return 0;
|
||||
}
|
spec/test_ast_field_aliases.d (new file, 21 lines)
@ -0,0 +1,21 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "\na\nb\nc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(TOKEN_a, start.first.pToken.token);
|
||||
assert_eq(TOKEN_b, start.second.pToken.token);
|
||||
assert_eq(TOKEN_c, start.third.pToken.token);
|
||||
}
|
spec/test_ast_invalid_positions.c (new file, 102 lines)
@ -0,0 +1,102 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "\na\n bb ccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(1, start->pT1->pToken->position.row);
|
||||
assert_eq(0, start->pT1->pToken->position.col);
|
||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(2, start->pT1->pA->position.row);
|
||||
assert_eq(2, start->pT1->pA->position.col);
|
||||
assert_eq(2, start->pT1->pA->end_position.row);
|
||||
assert_eq(7, start->pT1->pA->end_position.col);
|
||||
assert_eq(1, start->pT1->position.row);
|
||||
assert_eq(0, start->pT1->position.col);
|
||||
assert_eq(2, start->pT1->end_position.row);
|
||||
assert_eq(7, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(1, start->position.row);
|
||||
assert_eq(0, start->position.col);
|
||||
assert_eq(2, start->end_position.row);
|
||||
assert_eq(7, start->end_position.col);
|
||||
|
||||
input = "a\nbb";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start->pT1->pToken->position.row);
|
||||
assert_eq(0, start->pT1->pToken->position.col);
|
||||
assert_eq(0, start->pT1->pToken->end_position.row);
|
||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(1, start->pT1->pA->position.row);
|
||||
assert_eq(0, start->pT1->pA->position.col);
|
||||
assert_eq(1, start->pT1->pA->end_position.row);
|
||||
assert_eq(1, start->pT1->pA->end_position.col);
|
||||
assert_eq(0, start->pT1->position.row);
|
||||
assert_eq(0, start->pT1->position.col);
|
||||
assert_eq(1, start->pT1->end_position.row);
|
||||
assert_eq(1, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(0, start->position.row);
|
||||
assert_eq(0, start->position.col);
|
||||
assert_eq(1, start->end_position.row);
|
||||
assert_eq(1, start->end_position.col);
|
||||
|
||||
input = "a\nc\nc";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start->pT1->pToken->position.row);
|
||||
assert_eq(0, start->pT1->pToken->position.col);
|
||||
assert_eq(0, start->pT1->pToken->end_position.row);
|
||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(1, start->pT1->pA->position.row);
|
||||
assert_eq(0, start->pT1->pA->position.col);
|
||||
assert_eq(2, start->pT1->pA->end_position.row);
|
||||
assert_eq(0, start->pT1->pA->end_position.col);
|
||||
assert_eq(0, start->pT1->position.row);
|
||||
assert_eq(0, start->pT1->position.col);
|
||||
assert_eq(2, start->pT1->end_position.row);
|
||||
assert_eq(0, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(0, start->position.row);
|
||||
assert_eq(0, start->position.col);
|
||||
assert_eq(2, start->end_position.row);
|
||||
assert_eq(0, start->end_position.col);
|
||||
|
||||
input = "a";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start->pT1->pToken->position.row);
|
||||
assert_eq(0, start->pT1->pToken->position.col);
|
||||
assert_eq(0, start->pT1->pToken->end_position.row);
|
||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
||||
assert(!p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(0, start->pT1->position.row);
|
||||
assert_eq(0, start->pT1->position.col);
|
||||
assert_eq(0, start->pT1->end_position.row);
|
||||
assert_eq(0, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(0, start->position.row);
|
||||
assert_eq(0, start->position.col);
|
||||
assert_eq(0, start->end_position.row);
|
||||
assert_eq(0, start->end_position.col);
|
||||
|
||||
return 0;
|
||||
}
|
spec/test_ast_invalid_positions.d (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "\na\n bb ccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(1, start.pT1.pToken.position.row);
|
||||
assert_eq(0, start.pT1.pToken.position.col);
|
||||
assert_eq(1, start.pT1.pToken.end_position.row);
|
||||
assert_eq(0, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(2, start.pT1.pA.position.row);
|
||||
assert_eq(2, start.pT1.pA.position.col);
|
||||
assert_eq(2, start.pT1.pA.end_position.row);
|
||||
assert_eq(7, start.pT1.pA.end_position.col);
|
||||
assert_eq(1, start.pT1.position.row);
|
||||
assert_eq(0, start.pT1.position.col);
|
||||
assert_eq(2, start.pT1.end_position.row);
|
||||
assert_eq(7, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(1, start.position.row);
|
||||
assert_eq(0, start.position.col);
|
||||
assert_eq(2, start.end_position.row);
|
||||
assert_eq(7, start.end_position.col);
|
||||
|
||||
input = "a\nbb";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start.pT1.pToken.position.row);
|
||||
assert_eq(0, start.pT1.pToken.position.col);
|
||||
assert_eq(0, start.pT1.pToken.end_position.row);
|
||||
assert_eq(0, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(1, start.pT1.pA.position.row);
|
||||
assert_eq(0, start.pT1.pA.position.col);
|
||||
assert_eq(1, start.pT1.pA.end_position.row);
|
||||
assert_eq(1, start.pT1.pA.end_position.col);
|
||||
assert_eq(0, start.pT1.position.row);
|
||||
assert_eq(0, start.pT1.position.col);
|
||||
assert_eq(1, start.pT1.end_position.row);
|
||||
assert_eq(1, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(0, start.position.row);
|
||||
assert_eq(0, start.position.col);
|
||||
assert_eq(1, start.end_position.row);
|
||||
assert_eq(1, start.end_position.col);
|
||||
|
||||
input = "a\nc\nc";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start.pT1.pToken.position.row);
|
||||
assert_eq(0, start.pT1.pToken.position.col);
|
||||
assert_eq(0, start.pT1.pToken.end_position.row);
|
||||
assert_eq(0, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(1, start.pT1.pA.position.row);
|
||||
assert_eq(0, start.pT1.pA.position.col);
|
||||
assert_eq(2, start.pT1.pA.end_position.row);
|
||||
assert_eq(0, start.pT1.pA.end_position.col);
|
||||
assert_eq(0, start.pT1.position.row);
|
||||
assert_eq(0, start.pT1.position.col);
|
||||
assert_eq(2, start.pT1.end_position.row);
|
||||
assert_eq(0, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(0, start.position.row);
|
||||
assert_eq(0, start.position.col);
|
||||
assert_eq(2, start.end_position.row);
|
||||
assert_eq(0, start.end_position.col);
|
||||
|
||||
input = "a";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(0, start.pT1.pToken.position.row);
|
||||
assert_eq(0, start.pT1.pToken.position.col);
|
||||
assert_eq(0, start.pT1.pToken.end_position.row);
|
||||
assert_eq(0, start.pT1.pToken.end_position.col);
|
||||
assert(!start.pT1.pA.position.valid);
|
||||
assert_eq(0, start.pT1.position.row);
|
||||
assert_eq(0, start.pT1.position.col);
|
||||
assert_eq(0, start.pT1.end_position.row);
|
||||
assert_eq(0, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(0, start.position.row);
|
||||
assert_eq(0, start.position.col);
|
||||
assert_eq(0, start.end_position.row);
|
||||
assert_eq(0, start.end_position.col);
|
||||
}
|
spec/test_ast_ps.c (new file, 55 lines)
@ -0,0 +1,55 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
PStartS * start = p_result(&context);
|
||||
assert(start->pItems1 != NULL);
|
||||
assert(start->pItems != NULL);
|
||||
PItemsS * items = start->pItems;
|
||||
assert(items->pItem != NULL);
|
||||
assert(items->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_a, items->pItem->pToken1->token);
|
||||
assert_eq(11, items->pItem->pToken1->pvalue);
|
||||
assert(items->pItemsMore != NULL);
|
||||
PItemsMoreS * itemsmore = items->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore != NULL);
|
||||
itemsmore = itemsmore->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore == NULL);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems == NULL);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems != NULL);
|
||||
assert(start->pItems->pItem != NULL);
|
||||
assert(start->pItems->pItem->pDual != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne2 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne1 == NULL);
|
||||
|
||||
return 0;
|
||||
}
|
spec/test_ast_ps.d (new file, 57 lines)
@ -0,0 +1,57 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
PStartS * start = p_result(&context);
|
||||
assert(start.pItems1 !is null);
|
||||
assert(start.pItems !is null);
|
||||
PItemsS * items = start.pItems;
|
||||
assert(items.pItem !is null);
|
||||
assert(items.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_a, items.pItem.pToken1.token);
|
||||
assert_eq(11, items.pItem.pToken1.pvalue);
|
||||
assert(items.pItemsMore !is null);
|
||||
PItemsMoreS * itemsmore = items.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore !is null);
|
||||
itemsmore = itemsmore.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore is null);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems is null);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems !is null);
|
||||
assert(start.pItems.pItem !is null);
|
||||
assert(start.pItems.pItem.pDual !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo1 !is null);
|
||||
assert(start.pItems.pItem.pDual.pOne2 !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo2 is null);
|
||||
assert(start.pItems.pItem.pDual.pOne1 is null);
|
||||
}
|
spec/test_ast_token_positions.c (new file, 84 lines)
@ -0,0 +1,84 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "abbccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(0, start->pT1->pToken->position.row);
|
||||
assert_eq(0, start->pT1->pToken->position.col);
|
||||
assert_eq(0, start->pT1->pToken->end_position.row);
|
||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
||||
assert_eq(0, start->pT1->position.row);
|
||||
assert_eq(0, start->pT1->position.col);
|
||||
assert_eq(0, start->pT1->end_position.row);
|
||||
assert_eq(0, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(0, start->pT2->pToken->position.row);
|
||||
assert_eq(1, start->pT2->pToken->position.col);
|
||||
assert_eq(0, start->pT2->pToken->end_position.row);
|
||||
assert_eq(2, start->pT2->pToken->end_position.col);
|
||||
assert_eq(0, start->pT2->position.row);
|
||||
assert_eq(1, start->pT2->position.col);
|
||||
assert_eq(0, start->pT2->end_position.row);
|
||||
assert_eq(2, start->pT2->end_position.col);
|
||||
|
||||
assert_eq(0, start->pT3->pToken->position.row);
|
||||
assert_eq(3, start->pT3->pToken->position.col);
|
||||
assert_eq(0, start->pT3->pToken->end_position.row);
|
||||
assert_eq(5, start->pT3->pToken->end_position.col);
|
||||
assert_eq(0, start->pT3->position.row);
|
||||
assert_eq(3, start->pT3->position.col);
|
||||
assert_eq(0, start->pT3->end_position.row);
|
||||
assert_eq(5, start->pT3->end_position.col);
|
||||
|
||||
assert_eq(0, start->position.row);
|
||||
assert_eq(0, start->position.col);
|
||||
assert_eq(0, start->end_position.row);
|
||||
assert_eq(5, start->end_position.col);
|
||||
|
||||
input = "\n\n bb\nc\ncc\n\n a";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(2, start->pT1->pToken->position.row);
|
||||
assert_eq(2, start->pT1->pToken->position.col);
|
||||
assert_eq(2, start->pT1->pToken->end_position.row);
|
||||
assert_eq(3, start->pT1->pToken->end_position.col);
|
||||
assert_eq(2, start->pT1->position.row);
|
||||
assert_eq(2, start->pT1->position.col);
|
||||
assert_eq(2, start->pT1->end_position.row);
|
||||
assert_eq(3, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(3, start->pT2->pToken->position.row);
|
||||
assert_eq(0, start->pT2->pToken->position.col);
|
||||
assert_eq(4, start->pT2->pToken->end_position.row);
|
||||
assert_eq(1, start->pT2->pToken->end_position.col);
|
||||
assert_eq(3, start->pT2->position.row);
|
||||
assert_eq(0, start->pT2->position.col);
|
||||
assert_eq(4, start->pT2->end_position.row);
|
||||
assert_eq(1, start->pT2->end_position.col);
|
||||
|
||||
assert_eq(6, start->pT3->pToken->position.row);
|
||||
assert_eq(5, start->pT3->pToken->position.col);
|
||||
assert_eq(6, start->pT3->pToken->end_position.row);
|
||||
assert_eq(5, start->pT3->pToken->end_position.col);
|
||||
assert_eq(6, start->pT3->position.row);
|
||||
assert_eq(5, start->pT3->position.col);
|
||||
assert_eq(6, start->pT3->end_position.row);
|
||||
assert_eq(5, start->pT3->end_position.col);
|
||||
|
||||
assert_eq(2, start->position.row);
|
||||
assert_eq(2, start->position.col);
|
||||
assert_eq(6, start->end_position.row);
|
||||
assert_eq(5, start->end_position.col);
|
||||
|
||||
return 0;
|
||||
}
|
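
These assertions pin down the coordinate convention the generated parser uses: position and end_position are both zero-based, and end_position names the last character of a token rather than one past it (the two-character token starting at column 1 ends at column 2). The multi-space runs in the second input were restored from the asserted column values. A minimal sketch of that convention, using a hypothetical helper that is not part of the generated API:

#include <stddef.h>

typedef struct { size_t row; size_t col; } pos_t; /* mirrors position.row/.col (hypothetical) */

/* Compute zero-based (row, col) start and end positions of a token that
 * begins at byte offset `offset` and spans `length` bytes, assuming the
 * token does not cross a newline. End is inclusive: start + length - 1. */
static void token_span(char const * input, size_t offset, size_t length,
                       pos_t * start, pos_t * end)
{
    start->row = 0u;
    start->col = 0u;
    for (size_t i = 0u; i < offset; i++)
    {
        if (input[i] == '\n') { start->row++; start->col = 0u; }
        else { start->col++; }
    }
    end->row = start->row;
    end->col = start->col + length - 1u;
}

For "abbccc", token_span(input, 1, 2, &s, &e) yields start (0, 1) and end (0, 2), matching the pT2 assertions above.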

spec/test_ast_token_positions.d (new file, 86 lines)
@@ -0,0 +1,86 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "abbccc";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);

    assert_eq(0, start.pT1.pToken.position.row);
    assert_eq(0, start.pT1.pToken.position.col);
    assert_eq(0, start.pT1.pToken.end_position.row);
    assert_eq(0, start.pT1.pToken.end_position.col);
    assert_eq(0, start.pT1.position.row);
    assert_eq(0, start.pT1.position.col);
    assert_eq(0, start.pT1.end_position.row);
    assert_eq(0, start.pT1.end_position.col);

    assert_eq(0, start.pT2.pToken.position.row);
    assert_eq(1, start.pT2.pToken.position.col);
    assert_eq(0, start.pT2.pToken.end_position.row);
    assert_eq(2, start.pT2.pToken.end_position.col);
    assert_eq(0, start.pT2.position.row);
    assert_eq(1, start.pT2.position.col);
    assert_eq(0, start.pT2.end_position.row);
    assert_eq(2, start.pT2.end_position.col);

    assert_eq(0, start.pT3.pToken.position.row);
    assert_eq(3, start.pT3.pToken.position.col);
    assert_eq(0, start.pT3.pToken.end_position.row);
    assert_eq(5, start.pT3.pToken.end_position.col);
    assert_eq(0, start.pT3.position.row);
    assert_eq(3, start.pT3.position.col);
    assert_eq(0, start.pT3.end_position.row);
    assert_eq(5, start.pT3.end_position.col);

    assert_eq(0, start.position.row);
    assert_eq(0, start.position.col);
    assert_eq(0, start.end_position.row);
    assert_eq(5, start.end_position.col);

    input = "\n\n  bb\nc\ncc\n\n     a";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(2, start.pT1.pToken.position.row);
    assert_eq(2, start.pT1.pToken.position.col);
    assert_eq(2, start.pT1.pToken.end_position.row);
    assert_eq(3, start.pT1.pToken.end_position.col);
    assert_eq(2, start.pT1.position.row);
    assert_eq(2, start.pT1.position.col);
    assert_eq(2, start.pT1.end_position.row);
    assert_eq(3, start.pT1.end_position.col);

    assert_eq(3, start.pT2.pToken.position.row);
    assert_eq(0, start.pT2.pToken.position.col);
    assert_eq(4, start.pT2.pToken.end_position.row);
    assert_eq(1, start.pT2.pToken.end_position.col);
    assert_eq(3, start.pT2.position.row);
    assert_eq(0, start.pT2.position.col);
    assert_eq(4, start.pT2.end_position.row);
    assert_eq(1, start.pT2.end_position.col);

    assert_eq(6, start.pT3.pToken.position.row);
    assert_eq(5, start.pT3.pToken.position.col);
    assert_eq(6, start.pT3.pToken.end_position.row);
    assert_eq(5, start.pT3.pToken.end_position.col);
    assert_eq(6, start.pT3.position.row);
    assert_eq(5, start.pT3.position.col);
    assert_eq(6, start.pT3.end_position.row);
    assert_eq(5, start.pT3.end_position.col);

    assert_eq(2, start.position.row);
    assert_eq(2, start.position.col);
    assert_eq(6, start.end_position.row);
    assert_eq(5, start.end_position.col);
}

spec/test_basic_math_grammar.c (new file, 29 lines)
@@ -0,0 +1,29 @@
#include "testparser.h"
#include "testutils.h"
#include <string.h>

int main()
{
    char const * input = "1 + 2 * 3 + 4";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    assert_eq(11, p_result(&context));

    input = "1 * 2 ** 4 * 3";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    assert_eq(48, p_result(&context));

    input = "(1 + 2) * 3 + 4";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    assert_eq(13, p_result(&context));

    input = "(2 * 2) ** 3 + 4 + 5";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    assert_eq(73, p_result(&context));

    return 0;
}
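
The expected values encode the grammar's precedence: ** binds tighter than *, which binds tighter than +, and parentheses override both. Worked out for the second case: 1 * 2 ** 4 * 3 = 1 * (2 ** 4) * 3 = 1 * 16 * 3 = 48. The same expectations restated as plain C arithmetic (a sanity check only, not part of the test suite; ** is expanded by hand since C has no power operator):

#include <assert.h>

int main(void)
{
    assert(1 + 2 * 3 + 4 == 11);
    assert(1 * (2 * 2 * 2 * 2) * 3 == 48);             /* 2 ** 4 == 16 */
    assert((1 + 2) * 3 + 4 == 13);
    assert((2 * 2) * (2 * 2) * (2 * 2) + 4 + 5 == 73); /* 4 ** 3 == 64 */
    return 0;
}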

spec/test_error_positions.c (new file, 42 lines)
@@ -0,0 +1,42 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "a 42";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "a\n123\na  a";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
    assert(p_position(&context).row == 2);
    assert(p_position(&context).col == 3);
    assert(p_token(&context) == TOKEN_a);

    input = "12";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
    assert(p_position(&context).row == 0);
    assert(p_position(&context).col == 0);
    assert(p_token(&context) == TOKEN_num);

    input = "a 12\n\nab";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_UNEXPECTED_INPUT);
    assert(p_position(&context).row == 2);
    assert(p_position(&context).col == 1);

    input = "a 12\n\na\n\n77\na   \xAA";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_DECODE_ERROR);
    assert(p_position(&context).row == 5);
    assert(p_position(&context).col == 4);

    assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
    assert(strcmp(p_token_names[TOKEN_num], "num") == 0);

    return 0;
}
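
Together, p_position(), p_token(), and p_token_names cover what a caller needs for a diagnostic. A sketch of an error reporter built only on the calls exercised above; the message format and the report_parse_error name are made up here, the position type name p_position_t is taken from the D tests, and p_parse is assumed to return a size_t-compatible status:

#include "testparser.h"
#include <stdio.h>

/* Report a parse failure in file "row:col" form. Rows and columns are
 * zero-based in the generated parser; +1 converts to the one-based
 * convention editors typically display. */
static void report_parse_error(p_context_t * context, size_t result)
{
    p_position_t pos = p_position(context);
    if (result == P_UNEXPECTED_TOKEN)
    {
        fprintf(stderr, "%zu:%zu: unexpected token `%s`\n",
                (size_t)pos.row + 1u, (size_t)pos.col + 1u,
                p_token_names[p_token(context)]);
    }
    else if (result == P_UNEXPECTED_INPUT)
    {
        fprintf(stderr, "%zu:%zu: unexpected input\n",
                (size_t)pos.row + 1u, (size_t)pos.col + 1u);
    }
    else if (result == P_DECODE_ERROR)
    {
        fprintf(stderr, "%zu:%zu: invalid UTF-8\n",
                (size_t)pos.row + 1u, (size_t)pos.col + 1u);
    }
}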

spec/test_error_positions.d (modified)
@@ -17,13 +17,13 @@ unittest
     p_context_init(&context, input);
     assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
     assert(p_position(&context) == p_position_t(2, 3));
-    assert(context.token == TOKEN_a);
+    assert(p_token(&context) == TOKEN_a);
 
     input = "12";
     p_context_init(&context, input);
     assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
     assert(p_position(&context) == p_position_t(0, 0));
-    assert(context.token == TOKEN_num);
+    assert(p_token(&context) == TOKEN_num);
 
     input = "a 12\n\nab";
     p_context_init(&context, input);
@@ -33,6 +33,8 @@ unittest
     input = "a 12\n\na\n\n77\na   \xAA";
     p_context_init(&context, input);
     assert(p_parse(&context) == P_DECODE_ERROR);
+    writeln(p_position(&context));
     assert(p_position(&context) == p_position_t(5, 4));
-
+    assert(p_token_names[TOKEN_a] == "a");
+    assert(p_token_names[TOKEN_num] == "num");
 }

spec/test_field_aliases.c (new file, 13 lines)
@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "foo1\nbar2";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    return 0;
}

spec/test_field_aliases.d (new file, 15 lines)
@@ -0,0 +1,15 @@
import testparser;
import std.stdio;

int main()
{
    return 0;
}

unittest
{
    string input = "foo1\nbar2";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
}

spec/test_lexer.c (new file, 110 lines)
@@ -0,0 +1,110 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    size_t result;
    p_code_point_t code_point;
    uint8_t code_point_length;

    result = p_decode_code_point((uint8_t const *)"5", 1u, &code_point, &code_point_length);
    assert(result == P_SUCCESS);
    assert(code_point == '5');
    assert(code_point_length == 1u);

    result = p_decode_code_point((uint8_t const *)"", 0u, &code_point, &code_point_length);
    assert(result == P_EOF);

    result = p_decode_code_point((uint8_t const *)"\xC2\xA9", 2u, &code_point, &code_point_length);
    assert(result == P_SUCCESS);
    assert(code_point == 0xA9u);
    assert(code_point_length == 2u);

    result = p_decode_code_point((uint8_t const *)"\xf0\x9f\xa7\xa1", 4u, &code_point, &code_point_length);
    assert(result == P_SUCCESS);
    assert(code_point == 0x1F9E1u);
    assert(code_point_length == 4u);

    result = p_decode_code_point((uint8_t const *)"\xf0\x9f\x27", 3u, &code_point, &code_point_length);
    assert(result == P_DECODE_ERROR);

    result = p_decode_code_point((uint8_t const *)"\xf0\x9f\xa7\xFF", 4u, &code_point, &code_point_length);
    assert(result == P_DECODE_ERROR);

    result = p_decode_code_point((uint8_t const *)"\xfe", 1u, &code_point, &code_point_length);
    assert(result == P_DECODE_ERROR);

    p_token_info_t token_info;
    char const * input = "5 + 4 * \n677 + 567";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 0u);
    assert(token_info.position.col == 0u);
    assert(token_info.end_position.row == 0u);
    assert(token_info.end_position.col == 0u);
    assert(token_info.length == 1u);
    assert(token_info.token == TOKEN_int);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 0u);
    assert(token_info.position.col == 2u);
    assert(token_info.end_position.row == 0u);
    assert(token_info.end_position.col == 2u);
    assert(token_info.length == 1u);
    assert(token_info.token == TOKEN_plus);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 0u);
    assert(token_info.position.col == 4u);
    assert(token_info.end_position.row == 0u);
    assert(token_info.end_position.col == 4u);
    assert(token_info.length == 1u);
    assert(token_info.token == TOKEN_int);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 0u);
    assert(token_info.position.col == 6u);
    assert(token_info.end_position.row == 0u);
    assert(token_info.end_position.col == 6u);
    assert(token_info.length == 1u);
    assert(token_info.token == TOKEN_times);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 1u);
    assert(token_info.position.col == 0u);
    assert(token_info.end_position.row == 1u);
    assert(token_info.end_position.col == 2u);
    assert(token_info.length == 3u);
    assert(token_info.token == TOKEN_int);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 1u);
    assert(token_info.position.col == 4u);
    assert(token_info.end_position.row == 1u);
    assert(token_info.end_position.col == 4u);
    assert(token_info.length == 1u);
    assert(token_info.token == TOKEN_plus);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 1u);
    assert(token_info.position.col == 6u);
    assert(token_info.end_position.row == 1u);
    assert(token_info.end_position.col == 8u);
    assert(token_info.length == 3u);
    assert(token_info.token == TOKEN_int);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 1u);
    assert(token_info.position.col == 9u);
    assert(token_info.end_position.row == 1u);
    assert(token_info.end_position.col == 9u);
    assert(token_info.length == 0u);
    assert(token_info.token == TOKEN___EOF);

    p_context_init(&context, (uint8_t const *)"", 0u);
    assert(p_lex(&context, &token_info) == P_SUCCESS);
    assert(token_info.position.row == 0u);
    assert(token_info.position.col == 0u);
    assert(token_info.end_position.row == 0u);
    assert(token_info.end_position.col == 0u);
    assert(token_info.length == 0u);
    assert(token_info.token == TOKEN___EOF);

    return 0;
}
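
Beyond the fixed token sequence checked above, p_lex() can drive a standalone token dump: call it until TOKEN___EOF, reading position, length, and token from the p_token_info_t it fills in. A sketch using only calls and fields exercised in this test:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "5 + 4 * \n677 + 567";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));

    p_token_info_t token_info;
    /* Lex until EOF, printing each token's name, start position, and length. */
    while (p_lex(&context, &token_info) == P_SUCCESS)
    {
        if (token_info.token == TOKEN___EOF)
        {
            break;
        }
        printf("%s at %zu:%zu (%zu bytes)\n",
               p_token_names[token_info.token],
               (size_t)token_info.position.row,
               (size_t)token_info.position.col,
               (size_t)token_info.length);
    }
    return 0;
}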

spec/test_lexer.d (modified)
@@ -47,23 +47,23 @@ unittest
     p_context_t context;
     p_context_init(&context, input);
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(0, 0), 1, TOKEN_int));
+    assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 1, TOKEN_int));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(0, 2), 1, TOKEN_plus));
+    assert(token_info == p_token_info_t(p_position_t(0, 2), p_position_t(0, 2), 1, TOKEN_plus));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(0, 4), 1, TOKEN_int));
+    assert(token_info == p_token_info_t(p_position_t(0, 4), p_position_t(0, 4), 1, TOKEN_int));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(0, 6), 1, TOKEN_times));
+    assert(token_info == p_token_info_t(p_position_t(0, 6), p_position_t(0, 6), 1, TOKEN_times));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(1, 0), 3, TOKEN_int));
+    assert(token_info == p_token_info_t(p_position_t(1, 0), p_position_t(1, 2), 3, TOKEN_int));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(1, 4), 1, TOKEN_plus));
+    assert(token_info == p_token_info_t(p_position_t(1, 4), p_position_t(1, 4), 1, TOKEN_plus));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(1, 6), 3, TOKEN_int));
+    assert(token_info == p_token_info_t(p_position_t(1, 6), p_position_t(1, 8), 3, TOKEN_int));
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(1, 9), 0, TOKEN___EOF));
+    assert(token_info == p_token_info_t(p_position_t(1, 9), p_position_t(1, 9), 0, TOKEN___EOF));
 
     p_context_init(&context, "");
     assert(p_lex(&context, &token_info) == P_SUCCESS);
-    assert(token_info == p_token_info_t(p_position_t(0, 0), 0, TOKEN___EOF));
+    assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 0, TOKEN___EOF));
 }

spec/test_lexer_match_text.c (new file, 15 lines)
@@ -0,0 +1,15 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
    char const * input = "identifier_123";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass1\n");

    return 0;
}

spec/test_lexer_modes.c (new file, 20 lines)
@@ -0,0 +1,20 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
    char const * input = "abc \"a string\" def";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass1\n");

    input = "abc \"abc def\" def";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass2\n");

    return 0;
}

spec/test_lexer_result_value.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "x";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 1u);

    input = "fabulous";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 8u);

    return 0;
}

spec/test_lexer_unknown_character.c (new file, 18 lines)
@@ -0,0 +1,18 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "x";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_UNEXPECTED_INPUT);

    input = "123";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 123u);

    return 0;
}

spec/test_match_backslashes.c (new file, 13 lines)
@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "\a\b\t\n\v\f\rt";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_match_backslashes.d (new file, 15 lines)
@@ -0,0 +1,15 @@
import testparser;
import std.stdio;

int main()
{
    return 0;
}

unittest
{
    string input = "\a\b\t\n\v\f\rt";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
}

spec/test_multiple_parsers.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include "testparsermyp1.h"
#include "testparsermyp2.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input1 = "a\n1";
    myp1_context_t context1;
    myp1_context_init(&context1, (uint8_t const *)input1, strlen(input1));
    assert(myp1_parse(&context1) == MYP1_SUCCESS);

    char const * input2 = "bcb";
    myp2_context_t context2;
    myp2_context_init(&context2, (uint8_t const *)input2, strlen(input2));
    assert(myp2_parse(&context2) == MYP2_SUCCESS);

    return 0;
}

spec/test_optional_rule_component.c (new file, 22 lines)
@@ -0,0 +1,22 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "b";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "abcd";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "abdc";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_optional_rule_component.d (new file, 23 lines)
@@ -0,0 +1,23 @@
import testparser;
import std.stdio;

int main()
{
    return 0;
}

unittest
{
    string input = "b";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);

    input = "abcd";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);

    input = "abdc";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
}

spec/test_optional_rule_component_ast.c (new file, 42 lines)
@@ -0,0 +1,42 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "b";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);
    assert(start->pToken1 == NULL);
    assert(start->pToken2 != NULL);
    assert_eq(TOKEN_b, start->pToken2->token);
    assert(start->pR3 == NULL);
    assert(start->pR == NULL);

    input = "abcd";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);
    assert(start->pToken1 != NULL);
    assert_eq(TOKEN_a, start->pToken1->token);
    assert(start->pToken2 != NULL);
    assert(start->pR3 != NULL);
    assert(start->pR != NULL);
    assert(start->pR == start->pR3);
    assert_eq(TOKEN_c, start->pR->pToken1->token);

    input = "bdc";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);
    assert(start->pToken1 == NULL);
    assert(start->pToken2 != NULL);
    assert(start->pR != NULL);
    assert_eq(TOKEN_d, start->pR->pToken1->token);

    return 0;
}
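
Since optional terms leave their AST fields NULL when the input omits them, traversal code has to test before dereferencing. A small sketch of the defensive pattern these assertions imply; the describe function is hypothetical, but the field names come straight from the test above:

#include "testparser.h"
#include <stdio.h>
#include <stddef.h>

/* Walk a Start node whose pToken1 and pR fields come from optional rule
 * terms; either may be NULL depending on what the input contained. */
static void describe(Start * start)
{
    if (start->pToken1 != NULL)
    {
        printf("leading token: %s\n", p_token_names[start->pToken1->token]);
    }
    if (start->pR != NULL)
    {
        printf("R clause token: %s\n", p_token_names[start->pR->pToken1->token]);
    }
}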

spec/test_optional_rule_component_ast.d (new file, 43 lines)
@@ -0,0 +1,43 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "b";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);
    assert(start.pToken1 is null);
    assert(start.pToken2 !is null);
    assert_eq(TOKEN_b, start.pToken2.token);
    assert(start.pR3 is null);
    assert(start.pR is null);

    input = "abcd";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);
    assert(start.pToken1 !is null);
    assert_eq(TOKEN_a, start.pToken1.token);
    assert(start.pToken2 !is null);
    assert(start.pR3 !is null);
    assert(start.pR !is null);
    assert(start.pR == start.pR3);
    assert_eq(TOKEN_c, start.pR.pToken1.token);

    input = "bdc";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);
    assert(start.pToken1 is null);
    assert(start.pToken2 !is null);
    assert(start.pR !is null);
    assert_eq(TOKEN_d, start.pR.pToken1.token);
}

spec/test_parser_identical_rules_lookahead.c (new file, 17 lines)
@@ -0,0 +1,17 @@
#include "testparser.h"
#include <string.h>
#include <assert.h>

int main()
{
    char const * input = "aba";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "abb";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_parser_rule_from_multiple_states.c (new file, 24 lines)
@@ -0,0 +1,24 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "a";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
    assert(p_position(&context).row == 0);
    assert(p_position(&context).col == 1);
    assert(context.token == TOKEN___EOF);

    input = "a b";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "bb";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_parser_rule_user_code.c (new file, 13 lines)
@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "ab";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_parsing_json.c (new file, 56 lines)
@@ -0,0 +1,56 @@
#include "testparser.h"
#include "json_types.h"
#include <string.h>
#include <assert.h>

int main()
{
    char const * input = "";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "{}";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context)->id == JSON_OBJECT);

    input = "[]";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context)->id == JSON_ARRAY);

    input = "-45.6";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context)->id == JSON_NUMBER);
    assert(p_result(&context)->number == -45.6);

    input = "2E-2";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context)->id == JSON_NUMBER);
    assert(p_result(&context)->number == 0.02);

    input = "{\"hi\":true}";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    JSONValue * o = p_result(&context);
    assert(o->id == JSON_OBJECT);
    assert_eq(1, o->object.size);
    assert(strcmp(o->object.entries[0].name, "hi") == 0);
    assert(o->object.entries[0].value->id == JSON_TRUE);

    input = "{\"ff\": false, \"nn\": null}";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    o = p_result(&context);
    assert(o->id == JSON_OBJECT);
    assert_eq(2, o->object.size);
    assert(strcmp(o->object.entries[0].name, "ff") == 0);
    assert(o->object.entries[0].value->id == JSON_FALSE);
    assert(strcmp(o->object.entries[1].name, "nn") == 0);
    assert(o->object.entries[1].value->id == JSON_NULL);

    return 0;
}
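
The JSONValue shape used here (an id discriminator plus number and object.entries[i].name/value fields) supports a straightforward recursive walk. A sketch covering only the variants this test exercises; json_types.h presumably defines more (array elements are skipped because their field names do not appear above), and object.size is assumed to be a size_t-compatible count:

#include "json_types.h"
#include <stdio.h>
#include <stddef.h>

/* Print a JSONValue using the variants exercised in the test above. */
static void json_dump(JSONValue * v)
{
    if (v->id == JSON_OBJECT)
    {
        printf("{");
        for (size_t i = 0u; i < v->object.size; i++)
        {
            printf("%s\"%s\": ", i == 0u ? "" : ", ", v->object.entries[i].name);
            json_dump(v->object.entries[i].value);
        }
        printf("}");
    }
    else if (v->id == JSON_ARRAY)  printf("[...]"); /* element access not shown in the test */
    else if (v->id == JSON_NUMBER) printf("%g", v->number);
    else if (v->id == JSON_TRUE)   printf("true");
    else if (v->id == JSON_FALSE)  printf("false");
    else if (v->id == JSON_NULL)   printf("null");
}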

spec/test_parsing_lists.c (new file, 24 lines)
@@ -0,0 +1,24 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "a";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 1u);

    input = "";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 0u);

    input = "aaaaaaaaaaaaaaaa";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    assert(p_result(&context) == 16u);

    return 0;
}

spec/test_pattern.c (new file, 20 lines)
@@ -0,0 +1,20 @@
#include "testparser.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "abcdef";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass1\n");

    input = "defabcdef";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass2\n");

    return 0;
}

spec/test_return_token_from_pattern.c (new file, 13 lines)
@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    char const * input = "defghidef";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    return 0;
}

spec/test_start_rule.c (new file, 9 lines)
@@ -0,0 +1,9 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    return 0;
}

spec/test_start_rule.d (new file, 8 lines)
@@ -0,0 +1,8 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

spec/test_start_rule_ast.c (new file, 17 lines)
@@ -0,0 +1,17 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "hi";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    Top * top = p_result(&context);
    assert(top->pToken != NULL);
    assert_eq(TOKEN_hi, top->pToken->token);

    return 0;
}

spec/test_start_rule_ast.d (new file, 19 lines)
@@ -0,0 +1,19 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "hi";
    p_context_t context;
    p_context_init(&context, input);
    assert_eq(P_SUCCESS, p_parse(&context));
    Top * top = p_result(&context);
    assert(top.pToken !is null);
    assert_eq(TOKEN_hi, top.pToken.token);
}

spec/test_user_code.c (new file, 20 lines)
@@ -0,0 +1,20 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
    char const * input = "abcdef";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass1\n");

    input = "abcabcdef";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    printf("pass2\n");

    return 0;
}

spec/test_user_terminate.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
    char const * input = "aacc";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "abc";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_USER_TERMINATED);
    assert(p_user_terminate_code(&context) == 4200);

    return 0;
}

spec/test_user_terminate.d (new file, 20 lines)
@@ -0,0 +1,20 @@
import testparser;
import std.stdio;

int main()
{
    return 0;
}

unittest
{
    string input = "aacc";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);

    input = "abc";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_USER_TERMINATED);
    assert(p_user_terminate_code(&context) == 4200);
}

spec/test_user_terminate_lexer.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
    char const * input = "a";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);

    input = "b";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_USER_TERMINATED);
    assert(p_user_terminate_code(&context) == 8675309);

    return 0;
}
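
The handling pattern for user terminations is the same whether the terminate call happens in a parser rule or, as here, in a lexer code block: check for P_USER_TERMINATED and then read back the code with p_user_terminate_code(). A compact sketch; the parse_input wrapper is hypothetical, and p_parse is assumed to return a size_t-compatible status as the other tests suggest:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

/* Parse and distinguish user termination from ordinary success or failure. */
static int parse_input(char const * input)
{
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    size_t result = p_parse(&context);
    if (result == P_USER_TERMINATED)
    {
        /* 8675309 in this test; in general, whatever the user code passed */
        printf("terminated by user code %zu\n",
               (size_t)p_user_terminate_code(&context));
        return 1;
    }
    return (result == P_SUCCESS) ? 0 : 2;
}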

spec/test_user_terminate_lexer.d (new file, 20 lines)
@@ -0,0 +1,20 @@
import testparser;
import std.stdio;

int main()
{
    return 0;
}

unittest
{
    string input = "a";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);

    input = "b";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_USER_TERMINATED);
    assert(p_user_terminate_code(&context) == 8675309);
}

spec/testutils.c (new file, 38 lines)
@@ -0,0 +1,38 @@
#include <stdio.h>
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "testutils.h"

void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
{
    if (expected != actual)
    {
        fprintf(stderr, "%s:%zu: expected %zu, got %zu\n", file, line, expected, actual);
        assert(false);
    }
}

void str_init(str_t * str, char const * cs)
{
    size_t length = strlen(cs);
    str->cs = malloc(length + 1u);
    strcpy(str->cs, cs);
}

void str_append(str_t * str, char const * cs)
{
    size_t length = strlen(str->cs);
    size_t length2 = strlen(cs);
    char * new_cs = malloc(length + length2 + 1u);
    memcpy(new_cs, str->cs, length);
    strcpy(&new_cs[length], cs);
    free(str->cs);
    str->cs = new_cs;
}

void str_free(str_t * str)
{
    free(str->cs);
}

spec/testutils.h (new file, 19 lines)
@@ -0,0 +1,19 @@
#pragma once
#include <stddef.h> /* size_t */

void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line);

#define assert_eq(expected, actual) \
    assert_eq_size_t_i(expected, actual, __FILE__, __LINE__)

typedef struct
{
    char * cs;
} str_t;

void str_init(str_t * str, char const * cs);
void str_append(str_t * str, char const * cs);
void str_free(str_t * str);
static inline char * str_cstr(str_t * str)
{
    return str->cs;
}
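
assert_eq wraps assert_eq_size_t_i so that failures report the caller's file and line via __FILE__ and __LINE__, and str_t is a tiny owning string buffer built on malloc/free. Typical use of both helpers, as the tests above employ them (the concatenation example is illustrative):

#include <string.h>
#include "testutils.h"

int main(void)
{
    /* Expands to assert_eq_size_t_i(4u, strlen("abcd"), __FILE__, __LINE__) */
    assert_eq(4u, strlen("abcd"));

    str_t str;
    str_init(&str, "hello");
    str_append(&str, ", world");
    assert_eq(12u, strlen(str_cstr(&str))); /* "hello, world" is 12 chars */
    str_free(&str);
    return 0;
}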