Compare commits

82 Commits: a7348be95d...e2db9c95c5

| SHA1 |
|---|
| e2db9c95c5 |
| e992e6344a |
| a05a55ceb8 |
| e9ecf33f58 |
| dd687d0299 |
| d440d0442d |
| f9b4563f94 |
| 334990840e |
| f584389a29 |
| de3fb0d120 |
| d4ad67c23d |
| ff61dd05d9 |
| 6fd5186159 |
| 9f2fe6f84b |
| 78adf86103 |
| f4bc719aed |
| 43c0f50874 |
| 243ee0f19f |
| 8d2efcc19b |
| 9c787f0e89 |
| ef6a0f9552 |
| 77ec7c9de4 |
| 6a87bb2d56 |
| cb426b4be1 |
| addf27d837 |
| 9c03d20083 |
| e223d03d7c |
| be6a9ca2c1 |
| 072af73b1e |
| ac8ed4bf5a |
| 8a393f554a |
| 66f95cb6d8 |
| dbe0bf8ad0 |
| 75fb627602 |
| 7ccb4c8730 |
| 962b7125ec |
| 2df27b04fe |
| 17f1454a4f |
| b371f4b404 |
| 91f476187b |
| 54a0629e60 |
| 69aa3097c3 |
| 5486e5f138 |
| 5b243507cf |
| 25d6e3bc34 |
| 035bb2fc60 |
| 125c149750 |
| 9ef80e61d4 |
| 5b94b03b04 |
| 9d686989ec |
| 87d892d0a3 |
| 4ec57fa48d |
| 8b38ea4261 |
| 92da10e483 |
| 839174a635 |
| 659de44c31 |
| 207201d589 |
| 89bc52fd80 |
| eb9d9026fc |
| 54bb3307cd |
| 5ebcbb2d6d |
| 61ebbb4f19 |
| 1b4ca59158 |
| b02c9205c0 |
| 7344554b5f |
| 77571a3449 |
| e098b7e445 |
| 3ea344a520 |
| 530878a796 |
| 1d468b6d3c |
| 98e10d3d14 |
| 36c74e439e |
| c24f323ff0 |
| fec2c28693 |
| 61339aeae9 |
| 95b3dc6550 |
| 74d94fef72 |
| 588c5e21c7 |
| 5f1c306273 |
| 343e8a7f9e |
| b3a134bf8d |
| 4a71dc74fb |
.github/workflows/run-tests.yml (vendored, new file, 38 lines)

@@ -0,0 +1,38 @@

```yaml
name: Run Propane Tests

on:
  push:
    branches:
      - master
  pull_request:

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        ruby-version: ['3.4']

    steps:
      - name: Install dependencies (Linux)
        if: runner.os == 'Linux'
        run: sudo apt-get update && sudo apt-get install -y gcc gdc ldc

      - name: Install dependencies (macOS)
        if: runner.os == 'macOS'
        run: brew install gcc ldc

      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}

      - name: Install dependencies
        run: bundle install

      - name: Run tests
        run: rake all
```
CHANGELOG.md (117 lines changed)

@@ -1,8 +1,123 @@

## v4.1.0

### New Features

- Add `p_context_delete()` and `p_tree_delete()` for D targets.

## v4.0.0

### New Features

- Add `context_user_fields` statement to allow custom context user fields.
- Add `token_user_fields` statement to allow custom token user fields.
- Add `on_token_node` statement to allow custom code when constructing token nodes.
- Add `free_token_node` statement to allow custom code when freeing token nodes.
- Add `p_context_delete()`.
- Allow `drop` patterns to execute lexer user code blocks.

### Breaking Changes

- Replace `p_context_init()` with `p_context_new()` and `p_context_delete()`.
- Rename `p_free_tree()` to `p_tree_delete()`.
- The `free_token_node` statement now takes a user code block instead of a function name parameter.

## v3.0.0

### New Features

- Add support for multiple starting rules (#38)
- Add `p_free_tree()` functions to reclaim generated tree memory
- Add `free_token_node` grammar statement to reclaim user-allocated memory stored in a Token tree node `pvalue` field
- Add valgrind memory leak tests to unit tests
- Fix build issues for C++ to officially support C++ target output

### Improvements

- Document `p_lex()` and `p_token_info_t` in user guide (#37)

### Breaking Changes

- Rename AST generation mode to tree generation mode (see [UPGRADING.md](UPGRADING.md))

## v2.3.0

### New Features

- Add `\D`, `\S`, `\w`, `\W` special character classes

### Improvements

- Include line numbers for pattern errors
- Improve performance in a few places
- Parallelize parser table generation on Linux hosts
- Add GitHub workflow to run unit tests

### Fixes

- Fix a couple of clang warnings for the C backend
- Fix C backend not fully initializing pvalues when multiple ptypes are used with different sizes.
- Fix some user guide examples

## v2.2.1

### Fixes

- Fix GC issue for D backend when AST is enabled (#36)

## v2.2.0

### Improvements

- Allow multiple lexer modes to be specified for a lexer pattern (#35)
- Document `p_decode_code_point()` API function (#34)

## v2.1.1

### Fixes

- Field aliases for AST node fields could alias the incorrect field when multiple rule alternatives are present for one rule set (#33)

## v2.1.0

### Improvements

- Report rule name and line number for conflicting AST node field position errors (#32)

## v2.0.0

### Improvements

- Log conflicting rules on reduce/reduce conflict (#31)
- Use 1-based row and column values for position values (#30)

### Fixes

- Fix named optional rules (#29)

### Upgrading

- Adjust all uses of `p_position_t` row and col values to expect 1-based instead of 0-based values.

## v1.5.1

### Improvements

- Improve performance (#28)

## v1.5.0

### New Features

- Track token position in AST Token node
- Track start and end text positions for tokens and rules in AST node structures (#27)
- Add warnings for shift/reduce conflicts to log file (#25)
- Add `-w` command line switch to treat warnings as errors and output to stderr (#26)
- Add rule field aliases (#24)

### Improvements

- Show line numbers of rules on conflict (#23)

## v1.4.0
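For C targets, the v4.0.0 entries above replace in-place context initialization with heap allocation. Below is a minimal sketch of the resulting lifecycle, assuming the default `p_` prefix, tree generation enabled, and a hypothetical start rule named `Start`; the grammar-specific names are illustrative, not part of this changeset.

```c
#include <stdint.h>
#include <string.h>
#include "parser.h" /* generated header; name depends on your output file */

int main(void)
{
    static const char input[] = "example input";

    /* v4: p_context_new() replaces p_context_init() and returns a pointer. */
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));

    if (p_parse(context) == P_SUCCESS)
    {
        /* p_tree_delete() is the v4 name for p_free_tree(). */
        p_tree_delete(p_result(context));
    }

    /* v4: contexts allocated by p_context_new() must be deleted. */
    p_context_delete(context);
    return 0;
}
```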
Gemfile (1 line changed)

```diff
@@ -1,5 +1,6 @@
 source "https://rubygems.org"

+gem "base64"
 gem "rake"
 gem "rspec"
 gem "rdoc"
```
Gemfile.lock (46 lines changed)

```diff
@@ -1,40 +1,48 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    diff-lcs (1.5.0)
-    docile (1.4.0)
-    psych (5.1.0)
+    base64 (0.3.0)
+    date (3.5.1)
+    diff-lcs (1.6.2)
+    docile (1.4.1)
+    erb (6.0.1)
+    psych (5.3.1)
+      date
       stringio
-    rake (13.0.6)
-    rdoc (6.5.0)
+    rake (13.3.1)
+    rdoc (7.2.0)
+      erb
       psych (>= 4.0.0)
-    redcarpet (3.6.0)
-    rspec (3.12.0)
-      rspec-core (~> 3.12.0)
-      rspec-expectations (~> 3.12.0)
-      rspec-mocks (~> 3.12.0)
-    rspec-core (3.12.2)
-      rspec-support (~> 3.12.0)
-    rspec-expectations (3.12.3)
-      tsort
+    redcarpet (3.6.1)
+    rspec (3.13.2)
+      rspec-core (~> 3.13.0)
+      rspec-expectations (~> 3.13.0)
+      rspec-mocks (~> 3.13.0)
+    rspec-core (3.13.6)
+      rspec-support (~> 3.13.0)
+    rspec-expectations (3.13.5)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-mocks (3.12.6)
+      rspec-support (~> 3.13.0)
+    rspec-mocks (3.13.7)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-support (3.12.1)
+      rspec-support (~> 3.13.0)
+    rspec-support (3.13.7)
     simplecov (0.22.0)
       docile (~> 1.1)
       simplecov-html (~> 0.11)
       simplecov_json_formatter (~> 0.1)
-    simplecov-html (0.12.3)
+    simplecov-html (0.13.2)
     simplecov_json_formatter (0.1.4)
-    stringio (3.0.7)
+    stringio (3.2.0)
     syntax (1.2.2)
     tsort (0.2.0)

 PLATFORMS
   ruby

 DEPENDENCIES
+  base64
   rake
   rdoc
   redcarpet
```
README.md (17 lines changed)

```diff
@@ -6,8 +6,10 @@ Propane is a LALR Parser Generator (LPG) which:
 * generates a built-in lexer to tokenize input
 * supports UTF-8 lexer inputs
 * generates a table-driven shift/reduce parser to parse input in linear time
-* targets C or D language outputs
-* optionally supports automatic full AST generation
+* targets C, C++, or D language outputs
+* optionally supports automatic full parse tree generation
+* supports starting parsing from multiple start rules
 * tracks input text start and end positions for all matched tokens/rules
 * is MIT-licensed
 * is distributable as a standalone Ruby script

@@ -31,9 +33,14 @@ Propane is typically invoked from the command-line as `./propane`.

     Usage: ./propane [options] <input-file> <output-file>
     Options:
-      --log LOG   Write log file
-      --version   Show program version and exit
-      -h, --help  Show this usage and exit
+      -h, --help  Show this usage and exit.
+      --log LOG   Write log file. This will show all parser states and their
+                  associated shifts and reduces. It can be helpful when
+                  debugging a grammar.
+      --version   Show program version and exit.
+      -w          Treat warnings as errors. This option will treat shift/reduce
+                  conflicts as fatal errors and will print them to stderr in
+                  addition to the log file.

 The user must specify the path to a Propane input grammar file and a path to an
 output file.
```
Rakefile

```diff
@@ -1,5 +1,7 @@
 require "rake/clean"
 require "rspec/core/rake_task"
+require "simplecov"
+require "stringio"

 CLEAN.include %w[spec/run gen .yardoc yard coverage dist]

@@ -12,6 +14,18 @@ RSpec::Core::RakeTask.new(:spec, :example_pattern) do |task, args|
     task.rspec_opts = %W[-e "#{args.example_pattern}" -f documentation]
   end
 end
+task :spec do |task, args|
+  unless ENV["dist_specs"]
+    original_stdout = $stdout
+    sio = StringIO.new
+    $stdout = sio
+    SimpleCov.collate Dir["coverage/.resultset.json"]
+    $stdout = original_stdout
+    sio.string.lines.each do |line|
+      $stdout.write(line) unless line =~ /Coverage report generated for/
+    end
+  end
+end

 # dspec task is useful to test the distributable release script, but is not
 # useful for coverage information.
```
UPGRADING.md (new file, 21 lines)

@@ -0,0 +1,21 @@

## v4.0.0

### API Changes

- Replace any calls to `p_context_init()` with `p_context_new()`.
- Replace any references to the address of a statically allocated context structure with the pointer returned from `p_context_new()` (e.g. `&context` -> `context`).
- Add a call to `p_context_delete()` (for C or C++) after lexing/parsing to reclaim context memory.
- Rename `p_free_tree()` calls to `p_tree_delete()`.
- Change `free_token_node` statements from taking a function name argument to taking a user code block.

## v3.0.0

### Grammar Changes

- Rename `ast;` statement to `tree;`.
- Rename `ast_prefix;` statement to `tree_prefix;`.
- Rename `ast_suffix;` statement to `tree_suffix;`.
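A before/after sketch of the v4.0.0 context migration described above, again assuming the default `p_` prefix (names are illustrative):

```c
#include <stdint.h>
#include "parser.h" /* generated header; name depends on your output file */

void parse_buffer(uint8_t const * input, size_t length)
{
    /* Before (v3.x): statically allocated context, initialized in place.
     *
     *     p_context_t context;
     *     p_context_init(&context, input, length);
     *     p_parse(&context);
     */

    /* After (v4.x): heap-allocated context; pass the returned pointer
     * directly instead of &context. */
    p_context_t * context = p_context_new(input, length);
    p_parse(context);
    p_context_delete(context); /* reclaim context memory (C/C++ targets) */
}
```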
C parser source template (ERB):

```diff
@@ -43,27 +43,52 @@ const char * <%= @grammar.prefix %>token_names[] = {
 *************************************************************************/

 /**
- * Initialize lexer/parser context structure.
+ * Allocate and initialize lexer/parser context structure.
+ *
+ * Deinitialize and deallocate with <%= @grammar.prefix %>context_delete().
  *
- * @param[out] context
- *   Lexer/parser context structure.
  * @param input
  *   Text input.
  * @param input_length
  *   Text input length.
+ *
+ * @return Context structure for lexer/parser.
 */
-void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length)
+<%= @grammar.prefix %>context_t * <%= @grammar.prefix %>context_new(uint8_t const * input, size_t input_length)
 {
-    /* New default-initialized context structure. */
-    <%= @grammar.prefix %>context_t newcontext = {0};
+<% if @cpp %>
+    <%= @grammar.prefix %>context_t * context = new <%= @grammar.prefix %>context_t();
+<% else %>
+    <%= @grammar.prefix %>context_t * context = (<%= @grammar.prefix %>context_t *)calloc(1, sizeof(<%= @grammar.prefix %>context_t));
+<% end %>

     /* Lexer initialization. */
-    newcontext.input = input;
-    newcontext.input_length = input_length;
-    newcontext.mode = <%= @lexer.mode_id("default") %>;
+    context->input = input;
+    context->input_length = input_length;
+    context->text_position.row = 1u;
+    context->text_position.col = 1u;
+    context->mode = <%= @lexer.mode_id("default") %>;

-    /* Copy to the user's context structure. */
-    *context = newcontext;
+    return context;
 }

+/**
+ * Deinitialize and deallocate lexer/parser context structure.
+ *
+ * For C++, destructors will be called for any context user fields. However, if
+ * pointers are used to store allocated resources, the user should free them
+ * before calling this function.
+ *
+ * @param context
+ *   Lexer/parser context structure allocated with <%= @grammar.prefix %>context_new().
+ */
+void <%= @grammar.prefix %>context_delete(<%= @grammar.prefix %>context_t * context)
+{
+<% if @cpp %>
+    delete context;
+<% else %>
+    free(context);
+<% end %>
+}

 /**************************************************************************
@@ -342,8 +367,10 @@ static lexer_state_id_t check_lexer_transition(uint32_t current_state, uint32_t
 static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
     lexer_match_info_t * out_match_info, size_t * out_unexpected_input_length)
 {
-    lexer_match_info_t longest_match = {0};
-    lexer_match_info_t attempt_match = {0};
+    lexer_match_info_t longest_match;
+    memset(&longest_match, 0, sizeof(longest_match));
+    lexer_match_info_t attempt_match;
+    memset(&attempt_match, 0, sizeof(attempt_match));
     *out_match_info = longest_match;
     uint32_t current_state = lexer_mode_table[context->mode].state_table_offset;
     for (;;)
@@ -357,6 +384,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
         switch (result)
         {
         case P_SUCCESS:
+        {
             lexer_state_id_t transition_state = check_lexer_transition(current_state, code_point);
             if (transition_state != INVALID_LEXER_STATE_ID)
             {
@@ -365,7 +393,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 if (code_point == '\n')
                 {
                     attempt_match.delta_position.row++;
-                    attempt_match.delta_position.col = 0u;
+                    attempt_match.delta_position.col = 1u;
                 }
                 else
                 {
@@ -388,6 +416,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 *out_unexpected_input_length = attempt_match.length + code_point_length;
                 return P_UNEXPECTED_INPUT;
             }
+        }
         break;

         case P_EOF:
@@ -445,7 +474,8 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
 */
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
-    <%= @grammar.prefix %>token_info_t token_info = {0};
+    <%= @grammar.prefix %>token_info_t token_info;
+    memset(&token_info, 0, sizeof(token_info));
     token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
     lexer_match_info_t match_info;
@@ -454,6 +484,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
     switch (result)
     {
         case P_SUCCESS:
+        {
             <%= @grammar.prefix %>token_t token_to_accept = match_info.accepting_state->token;
             if (match_info.accepting_state->code_id != INVALID_USER_CODE_ID)
             {
@@ -505,6 +536,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
                 token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
             }
             *out_token_info = token_info;
+        }
         return P_SUCCESS;

         case P_EOF:
@@ -567,7 +599,7 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
 *************************************************************************/

 /** Invalid position value. */
-#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0xFFFFFFFFu, 0xFFFFFFFFu}
+#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0u, 0u}

 /** Reduce ID type. */
 typedef <%= get_type_for(@parser.reduce_table.size) %> reduce_id_t;
@@ -628,7 +660,7 @@ typedef struct
      * reduce action.
      */
     parser_state_id_t n_states;
-<% if @grammar.ast %>
+<% if @grammar.tree %>

     /**
      * Map of rule components to rule set child fields.
@@ -636,7 +668,7 @@ typedef struct
     uint16_t const * rule_set_node_field_index_map;

     /**
-     * Number of rule set AST node fields.
+     * Number of rule set tree node fields.
      */
     uint16_t rule_set_node_field_array_size;
@@ -678,19 +710,23 @@ typedef struct
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;

-<% if @grammar.ast %>
-    /** AST node. */
-    void * ast_node;
+<% if @grammar.tree %>
+    /** tree node. */
+    void * tree_node;
 <% end %>
 } state_value_t;

-/** Common AST node structure. */
-typedef struct
+<% if @grammar.tree %>
+/** Common tree node structure. */
+typedef struct TreeNode_s
 {
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
-    void * fields[];
-} ASTNode;
+    uint16_t n_fields;
+    uint8_t is_token;
+    struct TreeNode_s * fields[];
+} TreeNode;
+<% end %>

 /** Parser shift table. */
 static const shift_t parser_shift_table[] = {
@@ -699,7 +735,7 @@ static const shift_t parser_shift_table[] = {
 <% end %>
 };

-<% if @grammar.ast %>
+<% if @grammar.tree %>
 <% @grammar.rules.each do |rule| %>
 <% unless rule.flat_rule_set_node_field_index_map? %>
 const uint16_t r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map[<%= rule.rule_set_node_field_index_map.size %>] = {<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>};
@@ -710,17 +746,22 @@ const uint16_t r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_
 /** Parser reduce table. */
 static const reduce_t parser_reduce_table[] = {
 <% @parser.reduce_table.each do |reduce| %>
-    {<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
-<% if @grammar.ast %>
+    {
+        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
+        <%= reduce[:rule_id] %>u, /* Rule ID */
+        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
+<% if @grammar.tree %>
+        <%= reduce[:n_states] %>u, /* Number of states */
 <% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-    , NULL
+        NULL, /* No rule set node field index map (flat map) */
 <% else %>
-    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
 <% end %>
-    , <%= reduce[:rule].rule_set.ast_fields.size %>
-    , <%= reduce[:propagate_optional_target] %>
+        <%= reduce[:rule].rule_set.tree_fields.size %>, /* Number of tree fields */
+        <%= reduce[:propagate_optional_target] %>}, /* Propagate optional target? */
+<% else %>
+        <%= reduce[:n_states] %>u},
 <% end %>
-    },
 <% end %>
 };
@@ -790,7 +831,7 @@ static void state_values_stack_push(state_values_stack_t * stack)
     if (current_length >= current_capacity)
     {
         size_t const new_capacity = current_capacity * 2u;
-        state_value_t * new_entries = malloc(new_capacity * sizeof(state_value_t));
+        state_value_t * new_entries = (state_value_t *)malloc(new_capacity * sizeof(state_value_t));
         memcpy(new_entries, stack->entries, current_length * sizeof(state_value_t));
         free(stack->entries);
         stack->capacity = new_capacity;
@@ -824,7 +865,7 @@ static void state_values_stack_free(state_values_stack_t * stack)
     free(stack->entries);
 }

-<% unless @grammar.ast %>
+<% unless @grammar.tree %>
 /**
  * Execute user code associated with a parser rule.
  *
@@ -907,6 +948,8 @@ static size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token)
  *
  * @param context
  *   Lexer/parser context structure.
+ * @start_state_id
+ *   ID of the state in which to start.
  *
  * @retval P_SUCCESS
  *   The parser successfully matched the input text. The parse result value
@@ -919,25 +962,26 @@ static size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token)
  * @reval P_UNEXPECTED_INPUT
  *   Input text does not match any lexer pattern.
  */
-size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
+static size_t parse_from(<%= @grammar.prefix %>context_t * context, size_t start_state_id)
 {
     <%= @grammar.prefix %>token_info_t token_info;
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_values_stack_t statevalues;
     size_t reduced_rule_set = INVALID_ID;
-<% if @grammar.ast %>
+<% if @grammar.tree %>
     void * reduced_parser_node;
 <% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
 <% end %>
     state_values_stack_init(&statevalues);
     state_values_stack_push(&statevalues);
+    state_values_stack_index(&statevalues, -1)->state_id = start_state_id;
     size_t result;
     for (;;)
    {
         if (token == INVALID_TOKEN_ID)
         {
-            size_t lexer_result = <%= @grammar.prefix %>lex(context, &token_info);
+            size_t lexer_result = <%= lex_fn %>(context, &token_info);
             if (lexer_result != P_SUCCESS)
             {
                 result = lexer_result;
@@ -956,8 +1000,8 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
         {
             /* Successful parse. */
-<% if @grammar.ast %>
-            context->parse_result = (<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)state_values_stack_index(&statevalues, -1)->ast_node;
+<% if @grammar.tree %>
+            context->parse_result = state_values_stack_index(&statevalues, -1)->tree_node;
 <% else %>
             context->parse_result = state_values_stack_index(&statevalues, -1)->pvalue;
 <% end %>
@@ -973,13 +1017,20 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-<% if @grammar.ast %>
-                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = malloc(sizeof(<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>));
-                token_ast_node->position = token_info.position;
-                token_ast_node->end_position = token_info.end_position;
-                token_ast_node->token = token;
-                token_ast_node->pvalue = token_info.pvalue;
-                state_values_stack_index(&statevalues, -1)->ast_node = token_ast_node;
+<% if @grammar.tree %>
+<% if @cpp %>
+                <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> * token_tree_node = new <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>();
+<% else %>
+                <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> * token_tree_node = (<%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> *)malloc(sizeof(<%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>));
+<% end %>
+                token_tree_node->position = token_info.position;
+                token_tree_node->end_position = token_info.end_position;
+                token_tree_node->n_fields = 0u;
+                token_tree_node->is_token = 1u;
+                token_tree_node->token = token;
+                token_tree_node->pvalue = token_info.pvalue;
+<%= expand_code(@grammar.on_token_node, false, nil, nil) %>
+                state_values_stack_index(&statevalues, -1)->tree_node = token_tree_node;
 <% else %>
                 state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
 <% end %>
@@ -988,11 +1039,12 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             else
             {
                 /* We shifted a RuleSet. */
-<% if @grammar.ast %>
-                state_values_stack_index(&statevalues, -1)->ast_node = reduced_parser_node;
+<% if @grammar.tree %>
+                state_values_stack_index(&statevalues, -1)->tree_node = reduced_parser_node;
 <% else %>
                 state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
-                <%= @grammar.prefix %>value_t new_parse_result = {0};
+                <%= @grammar.prefix %>value_t new_parse_result;
+                memset(&new_parse_result, 0, sizeof(new_parse_result));
                 reduced_parser_value = new_parse_result;
 <% end %>
                 reduced_rule_set = INVALID_ID;
@@ -1004,39 +1056,38 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
-<% if @grammar.ast %>
+<% if @grammar.tree %>
             if (parser_reduce_table[reduce_index].propagate_optional_target)
             {
-                reduced_parser_node = state_values_stack_index(&statevalues, -1)->ast_node;
+                reduced_parser_node = state_values_stack_index(&statevalues, -1)->tree_node;
             }
             else if (parser_reduce_table[reduce_index].n_states > 0)
             {
                 size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
-                ASTNode * node = (ASTNode *)malloc(sizeof(ASTNode) + n_fields * sizeof(void *));
+                size_t bytes = sizeof(TreeNode) + n_fields * sizeof(void *);
+                TreeNode * node = (TreeNode *)malloc(bytes);
+                memset(node, 0, bytes);
                 node->position = INVALID_POSITION;
                 node->end_position = INVALID_POSITION;
-                for (size_t i = 0; i < n_fields; i++)
-                {
-                    node->fields[i] = NULL;
-                }
+                node->n_fields = n_fields;
                 if (parser_reduce_table[reduce_index].rule_set_node_field_index_map == NULL)
                 {
                     for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
                     {
-                        node->fields[i] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
+                        node->fields[i] = (TreeNode *)state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->tree_node;
                     }
                 }
                 else
                 {
                     for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
                     {
-                        node->fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
+                        node->fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = (TreeNode *)state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->tree_node;
                     }
                 }
                 bool position_found = false;
                 for (size_t i = 0; i < n_fields; i++)
                 {
-                    ASTNode * child = (ASTNode *)node->fields[i];
+                    TreeNode * child = node->fields[i];
                     if ((child != NULL) && <%= @grammar.prefix %>position_valid(child->position))
                     {
                         if (!position_found)
@@ -1054,9 +1105,11 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
                 reduced_parser_node = NULL;
             }
 <% else %>
-            <%= @grammar.prefix %>value_t reduced_parser_value2 = {0};
+            <%= @grammar.prefix %>value_t reduced_parser_value2;
+            memset(&reduced_parser_value2, 0, sizeof(reduced_parser_value2));
             if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
             {
+                state_values_stack_free(&statevalues);
                 return P_USER_TERMINATED;
             }
             reduced_parser_value = reduced_parser_value2;
@@ -1080,6 +1133,19 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
     return result;
 }

+size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
+{
+    return parse_from(context, 0u);
+}
+
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+
+size_t <%= @grammar.prefix %>parse_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return parse_from(context, <%= i %>u);
+}
+<% end %>
+
 /**
  * Get the parse result value.
  *
@@ -1088,18 +1154,29 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
  *
  * @return Parse result value.
  */
-<% if @grammar.ast %>
-<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% if @grammar.tree %>
+<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+{
+    return (<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> *) context->parse_result;
+}
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return (<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> *) context->parse_result;
+}
+<% end %>
 <% else %>
 <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% end %>
 {
-<% if @grammar.ast %>
-    return context->parse_result;
-<% else %>
     return context->parse_result.v_<%= start_rule_type[0] %>;
-<% end %>
 }
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+<%= start_rule_type(i)[1] %> <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return context->parse_result.v_<%= start_rule_type(i)[0] %>;
+}
+<% end %>
+<% end %>

 /**
  * Get the current text input position.
@@ -1136,3 +1213,48 @@ size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t
 {
     return context->token;
 }
+<% if @grammar.tree %>
+
+static void tree_delete(TreeNode * node)
+{
+    if (node->is_token)
+    {
+        <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> * token_tree_node = (<%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> *)node;
+<%= expand_code(@grammar.free_token_node, false, nil, nil) %>
+<% if @cpp %>
+        delete token_tree_node;
+<% else %>
+        free(token_tree_node);
+<% end %>
+    }
+    else if (node->n_fields > 0u)
+    {
+        for (size_t i = 0u; i < node->n_fields; i++)
+        {
+            if (node->fields[i] != NULL)
+            {
+                tree_delete(node->fields[i]);
+            }
+        }
+        free(node);
+    }
+}
+
+/**
+ * Free all tree node memory.
+ */
+void <%= @grammar.prefix %>tree_delete(<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * tree)
+{
+    tree_delete((TreeNode *)tree);
+}
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+
+/**
+ * Free all tree node memory.
+ */
+void <%= @grammar.prefix %>tree_delete_<%= start_rule %>(<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * tree)
+{
+    tree_delete((TreeNode *)tree);
+}
+<% end %>
+<% end %>
```
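The template changes above give every generated tree node a common header (`position`, `end_position`, `n_fields`, `is_token`) that `tree_delete()` uses to walk and free the tree. A sketch of user code reading the position fields, assuming the default `p_` prefix, empty tree prefix/suffix, and a hypothetical start rule named `Start` (names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "parser.h" /* generated header; names are illustrative */

void report_extent(char const * text)
{
    p_context_t * context = p_context_new((uint8_t const *)text, strlen(text));
    if (p_parse(context) == P_SUCCESS)
    {
        Start * tree = p_result(context);
        /* Positions are 1-based; row 0 is the "invalid" marker that this
         * changeset uses in place of 0xFFFFFFFF. */
        printf("parsed %u:%u .. %u:%u\n",
               tree->position.row, tree->position.col,
               tree->end_position.row, tree->end_position.col);
        p_tree_delete(tree); /* frees rule nodes and token nodes recursively */
    }
    p_context_delete(context);
}
```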
D parser source template (ERB):

```diff
@@ -8,7 +8,8 @@
 module <%= @grammar.modulename %>;
 <% end %>

-import core.stdc.stdlib : malloc;
+import core.memory;
+import core.stdc.stdlib : malloc, free;

 /**************************************************************************
  * User code blocks
@@ -65,16 +66,16 @@ public struct <%= @grammar.prefix %>position_t
     uint col;

     /** Invalid position value. */
-    enum INVALID = <%= @grammar.prefix %>position_t(0xFFFF_FFFF, 0xFFFF_FFFF);
+    enum INVALID = <%= @grammar.prefix %>position_t(0u, 0u);

     /** Return whether the position is valid. */
     public @property bool valid()
     {
-        return row != 0xFFFF_FFFFu;
+        return row != 0u;
     }
 }

-<% if @grammar.ast %>
+<% if @grammar.tree %>
 /** Parser values type. */
 public alias <%= @grammar.prefix %>value_t = <%= @grammar.ptype %>;
 <% else %>
@@ -87,33 +88,40 @@ public union <%= @grammar.prefix %>value_t
 }
 <% end %>

-<% if @grammar.ast %>
-/** Common AST node structure. */
-private struct ASTNode
+<% if @grammar.tree %>
+/** Common tree node structure. */
+private struct TreeNode
 {
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
+    ushort n_fields;
+    bool is_token;
     void *[0] fields;
 }

-/** AST node types. @{ */
-public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
+/** Tree node types. @{ */
+public struct <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>
 {
-    /* ASTNode fields must be present in the same order here. */
+    /* TreeNode fields must be present in the same order here. */
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
+    ushort n_fields;
+    bool is_token;
     <%= @grammar.prefix %>token_t token;
     <%= @grammar.prefix %>value_t pvalue;
+    <%= @grammar.token_user_fields %>
 }

 <% @parser.rule_sets.each do |name, rule_set| %>
 <% next if name.start_with?("$") %>
 <% next if rule_set.optional? %>
-public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
+public struct <%= @grammar.tree_prefix %><%= name %><%= @grammar.tree_suffix %>
 {
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
-<% rule_set.ast_fields.each do |fields| %>
+    ushort n_fields;
+    bool is_token;
+<% rule_set.tree_fields.each do |fields| %>
     union
     {
 <% fields.each do |field_name, type| %>
@@ -171,8 +179,8 @@ public struct <%= @grammar.prefix %>context_t
     /* Parser context data. */

     /** Parse result value. */
-<% if @grammar.ast %>
-    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
+<% if @grammar.tree %>
+    void * parse_result;
 <% else %>
     <%= @grammar.prefix %>value_t parse_result;
 <% end %>
@@ -182,6 +190,8 @@ public struct <%= @grammar.prefix %>context_t

     /** User terminate code. */
     size_t user_terminate_code;
+
+    <%= @grammar.context_user_fields %>
 }

 /**************************************************************************
@@ -221,24 +231,39 @@ private enum size_t INVALID_ID = cast(size_t)-1;
 *************************************************************************/

 /**
- * Initialize lexer/parser context structure.
+ * Allocate and initialize lexer/parser context structure.
+ *
+ * Deinitialize and deallocate with <%= @grammar.prefix %>context_delete().
  *
- * @param[out] context
- *   Lexer/parser context structure.
  * @param input
  *   Text input.
  * @param input_length
  *   Text input length.
+ *
+ * @return Context structure for lexer/parser.
 */
-public void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, string input)
+<%= @grammar.prefix %>context_t * <%= @grammar.prefix %>context_new(string input)
 {
-    /* New default-initialized context structure. */
-    <%= @grammar.prefix %>context_t newcontext;
+    <%= @grammar.prefix %>context_t * context = new <%= @grammar.prefix %>context_t;

     /* Lexer initialization. */
-    newcontext.input = input;
-    newcontext.mode = <%= @lexer.mode_id("default") %>;
+    context.input = input;
+    context.text_position.row = 1u;
+    context.text_position.col = 1u;
+    context.mode = <%= @lexer.mode_id("default") %>;

-    /* Copy to the user's context structure. */
-    *context = newcontext;
+    return context;
 }

+/**
+ * Deinitialize and deallocate lexer/parser context structure.
+ *
+ * @param context
+ *   Lexer/parser context structure allocated with <%= @grammar.prefix %>context_new().
+ */
+void <%= @grammar.prefix %>context_delete(<%= @grammar.prefix %>context_t * context)
+{
+}

 /**************************************************************************
@@ -534,7 +559,7 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 if (code_point == '\n')
                 {
                     attempt_match.delta_position.row++;
-                    attempt_match.delta_position.col = 0u;
+                    attempt_match.delta_position.col = 1u;
                 }
                 else
                 {
@@ -794,7 +819,7 @@ private struct reduce_t
      * reduce action.
      */
     parser_state_id_t n_states;
-<% if @grammar.ast %>
+<% if @grammar.tree %>

     /**
      * Map of rule components to rule set child fields.
@@ -802,7 +827,7 @@ private struct reduce_t
     immutable(ushort) * rule_set_node_field_index_map;

     /**
-     * Number of rule set AST node fields.
+     * Number of rule set tree node fields.
      */
     ushort rule_set_node_field_array_size;
@@ -844,9 +869,9 @@ private struct state_value_t
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;

-<% if @grammar.ast %>
-    /** AST node. */
-    void * ast_node;
+<% if @grammar.tree %>
+    /** Tree node. */
+    void * tree_node;
 <% end %>

     this(size_t state_id)
@@ -862,7 +887,7 @@ private immutable shift_t[] parser_shift_table = [
 <% end %>
 ];

-<% if @grammar.ast %>
+<% if @grammar.tree %>
 <% @grammar.rules.each do |rule| %>
 <% unless rule.flat_rule_set_node_field_index_map? %>
 immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map = [<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>];
@@ -873,17 +898,22 @@ immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name
 /** Parser reduce table. */
 private immutable reduce_t[] parser_reduce_table = [
 <% @parser.reduce_table.each do |reduce| %>
-    reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
-<% if @grammar.ast %>
+    reduce_t(
+        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
+        <%= reduce[:rule_id] %>u, /* Rule ID */
+        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
+<% if @grammar.tree %>
+        <%= reduce[:n_states] %>u, /* Number of states */
 <% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-    , null
+        null, /* No rule set node field index map (flat map) */
 <% else %>
-    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
 <% end %>
-    , <%= reduce[:rule].rule_set.ast_fields.size %>
-    , <%= reduce[:propagate_optional_target] %>
+        <%= reduce[:rule].rule_set.tree_fields.size %>, /* Number of tree fields */
+        <%= reduce[:propagate_optional_target] %>), /* Propagate optional target? */
+<% else %>
+        <%= reduce[:n_states] %>u), /* Number of states */
 <% end %>
-    ),
 <% end %>
 ];
@@ -894,7 +924,7 @@ private immutable parser_state_t[] parser_state_table = [
 <% end %>
 ];

-<% unless @grammar.ast %>
+<% unless @grammar.tree %>
 /**
  * Execute user code associated with a parser rule.
  *
@@ -977,6 +1007,8 @@ private size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token
  *
  * @param context
  *   Lexer/parser context structure.
+ * @start_state_id
+ *   ID of the state in which to start.
  *
  * @retval P_SUCCESS
  *   The parser successfully matched the input text. The parse result value
@@ -989,13 +1021,14 @@ private size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token
  * @reval P_UNEXPECTED_INPUT
  *   Input text does not match any lexer pattern.
  */
-public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
+private size_t parse_from(<%= @grammar.prefix %>context_t * context, size_t start_state_id)
 {
     <%= @grammar.prefix %>token_info_t token_info;
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_value_t[] statevalues = new state_value_t[](1);
+    statevalues[0].state_id = start_state_id;
     size_t reduced_rule_set = INVALID_ID;
-<% if @grammar.ast %>
+<% if @grammar.tree %>
     void * reduced_parser_node;
 <% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
@@ -1004,7 +1037,7 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
     {
         if (token == INVALID_TOKEN_ID)
         {
-            size_t lexer_result = <%= @grammar.prefix %>lex(context, &token_info);
+            size_t lexer_result = <%= lex_fn %>(context, &token_info);
             if (lexer_result != P_SUCCESS)
             {
                 return lexer_result;
@@ -1022,8 +1055,8 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
         if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
         {
             /* Successful parse. */
-<% if @grammar.ast %>
-            context.parse_result = cast(<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)statevalues[$-1].ast_node;
+<% if @grammar.tree %>
+            context.parse_result = statevalues[$-1].tree_node;
 <% else %>
             context.parse_result = statevalues[$-1].pvalue;
 <% end %>
@@ -1037,9 +1070,10 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-<% if @grammar.ast %>
-                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token_info.position, token_info.end_position, token, token_info.pvalue);
-                statevalues[$-1].ast_node = token_ast_node;
+<% if @grammar.tree %>
+                <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %> * token_tree_node = new <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>(token_info.position, token_info.end_position, 0u, true, token, token_info.pvalue);
+<%= expand_code(@grammar.on_token_node, false, nil, nil) %>
+                statevalues[$-1].tree_node = token_tree_node;
 <% else %>
                 statevalues[$-1].pvalue = token_info.pvalue;
 <% end %>
@@ -1048,8 +1082,8 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             else
             {
                 /* We shifted a RuleSet. */
-<% if @grammar.ast %>
-                statevalues[$-1].ast_node = reduced_parser_node;
+<% if @grammar.tree %>
+                statevalues[$-1].tree_node = reduced_parser_node;
 <% else %>
                 statevalues[$-1].pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result;
@@ -1064,17 +1098,21 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
-<% if @grammar.ast %>
+<% if @grammar.tree %>
             if (parser_reduce_table[reduce_index].propagate_optional_target)
             {
-                reduced_parser_node = statevalues[$ - 1].ast_node;
+                reduced_parser_node = statevalues[$ - 1].tree_node;
             }
             else if (parser_reduce_table[reduce_index].n_states > 0)
             {
                 size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
-                ASTNode * node = cast(ASTNode *)malloc(ASTNode.sizeof + n_fields * (void *).sizeof);
+                size_t node_size = TreeNode.sizeof + n_fields * (void *).sizeof;
+                TreeNode * node = cast(TreeNode *)malloc(node_size);
+                GC.addRange(node, node_size);
                 node.position = <%= @grammar.prefix %>position_t.INVALID;
                 node.end_position = <%= @grammar.prefix %>position_t.INVALID;
+                node.n_fields = cast(ushort)n_fields;
+                node.is_token = false;
                 foreach (i; 0..n_fields)
                 {
                     node.fields[i] = null;
@@ -1083,20 +1121,20 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
                 {
                     foreach (i; 0..parser_reduce_table[reduce_index].n_states)
                     {
-                        node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
+                        node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].tree_node;
                     }
                 }
                 else
                 {
                     foreach (i; 0..parser_reduce_table[reduce_index].n_states)
                     {
-                        node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
+                        node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].tree_node;
                     }
                 }
                 bool position_found = false;
                 foreach (i; 0..n_fields)
                 {
-                    ASTNode * child = cast(ASTNode *)node.fields[i];
+                    TreeNode * child = cast(TreeNode *)node.fields[i];
                     if (child && child.position.valid)
                     {
                         if (!position_found)
@@ -1137,6 +1175,19 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
         }
     }
 }

+public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
+{
+    return parse_from(context, 0u);
+}
+
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+
+public size_t <%= @grammar.prefix %>parse_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return parse_from(context, <%= i %>u);
+}
+<% end %>
+
 /**
  * Get the parse result value.
  *
@@ -1145,18 +1196,58 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
  *
  * @return Parse result value.
  */
-<% if @grammar.ast %>
-public <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% if @grammar.tree %>
+public <%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+{
+    return cast(<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> *)context.parse_result;
+}
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+public <%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return cast(<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> *)context.parse_result;
+}
+<% end %>
 <% else %>
 public <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% end %>
 {
-<% if @grammar.ast %>
-    return context.parse_result;
-<% else %>
     return context.parse_result.v_<%= start_rule_type[0] %>;
-<% end %>
 }
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+public <%= start_rule_type(i)[1] %> <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context)
+{
+    return context.parse_result.v_<%= start_rule_type(i)[0] %>;
+}
+<% end %>
+<% end %>

+<% if @grammar.tree %>
+private void tree_delete(TreeNode * node)
+{
+    if (!node.is_token)
+    {
+        for (size_t i = 0u; i < node.n_fields; i++)
+        {
+            if (node.fields[i])
+            {
+                tree_delete(cast(TreeNode *)node.fields[i]);
+            }
+        }
+        GC.removeRange(node);
+        free(node);
+    }
+}
+
+void <%= @grammar.prefix %>tree_delete(<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * tree)
+{
+    tree_delete(cast(TreeNode *)tree);
+}
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+void <%= @grammar.prefix %>tree_delete_<%= start_rule %>(<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * tree)
+{
+    tree_delete(cast(TreeNode *)tree);
+}
+<% end %>
+<% end %>

 /**
  * Get the current text input position.
```
C parser header template (ERB):

```diff
@@ -53,12 +53,12 @@ typedef struct
 } <%= @grammar.prefix %>position_t;

 /** Return whether the position is valid. */
-#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0xFFFFFFFFu)
+#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0u)

 /** User header code blocks. */
 <%= @grammar.code_blocks.fetch("header", "") %>

-<% if @grammar.ast %>
+<% if @grammar.tree %>
 /** Parser values type. */
 typedef <%= @grammar.ptype %> <%= @grammar.prefix %>value_t;
 <% else %>
@@ -71,16 +71,19 @@ typedef union
 } <%= @grammar.prefix %>value_t;
 <% end %>

-<% if @grammar.ast %>
-/** AST node types. @{ */
-typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
+<% if @grammar.tree %>
+/** Tree node types. @{ */
+typedef struct <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>
 {
-    /* ASTNode fields must be present in the same order here. */
+<% # TreeNode fields must be present in the same order here. # %>
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
+    uint16_t n_fields;
+    uint8_t is_token;
+    <%= @grammar.token_user_fields %>
     <%= @grammar.prefix %>token_t token;
     <%= @grammar.prefix %>value_t pvalue;
-} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;
+} <%= @grammar.tree_prefix %>Token<%= @grammar.tree_suffix %>;

 <% @parser.rule_sets.each do |name, rule_set| %>
 <% next if name.start_with?("$") %>
@@ -91,11 +94,14 @@ struct <%= name %>;
 <% @parser.rule_sets.each do |name, rule_set| %>
 <% next if name.start_with?("$") %>
 <% next if rule_set.optional? %>
-typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
+typedef struct <%= @grammar.tree_prefix %><%= name %><%= @grammar.tree_suffix %>
 {
+<% # TreeNode fields must be present in the same order here. # %>
     <%= @grammar.prefix %>position_t position;
     <%= @grammar.prefix %>position_t end_position;
-<% rule_set.ast_fields.each do |fields| %>
+    uint16_t n_fields;
+    uint8_t is_token;
+<% rule_set.tree_fields.each do |fields| %>
     union
     {
 <% fields.each do |field_name, type| %>
@@ -103,7 +109,7 @@ typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
 <% end %>
     };
 <% end %>
-} <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>;
+} <%= @grammar.tree_prefix %><%= name %><%= @grammar.tree_suffix %>;

 <% end %>
 /** @} */
@@ -156,8 +162,8 @@ typedef struct
     /* Parser context data. */

     /** Parse result value. */
-<% if @grammar.ast %>
-    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
+<% if @grammar.tree %>
+    void * parse_result;
 <% else %>
     <%= @grammar.prefix %>value_t parse_result;
 <% end %>
@@ -167,6 +173,8 @@ typedef struct

     /** User terminate code. */
     size_t user_terminate_code;
+
+    <%= @grammar.context_user_fields %>
 } <%= @grammar.prefix %>context_t;

 /**************************************************************************
@@ -176,7 +184,9 @@ typedef struct
 /** Token names. */
 extern const char * <%= @grammar.prefix %>token_names[];

-void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length);
+<%= @grammar.prefix %>context_t * <%= @grammar.prefix %>context_new(uint8_t const * input, size_t input_length);
+
+void <%= @grammar.prefix %>context_delete(<%= @grammar.prefix %>context_t * context);

 size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t input_length,
     <%= @grammar.prefix %>code_point_t * out_code_point, uint8_t * out_code_point_length);
@@ -184,11 +194,27 @@ size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t inp
 size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info);

 size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context);
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+size_t <%= @grammar.prefix %>parse_<%= start_rule %>(<%= @grammar.prefix %>context_t * context);
+<% end %>

-<% if @grammar.ast %>
-<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
+<% if @grammar.tree %>
+<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context);
+<% end %>
 <% else %>
 <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+<%= start_rule_type(i)[1] %> <%= @grammar.prefix %>result_<%= start_rule %>(<%= @grammar.prefix %>context_t * context);
+<% end %>
 <% end %>

+<% if @grammar.tree %>
+void <%= @grammar.prefix %>tree_delete(<%= @grammar.tree_prefix %><%= @grammar.start_rules[0] %><%= @grammar.tree_suffix %> * tree);
+<% @grammar.start_rules.each_with_index do |start_rule, i| %>
+void <%= @grammar.prefix %>tree_delete_<%= start_rule %>(<%= @grammar.tree_prefix %><%= start_rule %><%= @grammar.tree_suffix %> * tree);
+<% end %>
+<% end %>

 <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context);
```
File diff suppressed because it is too large
Load Diff
@@ -17,12 +17,17 @@ syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contai

 syn match propaneComment "#.*"
 syn match propaneOperator "->"
-syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid
+syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
+syn match propaneFieldOperator ":" contained
+syn match propaneOperator "?"
+syn keyword propaneKeyword drop free_token_node free_token_user_fields module prefix ptype start token token_user_fields tokenid tree tree_prefix tree_suffix

-syn region propaneRegex start="/" end="/" skip="\\/"
+syn region propaneRegex start="/" end="/" skip="\v\\\\|\\/"

 hi def link propaneComment Comment
 hi def link propaneKeyword Keyword
 hi def link propaneRegex String
 hi def link propaneOperator Operator
+hi def link propaneFieldOperator Operator
 hi def link propaneDelimiter Delimiter
+hi def link propaneFieldAlias Identifier

@@ -11,24 +11,33 @@ class Propane
         @log = StringIO.new
       end
       @language =
-        if output_file =~ /\.([a-z]+)$/
-          $1
+        if output_file.end_with?(".d")
+          "d"
+        elsif output_file.end_with?(".c")
+          "c"
+        elsif output_file =~ %r{\.(cc|cpp|cxx)$}
+          @cpp = true
+          "c"
         else
           raise Error.new("Could not determine target language from output file name (#{output_file})")
         end
       @options = options
       process_grammar!
     end

     def generate
-      extensions = [@language]
+      extensions = [nil]
       if @language == "c"
         extensions += %w[h]
       end
       extensions.each do |extension|
-        template = Assets.get("parser.#{extension}.erb")
+        template = Assets.get("parser.#{extension || @language}.erb")
+        if extension
+          output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
+        else
+          output_file = @output_file
+        end
         erb = ERB.new(template, trim_mode: "<>")
-        output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
         result = erb.result(binding.clone)
         File.open(output_file, "wb") do |fh|
          fh.write(result)
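
With this change the generator writes the main output under the exact name the user supplied, and derives companion file names only for the extra extensions (the C header). A minimal sketch of the naming rule in isolation (the helper name and file names here are hypothetical, for illustration only):

    # Hypothetical helper mirroring the extension handling above.
    def output_files(output_file, language)
      extensions = [nil]
      extensions += %w[h] if language == "c"
      extensions.map do |extension|
        if extension
          output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
        else
          output_file
        end
      end
    end

    output_files("parser.c", "c")    # => ["parser.c", "parser.h"]
    output_files("parser.cc", "c")   # => ["parser.cc", "parser.h"]
    output_files("parser.d", "d")    # => ["parser.d"]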
@@ -43,8 +52,8 @@ class Propane
       # Assign default pattern mode to patterns without a mode assigned.
       found_default = false
       @grammar.patterns.each do |pattern|
-        if pattern.mode.nil?
-          pattern.mode = "default"
+        if pattern.modes.empty?
+          pattern.modes << "default"
           found_default = true
         end
         pattern.ptypename ||= "default"
@@ -67,12 +76,15 @@ class Propane
         end
         tokens_by_name[token.name] = token
       end
-      # Check for user start rule.
-      unless @grammar.rules.find {|rule| rule.name == @grammar.start_rule}
-        raise Error.new("Start rule `#{@grammar.start_rule}` not found")
+      # Create real start rule(s).
+      real_start_rules = @grammar.start_rules.map do |start_rule|
+        unless @grammar.rules.find {|rule| rule.name == start_rule}
+          raise Error.new("Start rule `#{start_rule}` not found")
+        end
+        Rule.new("$#{start_rule}", [start_rule, "$EOF"], nil, nil, nil)
       end
-      # Add "real" start rule.
-      @grammar.rules.unshift(Rule.new("$Start", [@grammar.start_rule, "$EOF"], nil, nil, nil))
+      # Add real start rules before user-given rules.
+      @grammar.rules = real_start_rules + @grammar.rules
       # Generate and add rules for optional components.
       generate_optional_component_rules!(tokens_by_name)
       # Build rule sets.
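
Each user-declared start rule now gets its own synthetic wrapper rule, prepended ahead of the user rules so the parser states for the start symbols are built first. A hedged illustration of the resulting rule list (rule names invented for the example):

    # For `start Top Expr;`, the rules list effectively becomes:
    #   $Top  -> Top $EOF
    #   $Expr -> Expr $EOF
    #   ... user-given rules ...
    real_start_rules = ["Top", "Expr"].map do |start_rule|
      Rule.new("$#{start_rule}", [start_rule, "$EOF"], nil, nil, nil)
    end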
@@ -258,6 +270,24 @@ class Propane
           "context.user_terminate_code = (#{user_terminate_code}); return #{retval};"
         end
       end
+      code = code.gsub(/\$\{context\.(\w+)\}/) do |match|
+        fieldname = $1
+        case @language
+        when "c"
+          "context->#{fieldname}"
+        when "d"
+          "context.#{fieldname}"
+        end
+      end
+      code = code.gsub(/\$\{token\.(\w+)\}/) do |match|
+        fieldname = $1
+        case @language
+        when "c"
+          "token_tree_node->#{fieldname}"
+        when "d"
+          "token_tree_node.#{fieldname}"
+        end
+      end
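
These passes let user code blocks name custom context and token fields portably: the same grammar source expands to the correct member-access syntax for each target language. A small self-contained sketch of the context substitution (the `indent` field is hypothetical):

    code = "${context.indent} += 1;"
    language = "c"
    code = code.gsub(/\$\{context\.(\w+)\}/) do
      language == "c" ? "context->#{$1}" : "context.#{$1}"
    end
    code  # => "context->indent += 1;"  (for D it would be "context.indent += 1;")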
       if parser
         code = code.gsub(/\$\$/) do |match|
           case @language
@@ -291,7 +321,7 @@ class Propane
           end
       else
         code = code.gsub(/\$\$/) do |match|
-          if @grammar.ast
+          if @grammar.tree
             case @language
             when "c"
               "out_token_info->pvalue"
@@ -324,13 +354,21 @@ class Propane
       code
     end

+    # Get the lex function to use.
+    #
+    # @return [String]
+    #   Lex function to use.
+    def lex_fn
+      @grammar.custom_lex_fn || "#{@grammar.prefix}lex"
+    end
+
     # Get the parser value type for the start rule.
     #
     # @return [Array<String>]
     #   Start rule parser value type name and type string.
-    def start_rule_type
+    def start_rule_type(start_rule_index = 0)
       start_rule = @grammar.rules.find do |rule|
-        rule.name == @grammar.start_rule
+        rule.name == @grammar.start_rules[start_rule_index]
       end
       [start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]]
     end
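
With multiple start rules, `start_rule_type(i)` resolves the parser value type per start rule instead of assuming a single start symbol. A hedged sketch of the returned pair, assuming a grammar that declares `ptype char *;`:

    start_rule_type(0)  # => ["default", "char *"]
    # i.e. [ptypename, type string], looked up through @grammar.ptypes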
@@ -5,34 +5,44 @@ class Propane

     # Reserve identifiers beginning with a double-underscore for internal use.
     IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/

-    attr_reader :ast
-    attr_reader :ast_prefix
-    attr_reader :ast_suffix
+    attr_reader :context_user_fields
+    attr_reader :custom_lex_fn
+    attr_reader :tree
+    attr_reader :tree_prefix
+    attr_reader :tree_suffix
+    attr_reader :free_token_node
     attr_reader :modulename
     attr_reader :patterns
-    attr_reader :rules
-    attr_reader :start_rule
+    attr_accessor :rules
+    attr_reader :start_rules
     attr_reader :tokens
     attr_reader :code_blocks
     attr_reader :ptypes
     attr_reader :prefix
+    attr_reader :on_token_node
+    attr_reader :token_user_fields

     def initialize(input)
       @patterns = []
-      @start_rule = "Start"
+      @start_rules = []
       @tokens = []
       @rules = []
       @code_blocks = {}
       @line_number = 1
       @next_line_number = @line_number
-      @mode = nil
+      @modeline = nil
       @input = input.gsub("\r\n", "\n")
       @ptypes = {"default" => "void *"}
       @prefix = "p_"
-      @ast = false
-      @ast_prefix = ""
-      @ast_suffix = ""
+      @tree = false
+      @tree_prefix = ""
+      @tree_suffix = ""
+      @free_token_node = ""
+      @context_user_fields = nil
+      @on_token_node = ""
+      @token_user_fields = nil
       parse_grammar!
+      @start_rules << "Start" if @start_rules.empty?
     end

     def ptype
@@ -58,11 +68,16 @@ class Propane
     def parse_statement!
       if parse_white_space!
       elsif parse_comment_line!
-      elsif @mode.nil? && parse_mode_label!
-      elsif parse_ast_statement!
-      elsif parse_ast_prefix_statement!
-      elsif parse_ast_suffix_statement!
+      elsif @modeline.nil? && parse_mode_label!
+      elsif parse_context_user_fields_statement!
+      elsif parse_custom_lex_fn!
+      elsif parse_tree_statement!
+      elsif parse_tree_prefix_statement!
+      elsif parse_tree_suffix_statement!
+      elsif parse_free_token_node_statement!
       elsif parse_module_statement!
+      elsif parse_on_token_node_statement!
+      elsif parse_token_user_fields_statement!
       elsif parse_ptype_statement!
       elsif parse_pattern_statement!
       elsif parse_start_statement!
@@ -81,8 +96,8 @@ class Propane
     end

     def parse_mode_label!
-      if md = consume!(/(#{IDENTIFIER_REGEX})\s*:/)
-        @mode = md[1]
+      if md = consume!(/(#{IDENTIFIER_REGEX}(?:\s*,\s*#{IDENTIFIER_REGEX})*)\s*:/)
+        @modeline = md[1]
       end
     end

@@ -94,21 +109,37 @@ class Propane
       consume!(/#.*\n/)
     end

-    def parse_ast_statement!
-      if consume!(/ast\s*;/)
-        @ast = true
+    def parse_context_user_fields_statement!
+      if md = consume!(/context_user_fields\b\s*/)
+        unless code = parse_code_block!
+          raise Error.new("Line #{@line_number}: expected code block")
+        end
+        @context_user_fields ||= ""
+        @context_user_fields += code
       end
     end

-    def parse_ast_prefix_statement!
-      if md = consume!(/ast_prefix\s+(\w+)\s*;/)
-        @ast_prefix = md[1]
+    def parse_custom_lex_fn!
+      if md = consume!(/custom_lex_fn\b\s*(\w+)\s*;/)
+        @custom_lex_fn = $1
       end
     end

-    def parse_ast_suffix_statement!
-      if md = consume!(/ast_suffix\s+(\w+)\s*;/)
-        @ast_suffix = md[1]
+    def parse_tree_statement!
+      if consume!(/tree\s*;/)
+        @tree = true
       end
     end

+    def parse_tree_prefix_statement!
+      if md = consume!(/tree_prefix\s+(\w+)\s*;/)
+        @tree_prefix = md[1]
+      end
+    end
+
+    def parse_tree_suffix_statement!
+      if md = consume!(/tree_suffix\s+(\w+)\s*;/)
+        @tree_suffix = md[1]
+      end
+    end
+
@@ -117,17 +148,45 @@ class Propane
       md = consume!(/([\w.]+)\s*/, "expected module name")
       @modulename = md[1]
       consume!(/;/, "expected `;'")
-      @mode = nil
+      @modeline = nil
       true
     end
   end

+    def parse_on_token_node_statement!
+      if md = consume!(/on_token_node\b\s*/)
+        unless code = parse_code_block!
+          raise Error.new("Line #{@line_number}: expected code block")
+        end
+        @on_token_node += code
+      end
+    end
+
+    def parse_token_user_fields_statement!
+      if md = consume!(/token_user_fields\b\s*/)
+        unless code = parse_code_block!
+          raise Error.new("Line #{@line_number}: expected code block")
+        end
+        @token_user_fields ||= ""
+        @token_user_fields += code
+      end
+    end
+
+    def parse_free_token_node_statement!
+      if md = consume!(/free_token_node\b\s*/)
+        unless code = parse_code_block!
+          raise Error.new("Line #{@line_number}: expected code block")
+        end
+        @free_token_node += code
+      end
+    end
+
     def parse_ptype_statement!
       if consume!(/ptype\s+/)
         name = "default"
         if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
+          if @tree
+            raise Error.new("Multiple ptypes are unsupported in tree mode")
           end
           name = md[1]
         end
@@ -141,8 +200,8 @@ class Propane
       md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
       name = md[1]
       if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-        if @ast
-          raise Error.new("Multiple ptypes are unsupported in AST mode")
+        if @tree
+          raise Error.new("Multiple ptypes are unsupported in tree mode")
         end
         ptypename = md[1]
       end
@@ -153,9 +212,9 @@ class Propane
       end
       token = Token.new(name, ptypename, @line_number)
       @tokens << token
-      pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
+      pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
       @patterns << pattern
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -165,15 +224,15 @@ class Propane
       md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
       name = md[1]
       if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-        if @ast
-          raise Error.new("Multiple ptypes are unsupported in AST mode")
+        if @tree
+          raise Error.new("Multiple ptypes are unsupported in tree mode")
         end
         ptypename = md[1]
       end
       consume!(/;/, "expected `;'");
       token = Token.new(name, ptypename, @line_number)
       @tokens << token
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -185,9 +244,11 @@ class Propane
         raise Error.new("Line #{@line_number}: expected pattern to follow `drop'")
       end
       consume!(/\s+/)
-      consume!(/;/, "expected `;'")
-      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, mode: @mode)
-      @mode = nil
+      unless code = parse_code_block!
+        consume!(/;/, "expected `;' or code block")
+      end
+      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, modes: get_modes_from_modeline)
+      @modeline = nil
       true
     end
   end
@@ -195,12 +256,12 @@ class Propane
     def parse_rule_statement!
       if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/)
         rule_name, ptypename = *md[1, 2]
-        if @ast && ptypename
-          raise Error.new("Multiple ptypes are unsupported in AST mode")
+        if @tree && ptypename
+          raise Error.new("Multiple ptypes are unsupported in tree mode")
         end
-        md = consume!(/((?:#{IDENTIFIER_REGEX}(?::#{IDENTIFIER_REGEX})?\??\s*)*)\s*/, "expected rule component list")
+        md = consume!(/((?:#{IDENTIFIER_REGEX}\??(?::#{IDENTIFIER_REGEX})?\s*)*)\s*/, "expected rule component list")
         components = md[1].strip.split(/\s+/)
-        if @ast
+        if @tree
           consume!(/;/, "expected `;'")
         else
           unless code = parse_code_block!
@@ -208,7 +269,7 @@ class Propane
           end
         end
         @rules << Rule.new(rule_name, components, code, ptypename, @line_number)
-        @mode = nil
+        @modeline = nil
         true
       end
     end
@@ -217,23 +278,26 @@ class Propane
       if pattern = parse_pattern!
         consume!(/\s+/)
         if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
+          if @tree
+            raise Error.new("Multiple ptypes are unsupported in tree mode")
           end
           ptypename = md[1]
         end
         unless code = parse_code_block!
           raise Error.new("Line #{@line_number}: expected code block to follow pattern")
         end
-        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
-        @mode = nil
+        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
+        @modeline = nil
         true
       end
     end

     def parse_start_statement!
-      if md = consume!(/start\s+(\w+)\s*;/)
-        @start_rule = md[1]
+      if md = consume!(/start\s+([\w\s]*);/)
+        start_rules = md[1].split(/\s+/).map(&:strip)
+        start_rules.each do |start_rule|
+          @start_rules << start_rule unless @start_rules.include?(start_rule)
+        end
       end
     end
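
The `start` statement now accepts a whitespace-separated list of start rules and deduplicates them while preserving declaration order. For example, assuming a grammar containing `start Top Expr Top;`:

    # After parse_start_statement! runs:
    @start_rules  # => ["Top", "Expr"]
    # Each entry later gets its own $Top / $Expr wrapper rule (see the
    # real start rule creation in the generator above).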
@@ -247,7 +311,7 @@ class Propane
       else
         @code_blocks[name] = code
       end
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -272,6 +336,8 @@ class Propane
         end
       elsif md = consume!(%r{(.)})
         pattern += md[1]
+      elsif @input == "" || @input.start_with?("\n")
+        raise Error.new("Line #{@line_number}: Unterminated pattern; expected `/`")
       end
     end
     pattern
@@ -315,6 +381,14 @@ class Propane
       end
     end

+    def get_modes_from_modeline
+      if @modeline
+        Set[*@modeline.split(",").map(&:strip)]
+      else
+        Set.new
+      end
+    end
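
A mode label can now name several comma-separated modes; `get_modes_from_modeline` turns the raw label text into a Set, with the empty Set meaning "no explicit mode" (the default mode is assigned later by the generator). A quick illustration:

    @modeline = "m1, m2"
    get_modes_from_modeline   # => Set["m1", "m2"]
    @modeline = nil
    get_modes_from_modeline   # => Set.new (pattern falls back to "default")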

   end

 end

@@ -26,8 +26,14 @@ class Propane
     private

     def build_tables!
-      @modes = @grammar.patterns.group_by do |pattern|
-        pattern.mode
+      modenames = @grammar.patterns.reduce(Set.new) do |result, pattern|
+        result + pattern.modes
+      end
+      @modes = modenames.reduce({}) do |result, modename|
+        result[modename] = @grammar.patterns.select do |pattern|
+          pattern.modes.include?(modename)
+        end
+        result
       end.transform_values do |patterns|
         {dfa: DFA.new(patterns)}
       end
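
Because a pattern can now belong to several modes, the lexer builds one DFA per mode from every pattern whose mode set includes that mode; a pattern shared by two modes is compiled into both DFAs. A sketch of the resulting structure (pattern objects elided, mode names assumed):

    # Given patterns whose modes are Set["default"] and Set["default", "string"]:
    @modes
    # => {
    #      "default" => {dfa: DFA.new([pattern1, pattern2])},
    #      "string"  => {dfa: DFA.new([pattern2])},
    #    }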

@@ -14,12 +14,22 @@ class Propane
       @item_sets = []
       @item_sets_set = {}
       @warnings = Set.new
+      @errors = Set.new
       @options = options
-      start_item = Item.new(grammar.rules.first, 0)
-      eval_item_sets = Set[ItemSet.new([start_item])]
+      start_items = grammar.rules[0...grammar.start_rules.length].map do |start_rule|
+        Item.new(start_rule, 0)
+      end
+      start_item_sets = start_items.map {|item| ItemSet.new([item])}
+      eval_item_sets = Set[*start_item_sets]

       while eval_item_sets.size > 0
-        item_set = eval_item_sets.first
+        item_set =
+          if start_item_sets.size > 0
+            # Ensure we evaluate start_item_sets first in order
+            start_item_sets.slice!(0)
+          else
+            eval_item_sets.first
+          end
         eval_item_sets.delete(item_set)
         unless @item_sets_set.include?(item_set)
           item_set.id = @item_sets.size
@@ -39,11 +49,20 @@ class Propane
       end

       build_reduce_actions!
-      build_follow_sets!
       build_tables!
       write_log!
+      errormessage = ""
+      if @errors.size > 0
+        errormessage += @errors.join("\n")
+      end
       if @warnings.size > 0 && @options[:warnings_as_errors]
-        raise Error.new("Fatal errors (-w):\n" + @warnings.join("\n"))
+        if errormessage != ""
+          errormessage += "\n"
+        end
+        errormessage += "Fatal errors (-w):\n" + @warnings.join("\n")
       end
+      if errormessage != ""
+        raise Error.new(errormessage)
+      end
     end

@@ -54,24 +73,13 @@ class Propane
       @shift_table = []
       @reduce_table = []
       @item_sets.each do |item_set|
-        shift_entries = item_set.next_symbols.map do |next_symbol|
-          state_id =
-            if next_symbol.name == "$EOF"
-              0
-            else
-              item_set.next_item_set[next_symbol].id
-            end
-          {
-            symbol: next_symbol,
-            state_id: state_id,
-          }
-        end
-        if item_set.reduce_actions
-          shift_entries.each do |shift_entry|
+        unless item_set.reduce_rules.empty?
+          item_set.shift_entries.each do |shift_entry|
             token = shift_entry[:symbol]
-            if item_set.reduce_actions.include?(token)
-              rule = item_set.reduce_actions[token]
-              @warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
+            if item_set.reduce_actions
+              if rule = item_set.reduce_actions[token]
+                @warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
+              end
             end
           end
         end
@@ -82,7 +90,7 @@ class Propane
             propagate_optional_target: rule.optional? && rule.components.size == 1}]
         elsif reduce_actions = item_set.reduce_actions
           reduce_actions.map do |token, rule|
-            {token_id: token.id, rule_id: rule.id, rule: rule,
+            {token: token, token_id: token.id, rule_id: rule.id, rule: rule,
              rule_set_id: rule.rule_set.id, n_states: rule.components.size,
              propagate_optional_target: rule.optional? && rule.components.size == 1}
           end
@@ -91,11 +99,11 @@ class Propane
         end
         @state_table << {
           shift_index: @shift_table.size,
-          n_shifts: shift_entries.size,
+          n_shifts: item_set.shift_entries.size,
           reduce_index: @reduce_table.size,
           n_reduces: reduce_entries.size,
         }
-        @shift_table += shift_entries
+        @shift_table += item_set.shift_entries
         @reduce_table += reduce_entries
       end
     end

@@ -115,7 +123,109 @@ class Propane
     # @return [void]
     def build_reduce_actions!
       @item_sets.each do |item_set|
-        item_set.reduce_actions = build_reduce_actions_for_item_set(item_set)
+        build_shift_entries(item_set)
+        build_reduce_actions_for_item_set(item_set)
       end
+      item_sets_to_process = @item_sets.select do |item_set|
+        # We need lookahead reduce actions if:
+        # 1) There is more than one possible rule to reduce. In this case the
+        #    lookahead token can help choose which rule to reduce.
+        # 2) There is at least one shift action and one reduce action for
+        #    this item set. In this case the lookahead reduce actions are
+        #    needed to test for a Shift/Reduce conflict.
+        item_set.reduce_rules.size > 1 ||
+          (item_set.reduce_rules.size > 0 && item_set.shift_entries.size > 0)
+      end
+      if RbConfig::CONFIG["host_os"] =~ /linux/
+        item_sets_by_id = {}
+        item_sets_to_process.each do |item_set|
+          item_sets_by_id[item_set.object_id] = item_set
+        end
+        tokens_by_id = {}
+        @grammar.tokens.each do |token|
+          tokens_by_id[token.object_id] = token
+        end
+        rules_by_id = {}
+        @grammar.rules.each do |rule|
+          rules_by_id[rule.object_id] = rule
+        end
+        n_threads = Util.determine_n_threads
+        semaphore = Mutex.new
+        queue = Queue.new
+        threads = {}
+        n_threads.times do
+          piper, pipew = IO.pipe
+          thread = Thread.new do
+            loop do
+              item_set = nil
+              semaphore.synchronize do
+                item_set = item_sets_to_process.slice!(0)
+              end
+              break if item_set.nil?
+              fork do
+                piper.close
+                build_lookahead_reduce_actions_for_item_set(item_set, pipew)
+              end
+            end
+            queue.push(Thread.current)
+          end
+          threads[thread] = [piper, pipew]
+        end
+        until threads.empty?
+          thread = queue.pop
+          piper, pipew = threads[thread]
+          pipew.close
+          thread_txt = piper.read
+          thread_txt.each_line do |line|
+            if line.start_with?("RA,")
+              parts = line.split(",")
+              item_set_id, token_id, rule_id = parts[1..3].map(&:to_i)
+              item_set = item_sets_by_id[item_set_id]
+              unless item_set
+                raise "Internal error: could not find item set from thread"
+              end
+              token = tokens_by_id[token_id]
+              unless token
+                raise "Internal error: could not find token from thread"
+              end
+              rule = rules_by_id[rule_id]
+              unless rule
+                raise "Internal error: could not find rule from thread"
+              end
+              item_set.reduce_actions ||= {}
+              item_set.reduce_actions[token] = rule
+            elsif line.start_with?("Error: ")
+              @errors << line.chomp
+            else
+              raise "Internal error: unhandled thread line #{line}"
+            end
+          end
+          thread.join
+          threads.delete(thread)
+        end
+      else
+        # Fall back to single threaded algorithm.
+        item_sets_to_process.each do |item_set|
+          item_set.reduce_actions = build_lookahead_reduce_actions_for_item_set(item_set)
+        end
+      end
     end
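
On Linux the lookahead computation is fanned out to forked worker processes, which report results back over a pipe as one text line per reduce action; the parent rebuilds object references from the `object_id` values it indexed beforehand. A minimal sketch of the line protocol (the numeric IDs are illustrative):

    line = "RA,70368,70412,70496\n"
    parts = line.split(",")
    item_set_id, token_id, rule_id = parts[1..3].map(&:to_i)
    # The parent then maps each id back through item_sets_by_id,
    # tokens_by_id, and rules_by_id, and records
    # item_set.reduce_actions[token] = rule.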

    # Build the shift entries for a single item set.
    #
    # @return [void]
    def build_shift_entries(item_set)
      item_set.shift_entries = item_set.next_symbols.map do |next_symbol|
        state_id =
          if next_symbol.name == "$EOF"
            0
          else
            item_set.next_item_set[next_symbol].id
          end
        {
          symbol: next_symbol,
          state_id: state_id,
        }
      end
    end

@@ -124,24 +234,16 @@ class Propane
    # @param item_set [ItemSet]
    #   ItemSet (parser state)
    #
-    # @return [nil, Hash]
-    #   If no reduce actions are possible for the given item set, nil.
-    #   Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
+    # @return [void]
    def build_reduce_actions_for_item_set(item_set)
      # To build the reduce actions, we start by looking at any
      # "complete" items, i.e., items where the parse position is at the
      # end of a rule. These are the only rules that are candidates for
      # reduction in the current ItemSet.
-      reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
-
-      if reduce_rules.size == 1
-        item_set.reduce_rule = reduce_rules.first
-      end
-
-      if reduce_rules.size == 0
-        nil
-      else
-        build_lookahead_reduce_actions_for_item_set(item_set)
+      item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
+
+      if item_set.reduce_rules.size == 1
+        item_set.reduce_rule = item_set.reduce_rules.first
      end
    end

@@ -149,25 +251,28 @@ class Propane
    #
    # @param item_set [ItemSet]
    #   ItemSet (parser state)
+    # @param fh [File]
+    #   Output file handle for multiprocessing mode.
    #
    # @return [Hash]
    #   Mapping of lookahead Tokens to the Rules to reduce.
-    def build_lookahead_reduce_actions_for_item_set(item_set)
-      reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
-
+    def build_lookahead_reduce_actions_for_item_set(item_set, fh = nil)
      # We will be looking for all possible tokens that can follow instances of
      # these rules. Rather than looking through the entire grammar for the
      # possible following tokens, we will only look in the item sets leading
      # up to this one. This restriction gives us a more precise lookahead set,
      # and allows us to parse LALR grammars.
      item_sets = Set[item_set] + item_set.leading_item_sets
-      reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
+      item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
        lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
        lookahead_tokens_for_rule.each do |lookahead_token|
          if existing_reduce_rule = reduce_actions[lookahead_token]
-            raise Error.new("Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number})")
+            error = "Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number}) for lookahead token #{lookahead_token}"
+            @errors << error
+            fh.puts(error) if fh
          end
          reduce_actions[lookahead_token] = reduce_rule
+          fh.puts "RA,#{item_set.object_id},#{lookahead_token.object_id},#{reduce_rule.object_id}" if fh
        end
        reduce_actions
      end

@@ -214,6 +319,7 @@ class Propane
            rule_set = item.rule.rule_set
            unless checked_rule_sets.include?(rule_set)
              rule_sets_to_check_after << rule_set
+              checked_rule_sets << rule_set
            end
            break
          when Token
@@ -233,51 +339,6 @@ class Propane
      lookahead_tokens
    end

-    # Build the follow sets for each ItemSet.
-    #
-    # @return [void]
-    def build_follow_sets!
-      @item_sets.each do |item_set|
-        item_set.follow_set = build_follow_set_for_item_set(item_set)
-      end
-    end
-
-    # Build the follow set for the given ItemSet.
-    #
-    # @param item_set [ItemSet]
-    #   The ItemSet to build the follow set for.
-    #
-    # @return [Set]
-    #   Follow set for the given ItemSet.
-    def build_follow_set_for_item_set(item_set)
-      follow_set = Set.new
-      rule_sets_to_check_after = Set.new
-      item_set.items.each do |item|
-        (1..).each do |offset|
-          case symbol = item.next_symbol(offset)
-          when nil
-            rule_sets_to_check_after << item.rule.rule_set
-            break
-          when Token
-            follow_set << symbol
-            break
-          when RuleSet
-            follow_set += symbol.start_token_set
-            unless symbol.could_be_empty?
-              break
-            end
-          end
-        end
-      end
-      reduce_lookaheads = build_lookahead_reduce_actions_for_item_set(item_set)
-      reduce_lookaheads.each do |token, rule_set|
-        if rule_sets_to_check_after.include?(rule_set)
-          follow_set << token
-        end
-      end
-      follow_set
-    end
-
    def write_log!
      @log.puts Util.banner("Parser Rules")
      @grammar.rules.each do |rule|

@@ -22,6 +22,7 @@ class Propane
    def initialize(rule, position)
      @rule = rule
      @position = position
+      @_hash = [@rule, @position].hash
    end

    # Hash function.
@@ -29,7 +30,7 @@ class Propane
    # @return [Integer]
    #   Hash code.
    def hash
-      [@rule, @position].hash
+      @_hash
    end

    # Compare Item objects.

@@ -2,7 +2,7 @@ class Propane
  class Parser

    # Represent a parser "item set", which is a set of possible items that the
-    # parser could currently be parsing.
+    # parser could currently be parsing. This is equivalent to a parser state.
    class ItemSet

      # @return [Set<Item>]
@@ -25,14 +25,18 @@ class Propane
      #   Rule to reduce if there is only one possibility.
      attr_accessor :reduce_rule

+      # @return [Set<Rule>]
+      #   Set of rules that could be reduced in this parser state.
+      attr_accessor :reduce_rules
+
      # @return [nil, Hash]
      #   Reduce actions, mapping lookahead tokens to rules, if there is
      #   more than one rule that could be reduced.
      attr_accessor :reduce_actions

-      # @return [Set<Token>]
-      #   Follow set for the ItemSet.
-      attr_accessor :follow_set
+      # @return [Array<Hash>]
+      #   Shift table entries.
+      attr_accessor :shift_entries

      # Build an ItemSet.
      #
@@ -50,7 +54,7 @@ class Propane
      # @return [Set<Token, RuleSet>]
      #   Set of next symbols for all Items in this ItemSet.
      def next_symbols
-        Set.new(@items.map(&:next_symbol).compact)
+        @_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
      end

      # Build a next ItemSet for the given next symbol.
@@ -99,21 +103,24 @@ class Propane
      # @return [Set<ItemSet>]
      #   Set of all ItemSets that lead up to this ItemSet.
      def leading_item_sets
-        result = Set.new
-        eval_sets = Set[self]
-        evaled = Set.new
-        while eval_sets.size > 0
-          eval_set = eval_sets.first
-          eval_sets.delete(eval_set)
-          evaled << eval_set
-          eval_set.in_sets.each do |in_set|
-            result << in_set
-            unless evaled.include?(in_set)
-              eval_sets << in_set
-            end
-          end
-        end
-        result
+        @_leading_item_sets ||=
+          begin
+            result = Set.new
+            eval_sets = Set[self]
+            evaled = Set.new
+            while eval_sets.size > 0
+              eval_set = eval_sets.first
+              eval_sets.delete(eval_set)
+              evaled << eval_set
+              eval_set.in_sets.each do |in_set|
+                result << in_set
+                unless evaled.include?(in_set)
+                  eval_sets << in_set
+                end
+              end
+            end
+            result
+          end
      end

      # Represent the ItemSet as a String.
@@ -26,9 +26,9 @@ class Propane
    #   Regex NFA for matching the pattern.
    attr_reader :nfa

-    # @return [String, nil]
-    #   Lexer mode for this pattern.
-    attr_accessor :mode
+    # @return [Set]
+    #   Lexer modes for this pattern.
+    attr_accessor :modes

    # @return [String, nil]
    #   Parser value type name.
@@ -46,16 +46,16 @@ class Propane
    #   Token to be returned by this pattern.
    # @option options [Integer, nil] :line_number
    #   Line number where the token was defined in the input grammar.
-    # @option options [String, nil] :mode
-    #   Lexer mode for this pattern.
+    # @option options [String, nil] :modes
+    #   Lexer modes for this pattern.
    def initialize(options)
      @code = options[:code]
      @pattern = options[:pattern]
      @token = options[:token]
      @line_number = options[:line_number]
-      @mode = options[:mode]
+      @modes = options[:modes]
      @ptypename = options[:ptypename]
-      regex = Regex.new(@pattern)
+      regex = Regex.new(@pattern, @line_number)
      regex.nfa.end_state.accepts = self
      @nfa = regex.nfa
    end

@@ -4,12 +4,13 @@ class Propane
    attr_reader :unit
    attr_reader :nfa

-    def initialize(pattern)
+    def initialize(pattern, line_number)
      @pattern = pattern.dup
+      @line_number = line_number
      @unit = parse_alternates
      @nfa = @unit.to_nfa
      if @pattern != ""
-        raise Error.new(%[Unexpected "#{@pattern}" in pattern])
+        raise Error.new(%[Line #{@line_number}: unexpected "#{@pattern}" in pattern])
      end
    end

@@ -41,7 +42,7 @@ class Propane
          mu = MultiplicityUnit.new(last_unit, min_count, max_count)
          au.replace_last!(mu)
        else
-          raise Error.new("#{c} follows nothing")
+          raise Error.new("Line #{@line_number}: #{c} follows nothing")
        end
      when "|"
        au.new_alternate!
@@ -59,7 +60,7 @@ class Propane
    def parse_group
      au = parse_alternates
      if @pattern[0] != ")"
-        raise Error.new("Unterminated group in pattern")
+        raise Error.new("Line #{@line_number}: unterminated group in pattern")
      end
      @pattern.slice!(0)
      au
@@ -70,7 +71,7 @@ class Propane
      index = 0
      loop do
        if @pattern == ""
-          raise Error.new("Unterminated character class")
+          raise Error.new("Line #{@line_number}: unterminated character class")
        end
        c = @pattern.slice!(0)
        if c == "]"
@@ -84,13 +85,13 @@ class Propane
        elsif c == "-" && @pattern[0] != "]"
          begin_cu = ccu.last_unit
          unless begin_cu.is_a?(CharacterRangeUnit) && begin_cu.code_point_range.size == 1
-            raise Error.new("Character range must be between single characters")
+            raise Error.new("Line #{@line_number}: character range must be between single characters")
          end
          if @pattern[0] == "\\"
            @pattern.slice!(0)
            end_cu = parse_backslash
            unless end_cu.is_a?(CharacterRangeUnit) && end_cu.code_point_range.size == 1
-              raise Error.new("Character range must be between single characters")
+              raise Error.new("Line #{@line_number}: character range must be between single characters")
            end
            max_code_point = end_cu.code_point
          else
@@ -116,7 +117,7 @@ class Propane
      elsif max_count.to_s != ""
        max_count = max_count.to_i
        if max_count < min_count
-          raise Error.new("Maximum repetition count cannot be less than minimum repetition count")
+          raise Error.new("Line #{@line_number}: maximum repetition count cannot be less than minimum repetition count")
        end
      else
        max_count = nil
@@ -124,28 +125,33 @@ class Propane
      @pattern = pattern
      [min_count, max_count]
    else
-      raise Error.new("Unexpected match count at #{@pattern}")
+      raise Error.new("Line #{@line_number}: unexpected match count following {")
    end
  end

  def parse_backslash
    if @pattern == ""
-      raise Error.new("Error: unfollowed \\")
+      raise Error.new("Line #{@line_number}: error: unfollowed \\")
    else
      c = @pattern.slice!(0)
      case c
      when "a"
-        CharacterRangeUnit.new("\a", "\a")
+        CharacterRangeUnit.new("\a")
      when "b"
-        CharacterRangeUnit.new("\b", "\b")
+        CharacterRangeUnit.new("\b")
      when "d"
        CharacterRangeUnit.new("0", "9")
+      when "D"
+        ccu = CharacterClassUnit.new
+        ccu << CharacterRangeUnit.new("0", "9")
+        ccu.negate = true
+        ccu
      when "f"
-        CharacterRangeUnit.new("\f", "\f")
+        CharacterRangeUnit.new("\f")
      when "n"
-        CharacterRangeUnit.new("\n", "\n")
+        CharacterRangeUnit.new("\n")
      when "r"
-        CharacterRangeUnit.new("\r", "\r")
+        CharacterRangeUnit.new("\r")
      when "s"
        ccu = CharacterClassUnit.new
        ccu << CharacterRangeUnit.new(" ")
@@ -155,10 +161,35 @@ class Propane
        ccu << CharacterRangeUnit.new("\f")
        ccu << CharacterRangeUnit.new("\v")
        ccu
+      when "S"
+        ccu = CharacterClassUnit.new
+        ccu << CharacterRangeUnit.new(" ")
+        ccu << CharacterRangeUnit.new("\t")
+        ccu << CharacterRangeUnit.new("\r")
+        ccu << CharacterRangeUnit.new("\n")
+        ccu << CharacterRangeUnit.new("\f")
+        ccu << CharacterRangeUnit.new("\v")
+        ccu.negate = true
+        ccu
      when "t"
-        CharacterRangeUnit.new("\t", "\t")
+        CharacterRangeUnit.new("\t")
      when "v"
-        CharacterRangeUnit.new("\v", "\v")
+        CharacterRangeUnit.new("\v")
+      when "w"
+        ccu = CharacterClassUnit.new
+        ccu << CharacterRangeUnit.new("_")
+        ccu << CharacterRangeUnit.new("0", "9")
+        ccu << CharacterRangeUnit.new("a", "z")
+        ccu << CharacterRangeUnit.new("A", "Z")
+        ccu
+      when "W"
+        ccu = CharacterClassUnit.new
+        ccu << CharacterRangeUnit.new("_")
+        ccu << CharacterRangeUnit.new("0", "9")
+        ccu << CharacterRangeUnit.new("a", "z")
+        ccu << CharacterRangeUnit.new("A", "Z")
+        ccu.negate = true
+        ccu
      else
        CharacterRangeUnit.new(c)
      end
@@ -92,16 +92,19 @@ class Propane
      def initialize
        @units = []
+        @negate = false
      end
-      def method_missing(*args)
-        @units.__send__(*args)
+      def method_missing(*args, &block)
+        @units.__send__(*args, &block)
      end
      def <<(thing)
        if thing.is_a?(CharacterClassUnit)
-          thing.each do |ccu_unit|
-            @units << ccu_unit
+          if thing.negate
+            CodePointRange.invert_ranges(thing.map(&:code_point_range)).each do |cpr|
+              @units << CharacterRangeUnit.new(cpr.first, cpr.last)
+            end
+          else
+            thing.each do |ccu_unit|
+              @units << ccu_unit
+            end
          end
        else
          @units << thing

@@ -36,7 +36,7 @@ class Propane

    # @return [Array<Integer>]
    #   Map this rule's components to their positions in the parent RuleSet's
-    #   node field pointer array. This is used for AST construction.
+    #   node field pointer array. This is used for tree construction.
    attr_accessor :rule_set_node_field_index_map

    # Construct a Rule.

@@ -4,8 +4,8 @@ class Propane
  class RuleSet

    # @return [Array<Hash>]
-    #   AST fields.
-    attr_reader :ast_fields
+    #   Tree fields.
+    attr_reader :tree_fields

    # @return [Integer]
    #   ID of the RuleSet.
@@ -100,26 +100,28 @@ class Propane

    # Finalize a RuleSet after adding all Rules to it.
    def finalize(grammar)
-      if grammar.ast
-        build_ast_fields(grammar)
+      if grammar.tree
+        build_tree_fields(grammar)
      end
    end

    private

-    # Build the set of AST fields for this RuleSet.
+    # Build the set of tree fields for this RuleSet.
    #
    # This is an Array of Hashes. Each entry in the Array corresponds to a
-    # field location in the AST node. The entry is a Hash. It could have one or
+    # field location in the tree node. The entry is a Hash. It could have one or
    # two keys. It will always have the field name with a positional suffix as
    # a key. It may also have the field name without the positional suffix if
    # that field only exists in one position across all Rules in the RuleSet.
    #
    # @return [void]
-    def build_ast_fields(grammar)
-      field_ast_node_indexes = {}
+    def build_tree_fields(grammar)
+      field_tree_node_indexes = {}
      field_indexes_across_all_rules = {}
-      @ast_fields = []
+      # Stores the index into @tree_fields by field alias name.
+      field_aliases = {}
+      @tree_fields = []
      @rules.each do |rule|
        rule.components.each_with_index do |component, i|
          if component.is_a?(RuleSet) && component.optional?
@@ -130,15 +132,25 @@ class Propane
          else
            node_name = component.name
          end
-          struct_name = "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
+          struct_name = "#{grammar.tree_prefix}#{node_name}#{grammar.tree_suffix}"
          field_name = "p#{node_name}#{i + 1}"
-          unless field_ast_node_indexes[field_name]
-            field_ast_node_indexes[field_name] = @ast_fields.size
-            @ast_fields << {field_name => struct_name}
+          unless field_tree_node_indexes[field_name]
+            field_tree_node_indexes[field_name] = @tree_fields.size
+            @tree_fields << {field_name => struct_name}
          end
+          rule.aliases.each do |alias_name, index|
+            if index == i
+              alias_tree_fields_index = field_tree_node_indexes[field_name]
+              if field_aliases[alias_name] && field_aliases[alias_name] != alias_tree_fields_index
+                raise Error.new("Error: conflicting tree node field positions for alias `#{alias_name}` in rule #{rule.name} defined on line #{rule.line_number}")
+              end
+              field_aliases[alias_name] = alias_tree_fields_index
+              @tree_fields[alias_tree_fields_index][alias_name] = @tree_fields[alias_tree_fields_index].first[1]
+            end
+          end
          field_indexes_across_all_rules[node_name] ||= Set.new
-          field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
-          rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
+          field_indexes_across_all_rules[node_name] << field_tree_node_indexes[field_name]
+          rule.rule_set_node_field_index_map[i] = field_tree_node_indexes[field_name]
        end
      end
      field_indexes_across_all_rules.each do |node_name, indexes_across_all_rules|
@@ -146,20 +158,8 @@ class Propane
          # If this field was only seen in one position across all rules,
          # then add an alias to the positional field name that does not
          # include the position.
-          @ast_fields[indexes_across_all_rules.first]["p#{node_name}"] =
-            "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
-        end
-      end
-      # Now merge in the field aliases as given by the user in the
-      # grammar.
-      field_aliases = {}
-      @rules.each do |rule|
-        rule.aliases.each do |alias_name, index|
-          if field_aliases[alias_name] && field_aliases[alias_name] != index
-            raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}`")
-          end
-          field_aliases[alias_name] = index
-          @ast_fields[index][alias_name] = @ast_fields[index].first[1]
+          @tree_fields[indexes_across_all_rules.first]["p#{node_name}"] =
+            "#{grammar.tree_prefix}#{node_name}#{grammar.tree_suffix}"
        end
      end
    end
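
The shape of `@tree_fields` is worth pinning down: one Array entry per field slot in the generated tree node, each a Hash mapping every name that can address the slot to the node struct type stored there. A hedged example for a RuleSet with the rules `Pair -> Key Value` and `Pair -> Value` (names invented for illustration; tree_prefix/tree_suffix assumed empty):

    rule_set.tree_fields
    # => [
    #      {"pKey1" => "Key", "pKey" => "Key"},   # slot 0: Key appears only at
    #                                             # position 0, so it also gets
    #                                             # the position-free alias pKey
    #      {"pValue2" => "Value"},                # slot 1: Value appears at two
    #      {"pValue1" => "Value"},                # positions, so no alias
    #    ]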

@@ -10,6 +10,32 @@ class Propane
      "#{s}\n* #{message} *\n#{s}\n"
    end

+    # Determine the number of threads to use.
+    #
+    # @return [Integer]
+    #   The number of threads to use.
+    def determine_n_threads
+      # Try to figure out how many threads are available on the host hardware.
+      begin
+        case RbConfig::CONFIG["host_os"]
+        when /linux/
+          return File.read("/proc/cpuinfo").scan(/^processor\s*:/).size
+        when /mswin|mingw|msys/
+          if `wmic cpu get NumberOfLogicalProcessors -value` =~ /NumberOfLogicalProcessors=(\d+)/
+            return $1.to_i
+          end
+        when /darwin/
+          if `sysctl -n hw.ncpu` =~ /(\d+)/
+            return $1.to_i
+          end
+        end
+      rescue
+      end
+
+      # If we can't figure it out, default to 4.
+      4
+    end
+
  end

end
@@ -1,3 +1,3 @@
 class Propane
-  VERSION = "1.4.0"
+  VERSION = "4.1.0"
 end

@@ -120,11 +120,11 @@ string: /\\t/ <<
 >>
 string: /\\u[0-9a-fA-F]{4}/ <<
   /* Not actually going to encode the code point for this example... */
-  char s[] = {'{', match[2], match[3], match[4], match[5], '}', 0};
+  char s[] = {'{', (char)match[2], (char)match[3], (char)match[4], (char)match[5], '}', 0};
   str_append(&string_value, s);
 >>
 string: /[^\\]/ <<
-  char s[] = {match[0], 0};
+  char s[] = {(char)match[0], 0};
   str_append(&string_value, s);
 >>
 Start -> Value <<

@@ -5,7 +5,7 @@

 JSONValue * JSONValue_new(size_t id)
 {
-    JSONValue * jv = calloc(1, sizeof(JSONValue));
+    JSONValue * jv = (JSONValue *)calloc(1, sizeof(JSONValue));
     jv->id = id;
     return jv;
 }
@@ -29,7 +29,7 @@ void JSONObject_append(JSONValue * object, char const * name, JSONValue * value)
         }
     }
     size_t const new_size = size + 1;
-    void * new_entries = malloc(sizeof(object->object.entries[0]) * new_size);
+    JSONObjectEntry * new_entries = (JSONObjectEntry *)malloc(sizeof(object->object.entries[0]) * new_size);
     if (size > 0)
     {
         memcpy(new_entries, object->object.entries, size * sizeof(object->object.entries[0]));
@@ -52,7 +52,7 @@ void JSONArray_append(JSONValue * array, JSONValue * value)
 {
     size_t const size = array->array.size;
     size_t const new_size = size + 1;
-    JSONValue ** new_entries = malloc(sizeof(JSONValue *) * new_size);
+    JSONValue ** new_entries = (JSONValue **)malloc(sizeof(JSONValue *) * new_size);
     if (array->array.size > 0)
     {
         memcpy(new_entries, array->array.entries, sizeof(JSONValue *) * size);

@@ -11,6 +11,12 @@
 #define JSON_FALSE 5u
 #define JSON_NULL 6u

+typedef struct JSONObjectEntry_s
+{
+    char const * name;
+    struct JSONValue_s * value;
+} JSONObjectEntry;
+
 typedef struct JSONValue_s
 {
     size_t id;
@@ -19,11 +25,7 @@ typedef struct JSONValue_s
     struct
     {
         size_t size;
-        struct
-        {
-            char const * name;
-            struct JSONValue_s * value;
-        } * entries;
+        JSONObjectEntry * entries;
     } object;
     struct
     {
@@ -151,30 +151,30 @@ EOF

      o = grammar.patterns.find {|pattern| pattern.token == o}
      expect(o).to_not be_nil
-      expect(o.mode).to be_nil
+      expect(o.modes).to be_empty

      o = grammar.tokens.find {|token| token.name == "b"}
      expect(o).to_not be_nil

      o = grammar.patterns.find {|pattern| pattern.token == o}
      expect(o).to_not be_nil
-      expect(o.mode).to eq "m1"
+      expect(o.modes).to eq Set["m1"]

      o = grammar.patterns.find {|pattern| pattern.pattern == "foo"}
      expect(o).to_not be_nil
-      expect(o.mode).to be_nil
+      expect(o.modes).to be_empty

      o = grammar.patterns.find {|pattern| pattern.pattern == "bar"}
      expect(o).to_not be_nil
-      expect(o.mode).to eq "m2"
+      expect(o.modes).to eq Set["m2"]

      o = grammar.patterns.find {|pattern| pattern.pattern == "q"}
      expect(o).to_not be_nil
-      expect(o.mode).to be_nil
+      expect(o.modes).to be_empty

      o = grammar.patterns.find {|pattern| pattern.pattern == "r"}
      expect(o).to_not be_nil
-      expect(o.mode).to eq "m3"
+      expect(o.modes).to eq Set["m3"]
    end

    it "allows assigning ptypes to tokens and rules" do

@@ -126,6 +126,74 @@ EOF
      ]
      expect(run(<<EOF, ";")).to eq expected
token semicolon /;/;
EOF
    end

+    it "matches a negated character class" do
+      expected = [
+        ["pattern", "/abc/"],
+      ]
+      expect(run(<<EOF, "/abc/")).to eq expected
+token pattern /\\/[^\\s]*\\//;
+EOF
+    end
+
+    it "matches special character classes" do
+      expected = [
+        ["a", "abc123_FOO"],
+      ]
+      expect(run(<<EOF, "abc123_FOO")).to eq expected
+token a /\\w+/;
+EOF
+      expected = [
+        ["b", "FROG*%$#"],
+      ]
+      expect(run(<<EOF, "FROG*%$#")).to eq expected
+token b /FROG\\D{1,4}/;
+EOF
+      expected = [
+        ["c", "$883366"],
+      ]
+      expect(run(<<EOF, "$883366")).to eq expected
+token c /$\\d+/;
+EOF
+      expected = [
+        ["d", "^&$@"],
+      ]
+      expect(run(<<EOF, "^&$@")).to eq expected
+token d /^\\W+/;
+EOF
+      expected = [
+        ["a", "abc123_FOO"],
+        [nil, " "],
+        ["b", "FROG*%$#"],
+        [nil, " "],
+        ["c", "$883366"],
+        [nil, " "],
+        ["d", "^&$@"],
+      ]
+      expect(run(<<EOF, "abc123_FOO FROG*%$# $883366 ^&$@")).to eq expected
+token a /\\w+/;
+token b /FROG\\D{1,4}/;
+token c /$\\d+/;
+token d /^\\W+/;
+drop /\\s+/;
+EOF
+    end
+
+    it "matches a negated character class with a nested inner negated character class" do
+      expected = [
+        ["t", "$&*"],
+      ]
+      expect(run(<<EOF, "$&*")).to eq expected
+token t /[^%\\W]+/;
+EOF
+    end
+
+    it "\\s matches a newline" do
+      expected = [["s", "\n"]]
+      expect(run(<<EOF, "\n")).to eq expected
+token s /\\s/;
+EOF
+    end
  end
@ -2,14 +2,14 @@ class Propane
|
||||
RSpec.describe Regex do
|
||||
|
||||
it "parses an empty expression" do
|
||||
regex = Regex.new("")
|
||||
regex = Regex.new("", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0].size).to eq 0
|
||||
end
|
||||
|
||||
it "parses a single character unit expression" do
|
||||
regex = Regex.new("a")
|
||||
regex = Regex.new("a", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -19,7 +19,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a group with a single character unit expression" do
|
||||
regex = Regex.new("(a)")
|
||||
regex = Regex.new("(a)", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -33,7 +33,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a *" do
|
||||
regex = Regex.new("a*")
|
||||
regex = Regex.new("a*", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -47,7 +47,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a +" do
|
||||
regex = Regex.new("a+")
|
||||
regex = Regex.new("a+", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -61,7 +61,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a ?" do
|
||||
regex = Regex.new("a?")
|
||||
regex = Regex.new("a?", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -75,7 +75,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a multiplicity count" do
|
||||
regex = Regex.new("a{5}")
|
||||
regex = Regex.new("a{5}", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -89,7 +89,7 @@ class Propane
end

it "parses a minimum-only multiplicity count" do
regex = Regex.new("a{5,}")
regex = Regex.new("a{5,}", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -103,7 +103,7 @@ class Propane
end

it "parses a minimum and maximum multiplicity count" do
regex = Regex.new("a{5,8}")
regex = Regex.new("a{5,8}", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -118,7 +118,7 @@ class Propane
end

it "parses an escaped *" do
regex = Regex.new("a\\*")
regex = Regex.new("a\\*", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -131,7 +131,7 @@ class Propane
end

it "parses an escaped +" do
regex = Regex.new("a\\+")
regex = Regex.new("a\\+", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -144,7 +144,7 @@ class Propane
end

it "parses an escaped \\" do
regex = Regex.new("\\\\d")
regex = Regex.new("\\\\d", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -157,7 +157,7 @@ class Propane
end

it "parses a character class" do
regex = Regex.new("[a-z_]")
regex = Regex.new("[a-z_]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -175,7 +175,7 @@ class Propane
end

it "parses a negated character class" do
regex = Regex.new("[^xyz]")
regex = Regex.new("[^xyz]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -189,8 +189,25 @@ class Propane
expect(ccu[0].first).to eq "x".ord
end

it "parses a negated character class with inner character classes" do
regex = Regex.new("[^x\\sz]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
seq_unit = regex.unit.alternates[0]
expect(seq_unit.size).to eq 1
expect(seq_unit[0]).to be_a Regex::CharacterClassUnit
ccu = seq_unit[0]
expect(ccu.negate).to be_truthy
expect(ccu.size).to eq 8
expect(ccu[0]).to be_a Regex::CharacterRangeUnit
expect(ccu[0].first).to eq "x".ord
expect(ccu[1].first).to eq " ".ord
expect(ccu[7].first).to eq "z".ord
end

it "parses - as a plain character at beginning of a character class" do
regex = Regex.new("[-9]")
regex = Regex.new("[-9]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -204,7 +221,7 @@ class Propane
end

it "parses - as a plain character at end of a character class" do
regex = Regex.new("[0-]")
regex = Regex.new("[0-]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -220,7 +237,7 @@ class Propane
end

it "parses - as a plain character at beginning of a negated character class" do
regex = Regex.new("[^-9]")
regex = Regex.new("[^-9]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -235,7 +252,7 @@ class Propane
end

it "parses . as a plain character in a character class" do
regex = Regex.new("[.]")
regex = Regex.new("[.]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -250,7 +267,7 @@ class Propane
end

it "parses - as a plain character when escaped in middle of character class" do
regex = Regex.new("[0\\-9]")
regex = Regex.new("[0\\-9]", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -269,7 +286,7 @@ class Propane
end

it "parses alternates" do
regex = Regex.new("ab|c")
regex = Regex.new("ab|c", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 2
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -279,7 +296,7 @@ class Propane
end

it "parses a ." do
regex = Regex.new("a.b")
regex = Regex.new("a.b", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 1
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -290,7 +307,7 @@ class Propane
end

it "parses something complex" do
regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+")
regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+", 1)
expect(regex.unit).to be_a Regex::AlternatesUnit
expect(regex.unit.alternates.size).to eq 4
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
File diff suppressed because it is too large
@@ -2,6 +2,10 @@ unless ENV["dist_specs"]
require "bundler/setup"
require "simplecov"

class MyFormatter
def format(*args)
end
end
SimpleCov.start do
add_filter "/spec/"
add_filter "/.bundle/"
@@ -12,6 +16,7 @@ unless ENV["dist_specs"]
end
project_name "Propane"
merge_timeout 3600
formatter(MyFormatter)
end

RSpec.configure do |config|
@@ -1,102 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(1, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(2, start->pT1->pA->position.row);
assert_eq(2, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(7, start->pT1->pA->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(7, start->pT1->end_position.col);

assert_eq(1, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(7, start->end_position.col);

input = "a\nbb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(1, start->pT1->pA->end_position.row);
assert_eq(1, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(1, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);

assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(1, start->end_position.row);
assert_eq(1, start->end_position.col);

input = "a\nc\nc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(0, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);

assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(0, start->end_position.col);

input = "a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(!p_position_valid(start->pT1->pA->position));
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);

assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(0, start->end_position.col);

return 0;
}
@@ -1,104 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(1, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(2, start.pT1.pA.position.row);
assert_eq(2, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(7, start.pT1.pA.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(7, start.pT1.end_position.col);

assert_eq(1, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(7, start.end_position.col);

input = "a\nbb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(1, start.pT1.pA.end_position.row);
assert_eq(1, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(1, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);

assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(1, start.end_position.row);
assert_eq(1, start.end_position.col);

input = "a\nc\nc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(0, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);

assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(0, start.end_position.col);

input = "a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(!start.pT1.pA.position.valid);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);

assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(0, start.end_position.col);
}
@@ -1,84 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "abbccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);

assert_eq(0, start->pT2->pToken->position.row);
assert_eq(1, start->pT2->pToken->position.col);
assert_eq(0, start->pT2->pToken->end_position.row);
assert_eq(2, start->pT2->pToken->end_position.col);
assert_eq(0, start->pT2->position.row);
assert_eq(1, start->pT2->position.col);
assert_eq(0, start->pT2->end_position.row);
assert_eq(2, start->pT2->end_position.col);

assert_eq(0, start->pT3->pToken->position.row);
assert_eq(3, start->pT3->pToken->position.col);
assert_eq(0, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(0, start->pT3->position.row);
assert_eq(3, start->pT3->position.col);
assert_eq(0, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);

assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(5, start->end_position.col);

input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(2, start->pT1->pToken->position.row);
assert_eq(2, start->pT1->pToken->position.col);
assert_eq(2, start->pT1->pToken->end_position.row);
assert_eq(3, start->pT1->pToken->end_position.col);
assert_eq(2, start->pT1->position.row);
assert_eq(2, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(3, start->pT1->end_position.col);

assert_eq(3, start->pT2->pToken->position.row);
assert_eq(0, start->pT2->pToken->position.col);
assert_eq(4, start->pT2->pToken->end_position.row);
assert_eq(1, start->pT2->pToken->end_position.col);
assert_eq(3, start->pT2->position.row);
assert_eq(0, start->pT2->position.col);
assert_eq(4, start->pT2->end_position.row);
assert_eq(1, start->pT2->end_position.col);

assert_eq(6, start->pT3->pToken->position.row);
assert_eq(5, start->pT3->pToken->position.col);
assert_eq(6, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(6, start->pT3->position.row);
assert_eq(5, start->pT3->position.col);
assert_eq(6, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);

assert_eq(2, start->position.row);
assert_eq(2, start->position.col);
assert_eq(6, start->end_position.row);
assert_eq(5, start->end_position.col);

return 0;
}
@@ -1,86 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "abbccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);

assert_eq(0, start.pT2.pToken.position.row);
assert_eq(1, start.pT2.pToken.position.col);
assert_eq(0, start.pT2.pToken.end_position.row);
assert_eq(2, start.pT2.pToken.end_position.col);
assert_eq(0, start.pT2.position.row);
assert_eq(1, start.pT2.position.col);
assert_eq(0, start.pT2.end_position.row);
assert_eq(2, start.pT2.end_position.col);

assert_eq(0, start.pT3.pToken.position.row);
assert_eq(3, start.pT3.pToken.position.col);
assert_eq(0, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(0, start.pT3.position.row);
assert_eq(3, start.pT3.position.col);
assert_eq(0, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);

assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(5, start.end_position.col);

input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(2, start.pT1.pToken.position.row);
assert_eq(2, start.pT1.pToken.position.col);
assert_eq(2, start.pT1.pToken.end_position.row);
assert_eq(3, start.pT1.pToken.end_position.col);
assert_eq(2, start.pT1.position.row);
assert_eq(2, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(3, start.pT1.end_position.col);

assert_eq(3, start.pT2.pToken.position.row);
assert_eq(0, start.pT2.pToken.position.col);
assert_eq(4, start.pT2.pToken.end_position.row);
assert_eq(1, start.pT2.pToken.end_position.col);
assert_eq(3, start.pT2.position.row);
assert_eq(0, start.pT2.position.col);
assert_eq(4, start.pT2.end_position.row);
assert_eq(1, start.pT2.end_position.col);

assert_eq(6, start.pT3.pToken.position.row);
assert_eq(5, start.pT3.pToken.position.col);
assert_eq(6, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(6, start.pT3.position.row);
assert_eq(5, start.pT3.position.col);
assert_eq(6, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);

assert_eq(2, start.position.row);
assert_eq(2, start.position.col);
assert_eq(6, start.end_position.row);
assert_eq(5, start.end_position.col);
}
@@ -5,25 +5,29 @@
int main()
{
char const * input = "1 + 2 * 3 + 4";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(11, p_result(&context));
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(11, p_result(context));
p_context_delete(context);

input = "1 * 2 ** 4 * 3";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(48, p_result(&context));
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(48, p_result(context));
p_context_delete(context);

input = "(1 + 2) * 3 + 4";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(13, p_result(&context));
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(13, p_result(context));
p_context_delete(context);

input = "(2 * 2) ** 3 + 4 + 5";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(73, p_result(&context));
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(73, p_result(context));
p_context_delete(context);

return 0;
}
@@ -10,23 +10,23 @@ int main()
unittest
{
string input = "1 + 2 * 3 + 4";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(11, p_result(&context));
p_context_t * context;
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(11, p_result(context));

input = "1 * 2 ** 4 * 3";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(48, p_result(&context));
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(48, p_result(context));

input = "(1 + 2) * 3 + 4";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(13, p_result(&context));
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(13, p_result(context));

input = "(2 * 2) ** 3 + 4 + 5";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(73, p_result(&context));
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
assert_eq(73, p_result(context));
}
15
spec/test_drop_code_block.c
Normal file
@@ -0,0 +1,15 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
char const * input = " # comment 1\n# comment 2\na\n";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}
16
spec/test_drop_code_block.d
Normal file
@@ -0,0 +1,16 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = " # comment 1\n# comment 2\na\n";
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}
@@ -5,38 +5,43 @@
int main()
{
char const * input = "a 42";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "a\n123\na a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context).row == 2);
assert(p_position(&context).col == 3);
assert(p_token(&context) == TOKEN_a);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context).row == 3);
assert(p_position(context).col == 4);
assert(p_token(context) == TOKEN_a);
p_context_delete(context);

input = "12";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context).row == 0);
assert(p_position(&context).col == 0);
assert(p_token(&context) == TOKEN_num);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context).row == 1);
assert(p_position(context).col == 1);
assert(p_token(context) == TOKEN_num);
p_context_delete(context);

input = "a 12\n\nab";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
assert(p_position(&context).row == 2);
assert(p_position(&context).col == 1);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_UNEXPECTED_INPUT);
assert(p_position(context).row == 3);
assert(p_position(context).col == 2);
p_context_delete(context);

input = "a 12\n\na\n\n77\na \xAA";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_DECODE_ERROR);
assert(p_position(&context).row == 5);
assert(p_position(&context).col == 4);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_DECODE_ERROR);
assert(p_position(context).row == 6);
assert(p_position(context).col == 5);

assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
assert(strcmp(p_token_names[TOKEN_num], "num") == 0);
p_context_delete(context);

return 0;
}
@@ -9,31 +9,31 @@ int main()
unittest
{
string input = "a 42";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = "a\n123\na a";
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context) == p_position_t(2, 3));
assert(p_token(&context) == TOKEN_a);
context = p_context_new(input);
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context) == p_position_t(3, 4));
assert(p_token(context) == TOKEN_a);

input = "12";
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context) == p_position_t(0, 0));
assert(p_token(&context) == TOKEN_num);
context = p_context_new(input);
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context) == p_position_t(1, 1));
assert(p_token(context) == TOKEN_num);

input = "a 12\n\nab";
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
assert(p_position(&context) == p_position_t(2, 1));
context = p_context_new(input);
assert(p_parse(context) == P_UNEXPECTED_INPUT);
assert(p_position(context) == p_position_t(3, 2));

input = "a 12\n\na\n\n77\na \xAA";
p_context_init(&context, input);
assert(p_parse(&context) == P_DECODE_ERROR);
assert(p_position(&context) == p_position_t(5, 4));
context = p_context_new(input);
assert(p_parse(context) == P_DECODE_ERROR);
assert(p_position(context) == p_position_t(6, 5));

assert(p_token_names[TOKEN_a] == "a");
assert(p_token_names[TOKEN_num] == "num");
@@ -6,8 +6,9 @@
int main()
{
char const * input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);
return 0;
}
@@ -9,7 +9,7 @@ int main()
unittest
{
string input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}
@@ -38,73 +38,75 @@ int main()

p_token_info_t token_info;
char const * input = "5 + 4 * \n677 + 567";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 0u);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 1u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 2u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 2u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 3u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 3u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 4u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 4u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 5u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 5u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 6u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 6u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 7u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 7u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_times);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 2u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 3u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 4u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 4u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 5u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 5u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 6u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 8u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 7u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 9u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 9u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 9u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 10u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 10u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);
p_context_delete(context);

p_context_init(&context, (uint8_t const *)"", 0u);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 0u);
context = p_context_new((uint8_t const *)"", 0u);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 1u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);
p_context_delete(context);

return 0;
}
@@ -44,26 +44,26 @@ unittest
{
p_token_info_t token_info;
string input = "5 + 4 * \n677 + 567";
p_context_t context;
p_context_init(&context, input);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 2), p_position_t(0, 2), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 4), p_position_t(0, 4), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 6), p_position_t(0, 6), 1, TOKEN_times));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 0), p_position_t(1, 2), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 4), p_position_t(1, 4), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 6), p_position_t(1, 8), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 9), p_position_t(1, 9), 0, TOKEN___EOF));
p_context_t * context;
context = p_context_new(input);
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 1, TOKEN_int));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 3), p_position_t(1, 3), 1, TOKEN_plus));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 5), p_position_t(1, 5), 1, TOKEN_int));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 7), p_position_t(1, 7), 1, TOKEN_times));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 1), p_position_t(2, 3), 3, TOKEN_int));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 5), p_position_t(2, 5), 1, TOKEN_plus));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 7), p_position_t(2, 9), 3, TOKEN_int));
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 10), p_position_t(2, 10), 0, TOKEN___EOF));

p_context_init(&context, "");
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 0, TOKEN___EOF));
context = p_context_new("");
assert(p_lex(context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 0, TOKEN___EOF));
}
@@ -6,10 +6,11 @@
int main()
{
char const * input = "identifier_123";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass1\n");
p_context_delete(context);

return 0;
}
@@ -9,8 +9,8 @@ int main()
unittest
{
string input = `identifier_123`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass1");
}
@@ -6,15 +6,17 @@
int main()
{
char const * input = "abc \"a string\" def";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass1\n");
p_context_delete(context);

input = "abc \"abc def\" def";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass2\n");
p_context_delete(context);

return 0;
}
@@ -9,13 +9,13 @@ int main()
unittest
{
string input = `abc "a string" def`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass1");

input = `abc "abc def" def`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass2");
}
22
spec/test_lexer_multiple_modes.c
Normal file
@@ -0,0 +1,22 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
char const * input = "abc.def";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass1\n");
p_context_delete(context);

input = "abc . abc";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass2\n");
p_context_delete(context);

return 0;
}
21
spec/test_lexer_multiple_modes.d
Normal file
@@ -0,0 +1,21 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = `abc.def`;
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass1");

input = `abc . abc`;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass2");
}
@@ -5,15 +5,17 @@
int main()
{
char const * input = "x";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 1u);
p_context_delete(context);

input = "fabulous";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 8u);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 8u);
p_context_delete(context);

return 0;
}
@@ -9,13 +9,13 @@ int main()
unittest
{
string input = `x`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 1u);

input = `fabulous`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 8u);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 8u);
}
@@ -5,14 +5,16 @@
int main()
{
char const * input = "x";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_UNEXPECTED_INPUT);
p_context_delete(context);

input = "123";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 123u);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 123u);
p_context_delete(context);

return 0;
}
@@ -9,12 +9,12 @@ int main()
unittest
{
string input = `x`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_UNEXPECTED_INPUT);

input = `123`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 123u);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 123u);
}
@@ -5,9 +5,10 @@
int main()
{
char const * input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}
@@ -9,7 +9,7 @@ int main()
unittest
{
string input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}
@@ -6,14 +6,16 @@
int main()
{
char const * input1 = "a\n1";
myp1_context_t context1;
myp1_context_init(&context1, (uint8_t const *)input1, strlen(input1));
assert(myp1_parse(&context1) == MYP1_SUCCESS);
myp1_context_t * context1;
context1 = myp1_context_new((uint8_t const *)input1, strlen(input1));
assert(myp1_parse(context1) == MYP1_SUCCESS);
myp1_context_delete(context1);

char const * input2 = "bcb";
myp2_context_t context2;
myp2_context_init(&context2, (uint8_t const *)input2, strlen(input2));
assert(myp2_parse(&context2) == MYP2_SUCCESS);
myp2_context_t * context2;
context2 = myp2_context_new((uint8_t const *)input2, strlen(input2));
assert(myp2_parse(context2) == MYP2_SUCCESS);
myp2_context_delete(context2);

return 0;
}
@@ -10,12 +10,12 @@ int main()
unittest
{
string input1 = "a\n1";
myp1_context_t context1;
myp1_context_init(&context1, input1);
assert(myp1_parse(&context1) == MYP1_SUCCESS);
myp1_context_t * context1;
context1 = myp1_context_new(input1);
assert(myp1_parse(context1) == MYP1_SUCCESS);

string input2 = "bcb";
myp2_context_t context2;
myp2_context_init(&context2, input2);
assert(myp2_parse(&context2) == MYP2_SUCCESS);
myp2_context_t * context2;
context2 = myp2_context_new(input2);
assert(myp2_parse(context2) == MYP2_SUCCESS);
}
54
spec/test_named_optional_rule_component_tree.c
Normal file
@@ -0,0 +1,54 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "b";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);
assert(start->r == NULL);

p_tree_delete(start);
p_context_delete(context);

input = "abcd";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start->a != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->r != NULL);
assert(start->pR == start->pR3);
assert(start->pR == start->r);
assert_eq(TOKEN_c, start->pR->pToken1->token);

p_tree_delete(start);
p_context_delete(context);

input = "bdc";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert(start->r != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

p_tree_delete(start);
p_context_delete(context);

return 0;
}
51
spec/test_named_optional_rule_component_tree.d
Normal file
@@ -0,0 +1,51 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);
assert(start.r is null);

p_tree_delete(start);

input = "abcd";
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
assert(start.pR3 != null);
assert(start.pR != null);
assert(start.r != null);
assert(start.pR == start.pR3);
assert(start.pR == start.r);
assert_eq(TOKEN_c, start.pR.pToken1.token);

p_tree_delete(start);

input = "bdc";
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);

p_tree_delete(start);
}
@@ -5,17 +5,20 @@
int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "abdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}
@@ -9,15 +9,15 @@ int main()
unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = "abdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}
@@ -6,20 +6,23 @@
int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);

p_tree_delete(start);
p_context_delete(context);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start->pToken1 != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
@@ -28,15 +31,21 @@ int main()
assert(start->pR == start->pR3);
assert_eq(TOKEN_c, start->pR->pToken1->token);

p_tree_delete(start);
p_context_delete(context);

input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert(start->pR != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

p_tree_delete(start);
p_context_delete(context);

return 0;
}
@@ -10,20 +10,21 @@ int main()
unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);

p_tree_delete(start);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
@@ -32,12 +33,16 @@ unittest
assert(start.pR == start.pR3);
assert_eq(TOKEN_c, start.pR.pToken1.token);

p_tree_delete(start);

input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);

p_tree_delete(start);
}
@@ -5,13 +5,15 @@
int main()
{
char const * input = "aba";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "abb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}
@@ -9,11 +9,11 @@ int main()
unittest
{
string input = "aba";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = "abb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}
@@ -5,20 +5,23 @@
int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context).row == 0);
assert(p_position(&context).col == 1);
assert(context.token == TOKEN___EOF);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context).row == 1);
assert(p_position(context).col == 2);
assert(context->token == TOKEN___EOF);
p_context_delete(context);

input = "a b";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "bb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}
@ -9,17 +9,17 @@ int main()
unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context) == p_position_t(0, 1));
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_UNEXPECTED_TOKEN);
assert(p_position(context) == p_position_t(1, 2));
assert(context.token == TOKEN___EOF);

input = "a b";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = "bb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}

@ -5,9 +5,10 @@
int main()
{
char const * input = "ab";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}

@ -9,7 +9,7 @@ int main()
unittest
{
string input = "ab";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}

@ -6,51 +6,58 @@
int main()
{
char const * input = "";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

input = "{}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_OBJECT);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context)->id == JSON_OBJECT);
p_context_delete(context);

input = "[]";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_ARRAY);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context)->id == JSON_ARRAY);
p_context_delete(context);

input = "-45.6";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == -45.6);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context)->id == JSON_NUMBER);
assert(p_result(context)->number == -45.6);
p_context_delete(context);

input = "2E-2";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == 0.02);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context)->id == JSON_NUMBER);
assert(p_result(context)->number == 0.02);
p_context_delete(context);

input = "{\"hi\":true}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
JSONValue * o = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
JSONValue * o = p_result(context);
assert(o->id == JSON_OBJECT);
assert_eq(1, o->object.size);
assert(strcmp(o->object.entries[0].name, "hi") == 0);
assert(o->object.entries[0].value->id == JSON_TRUE);
p_context_delete(context);

input = "{\"ff\": false, \"nn\": null}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
o = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
o = p_result(context);
assert(o->id == JSON_OBJECT);
assert_eq(2, o->object.size);
assert(strcmp(o->object.entries[0].name, "ff") == 0);
assert(o->object.entries[0].value->id == JSON_FALSE);
assert(strcmp(o->object.entries[1].name, "nn") == 0);
assert(o->object.entries[1].value->id == JSON_NULL);
p_context_delete(context);

return 0;
}

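A minimal sketch, not part of the diff: the C tests above read parse results through a `JSONValue` with an `id` tag plus per-kind members (`number`, `object.size`, `object.entries[]`). Assuming that layout (the defining header is not shown in this comparison), walking an object result looks like this:

```c
/* Minimal sketch under the JSONValue layout implied by the tests above;
 * not a definitive implementation. */
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "{\"hi\":true}";
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));
    assert(p_parse(context) == P_SUCCESS);
    JSONValue * o = p_result(context);
    assert(o->id == JSON_OBJECT);
    /* Each entry pairs a name with a nested JSONValue. */
    for (size_t i = 0u; i < o->object.size; i++)
    {
        printf("%s -> id %d\n", o->object.entries[i].name,
               (int)o->object.entries[i].value->id);
    }
    p_context_delete(context);
    return 0;
}
```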
@ -10,45 +10,45 @@ int main()
unittest
{
string input = ``;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);

input = `{}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONObject)p_result(context));

input = `[]`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONArray)p_result(&context));
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONArray)p_result(context));

input = `-45.6`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(&context));
assert((cast(JSONNumber)p_result(&context)).value == -45.6);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(context));
assert((cast(JSONNumber)p_result(context)).value == -45.6);

input = `2E-2`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(&context));
assert((cast(JSONNumber)p_result(&context)).value == 0.02);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(context));
assert((cast(JSONNumber)p_result(context)).value == 0.02);

input = `{"hi":true}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
JSONObject o = cast(JSONObject)p_result(&context);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONObject)p_result(context));
JSONObject o = cast(JSONObject)p_result(context);
assert(o.value["hi"]);
assert(cast(JSONTrue)o.value["hi"]);

input = `{"ff": false, "nn": null}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
o = cast(JSONObject)p_result(&context);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(cast(JSONObject)p_result(context));
o = cast(JSONObject)p_result(context);
assert(o.value["ff"]);
assert(cast(JSONFalse)o.value["ff"]);
assert(o.value["nn"]);

@ -5,20 +5,23 @@
int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 1u);
p_context_delete(context);

input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 0u);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 0u);
p_context_delete(context);

input = "aaaaaaaaaaaaaaaa";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 16u);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 16u);
p_context_delete(context);

return 0;
}

@ -9,18 +9,18 @@ int main()
unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 1u);

input = "";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 0u);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 0u);

input = "aaaaaaaaaaaaaaaa";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 16u);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
assert(p_result(context) == 16u);
}

@ -6,15 +6,17 @@
int main()
{
char const * input = "abcdef";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass1\n");
p_context_delete(context);

input = "defabcdef";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
printf("pass2\n");
p_context_delete(context);

return 0;
}

@ -9,13 +9,13 @@ int main()
unittest
{
string input = "abcdef";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass1");

input = "defabcdef";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
writeln("pass2");
}

@ -5,9 +5,10 @@
int main()
{
char const * input = "defghidef";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
p_context_delete(context);

return 0;
}

@ -9,7 +9,7 @@ int main()
unittest
{
string input = "defghidef";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
}

@ -1,17 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "hi";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
assert(top->pToken != NULL);
assert_eq(TOKEN_hi, top->pToken->token);

return 0;
}
20
spec/test_start_rule_tree.c
Normal file
@ -0,0 +1,20 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "hi";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
Top * top = p_result(context);
assert(top->pToken != NULL);
assert_eq(TOKEN_hi, top->pToken->token);

p_tree_delete(top);
p_context_delete(context);

return 0;
}
@ -10,10 +10,10 @@ int main()
unittest
{
string input = "hi";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
p_context_t * context;
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
Top * top = p_result(context);
assert(top.pToken !is null);
assert_eq(TOKEN_hi, top.pToken.token);
}
30
spec/test_starting_rules.c
Normal file
@ -0,0 +1,30 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "bbbb";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
int result = p_result(context);
assert_eq(8, result);
p_context_delete(context);

context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse_Bs(context) == P_SUCCESS);
result = p_result_Bs(context);
assert_eq(8, result);
p_context_delete(context);

input = "c";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse_R(context) == P_SUCCESS);
result = p_result_R(context);
assert_eq(3, result);
p_context_delete(context);

return 0;
}
29
spec/test_starting_rules.d
Normal file
@ -0,0 +1,29 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "bbbb";
p_context_t * context;
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
int result = p_result(context);
assert(result == 8);

context = p_context_new(input);
assert(p_parse_Bs(context) == P_SUCCESS);
result = p_result_Bs(context);
assert(result == 8);

input = "c";
context = p_context_new(input);
assert(p_parse_R(context) == P_SUCCESS);
result = p_result_R(context);
assert(result == 3);
}
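The two new files above exercise per-rule entry points: for each declared start rule, the generated parser appears to expose `p_parse_<Rule>()` and `p_result_<Rule>()` alongside the default `p_parse()`/`p_result()`. A minimal sketch, not part of the diff, of driving the parser from a secondary start rule, using the names from the tests (the grammar itself is not shown here):

```c
/* Minimal sketch assuming a grammar with a secondary start rule Bs, as in
 * spec/test_starting_rules.c above. */
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main(void)
{
    char const * input = "bbbb";
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));
    assert(p_parse_Bs(context) == P_SUCCESS);  /* start from Bs, not the default rule */
    int result = p_result_Bs(context);         /* result type follows the rule's type */
    p_context_delete(context);
    return result == 8 ? 0 : 1;
}
```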
40
spec/test_starting_rules_tree.c
Normal file
@ -0,0 +1,40 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "bbbb";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert_not_null(start->bs);
assert_not_null(start->bs->b);
assert_not_null(start->bs->bs->b);
assert_not_null(start->bs->bs->bs->b);
assert_not_null(start->bs->bs->bs->bs->b);
p_tree_delete(start);
p_context_delete(context);

context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse_Bs(context) == P_SUCCESS);
Bs * bs = p_result_Bs(context);
assert_not_null(bs->b);
assert_not_null(bs->bs->b);
assert_not_null(bs->bs->bs->b);
assert_not_null(bs->bs->bs->bs->b);
p_tree_delete_Bs(bs);
p_context_delete(context);

input = "c";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse_R(context) == P_SUCCESS);
R * r = p_result_R(context);
assert_not_null(r->c);
p_tree_delete_R(r);
p_context_delete(context);

return 0;
}
41
spec/test_starting_rules_tree.d
Normal file
@ -0,0 +1,41 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "bbbb";
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start.bs);
assert(start.bs.b);
assert(start.bs.bs.b);
assert(start.bs.bs.bs.b);
assert(start.bs.bs.bs.bs.b);

p_tree_delete(start);

context = p_context_new(input);
assert(p_parse_Bs(context) == P_SUCCESS);
Bs * bs = p_result_Bs(context);
assert(bs.b);
assert(bs.bs.b);
assert(bs.bs.bs.b);
assert(bs.bs.bs.bs.b);

p_tree_delete_Bs(bs);

input = "c";
context = p_context_new(input);
assert(p_parse_R(context) == P_SUCCESS);
R * r = p_result_R(context);
assert(r.c);

p_tree_delete_R(r);
}
46
spec/test_token_user_fields.c
Normal file
@ -0,0 +1,46 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main()
{
char const * input =
"# c1\n"
"# c2\n"
"\n"
"first\n"
"\n \n \n"
" # s1\n"
" # s2\n"
"second\n";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start->pIDs);
assert(start->pIDs->id);
#ifdef __cplusplus
assert(start->pIDs->id->comments == "# c1\n# c2\n");
#else
assert(start->pIDs->id->comments);
assert(strcmp(start->pIDs->id->comments, "# c1\n# c2\n") == 0);
#endif
assert(start->pIDs->pIDs);
assert(start->pIDs->pIDs->id);
#ifdef __cplusplus
assert(start->pIDs->pIDs->id->comments == "# s1\n# s2\n");
#else
assert(start->pIDs->pIDs->id->comments);
assert(strcmp(start->pIDs->pIDs->id->comments, "# s1\n# s2\n") == 0);
#endif

#ifndef __cplusplus
free(context->comments);
#endif
p_context_delete(context);
p_tree_delete(start);

return 0;
}
31
spec/test_token_user_fields.d
Normal file
@ -0,0 +1,31 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input =
"# c1\n" ~
"# c2\n" ~
"\n" ~
"first\n" ~
"\n \n \n" ~
" # s1\n" ~
" # s2\n" ~
"second\n";
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);
assert(start.pIDs);
assert(start.pIDs.id);
assert(start.pIDs.id.comments == "# c1\n# c2\n");
assert(start.pIDs.pIDs);
assert(start.pIDs.pIDs.id);
assert(start.pIDs.pIDs.id.comments == "# s1\n# s2\n");

p_tree_delete(start);
}
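Both token-user-field tests assert that comment text gathered by the lexer rides along on the token nodes (`id->comments`), and the C variant additionally frees a `comments` buffer still held by the context. A minimal sketch of the read side, not part of the diff; the grammar-side `token_user_fields` declaration and the lexer user code that fills the field are assumed and not shown in this comparison:

```c
/* Minimal sketch assuming a grammar that declares a `comments` token user
 * field and accepts this shortened input; mirrors the first assertions of
 * spec/test_token_user_fields.c above. */
#include "testparser.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char const * input = "# c1\n# c2\n\nfirst\n";
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));
    assert(p_parse(context) == P_SUCCESS);
    Start * start = p_result(context);
    /* Comment text collected before the token is attached to its node. */
    assert(strcmp(start->pIDs->id->comments, "# c1\n# c2\n") == 0);
    free(context->comments);   /* any unattached buffer stays with the context (C build) */
    p_tree_delete(start);
    p_context_delete(context);
    return 0;
}
```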
@ -6,10 +6,10 @@
int main()
{
char const * input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
Start * start = p_result(&context);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
Start * start = p_result(context);
assert(start->pItems1 != NULL);
assert(start->pItems != NULL);
Items * items = start->pItems;
@ -33,16 +33,22 @@ int main()
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore == NULL);

p_tree_delete(start);
p_context_delete(context);

input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
start = p_result(context);
assert(start->pItems == NULL);

p_tree_delete(start);
p_context_delete(context);

input = "2 1";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
start = p_result(context);
assert(start->pItems != NULL);
assert(start->pItems->pItem != NULL);
assert(start->pItems->pItem->pDual != NULL);
@ -51,5 +57,8 @@ int main()
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
assert(start->pItems->pItem->pDual->pOne1 == NULL);

p_tree_delete(start);
p_context_delete(context);

return 0;
}
@ -10,10 +10,9 @@ int main()
unittest
{
string input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
Start * start = p_result(&context);
p_context_t * context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
Start * start = p_result(context);
assert(start.pItems1 !is null);
assert(start.pItems !is null);
Items * items = start.pItems;
@ -37,16 +36,20 @@ unittest
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore is null);

p_tree_delete(start);

input = "";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
start = p_result(context);
assert(start.pItems is null);

p_tree_delete(start);

input = "2 1";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
context = p_context_new(input);
assert_eq(P_SUCCESS, p_parse(context));
start = p_result(context);
assert(start.pItems !is null);
assert(start.pItems.pItem !is null);
assert(start.pItems.pItem.pDual !is null);
@ -54,4 +57,6 @@ unittest
assert(start.pItems.pItem.pDual.pOne2 !is null);
assert(start.pItems.pItem.pDual.pTwo2 is null);
assert(start.pItems.pItem.pDual.pOne1 is null);

p_tree_delete(start);
}
20
spec/test_tree_delete_token_node_memory.c
Normal file
@ -0,0 +1,20 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "ab";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(context));
Start * start = p_result(context);
assert(start->a != NULL);
assert(*start->a->pvalue == 1);
assert(start->b != NULL);
assert(*start->b->pvalue == 2);

p_tree_delete(start);
p_context_delete(context);
}
@ -6,14 +6,17 @@
int main()
{
char const * input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);

assert_eq(TOKEN_a, start->first->pToken->token);
assert_eq(TOKEN_b, start->second->pToken->token);
assert_eq(TOKEN_c, start->third->pToken->token);

p_tree_delete(start);
p_context_delete(context);

return 0;
}
@ -10,12 +10,13 @@ int main()
unittest
{
string input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);

assert_eq(TOKEN_a, start.first.pToken.token);
assert_eq(TOKEN_b, start.second.pToken.token);
assert_eq(TOKEN_c, start.third.pToken.token);

p_tree_delete(start);
}
114
spec/test_tree_invalid_positions.c
Normal file
@ -0,0 +1,114 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "\na\n bb ccc";
p_context_t * context;
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);

assert_eq(2, start->pT1->pToken->position.row);
assert_eq(1, start->pT1->pToken->position.col);
assert_eq(2, start->pT1->pToken->end_position.row);
assert_eq(1, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(3, start->pT1->pA->position.row);
assert_eq(3, start->pT1->pA->position.col);
assert_eq(3, start->pT1->pA->end_position.row);
assert_eq(8, start->pT1->pA->end_position.col);
assert_eq(2, start->pT1->position.row);
assert_eq(1, start->pT1->position.col);
assert_eq(3, start->pT1->end_position.row);
assert_eq(8, start->pT1->end_position.col);

assert_eq(2, start->position.row);
assert_eq(1, start->position.col);
assert_eq(3, start->end_position.row);
assert_eq(8, start->end_position.col);

p_tree_delete(start);
p_context_delete(context);

input = "a\nbb";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start->pT1->pToken->position.row);
assert_eq(1, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(1, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(2, start->pT1->pA->position.row);
assert_eq(1, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(2, start->pT1->pA->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(1, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(2, start->pT1->end_position.col);

assert_eq(1, start->position.row);
assert_eq(1, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(2, start->end_position.col);

p_tree_delete(start);
p_context_delete(context);

input = "a\nc\nc";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start->pT1->pToken->position.row);
assert_eq(1, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(1, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(2, start->pT1->pA->position.row);
assert_eq(1, start->pT1->pA->position.col);
assert_eq(3, start->pT1->pA->end_position.row);
assert_eq(1, start->pT1->pA->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(1, start->pT1->position.col);
assert_eq(3, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);

assert_eq(1, start->position.row);
assert_eq(1, start->position.col);
assert_eq(3, start->end_position.row);
assert_eq(1, start->end_position.col);

p_tree_delete(start);
p_context_delete(context);

input = "a";
context = p_context_new((uint8_t const *)input, strlen(input));
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start->pT1->pToken->position.row);
assert_eq(1, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(1, start->pT1->pToken->end_position.col);
assert(!p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->position.row);
assert_eq(1, start->pT1->position.col);
assert_eq(1, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);

assert_eq(1, start->position.row);
assert_eq(1, start->position.col);
assert_eq(1, start->end_position.row);
assert_eq(1, start->end_position.col);

p_tree_delete(start);
p_context_delete(context);

return 0;
}
111
spec/test_tree_invalid_positions.d
Normal file
@ -0,0 +1,111 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "\na\n bb ccc";
p_context_t * context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
Start * start = p_result(context);

assert_eq(2, start.pT1.pToken.position.row);
assert_eq(1, start.pT1.pToken.position.col);
assert_eq(2, start.pT1.pToken.end_position.row);
assert_eq(1, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(3, start.pT1.pA.position.row);
assert_eq(3, start.pT1.pA.position.col);
assert_eq(3, start.pT1.pA.end_position.row);
assert_eq(8, start.pT1.pA.end_position.col);
assert_eq(2, start.pT1.position.row);
assert_eq(1, start.pT1.position.col);
assert_eq(3, start.pT1.end_position.row);
assert_eq(8, start.pT1.end_position.col);

assert_eq(2, start.position.row);
assert_eq(1, start.position.col);
assert_eq(3, start.end_position.row);
assert_eq(8, start.end_position.col);

p_tree_delete(start);

input = "a\nbb";
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start.pT1.pToken.position.row);
assert_eq(1, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(1, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(2, start.pT1.pA.position.row);
assert_eq(1, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(2, start.pT1.pA.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(1, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(2, start.pT1.end_position.col);

assert_eq(1, start.position.row);
assert_eq(1, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(2, start.end_position.col);

p_tree_delete(start);

input = "a\nc\nc";
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start.pT1.pToken.position.row);
assert_eq(1, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(1, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(2, start.pT1.pA.position.row);
assert_eq(1, start.pT1.pA.position.col);
assert_eq(3, start.pT1.pA.end_position.row);
assert_eq(1, start.pT1.pA.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(1, start.pT1.position.col);
assert_eq(3, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);

assert_eq(1, start.position.row);
assert_eq(1, start.position.col);
assert_eq(3, start.end_position.row);
assert_eq(1, start.end_position.col);

p_tree_delete(start);

input = "a";
context = p_context_new(input);
assert(p_parse(context) == P_SUCCESS);
start = p_result(context);

assert_eq(1, start.pT1.pToken.position.row);
assert_eq(1, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(1, start.pT1.pToken.end_position.col);
assert(!start.pT1.pA.position.valid);
assert_eq(1, start.pT1.position.row);
assert_eq(1, start.pT1.position.col);
assert_eq(1, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);

assert_eq(1, start.position.row);
assert_eq(1, start.position.col);
assert_eq(1, start.end_position.row);
assert_eq(1, start.end_position.col);

p_tree_delete(start);
}
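The invalid-position tests above pin down two behaviors: positions are 1-based (the first input character is row 1, column 1), and a node for an optional rule that matched no input carries an invalid position, detected with `p_position_valid()` in C or the `.valid` property in D. A minimal sketch, not part of the diff, mirroring the `"a"` case of spec/test_tree_invalid_positions.c:

```c
/* Minimal sketch: distinguishing an optional node that matched nothing.
 * Assumes the grammar from the tests above, which is not shown here. */
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "a";
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));
    assert(p_parse(context) == P_SUCCESS);
    Start * start = p_result(context);
    if (!p_position_valid(start->pT1->pA->position))
    {
        puts("optional A matched nothing");  /* its position fields carry no meaning */
    }
    p_tree_delete(start);
    p_context_delete(context);
    return 0;
}
```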
419
spec/test_tree_node_memory_remains.c
Normal file
@ -0,0 +1,419 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include "testutils.h"

int main(int argc, char * argv[])
{
const char * input =
"# 0\n"
"def byte_val() -> byte\n"
"{\n"
" return 0x42;\n"
"}\n"
"\n"
"# 1\n"
"def short_val() -> short\n"
"{\n"
" return 0x4242;\n"
"}\n"
"\n"
"# 2\n"
"def int_val() -> int\n"
"{\n"
" return 0x42424242;\n"
"}\n"
"\n"
"# 3\n"
"def long_val() -> long\n"
"{\n"
" return 0x4242_4242_4242_4242;\n"
"}\n"
"\n"
"# 4\n"
"def ssize_t_val() -> ssize_t\n"
"{\n"
" return 0x42424242;\n"
"}\n"
"\n"
"# 5\n"
"def byte_to_short() -> short\n"
"{\n"
" return byte_val();\n"
"}\n"
"\n"
"# 6\n"
"def byte_to_int() -> int\n"
"{\n"
" return byte_val();\n"
"}\n"
"\n"
"# 7\n"
"def byte_to_long() -> long\n"
"{\n"
" return byte_val();\n"
"}\n"
"\n"
"# 8\n"
"def byte_to_ssize_t() -> ssize_t\n"
"{\n"
" return byte_val();\n"
"}\n"
"\n"
"# 9\n"
"def short_to_byte() -> byte\n"
"{\n"
" return short_val();\n"
"}\n"
"\n"
"# 10\n"
"def short_to_int() -> int\n"
"{\n"
" return short_val();\n"
"}\n"
"\n"
"# 11\n"
"def short_to_long() -> long\n"
"{\n"
" return short_val();\n"
"}\n"
"\n"
"# 12\n"
"def short_to_ssize_t() -> ssize_t\n"
"{\n"
" return short_val();\n"
"}\n"
"\n"
"# 13\n"
"def int_to_byte() -> byte\n"
"{\n"
" return int_val();\n"
"}\n"
"\n"
"# 14\n"
"def int_to_short() -> short\n"
"{\n"
" return int_val();\n"
"}\n"
"\n"
"# 15\n"
"def int_to_long() -> long\n"
"{\n"
" return int_val();\n"
"}\n"
"\n"
"# 16\n"
"def int_to_ssize_t() -> ssize_t\n"
"{\n"
" return int_val();\n"
"}\n"
"\n"
"# 17\n"
"def long_to_byte() -> byte\n"
"{\n"
" return long_val();\n"
"}\n"
"\n"
"# 18\n"
"def long_to_short() -> short\n"
"{\n"
" return long_val();\n"
"}\n"
"\n"
"# 19\n"
"def long_to_int() -> int\n"
"{\n"
" return long_val();\n"
"}\n"
"\n"
"# 20\n"
"def long_to_ssize_t() -> ssize_t\n"
"{\n"
" return long_val();\n"
"}\n"
"\n"
"# 21\n"
"def ssize_t_to_byte() -> byte\n"
"{\n"
" return ssize_t_val();\n"
"}\n"
"\n"
"# 22\n"
"def ssize_t_to_short() -> short\n"
"{\n"
" return ssize_t_val();\n"
"}\n"
"\n"
"# 23\n"
"def ssize_t_to_int() -> int\n"
"{\n"
" return ssize_t_val();\n"
"}\n"
"\n"
"# 24\n"
"def ssize_t_to_long() -> long\n"
"{\n"
" return ssize_t_val();\n"
"}\n"
"\n"
"# 25\n"
"def ubyte_val() -> ubyte\n"
"{\n"
" return 0x42;\n"
"}\n"
"\n"
"# 26\n"
"def ushort_val() -> ushort\n"
"{\n"
" return 0x4242;\n"
"}\n"
"\n"
"# 27\n"
"def uint_val() -> uint\n"
"{\n"
" return 0x42424242;\n"
"}\n"
"\n"
"# 28\n"
"def ulong_val() -> ulong\n"
"{\n"
" return 0x4242_4242_4242_4242;\n"
"}\n"
"\n"
"# 29\n"
"def size_t_val() -> size_t\n"
"{\n"
" return 0x42424242;\n"
"}\n"
"\n"
"# 30\n"
"def ubyte_to_ushort() -> ushort\n"
"{\n"
" return ubyte_val();\n"
"}\n"
"\n"
"# 31\n"
"def ubyte_to_uint() -> uint\n"
"{\n"
" return ubyte_val();\n"
"}\n"
"\n"
"# 32\n"
"def ubyte_to_ulong() -> ulong\n"
"{\n"
" return ubyte_val();\n"
"}\n"
"\n"
"# 33\n"
"def ubyte_to_size_t() -> size_t\n"
"{\n"
" return ubyte_val();\n"
"}\n"
"\n"
"# 34\n"
"def ushort_to_ubyte() -> ubyte\n"
"{\n"
" return ushort_val();\n"
"}\n"
"\n"
"# 35\n"
"def ushort_to_uint() -> uint\n"
"{\n"
" return ushort_val();\n"
"}\n"
"\n"
"# 36\n"
"def ushort_to_ulong() -> ulong\n"
"{\n"
" return ushort_val();\n"
"}\n"
"\n"
"# 37\n"
"def ushort_to_size_t() -> size_t\n"
"{\n"
" return ushort_val();\n"
"}\n"
"\n"
"# 38\n"
"def uint_to_ubyte() -> ubyte\n"
"{\n"
" return uint_val();\n"
"}\n"
"\n"
"# 39\n"
"def uint_to_ushort() -> ushort\n"
"{\n"
" return uint_val();\n"
"}\n"
"\n"
"# 40\n"
"def uint_to_ulong() -> ulong\n"
"{\n"
" return uint_val();\n"
"}\n"
"\n"
"# 41\n"
"def uint_to_size_t() -> size_t\n"
"{\n"
" return uint_val();\n"
"}\n"
"\n"
"# 42\n"
"def ulong_to_ubyte() -> ubyte\n"
"{\n"
" return ulong_val();\n"
"}\n"
"\n"
"# 43\n"
"def ulong_to_ushort() -> ushort\n"
"{\n"
" return ulong_val();\n"
"}\n"
"\n"
"# 44\n"
"def ulong_to_uint() -> uint\n"
"{\n"
" return ulong_val();\n"
"}\n"
"\n"
"# 45\n"
"def ulong_to_size_t() -> size_t\n"
"{\n"
" return ulong_val();\n"
"}\n"
"\n"
"# 46\n"
"def size_t_to_ubyte() -> ubyte\n"
"{\n"
" return size_t_val();\n"
"}\n"
"\n"
"# 47\n"
"def size_t_to_ushort() -> ushort\n"
"{\n"
" return size_t_val();\n"
"}\n"
"\n"
"# 48\n"
"def size_t_to_int() -> int\n"
"{\n"
" return size_t_val();\n"
"}\n"
"\n"
"# 49\n"
"def size_t_to_ulong() -> ulong\n"
"{\n"
" return size_t_val();\n"
"}\n"
"\n"
"# 50\n"
"def main() -> int\n"
"{\n"
" return int_val();\n"
"}\n";
struct
{
const char * name;
p_token_t token;
} expected[] = {
{"byte_val", TOKEN_byte},
{"short_val", TOKEN_short},
{"int_val", TOKEN_int},
{"long_val", TOKEN_long},
{"ssize_t_val", TOKEN_ssize_t},
{"byte_to_short", TOKEN_short},
{"byte_to_int", TOKEN_int},
{"byte_to_long", TOKEN_long},
{"byte_to_ssize_t", TOKEN_ssize_t},
{"short_to_byte", TOKEN_byte},
{"short_to_int", TOKEN_int},
{"short_to_long", TOKEN_long},
{"short_to_ssize_t", TOKEN_ssize_t},
{"int_to_byte", TOKEN_byte},
{"int_to_short", TOKEN_short},
{"int_to_long", TOKEN_long},
{"int_to_ssize_t", TOKEN_ssize_t},
{"long_to_byte", TOKEN_byte},
{"long_to_short", TOKEN_short},
{"long_to_int", TOKEN_int},
{"long_to_ssize_t", TOKEN_ssize_t},
{"ssize_t_to_byte", TOKEN_byte},
{"ssize_t_to_short", TOKEN_short},
{"ssize_t_to_int", TOKEN_int},
{"ssize_t_to_long", TOKEN_long},
{"ubyte_val", TOKEN_ubyte},
{"ushort_val", TOKEN_ushort},
{"uint_val", TOKEN_uint},
{"ulong_val", TOKEN_ulong},
{"size_t_val", TOKEN_size_t},
{"ubyte_to_ushort", TOKEN_ushort},
{"ubyte_to_uint", TOKEN_uint},
{"ubyte_to_ulong", TOKEN_ulong},
{"ubyte_to_size_t", TOKEN_size_t},
{"ushort_to_ubyte", TOKEN_ubyte},
{"ushort_to_uint", TOKEN_uint},
{"ushort_to_ulong", TOKEN_ulong},
{"ushort_to_size_t", TOKEN_size_t},
{"uint_to_ubyte", TOKEN_ubyte},
{"uint_to_ushort", TOKEN_ushort},
{"uint_to_ulong", TOKEN_ulong},
{"uint_to_size_t", TOKEN_size_t},
{"ulong_to_ubyte", TOKEN_ubyte},
{"ulong_to_ushort", TOKEN_ushort},
{"ulong_to_uint", TOKEN_uint},
{"ulong_to_size_t", TOKEN_size_t},
{"size_t_to_ubyte", TOKEN_ubyte},
{"size_t_to_ushort", TOKEN_ushort},
{"size_t_to_int", TOKEN_int},
{"size_t_to_ulong", TOKEN_ulong},
{"main", TOKEN_int},
};
p_context_t * context;
context = p_context_new((const uint8_t *)input, strlen(input));
size_t result = p_parse(context);
assert_eq(P_SUCCESS, result);
PModule * pmod = p_result(context);
PModuleItems * pmis = pmod->pModuleItems;
PFunctionDefinition ** pfds;
size_t n_pfds = 0u;
while (pmis != NULL)
{
PModuleItem * pmi = pmis->pModuleItem;
if (pmi->pFunctionDefinition != NULL)
{
n_pfds++;
}
pmis = pmis->pModuleItems;
}
pfds = (PFunctionDefinition **)malloc(n_pfds * sizeof(PModuleItems *));
pmis = pmod->pModuleItems;
size_t pfd_i = n_pfds;
while (pmis != NULL)
{
PModuleItem * pmi = pmis->pModuleItem;
PFunctionDefinition * pfd = pmi->pFunctionDefinition;
if (pfd != NULL)
{
pfd_i--;
assert(pfd_i < n_pfds);
pfds[pfd_i] = pfd;
}
pmis = pmis->pModuleItems;
}
assert_eq(51, n_pfds);
for (size_t i = 0; i < n_pfds; i++)
{
if (strncmp(expected[i].name, (const char *)pfds[i]->name->pvalue.s, strlen(expected[i].name)) != 0 ||
(expected[i].token != pfds[i]->returntype->pType->pTypeBase->pToken1->token))
{
fprintf(stderr, "Index %lu: expected %s/%u, got %u\n", i, expected[i].name, expected[i].token, pfds[i]->returntype->pType->pTypeBase->pToken1->token);
}
}

free(pfds);
p_tree_delete(pmod);
p_context_delete(context);

return 0;
}
409
spec/test_tree_node_memory_remains.d
Normal file
@ -0,0 +1,409 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "
# 0
def byte_val() -> byte
{
return 0x42;
}

# 1
def short_val() -> short
{
return 0x4242;
}

# 2
def int_val() -> int
{
return 0x42424242;
}

# 3
def long_val() -> long
{
return 0x4242_4242_4242_4242;
}

# 4
def ssize_t_val() -> ssize_t
{
return 0x42424242;
}

# 5
def byte_to_short() -> short
{
return byte_val();
}

# 6
def byte_to_int() -> int
{
return byte_val();
}

# 7
def byte_to_long() -> long
{
return byte_val();
}

# 8
def byte_to_ssize_t() -> ssize_t
{
return byte_val();
}

# 9
def short_to_byte() -> byte
{
return short_val();
}

# 10
def short_to_int() -> int
{
return short_val();
}

# 11
def short_to_long() -> long
{
return short_val();
}

# 12
def short_to_ssize_t() -> ssize_t
{
return short_val();
}

# 13
def int_to_byte() -> byte
{
return int_val();
}

# 14
def int_to_short() -> short
{
return int_val();
}

# 15
def int_to_long() -> long
{
return int_val();
}

# 16
def int_to_ssize_t() -> ssize_t
{
return int_val();
}

# 17
def long_to_byte() -> byte
{
return long_val();
}

# 18
def long_to_short() -> short
{
return long_val();
}

# 19
def long_to_int() -> int
{
return long_val();
}

# 20
def long_to_ssize_t() -> ssize_t
{
return long_val();
}

# 21
def ssize_t_to_byte() -> byte
{
return ssize_t_val();
}

# 22
def ssize_t_to_short() -> short
{
return ssize_t_val();
}

# 23
def ssize_t_to_int() -> int
{
return ssize_t_val();
}

# 24
def ssize_t_to_long() -> long
{
return ssize_t_val();
}

# 25
def ubyte_val() -> ubyte
{
return 0x42;
}

# 26
def ushort_val() -> ushort
{
return 0x4242;
}

# 27
def uint_val() -> uint
{
return 0x42424242;
}

# 28
def ulong_val() -> ulong
{
return 0x4242_4242_4242_4242;
}

# 29
def size_t_val() -> size_t
{
return 0x42424242;
}

# 30
def ubyte_to_ushort() -> ushort
{
return ubyte_val();
}

# 31
def ubyte_to_uint() -> uint
{
return ubyte_val();
}

# 32
def ubyte_to_ulong() -> ulong
{
return ubyte_val();
}

# 33
def ubyte_to_size_t() -> size_t
{
return ubyte_val();
}

# 34
def ushort_to_ubyte() -> ubyte
{
return ushort_val();
}

# 35
def ushort_to_uint() -> uint
{
return ushort_val();
}

# 36
def ushort_to_ulong() -> ulong
{
return ushort_val();
}

# 37
def ushort_to_size_t() -> size_t
{
return ushort_val();
}

# 38
def uint_to_ubyte() -> ubyte
{
return uint_val();
}

# 39
def uint_to_ushort() -> ushort
{
return uint_val();
}

# 40
def uint_to_ulong() -> ulong
{
return uint_val();
}

# 41
def uint_to_size_t() -> size_t
{
return uint_val();
}

# 42
def ulong_to_ubyte() -> ubyte
{
return ulong_val();
}

# 43
def ulong_to_ushort() -> ushort
{
return ulong_val();
}

# 44
def ulong_to_uint() -> uint
{
return ulong_val();
}

# 45
def ulong_to_size_t() -> size_t
{
return ulong_val();
}

# 46
def size_t_to_ubyte() -> ubyte
{
return size_t_val();
}

# 47
def size_t_to_ushort() -> ushort
{
return size_t_val();
}

# 48
def size_t_to_int() -> int
{
return size_t_val();
}

# 49
def size_t_to_ulong() -> ulong
{
return size_t_val();
}

# 50
def main() -> int
{
return int_val();
}
";
struct Expected
{
string name;
p_token_t token;
}
Expected[] expected = [
Expected("byte_val", TOKEN_byte),
Expected("short_val", TOKEN_short),
Expected("int_val", TOKEN_int),
Expected("long_val", TOKEN_long),
Expected("ssize_t_val", TOKEN_ssize_t),
Expected("byte_to_short", TOKEN_short),
Expected("byte_to_int", TOKEN_int),
Expected("byte_to_long", TOKEN_long),
Expected("byte_to_ssize_t", TOKEN_ssize_t),
Expected("short_to_byte", TOKEN_byte),
Expected("short_to_int", TOKEN_int),
Expected("short_to_long", TOKEN_long),
Expected("short_to_ssize_t", TOKEN_ssize_t),
Expected("int_to_byte", TOKEN_byte),
Expected("int_to_short", TOKEN_short),
Expected("int_to_long", TOKEN_long),
Expected("int_to_ssize_t", TOKEN_ssize_t),
Expected("long_to_byte", TOKEN_byte),
Expected("long_to_short", TOKEN_short),
Expected("long_to_int", TOKEN_int),
Expected("long_to_ssize_t", TOKEN_ssize_t),
Expected("ssize_t_to_byte", TOKEN_byte),
Expected("ssize_t_to_short", TOKEN_short),
Expected("ssize_t_to_int", TOKEN_int),
Expected("ssize_t_to_long", TOKEN_long),
Expected("ubyte_val", TOKEN_ubyte),
Expected("ushort_val", TOKEN_ushort),
Expected("uint_val", TOKEN_uint),
Expected("ulong_val", TOKEN_ulong),
Expected("size_t_val", TOKEN_size_t),
Expected("ubyte_to_ushort", TOKEN_ushort),
Expected("ubyte_to_uint", TOKEN_uint),
Expected("ubyte_to_ulong", TOKEN_ulong),
Expected("ubyte_to_size_t", TOKEN_size_t),
Expected("ushort_to_ubyte", TOKEN_ubyte),
Expected("ushort_to_uint", TOKEN_uint),
Expected("ushort_to_ulong", TOKEN_ulong),
Expected("ushort_to_size_t", TOKEN_size_t),
Expected("uint_to_ubyte", TOKEN_ubyte),
Expected("uint_to_ushort", TOKEN_ushort),
Expected("uint_to_ulong", TOKEN_ulong),
Expected("uint_to_size_t", TOKEN_size_t),
Expected("ulong_to_ubyte", TOKEN_ubyte),
Expected("ulong_to_ushort", TOKEN_ushort),
Expected("ulong_to_uint", TOKEN_uint),
Expected("ulong_to_size_t", TOKEN_size_t),
Expected("size_t_to_ubyte", TOKEN_ubyte),
Expected("size_t_to_ushort", TOKEN_ushort),
Expected("size_t_to_int", TOKEN_int),
Expected("size_t_to_ulong", TOKEN_ulong),
Expected("main", TOKEN_int),
];
p_context_t * context;
context = p_context_new(input);
size_t result = p_parse(context);
assert_eq(P_SUCCESS, result);
PModule * pmod = p_result(context);
PModuleItems * pmis = pmod.pModuleItems;
PFunctionDefinition *[] pfds;
while (pmis !is null)
{
PModuleItem * pmi = pmis.pModuleItem;
if (pmi is null)
{
stderr.writeln("pmi is null!!!?");
assert(0);
}
PFunctionDefinition * pfd = pmi.pFunctionDefinition;
if (pfd !is null)
{
pfds = [pfd] ~ pfds;
}
pmis = pmis.pModuleItems;
}
assert_eq(51, pfds.length);
for (size_t i = 0; i < pfds.length; i++)
{
if ((expected[i].name != pfds[i].name.pvalue.s) ||
(expected[i].token != pfds[i].returntype.pType.pTypeBase.pToken1.token))
{
stderr.writeln("Index ", i, ": expected ", expected[i].name, "/", expected[i].token, ", got ", pfds[i].name.pvalue.s, "/", pfds[i].returntype.pType.pTypeBase.pToken1.token);
}
}
p_tree_delete(pmod);
}
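Taken together, the spec changes in this comparison follow one mechanical rewrite: a stack-allocated `p_context_t` initialized with `p_context_init()` becomes a heap context returned by `p_context_new()` and released with `p_context_delete()`, and parse trees are released with `p_tree_delete()` (with per-rule variants such as `p_tree_delete_Bs()` where secondary start rules exist). A minimal sketch of the resulting lifecycle, not part of the diff, assuming a generated parser like the ones in these tests:

```c
/* Minimal sketch of the context lifecycle after the migration shown above.
 * Before: p_context_t context;
 *         p_context_init(&context, (uint8_t const *)input, strlen(input));
 *         ...with no explicit teardown. */
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main(void)
{
    char const * input = "ab";
    p_context_t * context = p_context_new((uint8_t const *)input, strlen(input));
    assert(p_parse(context) == P_SUCCESS);
    Start * start = p_result(context);
    p_tree_delete(start);       /* release the parse tree */
    p_context_delete(context);  /* release the context */
    return 0;
}
```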
Some files were not shown because too many files have changed in this diff.