Compare commits: c24f323ff0 ... 8a393f554a (42 commits)
| SHA1 |
|---|
| 8a393f554a |
| 66f95cb6d8 |
| dbe0bf8ad0 |
| 75fb627602 |
| 7ccb4c8730 |
| 962b7125ec |
| 2df27b04fe |
| 17f1454a4f |
| b371f4b404 |
| 91f476187b |
| 54a0629e60 |
| 69aa3097c3 |
| 5486e5f138 |
| 5b243507cf |
| 25d6e3bc34 |
| 035bb2fc60 |
| 125c149750 |
| 9ef80e61d4 |
| 5b94b03b04 |
| 9d686989ec |
| 87d892d0a3 |
| 4ec57fa48d |
| 8b38ea4261 |
| 92da10e483 |
| 839174a635 |
| 659de44c31 |
| 207201d589 |
| 89bc52fd80 |
| eb9d9026fc |
| 54bb3307cd |
| 5ebcbb2d6d |
| 61ebbb4f19 |
| 1b4ca59158 |
| b02c9205c0 |
| 7344554b5f |
| 77571a3449 |
| e098b7e445 |
| 3ea344a520 |
| 530878a796 |
| 1d468b6d3c |
| 98e10d3d14 |
| 36c74e439e |

(The Author and Date columns were not captured in this view.)
.github/workflows/run-tests.yml (new file, +38 lines, vendored)

```diff
@@ -0,0 +1,38 @@
+name: Run Propane Tests
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+
+jobs:
+  test:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest]
+        ruby-version: ['3.4']
+
+    steps:
+      - name: Install dependencies (Linux)
+        if: runner.os == 'Linux'
+        run: sudo apt-get update && sudo apt-get install -y gcc gdc ldc
+
+      - name: Install dependencies (macOS)
+        if: runner.os == 'macOS'
+        run: brew install gcc ldc
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby-version }}
+
+      - name: Install dependencies
+        run: bundle install
+
+      - name: Run tests
+        run: rake all
```
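Aside: the workflow's final step assumes the repository's Rakefile defines an `all` task; the Rakefile itself is not part of this diff. A hypothetical sketch of such a task, assuming an RSpec suite (matching the `rspec` dependency in the Gemfile below):

```ruby
# Hypothetical Rakefile sketch only; the repository's real Rakefile is not
# shown in this diff. It assumes an RSpec test suite.
require "rspec/core/rake_task"

RSpec::Core::RakeTask.new(:spec)

desc "Run everything the CI workflow expects from `rake all`"
task :all => [:spec]
```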
CHANGELOG.md (+60 lines)

```diff
@@ -1,3 +1,63 @@
+## v2.3.0
+
+### New Features
+
+- Add \D, \S, \w, \W special character classes
+
+### Improvements
+
+- Include line numbers for pattern errors
+- Improve performance in a few places
+- Parallelize parser table generation on Linux hosts
+- Add github workflow to run unit tests
+
+### Fixes
+
+- Fix a couple clang warnings for C backend
+- Fix C backend not fully initializing pvalues when multiple ptypes are used with different sizes.
+- Fix some user guide examples
+
+## v2.2.1
+
+### Fixes
+
+- Fix GC issue for D backend when AST is enabled (#36)
+
+## v2.2.0
+
+### Improvements
+
+- Allow multiple lexer modes to be specified for a lexer pattern (#35)
+- Document p_decode_code_point() API function (#34)
+
+## v2.1.1
+
+### Fixes
+
+- Field aliases for AST node fields could alias incorrect field when multiple rule alternatives present for one rule set (#33)
+
+## v2.1.0
+
+### Improvements
+
+- Report rule name and line number for conflicting AST node field positions errors (#32)
+
+## v2.0.0
+
+### Improvements
+
+- Log conflicting rules on reduce/reduce conflict (#31)
+- Use 1-based row and column values for position values (#30)
+
+### Fixes
+
+- Fix named optional rules (#29)
+
+### Upgrading
+
+- Adjust all uses of p_position_t row and col values to expect 1-based instead
+  of 0-based values.
+
 ## v1.5.1

 ### Improvements
```
Gemfile (+1 line)

```diff
@@ -1,5 +1,6 @@
 source "https://rubygems.org"

+gem "base64"
 gem "rake"
 gem "rspec"
 gem "rdoc"
```
Gemfile.lock (44 lines changed)

```diff
@@ -1,40 +1,46 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    diff-lcs (1.5.0)
-    docile (1.4.0)
-    psych (5.1.0)
+    base64 (0.3.0)
+    date (3.4.1)
+    diff-lcs (1.6.2)
+    docile (1.4.1)
+    erb (5.0.2)
+    psych (5.2.6)
+      date
       stringio
-    rake (13.0.6)
-    rdoc (6.5.0)
+    rake (13.3.0)
+    rdoc (6.14.2)
+      erb
       psych (>= 4.0.0)
-    redcarpet (3.6.0)
-    rspec (3.12.0)
-      rspec-core (~> 3.12.0)
-      rspec-expectations (~> 3.12.0)
-      rspec-mocks (~> 3.12.0)
-    rspec-core (3.12.2)
-      rspec-support (~> 3.12.0)
-    rspec-expectations (3.12.3)
+    redcarpet (3.6.1)
+    rspec (3.13.1)
+      rspec-core (~> 3.13.0)
+      rspec-expectations (~> 3.13.0)
+      rspec-mocks (~> 3.13.0)
+    rspec-core (3.13.5)
+      rspec-support (~> 3.13.0)
+    rspec-expectations (3.13.5)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-mocks (3.12.6)
+      rspec-support (~> 3.13.0)
+    rspec-mocks (3.13.5)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.12.0)
-    rspec-support (3.12.1)
+      rspec-support (~> 3.13.0)
+    rspec-support (3.13.4)
     simplecov (0.22.0)
       docile (~> 1.1)
       simplecov-html (~> 0.11)
       simplecov_json_formatter (~> 0.1)
-    simplecov-html (0.12.3)
+    simplecov-html (0.13.2)
     simplecov_json_formatter (0.1.4)
-    stringio (3.0.7)
+    stringio (3.1.7)
     syntax (1.2.2)

 PLATFORMS
   ruby

 DEPENDENCIES
+  base64
   rake
   rdoc
   redcarpet
```
Generated parser template, C backend (file name not captured in this view; indentation reconstructed)

```diff
@@ -55,11 +55,14 @@ const char * <%= @grammar.prefix %>token_names[] = {
 void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length)
 {
     /* New default-initialized context structure. */
-    <%= @grammar.prefix %>context_t newcontext = {0};
+    <%= @grammar.prefix %>context_t newcontext;
+    memset(&newcontext, 0, sizeof(newcontext));

     /* Lexer initialization. */
     newcontext.input = input;
     newcontext.input_length = input_length;
+    newcontext.text_position.row = 1u;
+    newcontext.text_position.col = 1u;
     newcontext.mode = <%= @lexer.mode_id("default") %>;

     /* Copy to the user's context structure. */
@@ -342,8 +345,10 @@ static lexer_state_id_t check_lexer_transition(uint32_t current_state, uint32_t
 static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
     lexer_match_info_t * out_match_info, size_t * out_unexpected_input_length)
 {
-    lexer_match_info_t longest_match = {0};
-    lexer_match_info_t attempt_match = {0};
+    lexer_match_info_t longest_match;
+    memset(&longest_match, 0, sizeof(longest_match));
+    lexer_match_info_t attempt_match;
+    memset(&attempt_match, 0, sizeof(attempt_match));
     *out_match_info = longest_match;
     uint32_t current_state = lexer_mode_table[context->mode].state_table_offset;
     for (;;)
@@ -357,6 +362,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
         switch (result)
         {
             case P_SUCCESS:
+            {
                 lexer_state_id_t transition_state = check_lexer_transition(current_state, code_point);
                 if (transition_state != INVALID_LEXER_STATE_ID)
                 {
@@ -365,7 +371,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                     if (code_point == '\n')
                     {
                         attempt_match.delta_position.row++;
-                        attempt_match.delta_position.col = 0u;
+                        attempt_match.delta_position.col = 1u;
                     }
                     else
                     {
@@ -388,6 +394,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                     *out_unexpected_input_length = attempt_match.length + code_point_length;
                     return P_UNEXPECTED_INPUT;
                 }
+            }
                 break;

             case P_EOF:
@@ -445,7 +452,8 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
  */
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
-    <%= @grammar.prefix %>token_info_t token_info = {0};
+    <%= @grammar.prefix %>token_info_t token_info;
+    memset(&token_info, 0, sizeof(token_info));
     token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
     lexer_match_info_t match_info;
@@ -454,6 +462,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
     switch (result)
     {
         case P_SUCCESS:
+        {
             <%= @grammar.prefix %>token_t token_to_accept = match_info.accepting_state->token;
             if (match_info.accepting_state->code_id != INVALID_USER_CODE_ID)
             {
@@ -505,6 +514,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
                 token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
             }
             *out_token_info = token_info;
+        }
             return P_SUCCESS;

         case P_EOF:
@@ -567,7 +577,7 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
  *************************************************************************/

 /** Invalid position value. */
-#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0xFFFFFFFFu, 0xFFFFFFFFu}
+#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0u, 0u}

 /** Reduce ID type. */
 typedef <%= get_type_for(@parser.reduce_table.size) %> reduce_id_t;
@@ -710,17 +720,22 @@ const uint16_t r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_
 /** Parser reduce table. */
 static const reduce_t parser_reduce_table[] = {
 <% @parser.reduce_table.each do |reduce| %>
-    {<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
+    {
+        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
+        <%= reduce[:rule_id] %>u, /* Rule ID */
+        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
 <% if @grammar.ast %>
+        <%= reduce[:n_states] %>u, /* Number of states */
 <% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-    , NULL
+        NULL, /* No rule set node field index map (flat map) */
 <% else %>
-    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
 <% end %>
-    , <%= reduce[:rule].rule_set.ast_fields.size %>
-    , <%= reduce[:propagate_optional_target] %>
+        <%= reduce[:rule].rule_set.ast_fields.size %>, /* Number of AST fields */
+        <%= reduce[:propagate_optional_target] %>}, /* Propagate optional target? */
+<% else %>
+        <%= reduce[:n_states] %>u},
 <% end %>
-    },
 <% end %>
 };

@@ -992,7 +1007,8 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             state_values_stack_index(&statevalues, -1)->ast_node = reduced_parser_node;
 <% else %>
             state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
-            <%= @grammar.prefix %>value_t new_parse_result = {0};
+            <%= @grammar.prefix %>value_t new_parse_result;
+            memset(&new_parse_result, 0, sizeof(new_parse_result));
             reduced_parser_value = new_parse_result;
 <% end %>
             reduced_rule_set = INVALID_ID;
@@ -1054,7 +1070,8 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
                 reduced_parser_node = NULL;
             }
 <% else %>
-            <%= @grammar.prefix %>value_t reduced_parser_value2 = {0};
+            <%= @grammar.prefix %>value_t reduced_parser_value2;
+            memset(&reduced_parser_value2, 0, sizeof(reduced_parser_value2));
             if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
             {
                 return P_USER_TERMINATED;
```
Generated parser template, D backend (file name not captured in this view; indentation reconstructed)

```diff
@@ -8,6 +8,7 @@
 module <%= @grammar.modulename %>;
 <% end %>

+import core.memory;
 import core.stdc.stdlib : malloc;

 /**************************************************************************
@@ -65,12 +66,12 @@ public struct <%= @grammar.prefix %>position_t
     uint col;

     /** Invalid position value. */
-    enum INVALID = <%= @grammar.prefix %>position_t(0xFFFF_FFFF, 0xFFFF_FFFF);
+    enum INVALID = <%= @grammar.prefix %>position_t(0u, 0u);

     /** Return whether the position is valid. */
     public @property bool valid()
     {
-        return row != 0xFFFF_FFFFu;
+        return row != 0u;
     }
 }

@@ -235,6 +236,8 @@ public void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t *

     /* Lexer initialization. */
     newcontext.input = input;
+    newcontext.text_position.row = 1u;
+    newcontext.text_position.col = 1u;
     newcontext.mode = <%= @lexer.mode_id("default") %>;

     /* Copy to the user's context structure. */
@@ -534,7 +537,7 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                     if (code_point == '\n')
                     {
                         attempt_match.delta_position.row++;
-                        attempt_match.delta_position.col = 0u;
+                        attempt_match.delta_position.col = 1u;
                     }
                     else
                     {
@@ -873,17 +876,22 @@ immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name
 /** Parser reduce table. */
 private immutable reduce_t[] parser_reduce_table = [
 <% @parser.reduce_table.each do |reduce| %>
-    reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
+    reduce_t(
+        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
+        <%= reduce[:rule_id] %>u, /* Rule ID */
+        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
 <% if @grammar.ast %>
+        <%= reduce[:n_states] %>u, /* Number of states */
 <% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-    , null
+        null, /* No rule set node field index map (flat map) */
 <% else %>
-    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
 <% end %>
-    , <%= reduce[:rule].rule_set.ast_fields.size %>
-    , <%= reduce[:propagate_optional_target] %>
+        <%= reduce[:rule].rule_set.ast_fields.size %>, /* Number of AST fields */
+        <%= reduce[:propagate_optional_target] %>), /* Propagate optional target? */
+<% else %>
+        <%= reduce[:n_states] %>u), /* Number of states */
 <% end %>
-    ),
 <% end %>
 ];

@@ -1072,7 +1080,9 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
                 else if (parser_reduce_table[reduce_index].n_states > 0)
                 {
                     size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
-                    ASTNode * node = cast(ASTNode *)malloc(ASTNode.sizeof + n_fields * (void *).sizeof);
+                    size_t node_size = ASTNode.sizeof + n_fields * (void *).sizeof;
+                    ASTNode * node = cast(ASTNode *)malloc(node_size);
+                    GC.addRange(node, node_size);
                     node.position = <%= @grammar.prefix %>position_t.INVALID;
                     node.end_position = <%= @grammar.prefix %>position_t.INVALID;
                     foreach (i; 0..n_fields)
```
Generated parser header template, C backend (file name not captured in this view)

```diff
@@ -53,7 +53,7 @@ typedef struct
 } <%= @grammar.prefix %>position_t;

 /** Return whether the position is valid. */
-#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0xFFFFFFFFu)
+#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0u)

 /** User header code blocks. */
 <%= @grammar.code_blocks.fetch("header", "") %>
```
User guide (file name not captured in this view)

````diff
@@ -67,10 +67,10 @@ import std.math;
 ptype ulong;

 # A few basic arithmetic operators.
-token plus /\\+/;
-token times /\\*/;
-token power /\\*\\*/;
-token integer /\\d+/ <<
+token plus /\+/;
+token times /\*/;
+token power /\*\*/;
+token integer /\d+/ <<
   ulong v;
   foreach (c; match)
   {
@@ -79,10 +79,10 @@ token integer /\\d+/ <<
   }
   $$ = v;
 >>
-token lparen /\\(/;
-token rparen /\\)/;
+token lparen /\(/;
+token rparen /\)/;
 # Drop whitespace.
-drop /\\s+/;
+drop /\s+/;

 Start -> E1 << $$ = $1; >>
 E1 -> E2 << $$ = $1; >>
@@ -155,7 +155,7 @@ Example:
 ```
 ptype ulong;

-token integer /\\d+/ <<
+token integer /\d+/ <<
   ulong v;
   foreach (c; match)
   {
@@ -228,9 +228,9 @@ token two /2/;
 token comma /,/ <<
   $$ = 42;
 >>
-token lparen /\\(/;
-token rparen /\\)/;
-drop /\\s+/;
+token lparen /\(/;
+token rparen /\)/;
+drop /\s+/;

 Start -> Items;
````
````diff
@@ -432,10 +432,10 @@ A regular expression begins and ends with a `/` character.
 Example:

 ```
-/#.*$/
+/#.*/
 ```

-Regular expressions can include many special characters:
+Regular expressions can include many special characters/sequences:

 * The `.` character matches any input character other than a newline.
 * The `*` character matches any number of the previous regex element.
@@ -447,14 +447,17 @@ Regular expressions can include many special characters:
 * The `\` character escapes the following character and changes its meaning:
   * The `\a` sequence matches an ASCII bell character (0x07).
   * The `\b` sequence matches an ASCII backspace character (0x08).
-  * The `\d` sequence matches any character `0` through `9`.
+  * The `\d` sequence is shorthand for the `[0-9]` character class.
+  * The `\D` sequence matches every code point not matched by `\d`.
   * The `\f` sequence matches an ASCII form feed character (0x0C).
   * The `\n` sequence matches an ASCII new line character (0x0A).
   * The `\r` sequence matches an ASCII carriage return character (0x0D).
-  * The `\s` sequence matches a space, horizontal tab `\t`, carriage return
-    `\r`, a form feed `\f`, or a vertical tab `\v` character.
+  * The `\s` sequence is shorthand for the `[ \t\r\n\f\v]` character class.
+  * The `\S` sequence matches every code point not matched by `\s`.
   * The `\t` sequence matches an ASCII tab character (0x09).
   * The `\v` sequence matches an ASCII vertical tab character (0x0B).
+  * The `\w` sequence is shorthand for the `[a-zA-Z0-9_]` character class.
+  * The `\W` sequence matches every code point not matched by `\w`.
   * Any other character matches itself.
 * The `|` character creates an alternate match.
````
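Aside: the shorthand classes documented above follow the conventions of common regex engines, so their semantics can be sanity-checked against Ruby's own `Regexp` (this demonstrates the intended semantics, not Propane's lexer internals):

```ruby
# Each shorthand matches exactly its bracketed character-class equivalent.
raise unless "7"  =~ /\A\d\z/   # \d == [0-9]
raise unless "x"  =~ /\A\D\z/   # \D == [^0-9]
raise unless "\t" =~ /\A\s\z/   # \s == [ \t\r\n\f\v]
raise unless "."  =~ /\A\S\z/   # \S == negation of \s
raise unless "_"  =~ /\A\w\z/   # \w == [a-zA-Z0-9_]
raise unless "-"  =~ /\A\W\z/   # \W == negation of \w
```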
````diff
@@ -536,6 +539,28 @@ It also returns the `str` token now that the token is complete.
 Note that the token name `str` above could have been `string` instead - the
 namespace for token names is distinct from the namespace for lexer modes.

+Multiple modes can be specified for a token, pattern, or drop statement.
+For example, if the grammar wanted to only recognize an identifier following
+a `.` token and not other keywords, it could switch to an `identonly` mode
+when matching a `.`.
+The `ident` token pattern will be matched in either the `default` or
+`identonly` mode.
+
+```
+ptype char;
+token abc;
+token def;
+default, identonly: token ident /[a-z]+/ <<
+  $$ = match[0];
+  $mode(default);
+  return $token(ident);
+>>
+token dot /\./ <<
+  $mode(identonly);
+>>
+default, identonly: drop /\s+/;
+```
+
 ##> Specifying parser value types - the `ptype` statement

 The `ptype` statement is used to define parser value type(s).
@@ -641,13 +666,14 @@ This example uses the default start rule name of `Start`.

 A parser rule has zero or more fields on the right side of its definition.
 Each of these fields is either a token name or a rule name.
+A field can be immediately followed by a `?` character to signify that it is
+optional.
 A field can optionally be followed by a `:` and then a field alias name.
 If present, the field alias name is used to refer to the field value in user
 code blocks, or if AST mode is active, the field alias name is used as the
 field name in the generated AST node structure.
-A field can be immediately followed by a `?` character to signify that it is
-optional.
-Another example:
+An optional and named field must use the format `field?:name`.
+Example:

 ```
 token public;
@@ -655,7 +681,7 @@ token private;
 token int;
 token ident /[a-zA-Z_][a-zA-Z_0-9]*/;
 token semicolon /;/;
-IntegerDeclaration -> Visibility? int ident:name semicolon;
+IntegerDeclaration -> Visibility?:visibility int ident:name semicolon;
 Visibility -> public;
 Visibility -> private;
 ```
@@ -663,7 +689,7 @@ Visibility -> private;
 In a parser rule code block, parser values for the right side fields are
 accessible as `$1` for the first field's parser value, `$2` for the second
 field's parser value, etc...
-For the `IntegerDeclaration` rule, the third field value can also be referred
+For the `IntegerDeclaration` rule, the first field value can also be referred to as `${visibility}` and the third field value can also be referred
 to as `${name}`.
 The `$$` symbol accesses the output parser value for this rule.
 The above examples demonstrate how the parser values for the rule components
@@ -725,7 +751,7 @@ Some example uses of this functionality could be to:

 * Detect integer overflow when lexing an integer literal constant.
 * Detect and report an error as soon as possible during parsing before continuing to parse any more of the input.
-* Determine whether parsing should stop and instead be performed using a different parser version.
+* Determine whether parsing should stop and instead be retried using a different parser version.

 To terminate parsing from a lexer or parser user code block, use the
 `$terminate(code)` function, passing an integer expression argument.
````
````diff
@@ -761,10 +787,16 @@ Propane generates the following result code constants:
 * `P_EOF`: The lexer reached the end of the input string.
 * `P_USER_TERMINATED`: A parser user code block has requested to terminate the parser.

-Result codes are returned by the functions `p_decode_input()`, `p_lex()`, and `p_parse()`.
+Result codes are returned by the API functions `p_decode_code_point()`, `p_lex()`, and `p_parse()`.

 ##> Types

+### `p_code_point_t`
+
+The `p_code_point_t` type is aliased to a 32-bit unsigned integer.
+It is used to store decoded code points from the input text and perform
+lexing based on the grammar's lexer patterns.
+
 ### `p_context_t`

 Propane defines a `p_context_t` structure type.
@@ -775,8 +807,8 @@ A pointer to this instance is passed to the generated functions.

 ### `p_position_t`

-The `p_position_t` structure contains two fields `row` and `col`.
-These fields contain the 0-based row and column describing a parser position.
+The `p_position_t` structure contains two fields: `row` and `col`.
+These fields contain the 1-based row and column describing a parser position.

 For D targets, the `p_position_t` structure can be checked for validity by
 querying the `valid` property.
@@ -785,6 +817,16 @@ For C targets, the `p_position_t` structure can be checked for validity by
 calling `p_position_valid(pos)` where `pos` is a `p_position_t` structure
 instance.

+### `p_token_info_t`
+
+The `p_token_info_t` structure contains the following fields:
+
+* `position` (`p_position_t`) holds the text position of the first code point in the token.
+* `end_position` (`p_position_t`) holds the text position of the last code point in the token.
+* `length` (`size_t`) holds the number of input bytes used by the token.
+* `token` (`p_token_t`) holds the token ID of the lexed token.
+* `pvalue` (`p_value_t`) holds the parser value associated with the token.
+
 ### AST Node Types

 If AST generation mode is enabled, a structure type for each rule will be
````
````diff
@@ -895,6 +937,44 @@ p_context_t context;
 p_context_init(&context, input);
 ```

+### `p_lex`
+
+The `p_lex()` function is the main entry point to the lexer.
+It is normally called automatically by the generated parser to retrieve the
+next input token for the parser and does not need to be called by the user.
+However, the user may initialize a context and call `p_lex()` to use the
+generated lexer in a standalone mode.
+
+Example:
+
+```
+p_context_t context;
+p_context_init(&context, input, input_length);
+p_token_info_t token_info;
+size_t result = p_lex(&context, &token_info);
+switch (result)
+{
+case P_DECODE_ERROR:
+    /* UTF-8 decode error */
+    break;
+case P_UNEXPECTED_INPUT:
+    /* Input text does not match any lexer pattern. */
+    break;
+case P_USER_TERMINATED:
+    /* Lexer user code block requested to terminate the lexer. */
+    break;
+case P_SUCCESS:
+    /*
+     * token_info.position holds the text position of the first code point in the token.
+     * token_info.end_position holds the text position of the last code point in the token.
+     * token_info.length holds the number of input bytes used by the token.
+     * token_info.token holds the token ID of the lexed token.
+     * token_info.pvalue holds the parser value associated with the token.
+     */
+    break;
+}
+```
+
 ### `p_parse`

 The `p_parse()` function is the main entry point to the parser.
````
````diff
@@ -998,6 +1078,26 @@ if (p_parse(&context) == P_UNEXPECTED_TOKEN)
 }
 ```

+### `p_decode_code_point`
+
+The `p_decode_code_point()` function can be used to decode code points from a
+UTF-8 string.
+It does not require a lexer/parser context structure and can be used as a
+standalone UTF-8 decoder or from within a lexer or parser user code block.
+
+D Example:
+
+```
+size_t result;
+p_code_point_t code_point;
+ubyte code_point_length;
+
+result = p_decode_code_point("\xf0\x9f\xa7\xa1", &code_point, &code_point_length);
+assert(result == P_SUCCESS);
+assert(code_point == 0x1F9E1u);
+assert(code_point_length == 4u);
+```
+
 ##> Data

 ### `p_token_names`
````
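Aside: the D example above decodes the bytes `F0 9F A7 A1` to code point U+1F9E1. The same round trip can be cross-checked in Ruby, independent of the generated decoder:

```ruby
# U+1F9E1 encodes to four UTF-8 bytes; decoding them back yields the same
# code point the D example asserts.
bytes = [0xF0, 0x9F, 0xA7, 0xA1]
code_point = bytes.pack("C*").force_encoding("UTF-8").ord
raise unless code_point == 0x1F9E1
raise unless 0x1F9E1.chr("UTF-8").bytes == bytes
```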
Vim syntax file (file name not captured in this view)

```diff
@@ -22,7 +22,7 @@ syn match propaneFieldOperator ":" contained
 syn match propaneOperator "?"
 syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid

-syn region propaneRegex start="/" end="/" skip="\\/"
+syn region propaneRegex start="/" end="/" skip="\v\\\\|\\/"

 hi def link propaneComment Comment
 hi def link propaneKeyword Keyword
```
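Aside: the old `skip` pattern only stepped over escaped slashes, so a regex ending in an escaped backslash could hide its closing `/`; the fixed pattern also consumes `\\` as a unit. A Ruby sketch of the equivalent scanning rule (illustrative only):

```ruby
# Scanning a /.../ region: treat any backslash escape as a two-character
# unit so an escaped backslash cannot make the closing slash look escaped.
def regex_region(src)
  src[/\A\/(?:\\.|[^\/\\])*\//]
end

raise unless regex_region('/a\/b/ x') == '/a\/b/'     # escaped slash stays inside
raise unless regex_region('/a\\\\/ x') == '/a\\\\/'   # trailing escaped backslash closes cleanly
```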
Ruby source, default lexer mode assignment (file name not captured in this view)

```diff
@@ -43,8 +43,8 @@ class Propane
       # Assign default pattern mode to patterns without a mode assigned.
       found_default = false
       @grammar.patterns.each do |pattern|
-        if pattern.mode.nil?
-          pattern.mode = "default"
+        if pattern.modes.empty?
+          pattern.modes << "default"
           found_default = true
         end
         pattern.ptypename ||= "default"
```
Ruby source, grammar file parser (file name not captured in this view; indentation reconstructed)

```diff
@@ -25,7 +25,7 @@ class Propane
       @code_blocks = {}
       @line_number = 1
       @next_line_number = @line_number
-      @mode = nil
+      @modeline = nil
       @input = input.gsub("\r\n", "\n")
       @ptypes = {"default" => "void *"}
       @prefix = "p_"
@@ -58,7 +58,7 @@ class Propane
     def parse_statement!
       if parse_white_space!
       elsif parse_comment_line!
-      elsif @mode.nil? && parse_mode_label!
+      elsif @modeline.nil? && parse_mode_label!
       elsif parse_ast_statement!
       elsif parse_ast_prefix_statement!
       elsif parse_ast_suffix_statement!
@@ -81,8 +81,8 @@ class Propane
     end

     def parse_mode_label!
-      if md = consume!(/(#{IDENTIFIER_REGEX})\s*:/)
-        @mode = md[1]
+      if md = consume!(/(#{IDENTIFIER_REGEX}(?:\s*,\s*#{IDENTIFIER_REGEX})*)\s*:/)
+        @modeline = md[1]
       end
     end
@@ -117,7 +117,7 @@ class Propane
       md = consume!(/([\w.]+)\s*/, "expected module name")
       @modulename = md[1]
       consume!(/;/, "expected `;'")
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -153,9 +153,9 @@ class Propane
       end
       token = Token.new(name, ptypename, @line_number)
       @tokens << token
-      pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
+      pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
       @patterns << pattern
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -173,7 +173,7 @@ class Propane
       consume!(/;/, "expected `;'");
       token = Token.new(name, ptypename, @line_number)
       @tokens << token
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -186,8 +186,8 @@ class Propane
       end
       consume!(/\s+/)
       consume!(/;/, "expected `;'")
-      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, mode: @mode)
-      @mode = nil
+      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, modes: get_modes_from_modeline)
+      @modeline = nil
       true
     end
   end
@@ -198,7 +198,7 @@ class Propane
       if @ast && ptypename
         raise Error.new("Multiple ptypes are unsupported in AST mode")
       end
-      md = consume!(/((?:#{IDENTIFIER_REGEX}(?::#{IDENTIFIER_REGEX})?\??\s*)*)\s*/, "expected rule component list")
+      md = consume!(/((?:#{IDENTIFIER_REGEX}\??(?::#{IDENTIFIER_REGEX})?\s*)*)\s*/, "expected rule component list")
       components = md[1].strip.split(/\s+/)
       if @ast
         consume!(/;/, "expected `;'")
@@ -208,7 +208,7 @@ class Propane
         end
       end
       @rules << Rule.new(rule_name, components, code, ptypename, @line_number)
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -225,8 +225,8 @@ class Propane
       unless code = parse_code_block!
         raise Error.new("Line #{@line_number}: expected code block to follow pattern")
       end
-      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
-      @mode = nil
+      @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
+      @modeline = nil
       true
     end
   end
@@ -247,7 +247,7 @@ class Propane
       else
         @code_blocks[name] = code
       end
-      @mode = nil
+      @modeline = nil
       true
     end
   end
@@ -315,6 +315,14 @@ class Propane
       end
     end

+    def get_modes_from_modeline
+      if @modeline
+        Set[*@modeline.split(",").map(&:strip)]
+      else
+        Set.new
+      end
+    end
+
   end

 end
```
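Aside: the parser now stores the raw mode-label text in `@modeline` and converts it on demand. The behavior of the new `get_modes_from_modeline` helper, reproduced as a standalone sketch:

```ruby
require "set"

# A mode label such as "default, identonly" yields a Set of mode names;
# no label yields an empty Set, which later receives the "default" mode.
def modes_from_modeline(modeline)
  modeline ? Set[*modeline.split(",").map(&:strip)] : Set.new
end

raise unless modes_from_modeline("default, identonly") == Set["default", "identonly"]
raise unless modes_from_modeline(nil) == Set.new
```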
Ruby source, lexer table generation (file name not captured in this view)

```diff
@@ -26,8 +26,14 @@ class Propane
     private

     def build_tables!
-      @modes = @grammar.patterns.group_by do |pattern|
-        pattern.mode
+      modenames = @grammar.patterns.reduce(Set.new) do |result, pattern|
+        result + pattern.modes
+      end
+      @modes = modenames.reduce({}) do |result, modename|
+        result[modename] = @grammar.patterns.select do |pattern|
+          pattern.modes.include?(modename)
+        end
+        result
       end.transform_values do |patterns|
         {dfa: DFA.new(patterns)}
       end
```
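Aside: `group_by` places each pattern in exactly one bucket, which no longer works once a pattern can carry several modes; the replacement collects every mode name first and then selects the patterns participating in each mode. A self-contained sketch of the same transformation (using `OpenStruct` stand-ins for `Pattern`):

```ruby
require "set"
require "ostruct"

patterns = [
  OpenStruct.new(name: "ident", modes: Set["default", "identonly"]),
  OpenStruct.new(name: "dot",   modes: Set["default"]),
]

# Collect all mode names, then select per mode; "ident" lands in both.
modenames = patterns.reduce(Set.new) { |acc, p| acc + p.modes }
modes = modenames.to_h { |m| [m, patterns.select { |p| p.modes.include?(m) }] }

raise unless modes["default"].map(&:name)   == ["ident", "dot"]
raise unless modes["identonly"].map(&:name) == ["ident"]
```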
Ruby source, parser table generation (file name not captured in this view; indentation reconstructed)

```diff
@@ -14,6 +14,7 @@ class Propane
       @item_sets = []
       @item_sets_set = {}
       @warnings = Set.new
+      @errors = Set.new
       @options = options
       start_item = Item.new(grammar.rules.first, 0)
       eval_item_sets = Set[ItemSet.new([start_item])]
@@ -41,8 +42,18 @@ class Propane
       build_reduce_actions!
       build_tables!
       write_log!
+      errormessage = ""
+      if @errors.size > 0
+        errormessage += @errors.join("\n")
+      end
       if @warnings.size > 0 && @options[:warnings_as_errors]
-        raise Error.new("Fatal errors (-w):\n" + @warnings.join("\n"))
+        if errormessage != ""
+          errormessage += "\n"
+        end
+        errormessage += "Fatal errors (-w):\n" + @warnings.join("\n")
+      end
+      if errormessage != ""
+        raise Error.new(errormessage)
       end
     end
@@ -53,24 +64,13 @@ class Propane
       @shift_table = []
       @reduce_table = []
       @item_sets.each do |item_set|
-        shift_entries = item_set.next_symbols.map do |next_symbol|
-          state_id =
-            if next_symbol.name == "$EOF"
-              0
-            else
-              item_set.next_item_set[next_symbol].id
-            end
-          {
-            symbol: next_symbol,
-            state_id: state_id,
-          }
-        end
         unless item_set.reduce_rules.empty?
-          shift_entries.each do |shift_entry|
+          item_set.shift_entries.each do |shift_entry|
             token = shift_entry[:symbol]
-            if get_lookahead_reduce_actions_for_item_set(item_set).include?(token)
-              rule = item_set.reduce_actions[token]
-              @warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
+            if item_set.reduce_actions
+              if rule = item_set.reduce_actions[token]
+                @warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
+              end
             end
           end
         end
@@ -81,7 +81,7 @@ class Propane
             propagate_optional_target: rule.optional? && rule.components.size == 1}]
         elsif reduce_actions = item_set.reduce_actions
           reduce_actions.map do |token, rule|
-            {token_id: token.id, rule_id: rule.id, rule: rule,
+            {token: token, token_id: token.id, rule_id: rule.id, rule: rule,
              rule_set_id: rule.rule_set.id, n_states: rule.components.size,
              propagate_optional_target: rule.optional? && rule.components.size == 1}
           end
@@ -90,11 +90,11 @@ class Propane
         end
         @state_table << {
           shift_index: @shift_table.size,
-          n_shifts: shift_entries.size,
+          n_shifts: item_set.shift_entries.size,
           reduce_index: @reduce_table.size,
           n_reduces: reduce_entries.size,
         }
-        @shift_table += shift_entries
+        @shift_table += item_set.shift_entries
         @reduce_table += reduce_entries
       end
     end
```
```diff
@@ -114,8 +114,110 @@
     # @return [void]
     def build_reduce_actions!
       @item_sets.each do |item_set|
+        build_shift_entries(item_set)
         build_reduce_actions_for_item_set(item_set)
       end
+      item_sets_to_process = @item_sets.select do |item_set|
+        # We need lookahead reduce actions if:
+        #   1) There is more than one possible rule to reduce. In this case the
+        #      lookahead token can help choose which rule to reduce.
+        #   2) There is at least one shift action and one reduce action for
+        #      this item set. In this case the lookahead reduce actions are
+        #      needed to test for a Shift/Reduce conflict.
+        item_set.reduce_rules.size > 1 ||
+          (item_set.reduce_rules.size > 0 && item_set.shift_entries.size > 0)
+      end
+      if RbConfig::CONFIG["host_os"] =~ /linux/
+        item_sets_by_id = {}
+        item_sets_to_process.each do |item_set|
+          item_sets_by_id[item_set.object_id] = item_set
+        end
+        tokens_by_id = {}
+        @grammar.tokens.each do |token|
+          tokens_by_id[token.object_id] = token
+        end
+        rules_by_id = {}
+        @grammar.rules.each do |rule|
+          rules_by_id[rule.object_id] = rule
+        end
+        n_threads = Util.determine_n_threads
+        semaphore = Mutex.new
+        queue = Queue.new
+        threads = {}
+        n_threads.times do
+          piper, pipew = IO.pipe
+          thread = Thread.new do
+            loop do
+              item_set = nil
+              semaphore.synchronize do
+                item_set = item_sets_to_process.slice!(0)
+              end
+              break if item_set.nil?
+              fork do
+                piper.close
+                build_lookahead_reduce_actions_for_item_set(item_set, pipew)
+              end
+            end
+            queue.push(Thread.current)
+          end
+          threads[thread] = [piper, pipew]
+        end
+        until threads.empty?
+          thread = queue.pop
+          piper, pipew = threads[thread]
+          pipew.close
+          thread_txt = piper.read
+          thread_txt.each_line do |line|
+            if line.start_with?("RA,")
+              parts = line.split(",")
+              item_set_id, token_id, rule_id = parts[1..3].map(&:to_i)
+              item_set = item_sets_by_id[item_set_id]
+              unless item_set
+                raise "Internal error: could not find item set from thread"
+              end
+              token = tokens_by_id[token_id]
+              unless token
+                raise "Internal error: could not find token from thread"
+              end
+              rule = rules_by_id[rule_id]
+              unless rule
+                raise "Internal error: could not find rule from thread"
+              end
+              item_set.reduce_actions ||= {}
+              item_set.reduce_actions[token] = rule
+            elsif line.start_with?("Error: ")
+              @errors << line.chomp
+            else
+              raise "Internal error: unhandled thread line #{line}"
+            end
+          end
+          thread.join
+          threads.delete(thread)
+        end
+      else
+        # Fall back to single threaded algorithm.
+        item_sets_to_process.each do |item_set|
+          item_set.reduce_actions = build_lookahead_reduce_actions_for_item_set(item_set)
+        end
+      end
+    end
+
+    # Build the shift entries for a single item set.
+    #
+    # @return [void]
+    def build_shift_entries(item_set)
+      item_set.shift_entries = item_set.next_symbols.map do |next_symbol|
+        state_id =
+          if next_symbol.name == "$EOF"
+            0
+          else
+            item_set.next_item_set[next_symbol].id
+          end
+        {
+          symbol: next_symbol,
+          state_id: state_id,
+        }
+      end
     end

     # Build the reduce actions for a single item set (parser state).
```

(The two guards after the token and rule lookups read `unless item_set` in the captured view, which rechecks the wrong variable; they are rendered here as `unless token` and `unless rule` to match the error messages they raise.)
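Aside: the Linux-only path above fans item sets out to forked worker processes and streams results back over pipes as `RA,<item_set_id>,<token_id>,<rule_id>` lines, using `object_id` values as keys since Ruby objects cannot cross a process boundary. The core fork-and-pipe line protocol, reduced to a single worker:

```ruby
# Minimal sketch of the IPC pattern: the child writes one comma-separated
# record per result and exits; the parent closes its write end and reads
# until EOF. The real code runs Util.determine_n_threads workers fed from
# a shared, mutex-guarded queue of item sets.
piper, pipew = IO.pipe
pid = fork do
  piper.close
  pipew.puts "RA,101,202,303"  # item_set_id, token_id, rule_id
end
pipew.close
piper.read.each_line do |line|
  tag, item_set_id, token_id, rule_id = line.chomp.split(",")
  raise unless tag == "RA"
  raise unless [item_set_id, token_id, rule_id] == %w[101 202 303]
end
Process.wait(pid)
```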
```diff
@@ -134,33 +236,18 @@
       if item_set.reduce_rules.size == 1
         item_set.reduce_rule = item_set.reduce_rules.first
       end
-
-      if item_set.reduce_rules.size > 1
-        # Force item_set.reduce_actions to be built to store the lookahead
-        # tokens for the possible reduce rules if there is more than one.
-        get_lookahead_reduce_actions_for_item_set(item_set)
-      end
-    end
-
-    # Get the reduce actions for a single item set (parser state).
-    #
-    # @param item_set [ItemSet]
-    #   ItemSet (parser state)
-    #
-    # @return [Hash]
-    #   Mapping of lookahead Tokens to the Rules to reduce.
-    def get_lookahead_reduce_actions_for_item_set(item_set)
-      item_set.reduce_actions ||= build_lookahead_reduce_actions_for_item_set(item_set)
     end

     # Build the reduce actions for a single item set (parser state).
     #
     # @param item_set [ItemSet]
     #   ItemSet (parser state)
+    # @param fh [File]
+    #   Output file handle for multiprocessing mode.
     #
     # @return [Hash]
     #   Mapping of lookahead Tokens to the Rules to reduce.
-    def build_lookahead_reduce_actions_for_item_set(item_set)
+    def build_lookahead_reduce_actions_for_item_set(item_set, fh = nil)
       # We will be looking for all possible tokens that can follow instances of
       # these rules. Rather than looking through the entire grammar for the
       # possible following tokens, we will only look in the item sets leading
@@ -171,9 +258,12 @@
         lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
         lookahead_tokens_for_rule.each do |lookahead_token|
           if existing_reduce_rule = reduce_actions[lookahead_token]
-            raise Error.new("Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number})")
+            error = "Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number}) for lookahead token #{lookahead_token}"
+            @errors << error
+            fh.puts(error) if fh
           end
           reduce_actions[lookahead_token] = reduce_rule
+          fh.puts "RA,#{item_set.object_id},#{lookahead_token.object_id},#{reduce_rule.object_id}" if fh
         end
         reduce_actions
       end
@@ -220,6 +310,7 @@
           rule_set = item.rule.rule_set
           unless checked_rule_sets.include?(rule_set)
             rule_sets_to_check_after << rule_set
+            checked_rule_sets << rule_set
           end
           break
         when Token
```
@@ -22,6 +22,7 @@ class Propane
 def initialize(rule, position)
   @rule = rule
   @position = position
+  @_hash = [@rule, @position].hash
 end

 # Hash function.
@@ -29,7 +30,7 @@ class Propane
 # @return [Integer]
 #   Hash code.
 def hash
-  [@rule, @position].hash
+  @_hash
 end

 # Compare Item objects.
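`Item` objects are created once and then used heavily as Hash keys while item sets are built, so precomputing the hash in the constructor avoids rebuilding and hashing the `[@rule, @position]` array on every lookup. The same pattern in isolation (a sketch, not Propane's actual class):

```ruby
# Sketch: cache the hash of an immutable value object used as a Hash key.
class CachedKey
  attr_reader :a, :b

  def initialize(a, b)
    @a = a
    @b = b
    @_hash = [a, b].hash # safe to compute once; the object never mutates
  end

  def hash
    @_hash
  end

  def eql?(other)
    other.is_a?(CachedKey) && a == other.a && b == other.b
  end
end
```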
@@ -34,6 +34,10 @@ class Propane
 #   more than one rule that could be reduced.
 attr_accessor :reduce_actions

+# @return [Array<Hash>]
+#   Shift table entries.
+attr_accessor :shift_entries
+
 # Build an ItemSet.
 #
 # @param items [Array<Item>]
@@ -26,9 +26,9 @@ class Propane
 #   Regex NFA for matching the pattern.
 attr_reader :nfa

-# @return [String, nil]
-#   Lexer mode for this pattern.
-attr_accessor :mode
+# @return [Set]
+#   Lexer modes for this pattern.
+attr_accessor :modes

 # @return [String, nil]
 #   Parser value type name.
@@ -46,16 +46,16 @@ class Propane
 #   Token to be returned by this pattern.
 # @option options [Integer, nil] :line_number
 #   Line number where the token was defined in the input grammar.
-# @option options [String, nil] :mode
-#   Lexer mode for this pattern.
+# @option options [String, nil] :modes
+#   Lexer modes for this pattern.
 def initialize(options)
   @code = options[:code]
   @pattern = options[:pattern]
   @token = options[:token]
   @line_number = options[:line_number]
-  @mode = options[:mode]
+  @modes = options[:modes]
   @ptypename = options[:ptypename]
-  regex = Regex.new(@pattern)
+  regex = Regex.new(@pattern, @line_number)
   regex.nfa.end_state.accepts = self
   @nfa = regex.nfa
 end
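This is the storage side of the multiple-lexer-modes feature (#35): a pattern now carries a `Set` of mode names rather than a single optional string, so a grammar line such as `default, identonly: token ident /[a-z]+/;` (exercised by the specs later in this diff) can register one pattern under several modes at once.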
@@ -4,12 +4,13 @@ class Propane
 attr_reader :unit
 attr_reader :nfa

-def initialize(pattern)
+def initialize(pattern, line_number)
   @pattern = pattern.dup
+  @line_number = line_number
   @unit = parse_alternates
   @nfa = @unit.to_nfa
   if @pattern != ""
-    raise Error.new(%[Unexpected "#{@pattern}" in pattern])
+    raise Error.new(%[Line #{@line_number}: unexpected "#{@pattern}" in pattern])
   end
 end

@@ -41,7 +42,7 @@ class Propane
   mu = MultiplicityUnit.new(last_unit, min_count, max_count)
   au.replace_last!(mu)
 else
-  raise Error.new("#{c} follows nothing")
+  raise Error.new("Line #{@line_number}: #{c} follows nothing")
 end
 when "|"
   au.new_alternate!
@@ -59,7 +60,7 @@ class Propane
 def parse_group
   au = parse_alternates
   if @pattern[0] != ")"
-    raise Error.new("Unterminated group in pattern")
+    raise Error.new("Line #{@line_number}: unterminated group in pattern")
   end
   @pattern.slice!(0)
   au
@@ -70,7 +71,7 @@ class Propane
   index = 0
   loop do
     if @pattern == ""
-      raise Error.new("Unterminated character class")
+      raise Error.new("Line #{@line_number}: unterminated character class")
     end
     c = @pattern.slice!(0)
     if c == "]"
@@ -84,13 +85,13 @@ class Propane
 elsif c == "-" && @pattern[0] != "]"
   begin_cu = ccu.last_unit
   unless begin_cu.is_a?(CharacterRangeUnit) && begin_cu.code_point_range.size == 1
-    raise Error.new("Character range must be between single characters")
+    raise Error.new("Line #{@line_number}: character range must be between single characters")
   end
   if @pattern[0] == "\\"
     @pattern.slice!(0)
     end_cu = parse_backslash
     unless end_cu.is_a?(CharacterRangeUnit) && end_cu.code_point_range.size == 1
-      raise Error.new("Character range must be between single characters")
+      raise Error.new("Line #{@line_number}: character range must be between single characters")
     end
     max_code_point = end_cu.code_point
   else
@@ -116,7 +117,7 @@ class Propane
 elsif max_count.to_s != ""
   max_count = max_count.to_i
   if max_count < min_count
-    raise Error.new("Maximum repetition count cannot be less than minimum repetition count")
+    raise Error.new("Line #{@line_number}: maximum repetition count cannot be less than minimum repetition count")
   end
 else
   max_count = nil
@@ -124,28 +125,33 @@ class Propane
   @pattern = pattern
   [min_count, max_count]
 else
-  raise Error.new("Unexpected match count at #{@pattern}")
+  raise Error.new("Line #{@line_number}: unexpected match count following {")
 end
 end

 def parse_backslash
   if @pattern == ""
-    raise Error.new("Error: unfollowed \\")
+    raise Error.new("Line #{@line_number}: error: unfollowed \\")
   else
     c = @pattern.slice!(0)
     case c
     when "a"
-      CharacterRangeUnit.new("\a", "\a")
+      CharacterRangeUnit.new("\a")
     when "b"
-      CharacterRangeUnit.new("\b", "\b")
+      CharacterRangeUnit.new("\b")
     when "d"
       CharacterRangeUnit.new("0", "9")
+    when "D"
+      ccu = CharacterClassUnit.new
+      ccu << CharacterRangeUnit.new("0", "9")
+      ccu.negate = true
+      ccu
     when "f"
-      CharacterRangeUnit.new("\f", "\f")
+      CharacterRangeUnit.new("\f")
     when "n"
-      CharacterRangeUnit.new("\n", "\n")
+      CharacterRangeUnit.new("\n")
     when "r"
-      CharacterRangeUnit.new("\r", "\r")
+      CharacterRangeUnit.new("\r")
     when "s"
       ccu = CharacterClassUnit.new
       ccu << CharacterRangeUnit.new(" ")
@@ -155,10 +161,35 @@ class Propane
       ccu << CharacterRangeUnit.new("\f")
       ccu << CharacterRangeUnit.new("\v")
       ccu
+    when "S"
+      ccu = CharacterClassUnit.new
+      ccu << CharacterRangeUnit.new(" ")
+      ccu << CharacterRangeUnit.new("\t")
+      ccu << CharacterRangeUnit.new("\r")
+      ccu << CharacterRangeUnit.new("\n")
+      ccu << CharacterRangeUnit.new("\f")
+      ccu << CharacterRangeUnit.new("\v")
+      ccu.negate = true
+      ccu
     when "t"
-      CharacterRangeUnit.new("\t", "\t")
+      CharacterRangeUnit.new("\t")
     when "v"
-      CharacterRangeUnit.new("\v", "\v")
+      CharacterRangeUnit.new("\v")
+    when "w"
+      ccu = CharacterClassUnit.new
+      ccu << CharacterRangeUnit.new("_")
+      ccu << CharacterRangeUnit.new("0", "9")
+      ccu << CharacterRangeUnit.new("a", "z")
+      ccu << CharacterRangeUnit.new("A", "Z")
+      ccu
+    when "W"
+      ccu = CharacterClassUnit.new
+      ccu << CharacterRangeUnit.new("_")
+      ccu << CharacterRangeUnit.new("0", "9")
+      ccu << CharacterRangeUnit.new("a", "z")
+      ccu << CharacterRangeUnit.new("A", "Z")
+      ccu.negate = true
+      ccu
     else
       CharacterRangeUnit.new(c)
     end
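With these additions the supported escapes mirror common regex engines: `\d`/`\D` for `[0-9]` and its negation, `\s`/`\S` for the whitespace set `[ \t\r\n\f\v]` and its negation, and `\w`/`\W` for `[a-zA-Z0-9_]` and its negation, so a grammar can write, for example, `token word /\w+/;` instead of spelling out the class.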
@@ -92,16 +92,19 @@ class Propane
   @units = []
   @negate = false
 end
-def initialize
-  @units = []
-end
-def method_missing(*args)
-  @units.__send__(*args)
+def method_missing(*args, &block)
+  @units.__send__(*args, &block)
 end
 def <<(thing)
   if thing.is_a?(CharacterClassUnit)
-    thing.each do |ccu_unit|
-      @units << ccu_unit
+    if thing.negate
+      CodePointRange.invert_ranges(thing.map(&:code_point_range)).each do |cpr|
+        CharacterRangeUnit.new(cpr.first, cpr.last)
+      end
+    else
+      thing.each do |ccu_unit|
+        @units << ccu_unit
+      end
     end
   else
     @units << thing
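A negated class nested inside another class (e.g. `[^%\W]`) cannot simply have its members appended, so its ranges are first inverted over the code point space via `CodePointRange.invert_ranges`. A sketch of what such an inversion does, assuming sorted, non-overlapping input ranges (the real implementation may differ):

```ruby
# Sketch: invert sorted, non-overlapping code point ranges over the
# full Unicode range. Everything *not* covered by the input is returned.
def invert_ranges(ranges, max_code_point = 0x10FFFF)
  inverted = []
  next_start = 0
  ranges.each do |r|
    inverted << (next_start..(r.first - 1)) if r.first > next_start
    next_start = r.last + 1
  end
  inverted << (next_start..max_code_point) if next_start <= max_code_point
  inverted
end

invert_ranges([48..57]) # => [0..47, 58..1114111], i.e. everything but "0".."9"
```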
@@ -119,6 +119,8 @@ class Propane
 def build_ast_fields(grammar)
   field_ast_node_indexes = {}
   field_indexes_across_all_rules = {}
+  # Stores the index into @ast_fields by field alias name.
+  field_aliases = {}
   @ast_fields = []
   @rules.each do |rule|
     rule.components.each_with_index do |component, i|
@@ -136,6 +138,16 @@ class Propane
       field_ast_node_indexes[field_name] = @ast_fields.size
       @ast_fields << {field_name => struct_name}
     end
+    rule.aliases.each do |alias_name, index|
+      if index == i
+        alias_ast_fields_index = field_ast_node_indexes[field_name]
+        if field_aliases[alias_name] && field_aliases[alias_name] != alias_ast_fields_index
+          raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}` in rule #{rule.name} defined on line #{rule.line_number}")
+        end
+        field_aliases[alias_name] = alias_ast_fields_index
+        @ast_fields[alias_ast_fields_index][alias_name] = @ast_fields[alias_ast_fields_index].first[1]
+      end
+    end
     field_indexes_across_all_rules[node_name] ||= Set.new
     field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
     rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
@@ -150,18 +162,6 @@ class Propane
       "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
     end
   end
-  # Now merge in the field aliases as given by the user in the
-  # grammar.
-  field_aliases = {}
-  @rules.each do |rule|
-    rule.aliases.each do |alias_name, index|
-      if field_aliases[alias_name] && field_aliases[alias_name] != index
-        raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}`")
-      end
-      field_aliases[alias_name] = index
-      @ast_fields[index][alias_name] = @ast_fields[index].first[1]
-    end
-  end
 end

 end
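Folding the alias merge into the per-rule component loop is what fixes #33: an alias's field index is now resolved through `field_ast_node_indexes` for the specific rule alternative it appears in, and the conflict error names the rule and line. For instance (a hypothetical grammar), `Start -> a:x b;` alongside `Start -> b c:x;` would pin alias `x` to two different field positions and now gets reported with the offending rule's name and line number.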
@@ -10,6 +10,32 @@ class Propane
   "#{s}\n* #{message} *\n#{s}\n"
 end

+# Determine the number of threads to use.
+#
+# @return [Integer]
+#   The number of threads to use.
+def determine_n_threads
+  # Try to figure out how many threads are available on the host hardware.
+  begin
+    case RbConfig::CONFIG["host_os"]
+    when /linux/
+      return File.read("/proc/cpuinfo").scan(/^processor\s*:/).size
+    when /mswin|mingw|msys/
+      if `wmic cpu get NumberOfLogicalProcessors -value` =~ /NumberOfLogicalProcessors=(\d+)/
+        return $1.to_i
+      end
+    when /darwin/
+      if `sysctl -n hw.ncpu` =~ /(\d+)/
+        return $1.to_i
+      end
+    end
+  rescue
+  end
+
+  # If we can't figure it out, default to 4.
+  4
+end
+
 end

 end
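`determine_n_threads` probes the host per platform and falls back to 4 when detection fails. For reference, Ruby's standard library also exposes the logical processor count portably; whether it suited the Ruby versions targeted here is not stated in the diff, so treat this as an aside:

```ruby
require "etc"

Etc.nprocessors # => logical processor count, e.g. 8
```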
@@ -1,3 +1,3 @@
 class Propane
-  VERSION = "1.5.1"
+  VERSION = "2.3.0"
 end
151 spec/ast_node_memory_remains.c.propane Normal file
@@ -0,0 +1,151 @@
+ast;
+ast_prefix P;
+
+<<header
+#include <stdio.h>
+#include <stdint.h>
+
+typedef union
+{
+    uint64_t i64;
+    const uint8_t * s;
+    double dou;
+} TokenVal;
+>>
+
+ptype TokenVal;
+
+# Keywords.
+token byte;
+token def;
+token int;
+token long;
+token module;
+token return;
+token short;
+token size_t;
+token ssize_t;
+token ubyte;
+token uint;
+token ulong;
+token ushort;
+
+# Symbols.
+token arrow /->/;
+token comma /,/;
+token lbrace /\{/;
+token lparen /\(/;
+token rbrace /\}/;
+token rparen /\)/;
+token semicolon /;/;
+
+# Integer literals.
+token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
+    $$.i64 = 64u;
+>>
+
+# Identifier.
+token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
+    $$.s = match;
+    $mode(default);
+    return $token(ident);
+>>
+
+# Comments.
+drop /#.*/;
+
+# Whitespace.
+drop /[ \r\n]*/;
+
+start Module;
+
+# Assignment operators - right associative
+Expression -> Expression_Or:exp0;
+
+# Logical OR operator - left associative
+Expression_Or -> Expression_And:exp0;
+
+# Logical AND operator - left associative
+Expression_And -> Expression_Comp:exp0;
+
+# Equality operators - left associative
+Expression_Comp -> Expression_Relational:exp0;
+
+# Relational operators - left associative
+Expression_Relational -> Expression_REMatch:exp0;
+
+# Regular expression - left associative
+Expression_REMatch -> Expression_BinOr:exp0;
+
+# Binary OR operator - left associative
+Expression_BinOr -> Expression_Xor:exp0;
+
+# Binary XOR operator - left associative
+Expression_Xor -> Expression_BinAnd:exp0;
+
+# Binary AND operator - left associative
+Expression_BinAnd -> Expression_BitShift:exp0;
+
+# Bit shift operators - left associative
+Expression_BitShift -> Expression_Plus:exp0;
+
+# Add/subtract operators - left associative
+Expression_Plus -> Expression_Mul:exp0;
+
+# Multiplication/divide/modulus operators - left associative
+Expression_Mul -> Expression_Range:exp0;
+
+# Range construction operators - left associative
+Expression_Range -> Expression_UnaryPrefix:exp0;
+
+# Unary prefix operators
+Expression_UnaryPrefix -> Expression_Dot:exp0;
+
+# Postfix operators
+Expression_Dot -> Expression_Ident:exp0;
+Expression_Dot -> Expression_Dot:exp1 lparen rparen;
+
+# Literals, identifiers, and parenthesized expressions
+Expression_Ident -> Literal;
+Expression_Ident -> ident;
+
+FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
+
+FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
+FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
+
+FunctionReturnType -> arrow Type;
+
+Literal -> LiteralInteger;
+LiteralInteger -> hex_int_l;
+
+Module -> ModuleStatement? ModuleItems;
+
+ModuleItem -> FunctionDefinition;
+
+ModuleItems -> ;
+ModuleItems -> ModuleItems ModuleItem;
+
+ModulePath -> ident;
+
+ModuleStatement -> module ModulePath semicolon;
+
+ReturnStatement -> return Expression?:exp0 semicolon;
+
+Statements -> ;
+Statements -> Statements Statement;
+Statement -> Expression semicolon;
+Statement -> ReturnStatement;
+
+Type -> TypeBase;
+
+TypeBase -> byte;
+TypeBase -> ubyte;
+TypeBase -> short;
+TypeBase -> ushort;
+TypeBase -> int;
+TypeBase -> uint;
+TypeBase -> long;
+TypeBase -> ulong;
+TypeBase -> size_t;
+TypeBase -> ssize_t;
177 spec/ast_node_memory_remains.d.propane Normal file
@@ -0,0 +1,177 @@
+ast;
+ast_prefix P;
+
+<<
+import std.bigint;
+
+private string stringvalue;
+
+union TokenVal
+{
+    BigInt bi;
+    string s;
+    double dou;
+}
+>>
+
+ptype TokenVal;
+
+# Keywords.
+token byte;
+token def;
+token int;
+token long;
+token module;
+token return;
+token short;
+token size_t;
+token ssize_t;
+token ubyte;
+token uint;
+token ulong;
+token ushort;
+
+# Symbols.
+token arrow /->/;
+token comma /,/;
+token lbrace /\{/;
+token lparen /\(/;
+token rbrace /\}/;
+token rparen /\)/;
+token semicolon /;/;
+
+# Integer literals.
+token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
+    $$.bi = BigInt(match[0..3]);
+    foreach (c; match[3..$])
+    {
+        if (('0' <= c) && (c <= '9'))
+        {
+            $$.bi *= 0x10;
+            $$.bi += (c - '0');
+        }
+        if (('a' <= c) && (c <= 'f'))
+        {
+            $$.bi *= 0x10;
+            $$.bi += (c - 'a' + 10);
+        }
+        if (('A' <= c) && (c <= 'F'))
+        {
+            $$.bi *= 0x10;
+            $$.bi += (c - 'A' + 10);
+        }
+    }
+>>
+
+# Identifier.
+token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
+    if (match[0] == '$')
+    {
+        $$.s = match[1..$];
+    }
+    else
+    {
+        $$.s = match;
+    }
+    $mode(default);
+    return $token(ident);
+>>
+
+# Comments.
+drop /#.*/;
+
+# Whitespace.
+drop /[ \r\n]*/;
+
+start Module;
+
+# Assignment operators - right associative
+Expression -> Expression_Or:exp0;
+
+# Logical OR operator - left associative
+Expression_Or -> Expression_And:exp0;
+
+# Logical AND operator - left associative
+Expression_And -> Expression_Comp:exp0;
+
+# Equality operators - left associative
+Expression_Comp -> Expression_Relational:exp0;
+
+# Relational operators - left associative
+Expression_Relational -> Expression_REMatch:exp0;
+
+# Regular expression - left associative
+Expression_REMatch -> Expression_BinOr:exp0;
+
+# Binary OR operator - left associative
+Expression_BinOr -> Expression_Xor:exp0;
+
+# Binary XOR operator - left associative
+Expression_Xor -> Expression_BinAnd:exp0;
+
+# Binary AND operator - left associative
+Expression_BinAnd -> Expression_BitShift:exp0;
+
+# Bit shift operators - left associative
+Expression_BitShift -> Expression_Plus:exp0;
+
+# Add/subtract operators - left associative
+Expression_Plus -> Expression_Mul:exp0;
+
+# Multiplication/divide/modulus operators - left associative
+Expression_Mul -> Expression_Range:exp0;
+
+# Range construction operators - left associative
+Expression_Range -> Expression_UnaryPrefix:exp0;
+
+# Unary prefix operators
+Expression_UnaryPrefix -> Expression_Dot:exp0;
+
+# Postfix operators
+Expression_Dot -> Expression_Ident:exp0;
+Expression_Dot -> Expression_Dot:exp1 lparen rparen;
+
+# Literals, identifiers, and parenthesized expressions
+Expression_Ident -> Literal;
+Expression_Ident -> ident;
+
+FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
+
+FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
+FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
+
+FunctionReturnType -> arrow Type;
+
+Literal -> LiteralInteger;
+LiteralInteger -> hex_int_l;
+
+Module -> ModuleStatement? ModuleItems;
+
+ModuleItem -> FunctionDefinition;
+
+ModuleItems -> ;
+ModuleItems -> ModuleItems ModuleItem;
+
+ModulePath -> ident;
+
+ModuleStatement -> module ModulePath semicolon;
+
+ReturnStatement -> return Expression?:exp0 semicolon;
+
+Statements -> ;
+Statements -> Statements Statement;
+Statement -> Expression semicolon;
+Statement -> ReturnStatement;
+
+Type -> TypeBase;
+
+TypeBase -> byte;
+TypeBase -> ubyte;
+TypeBase -> short;
+TypeBase -> ushort;
+TypeBase -> int;
+TypeBase -> uint;
+TypeBase -> long;
+TypeBase -> ulong;
+TypeBase -> size_t;
+TypeBase -> ssize_t;
@@ -151,30 +151,30 @@ EOF

 o = grammar.patterns.find {|pattern| pattern.token == o}
 expect(o).to_not be_nil
-expect(o.mode).to be_nil
+expect(o.modes).to be_empty

 o = grammar.tokens.find {|token| token.name == "b"}
 expect(o).to_not be_nil

 o = grammar.patterns.find {|pattern| pattern.token == o}
 expect(o).to_not be_nil
-expect(o.mode).to eq "m1"
+expect(o.modes).to eq Set["m1"]

 o = grammar.patterns.find {|pattern| pattern.pattern == "foo"}
 expect(o).to_not be_nil
-expect(o.mode).to be_nil
+expect(o.modes).to be_empty

 o = grammar.patterns.find {|pattern| pattern.pattern == "bar"}
 expect(o).to_not be_nil
-expect(o.mode).to eq "m2"
+expect(o.modes).to eq Set["m2"]

 o = grammar.patterns.find {|pattern| pattern.pattern == "q"}
 expect(o).to_not be_nil
-expect(o.mode).to be_nil
+expect(o.modes).to be_empty

 o = grammar.patterns.find {|pattern| pattern.pattern == "r"}
 expect(o).to_not be_nil
-expect(o.mode).to eq "m3"
+expect(o.modes).to eq Set["m3"]
 end

 it "allows assigning ptypes to tokens and rules" do
@@ -126,6 +126,74 @@ EOF
 ]
 expect(run(<<EOF, ";")).to eq expected
 token semicolon /;/;
+EOF
+end
+
+it "matches a negated character class" do
+  expected = [
+    ["pattern", "/abc/"],
+  ]
+  expect(run(<<EOF, "/abc/")).to eq expected
+token pattern /\\/[^\\s]*\\//;
+EOF
+end
+
+it "matches special character classes" do
+  expected = [
+    ["a", "abc123_FOO"],
+  ]
+  expect(run(<<EOF, "abc123_FOO")).to eq expected
+token a /\\w+/;
+EOF
+  expected = [
+    ["b", "FROG*%$#"],
+  ]
+  expect(run(<<EOF, "FROG*%$#")).to eq expected
+token b /FROG\\D{1,4}/;
+EOF
+  expected = [
+    ["c", "$883366"],
+  ]
+  expect(run(<<EOF, "$883366")).to eq expected
+token c /$\\d+/;
+EOF
+  expected = [
+    ["d", "^&$@"],
+  ]
+  expect(run(<<EOF, "^&$@")).to eq expected
+token d /^\\W+/;
+EOF
+  expected = [
+    ["a", "abc123_FOO"],
+    [nil, " "],
+    ["b", "FROG*%$#"],
+    [nil, " "],
+    ["c", "$883366"],
+    [nil, " "],
+    ["d", "^&$@"],
+  ]
+  expect(run(<<EOF, "abc123_FOO FROG*%$# $883366 ^&$@")).to eq expected
+token a /\\w+/;
+token b /FROG\\D{1,4}/;
+token c /$\\d+/;
+token d /^\\W+/;
+drop /\\s+/;
+EOF
+end
+
+it "matches a negated character class with a nested inner negated character class" do
+  expected = [
+    ["t", "$&*"],
+  ]
+  expect(run(<<EOF, "$&*")).to eq expected
+token t /[^%\\W]+/;
+EOF
+end
+
+it "\\s matches a newline" do
+  expected = [["s", "\n"]]
+  expect(run(<<EOF, "\n")).to eq expected
+token s /\\s/;
 EOF
 end
 end
@@ -2,14 +2,14 @@ class Propane
 RSpec.describe Regex do

 it "parses an empty expression" do
-  regex = Regex.new("")
+  regex = Regex.new("", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0].size).to eq 0
 end

 it "parses a single character unit expression" do
-  regex = Regex.new("a")
+  regex = Regex.new("a", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -19,7 +19,7 @@ class Propane
 end

 it "parses a group with a single character unit expression" do
-  regex = Regex.new("(a)")
+  regex = Regex.new("(a)", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -33,7 +33,7 @@ class Propane
 end

 it "parses a *" do
-  regex = Regex.new("a*")
+  regex = Regex.new("a*", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -47,7 +47,7 @@ class Propane
 end

 it "parses a +" do
-  regex = Regex.new("a+")
+  regex = Regex.new("a+", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -61,7 +61,7 @@ class Propane
 end

 it "parses a ?" do
-  regex = Regex.new("a?")
+  regex = Regex.new("a?", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -75,7 +75,7 @@ class Propane
 end

 it "parses a multiplicity count" do
-  regex = Regex.new("a{5}")
+  regex = Regex.new("a{5}", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -89,7 +89,7 @@ class Propane
 end

 it "parses a minimum-only multiplicity count" do
-  regex = Regex.new("a{5,}")
+  regex = Regex.new("a{5,}", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -103,7 +103,7 @@ class Propane
 end

 it "parses a minimum and maximum multiplicity count" do
-  regex = Regex.new("a{5,8}")
+  regex = Regex.new("a{5,8}", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -118,7 +118,7 @@ class Propane
 end

 it "parses an escaped *" do
-  regex = Regex.new("a\\*")
+  regex = Regex.new("a\\*", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -131,7 +131,7 @@ class Propane
 end

 it "parses an escaped +" do
-  regex = Regex.new("a\\+")
+  regex = Regex.new("a\\+", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -144,7 +144,7 @@ class Propane
 end

 it "parses an escaped \\" do
-  regex = Regex.new("\\\\d")
+  regex = Regex.new("\\\\d", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -157,7 +157,7 @@ class Propane
 end

 it "parses a character class" do
-  regex = Regex.new("[a-z_]")
+  regex = Regex.new("[a-z_]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -175,7 +175,7 @@ class Propane
 end

 it "parses a negated character class" do
-  regex = Regex.new("[^xyz]")
+  regex = Regex.new("[^xyz]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -189,8 +189,25 @@ class Propane
   expect(ccu[0].first).to eq "x".ord
 end

+it "parses a negated character class with inner character classes" do
+  regex = Regex.new("[^x\\sz]", 1)
+  expect(regex.unit).to be_a Regex::AlternatesUnit
+  expect(regex.unit.alternates.size).to eq 1
+  expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
+  seq_unit = regex.unit.alternates[0]
+  expect(seq_unit.size).to eq 1
+  expect(seq_unit[0]).to be_a Regex::CharacterClassUnit
+  ccu = seq_unit[0]
+  expect(ccu.negate).to be_truthy
+  expect(ccu.size).to eq 8
+  expect(ccu[0]).to be_a Regex::CharacterRangeUnit
+  expect(ccu[0].first).to eq "x".ord
+  expect(ccu[1].first).to eq " ".ord
+  expect(ccu[7].first).to eq "z".ord
+end
+
 it "parses - as a plain character at beginning of a character class" do
-  regex = Regex.new("[-9]")
+  regex = Regex.new("[-9]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -204,7 +221,7 @@ class Propane
 end

 it "parses - as a plain character at end of a character class" do
-  regex = Regex.new("[0-]")
+  regex = Regex.new("[0-]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -220,7 +237,7 @@ class Propane
 end

 it "parses - as a plain character at beginning of a negated character class" do
-  regex = Regex.new("[^-9]")
+  regex = Regex.new("[^-9]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -235,7 +252,7 @@ class Propane
 end

 it "parses . as a plain character in a character class" do
-  regex = Regex.new("[.]")
+  regex = Regex.new("[.]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -250,7 +267,7 @@ class Propane
 end

 it "parses - as a plain character when escaped in middle of character class" do
-  regex = Regex.new("[0\\-9]")
+  regex = Regex.new("[0\\-9]", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -269,7 +286,7 @@ class Propane
 end

 it "parses alternates" do
-  regex = Regex.new("ab|c")
+  regex = Regex.new("ab|c", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 2
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -279,7 +296,7 @@ class Propane
 end

 it "parses a ." do
-  regex = Regex.new("a.b")
+  regex = Regex.new("a.b", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 1
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -290,7 +307,7 @@ class Propane
 end

 it "parses something complex" do
-  regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+")
+  regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+", 1)
   expect(regex.unit).to be_a Regex::AlternatesUnit
   expect(regex.unit.alternates.size).to eq 4
   expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -74,7 +74,7 @@ EOF
 when "c"
   result = system(*%w[gcc -Wall -o spec/run/testparser -Ispec -Ispec/run], *parsers, *test_files, "spec/testutils.c", "-lm")
 when "d"
-  result = system(*%w[ldc2 --unittest -of spec/run/testparser -Ispec], *parsers, *test_files, "spec/testutils.d")
+  result = system(*%w[ldc2 -g --unittest -of spec/run/testparser -Ispec], *parsers, *test_files, "spec/testutils.d")
 end
 expect(result).to be_truthy
 end
@@ -249,6 +249,18 @@ EOF
 expect(results.status).to eq 0
 end

+it "shows error line number for unmatched left curly brace" do
+  write_grammar <<EOF
+# Line 1
+# Line 2
+token a /a{/;
+Start -> a;
+EOF
+  results = run_propane(capture: true)
+  expect(results.stderr).to match /Line 3: unexpected match count following \{/
+  expect(results.status).to_not eq 0
+end
+
 %w[d c].each do |language|

   context "#{language.upcase} language" do
@@ -621,6 +633,62 @@ EOF
 ])
 end

+it "multiple lexer modes may apply to a pattern" do
+  case language
+  when "c"
+    write_grammar <<EOF
+<<
+#include <stdio.h>
+>>
+ptype char;
+token abc;
+token def;
+default, identonly: token ident /[a-z]+/ <<
+  $$ = match[0];
+  $mode(default);
+  return $token(ident);
+>>
+token dot /\\./ <<
+  $mode(identonly);
+>>
+default, identonly: drop /\\s+/;
+Start -> abc dot ident <<
+  printf("ident: %c\\n", $3);
+>>
+EOF
+  when "d"
+    write_grammar <<EOF
+<<
+import std.stdio;
+>>
+ptype char;
+token abc;
+token def;
+default, identonly: token ident /[a-z]+/ <<
+  $$ = match[0];
+  $mode(default);
+>>
+token dot /\\./ <<
+  $mode(identonly);
+>>
+default, identonly: drop /\\s+/;
+Start -> abc dot ident <<
+  writeln("ident: ", $3);
+>>
+EOF
+  end
+  run_propane(language: language)
+  compile("spec/test_lexer_multiple_modes.#{language}", language: language)
+  results = run_test
+  expect(results.status).to eq 0
+  verify_lines(results.stdout, [
+    "ident: d",
+    "pass1",
+    "ident: a",
+    "pass2",
+  ])
+end
+
 it "executes user code associated with a parser rule" do
   case language
   when "c"
@@ -689,6 +757,7 @@ EOF
 results = run_propane(capture: true, language: language)
 expect(results.status).to_not eq 0
 expect(results.stderr).to match %r{Error: reduce/reduce conflict \(state \d+\) between rule E#\d+ \(defined on line 10\) and rule F#\d+ \(defined on line 11\)}
+expect(File.binread("spec/run/testparser.log")).to match %r{Reduce.actions:}
 end

 it "provides matched text to user code blocks" do
@@ -1117,6 +1186,47 @@ EOF
 expect(results.status).to eq 0
 end

+it "allows naming an optional rule component in AST generation mode" do
+  if language == "d"
+    write_grammar <<EOF
+ast;
+
+<<
+import std.stdio;
+>>
+
+token a;
+token b;
+token c;
+token d;
+Start -> a?:a b R?:r;
+R -> c d;
+R -> d c;
+EOF
+  else
+    write_grammar <<EOF
+ast;
+
+<<
+#include <stdio.h>
+>>
+
+token a;
+token b;
+token c;
+token d;
+Start -> a?:a b R?:r;
+R -> c d;
+R -> d c;
+EOF
+  end
+  run_propane(language: language)
+  compile("spec/test_named_optional_rule_component_ast.#{language}", language: language)
+  results = run_test
+  expect(results.stderr).to eq ""
+  expect(results.status).to eq 0
+end
+
 it "stores token and rule positions in AST nodes" do
   write_grammar <<EOF
 ast;
@@ -1177,6 +1287,29 @@ EOF
 expect(results.status).to eq 0
 end

+it "aliases the correct field when multiple rules are in a rule set in AST mode" do
+  write_grammar <<EOF
+ast;
+
+token a;
+token b;
+token c;
+drop /\\s+/;
+Start -> a;
+Start -> Foo;
+Start -> T:first T:second T:third;
+Foo -> b;
+T -> a;
+T -> b;
+T -> c;
+EOF
+  run_propane(language: language)
+  compile("spec/test_ast_field_aliases.#{language}", language: language)
+  results = run_test
+  expect(results.stderr).to eq ""
+  expect(results.status).to eq 0
+end
+
 it "allows specifying field aliases when AST mode is not enabled" do
   if language == "d"
     write_grammar <<EOF
@@ -1220,6 +1353,65 @@ EOF
 expect(results.status).to eq 0
 expect(results.stdout).to match /first is foo1.*second is bar2/m
 end

+it "aliases the correct field when multiple rules are in a rule set when AST mode is not enabled" do
+  if language == "d"
+    write_grammar <<EOF
+<<
+import std.stdio;
+>>
+ptype string;
+token id /[a-zA-Z_][a-zA-Z0-9_]*/ <<
+  $$ = match;
+>>
+drop /\\s+/;
+Start -> id;
+Start -> Foo;
+Start -> id:first id:second <<
+  writeln("first is ", ${first});
+  writeln("second is ", ${second});
+>>
+Foo -> ;
+EOF
+  else
+    write_grammar <<EOF
+<<
+#include <stdio.h>
+#include <string.h>
+>>
+ptype char const *;
+token id /[a-zA-Z_][a-zA-Z0-9_]*/ <<
+  char * s = malloc(match_length + 1);
+  strncpy(s, (char const *)match, match_length);
+  s[match_length] = 0;
+  $$ = s;
+>>
+drop /\\s+/;
+Start -> id;
+Start -> Foo;
+Start -> id:first id:second <<
+  printf("first is %s\\n", ${first});
+  printf("second is %s\\n", ${second});
+>>
+Foo -> ;
+EOF
+  end
+  run_propane(language: language)
+  compile("spec/test_field_aliases.#{language}", language: language)
+  results = run_test
+  expect(results.stderr).to eq ""
+  expect(results.status).to eq 0
+  expect(results.stdout).to match /first is foo1.*second is bar2/m
+end
+
+it "does not free memory allocated for AST nodes" do
+  write_grammar(File.read("spec/ast_node_memory_remains.#{language}.propane"))
+  run_propane(language: language)
+  compile("spec/test_ast_node_memory_remains.#{language}", language: language)
+  results = run_test
+  expect(results.stderr).to eq ""
+  expect(results.status).to eq 0
+end
 end
 end
 end
@ -11,92 +11,92 @@ int main()
|
|||||||
assert(p_parse(&context) == P_SUCCESS);
|
assert(p_parse(&context) == P_SUCCESS);
|
||||||
Start * start = p_result(&context);
|
Start * start = p_result(&context);
|
||||||
|
|
||||||
assert_eq(1, start->pT1->pToken->position.row);
|
assert_eq(2, start->pT1->pToken->position.row);
|
||||||
assert_eq(0, start->pT1->pToken->position.col);
|
assert_eq(1, start->pT1->pToken->position.col);
|
||||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
assert_eq(2, start->pT1->pToken->end_position.row);
|
||||||
assert_eq(0, start->pT1->pToken->end_position.col);
|
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||||
assert(p_position_valid(start->pT1->pA->position));
|
assert(p_position_valid(start->pT1->pA->position));
|
||||||
assert_eq(2, start->pT1->pA->position.row);
|
assert_eq(3, start->pT1->pA->position.row);
|
||||||
assert_eq(2, start->pT1->pA->position.col);
|
assert_eq(3, start->pT1->pA->position.col);
|
||||||
assert_eq(2, start->pT1->pA->end_position.row);
|
assert_eq(3, start->pT1->pA->end_position.row);
|
||||||
assert_eq(7, start->pT1->pA->end_position.col);
|
assert_eq(8, start->pT1->pA->end_position.col);
|
||||||
assert_eq(1, start->pT1->position.row);
|
assert_eq(2, start->pT1->position.row);
|
||||||
assert_eq(0, start->pT1->position.col);
|
assert_eq(1, start->pT1->position.col);
|
||||||
assert_eq(2, start->pT1->end_position.row);
|
assert_eq(3, start->pT1->end_position.row);
|
||||||
assert_eq(7, start->pT1->end_position.col);
|
assert_eq(8, start->pT1->end_position.col);
|
||||||
|
|
||||||
assert_eq(1, start->position.row);
|
assert_eq(2, start->position.row);
|
||||||
assert_eq(0, start->position.col);
|
assert_eq(1, start->position.col);
|
||||||
assert_eq(2, start->end_position.row);
|
assert_eq(3, start->end_position.row);
|
||||||
assert_eq(7, start->end_position.col);
|
assert_eq(8, start->end_position.col);
|
||||||
|
|
||||||
input = "a\nbb";
|
input = "a\nbb";
|
||||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start->pT1->pToken->position.row);
+assert_eq(1, start->pT1->pToken->position.row);
-assert_eq(0, start->pT1->pToken->position.col);
+assert_eq(1, start->pT1->pToken->position.col);
-assert_eq(0, start->pT1->pToken->end_position.row);
+assert_eq(1, start->pT1->pToken->end_position.row);
-assert_eq(0, start->pT1->pToken->end_position.col);
+assert_eq(1, start->pT1->pToken->end_position.col);
 assert(p_position_valid(start->pT1->pA->position));
-assert_eq(1, start->pT1->pA->position.row);
+assert_eq(2, start->pT1->pA->position.row);
-assert_eq(0, start->pT1->pA->position.col);
+assert_eq(1, start->pT1->pA->position.col);
-assert_eq(1, start->pT1->pA->end_position.row);
+assert_eq(2, start->pT1->pA->end_position.row);
-assert_eq(1, start->pT1->pA->end_position.col);
+assert_eq(2, start->pT1->pA->end_position.col);
-assert_eq(0, start->pT1->position.row);
+assert_eq(1, start->pT1->position.row);
-assert_eq(0, start->pT1->position.col);
+assert_eq(1, start->pT1->position.col);
-assert_eq(1, start->pT1->end_position.row);
+assert_eq(2, start->pT1->end_position.row);
-assert_eq(1, start->pT1->end_position.col);
+assert_eq(2, start->pT1->end_position.col);

-assert_eq(0, start->position.row);
+assert_eq(1, start->position.row);
-assert_eq(0, start->position.col);
+assert_eq(1, start->position.col);
-assert_eq(1, start->end_position.row);
+assert_eq(2, start->end_position.row);
-assert_eq(1, start->end_position.col);
+assert_eq(2, start->end_position.col);

 input = "a\nc\nc";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start->pT1->pToken->position.row);
+assert_eq(1, start->pT1->pToken->position.row);
-assert_eq(0, start->pT1->pToken->position.col);
+assert_eq(1, start->pT1->pToken->position.col);
-assert_eq(0, start->pT1->pToken->end_position.row);
+assert_eq(1, start->pT1->pToken->end_position.row);
-assert_eq(0, start->pT1->pToken->end_position.col);
+assert_eq(1, start->pT1->pToken->end_position.col);
 assert(p_position_valid(start->pT1->pA->position));
-assert_eq(1, start->pT1->pA->position.row);
+assert_eq(2, start->pT1->pA->position.row);
-assert_eq(0, start->pT1->pA->position.col);
+assert_eq(1, start->pT1->pA->position.col);
-assert_eq(2, start->pT1->pA->end_position.row);
+assert_eq(3, start->pT1->pA->end_position.row);
-assert_eq(0, start->pT1->pA->end_position.col);
+assert_eq(1, start->pT1->pA->end_position.col);
-assert_eq(0, start->pT1->position.row);
+assert_eq(1, start->pT1->position.row);
-assert_eq(0, start->pT1->position.col);
+assert_eq(1, start->pT1->position.col);
-assert_eq(2, start->pT1->end_position.row);
+assert_eq(3, start->pT1->end_position.row);
-assert_eq(0, start->pT1->end_position.col);
+assert_eq(1, start->pT1->end_position.col);

-assert_eq(0, start->position.row);
+assert_eq(1, start->position.row);
-assert_eq(0, start->position.col);
+assert_eq(1, start->position.col);
-assert_eq(2, start->end_position.row);
+assert_eq(3, start->end_position.row);
-assert_eq(0, start->end_position.col);
+assert_eq(1, start->end_position.col);

 input = "a";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start->pT1->pToken->position.row);
+assert_eq(1, start->pT1->pToken->position.row);
-assert_eq(0, start->pT1->pToken->position.col);
+assert_eq(1, start->pT1->pToken->position.col);
-assert_eq(0, start->pT1->pToken->end_position.row);
+assert_eq(1, start->pT1->pToken->end_position.row);
-assert_eq(0, start->pT1->pToken->end_position.col);
+assert_eq(1, start->pT1->pToken->end_position.col);
 assert(!p_position_valid(start->pT1->pA->position));
-assert_eq(0, start->pT1->position.row);
+assert_eq(1, start->pT1->position.row);
-assert_eq(0, start->pT1->position.col);
+assert_eq(1, start->pT1->position.col);
-assert_eq(0, start->pT1->end_position.row);
+assert_eq(1, start->pT1->end_position.row);
-assert_eq(0, start->pT1->end_position.col);
+assert_eq(1, start->pT1->end_position.col);

-assert_eq(0, start->position.row);
+assert_eq(1, start->position.row);
-assert_eq(0, start->position.col);
+assert_eq(1, start->position.col);
-assert_eq(0, start->end_position.row);
+assert_eq(1, start->end_position.row);
-assert_eq(0, start->end_position.col);
+assert_eq(1, start->end_position.col);

 return 0;
 }

@@ -15,90 +15,90 @@ unittest
 assert(p_parse(&context) == P_SUCCESS);
 Start * start = p_result(&context);

-assert_eq(1, start.pT1.pToken.position.row);
+assert_eq(2, start.pT1.pToken.position.row);
-assert_eq(0, start.pT1.pToken.position.col);
+assert_eq(1, start.pT1.pToken.position.col);
-assert_eq(1, start.pT1.pToken.end_position.row);
+assert_eq(2, start.pT1.pToken.end_position.row);
-assert_eq(0, start.pT1.pToken.end_position.col);
+assert_eq(1, start.pT1.pToken.end_position.col);
 assert(start.pT1.pA.position.valid);
-assert_eq(2, start.pT1.pA.position.row);
+assert_eq(3, start.pT1.pA.position.row);
-assert_eq(2, start.pT1.pA.position.col);
+assert_eq(3, start.pT1.pA.position.col);
-assert_eq(2, start.pT1.pA.end_position.row);
+assert_eq(3, start.pT1.pA.end_position.row);
-assert_eq(7, start.pT1.pA.end_position.col);
+assert_eq(8, start.pT1.pA.end_position.col);
-assert_eq(1, start.pT1.position.row);
+assert_eq(2, start.pT1.position.row);
-assert_eq(0, start.pT1.position.col);
+assert_eq(1, start.pT1.position.col);
-assert_eq(2, start.pT1.end_position.row);
+assert_eq(3, start.pT1.end_position.row);
-assert_eq(7, start.pT1.end_position.col);
+assert_eq(8, start.pT1.end_position.col);

-assert_eq(1, start.position.row);
+assert_eq(2, start.position.row);
-assert_eq(0, start.position.col);
+assert_eq(1, start.position.col);
-assert_eq(2, start.end_position.row);
+assert_eq(3, start.end_position.row);
-assert_eq(7, start.end_position.col);
+assert_eq(8, start.end_position.col);

 input = "a\nbb";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start.pT1.pToken.position.row);
+assert_eq(1, start.pT1.pToken.position.row);
-assert_eq(0, start.pT1.pToken.position.col);
+assert_eq(1, start.pT1.pToken.position.col);
-assert_eq(0, start.pT1.pToken.end_position.row);
+assert_eq(1, start.pT1.pToken.end_position.row);
-assert_eq(0, start.pT1.pToken.end_position.col);
+assert_eq(1, start.pT1.pToken.end_position.col);
 assert(start.pT1.pA.position.valid);
-assert_eq(1, start.pT1.pA.position.row);
+assert_eq(2, start.pT1.pA.position.row);
-assert_eq(0, start.pT1.pA.position.col);
+assert_eq(1, start.pT1.pA.position.col);
-assert_eq(1, start.pT1.pA.end_position.row);
+assert_eq(2, start.pT1.pA.end_position.row);
-assert_eq(1, start.pT1.pA.end_position.col);
+assert_eq(2, start.pT1.pA.end_position.col);
-assert_eq(0, start.pT1.position.row);
+assert_eq(1, start.pT1.position.row);
-assert_eq(0, start.pT1.position.col);
+assert_eq(1, start.pT1.position.col);
-assert_eq(1, start.pT1.end_position.row);
+assert_eq(2, start.pT1.end_position.row);
-assert_eq(1, start.pT1.end_position.col);
+assert_eq(2, start.pT1.end_position.col);

-assert_eq(0, start.position.row);
+assert_eq(1, start.position.row);
-assert_eq(0, start.position.col);
+assert_eq(1, start.position.col);
-assert_eq(1, start.end_position.row);
+assert_eq(2, start.end_position.row);
-assert_eq(1, start.end_position.col);
+assert_eq(2, start.end_position.col);

 input = "a\nc\nc";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start.pT1.pToken.position.row);
+assert_eq(1, start.pT1.pToken.position.row);
-assert_eq(0, start.pT1.pToken.position.col);
+assert_eq(1, start.pT1.pToken.position.col);
-assert_eq(0, start.pT1.pToken.end_position.row);
+assert_eq(1, start.pT1.pToken.end_position.row);
-assert_eq(0, start.pT1.pToken.end_position.col);
+assert_eq(1, start.pT1.pToken.end_position.col);
 assert(start.pT1.pA.position.valid);
-assert_eq(1, start.pT1.pA.position.row);
+assert_eq(2, start.pT1.pA.position.row);
-assert_eq(0, start.pT1.pA.position.col);
+assert_eq(1, start.pT1.pA.position.col);
-assert_eq(2, start.pT1.pA.end_position.row);
+assert_eq(3, start.pT1.pA.end_position.row);
-assert_eq(0, start.pT1.pA.end_position.col);
+assert_eq(1, start.pT1.pA.end_position.col);
-assert_eq(0, start.pT1.position.row);
+assert_eq(1, start.pT1.position.row);
-assert_eq(0, start.pT1.position.col);
+assert_eq(1, start.pT1.position.col);
-assert_eq(2, start.pT1.end_position.row);
+assert_eq(3, start.pT1.end_position.row);
-assert_eq(0, start.pT1.end_position.col);
+assert_eq(1, start.pT1.end_position.col);

-assert_eq(0, start.position.row);
+assert_eq(1, start.position.row);
-assert_eq(0, start.position.col);
+assert_eq(1, start.position.col);
-assert_eq(2, start.end_position.row);
+assert_eq(3, start.end_position.row);
-assert_eq(0, start.end_position.col);
+assert_eq(1, start.end_position.col);

 input = "a";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(0, start.pT1.pToken.position.row);
+assert_eq(1, start.pT1.pToken.position.row);
-assert_eq(0, start.pT1.pToken.position.col);
+assert_eq(1, start.pT1.pToken.position.col);
-assert_eq(0, start.pT1.pToken.end_position.row);
+assert_eq(1, start.pT1.pToken.end_position.row);
-assert_eq(0, start.pT1.pToken.end_position.col);
+assert_eq(1, start.pT1.pToken.end_position.col);
 assert(!start.pT1.pA.position.valid);
-assert_eq(0, start.pT1.position.row);
+assert_eq(1, start.pT1.position.row);
-assert_eq(0, start.pT1.position.col);
+assert_eq(1, start.pT1.position.col);
-assert_eq(0, start.pT1.end_position.row);
+assert_eq(1, start.pT1.end_position.row);
-assert_eq(0, start.pT1.end_position.col);
+assert_eq(1, start.pT1.end_position.col);

-assert_eq(0, start.position.row);
+assert_eq(1, start.position.row);
-assert_eq(0, start.position.col);
+assert_eq(1, start.position.col);
-assert_eq(0, start.end_position.row);
+assert_eq(1, start.end_position.row);
-assert_eq(0, start.end_position.col);
+assert_eq(1, start.end_position.col);
 }
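The two hunks above shift every expected row and column up by one: the generated parser now reports source positions 1-based instead of 0-based, matching the line:column numbering editors and compilers conventionally print. A minimal sketch of what that means for calling code, using only the `Start`/`position`/`end_position` names and `p_context_init`/`p_parse`/`p_result` calls that appear in these tests; the header name and the integer width of the row/col fields are assumptions:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "testparser.h" /* assumed name of the generated parser header */

int main(void)
{
    char const * input = "a\nbb";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        Start * start = p_result(&context);
        /* Positions are now 1-based, so they can be printed directly as a
         * conventional line:column pair with no +1 adjustment. */
        printf("start symbol spans %u:%u to %u:%u\n",
               (unsigned)start->position.row, (unsigned)start->position.col,
               (unsigned)start->end_position.row, (unsigned)start->end_position.col);
    }
    return 0;
}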

415  spec/test_ast_node_memory_remains.c  Normal file
@@ -0,0 +1,415 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include "testutils.h"

int main(int argc, char * argv[])
{
    const char * input =
        "# 0\n"
        "def byte_val() -> byte\n"
        "{\n"
        " return 0x42;\n"
        "}\n"
        "\n"
        "# 1\n"
        "def short_val() -> short\n"
        "{\n"
        " return 0x4242;\n"
        "}\n"
        "\n"
        "# 2\n"
        "def int_val() -> int\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 3\n"
        "def long_val() -> long\n"
        "{\n"
        " return 0x4242_4242_4242_4242;\n"
        "}\n"
        "\n"
        "# 4\n"
        "def ssize_t_val() -> ssize_t\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 5\n"
        "def byte_to_short() -> short\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 6\n"
        "def byte_to_int() -> int\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 7\n"
        "def byte_to_long() -> long\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 8\n"
        "def byte_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 9\n"
        "def short_to_byte() -> byte\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 10\n"
        "def short_to_int() -> int\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 11\n"
        "def short_to_long() -> long\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 12\n"
        "def short_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 13\n"
        "def int_to_byte() -> byte\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 14\n"
        "def int_to_short() -> short\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 15\n"
        "def int_to_long() -> long\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 16\n"
        "def int_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 17\n"
        "def long_to_byte() -> byte\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 18\n"
        "def long_to_short() -> short\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 19\n"
        "def long_to_int() -> int\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 20\n"
        "def long_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 21\n"
        "def ssize_t_to_byte() -> byte\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 22\n"
        "def ssize_t_to_short() -> short\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 23\n"
        "def ssize_t_to_int() -> int\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 24\n"
        "def ssize_t_to_long() -> long\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 25\n"
        "def ubyte_val() -> ubyte\n"
        "{\n"
        " return 0x42;\n"
        "}\n"
        "\n"
        "# 26\n"
        "def ushort_val() -> ushort\n"
        "{\n"
        " return 0x4242;\n"
        "}\n"
        "\n"
        "# 27\n"
        "def uint_val() -> uint\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 28\n"
        "def ulong_val() -> ulong\n"
        "{\n"
        " return 0x4242_4242_4242_4242;\n"
        "}\n"
        "\n"
        "# 29\n"
        "def size_t_val() -> size_t\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 30\n"
        "def ubyte_to_ushort() -> ushort\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 31\n"
        "def ubyte_to_uint() -> uint\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 32\n"
        "def ubyte_to_ulong() -> ulong\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 33\n"
        "def ubyte_to_size_t() -> size_t\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 34\n"
        "def ushort_to_ubyte() -> ubyte\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 35\n"
        "def ushort_to_uint() -> uint\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 36\n"
        "def ushort_to_ulong() -> ulong\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 37\n"
        "def ushort_to_size_t() -> size_t\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 38\n"
        "def uint_to_ubyte() -> ubyte\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 39\n"
        "def uint_to_ushort() -> ushort\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 40\n"
        "def uint_to_ulong() -> ulong\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 41\n"
        "def uint_to_size_t() -> size_t\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 42\n"
        "def ulong_to_ubyte() -> ubyte\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 43\n"
        "def ulong_to_ushort() -> ushort\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 44\n"
        "def ulong_to_uint() -> uint\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 45\n"
        "def ulong_to_size_t() -> size_t\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 46\n"
        "def size_t_to_ubyte() -> ubyte\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 47\n"
        "def size_t_to_ushort() -> ushort\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 48\n"
        "def size_t_to_int() -> int\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 49\n"
        "def size_t_to_ulong() -> ulong\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 50\n"
        "def main() -> int\n"
        "{\n"
        " return int_val();\n"
        "}\n";
    struct
    {
        const char * name;
        p_token_t token;
    } expected[] = {
        {"byte_val", TOKEN_byte},
        {"short_val", TOKEN_short},
        {"int_val", TOKEN_int},
        {"long_val", TOKEN_long},
        {"ssize_t_val", TOKEN_ssize_t},
        {"byte_to_short", TOKEN_short},
        {"byte_to_int", TOKEN_int},
        {"byte_to_long", TOKEN_long},
        {"byte_to_ssize_t", TOKEN_ssize_t},
        {"short_to_byte", TOKEN_byte},
        {"short_to_int", TOKEN_int},
        {"short_to_long", TOKEN_long},
        {"short_to_ssize_t", TOKEN_ssize_t},
        {"int_to_byte", TOKEN_byte},
        {"int_to_short", TOKEN_short},
        {"int_to_long", TOKEN_long},
        {"int_to_ssize_t", TOKEN_ssize_t},
        {"long_to_byte", TOKEN_byte},
        {"long_to_short", TOKEN_short},
        {"long_to_int", TOKEN_int},
        {"long_to_ssize_t", TOKEN_ssize_t},
        {"ssize_t_to_byte", TOKEN_byte},
        {"ssize_t_to_short", TOKEN_short},
        {"ssize_t_to_int", TOKEN_int},
        {"ssize_t_to_long", TOKEN_long},
        {"ubyte_val", TOKEN_ubyte},
        {"ushort_val", TOKEN_ushort},
        {"uint_val", TOKEN_uint},
        {"ulong_val", TOKEN_ulong},
        {"size_t_val", TOKEN_size_t},
        {"ubyte_to_ushort", TOKEN_ushort},
        {"ubyte_to_uint", TOKEN_uint},
        {"ubyte_to_ulong", TOKEN_ulong},
        {"ubyte_to_size_t", TOKEN_size_t},
        {"ushort_to_ubyte", TOKEN_ubyte},
        {"ushort_to_uint", TOKEN_uint},
        {"ushort_to_ulong", TOKEN_ulong},
        {"ushort_to_size_t", TOKEN_size_t},
        {"uint_to_ubyte", TOKEN_ubyte},
        {"uint_to_ushort", TOKEN_ushort},
        {"uint_to_ulong", TOKEN_ulong},
        {"uint_to_size_t", TOKEN_size_t},
        {"ulong_to_ubyte", TOKEN_ubyte},
        {"ulong_to_ushort", TOKEN_ushort},
        {"ulong_to_uint", TOKEN_uint},
        {"ulong_to_size_t", TOKEN_size_t},
        {"size_t_to_ubyte", TOKEN_ubyte},
        {"size_t_to_ushort", TOKEN_ushort},
        {"size_t_to_int", TOKEN_int},
        {"size_t_to_ulong", TOKEN_ulong},
        {"main", TOKEN_int},
    };
    p_context_t context;
    p_context_init(&context, (const uint8_t *)input, strlen(input));
    size_t result = p_parse(&context);
    assert_eq(P_SUCCESS, result);
    PModule * pmod = p_result(&context);
    PModuleItems * pmis = pmod->pModuleItems;
    PFunctionDefinition ** pfds;
    size_t n_pfds = 0u;
    while (pmis != NULL)
    {
        PModuleItem * pmi = pmis->pModuleItem;
        if (pmi->pFunctionDefinition != NULL)
        {
            n_pfds++;
        }
        pmis = pmis->pModuleItems;
    }
    pfds = malloc(n_pfds * sizeof(PModuleItems *));
    pmis = pmod->pModuleItems;
    size_t pfd_i = n_pfds;
    while (pmis != NULL)
    {
        PModuleItem * pmi = pmis->pModuleItem;
        PFunctionDefinition * pfd = pmi->pFunctionDefinition;
        if (pfd != NULL)
        {
            pfd_i--;
            assert(pfd_i < n_pfds);
            pfds[pfd_i] = pfd;
        }
        pmis = pmis->pModuleItems;
    }
    assert_eq(51, n_pfds);
    for (size_t i = 0; i < n_pfds; i++)
    {
        if (strncmp(expected[i].name, (const char *)pfds[i]->name->pvalue.s, strlen(expected[i].name)) != 0 ||
            (expected[i].token != pfds[i]->returntype->pType->pTypeBase->pToken1->token))
        {
            fprintf(stderr, "Index %lu: expected %s/%u, got %u\n", i, expected[i].name, expected[i].token, pfds[i]->returntype->pType->pTypeBase->pToken1->token);
        }
    }

    return 0;
}
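The new test above walks the AST's singly linked `PModuleItems` list twice: a first pass counts the `PFunctionDefinition` nodes, and a second pass fills the array from the back, because the recursive rule links the list so the last-reduced item sits at the head. A generic sketch of that count-then-reverse-fill pattern, with a hypothetical `node`/`next` list standing in for the generated AST types:

#include <assert.h>
#include <stdlib.h>

struct node { int payload; struct node * next; };

/* Copy a singly linked list (head = most recently linked item) into an
 * array in reverse link order, i.e. original appearance order. */
static int * to_array(struct node * head, size_t * n_out)
{
    size_t n = 0;
    for (struct node * it = head; it != NULL; it = it->next)
        n++;
    int * arr = malloc(n * sizeof(int));
    size_t i = n;
    for (struct node * it = head; arr != NULL && it != NULL; it = it->next)
    {
        i--;
        assert(i < n); /* size_t underflow guard, as in the test above */
        arr[i] = it->payload;
    }
    *n_out = n;
    return arr;
}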

408  spec/test_ast_node_memory_remains.d  Normal file
@@ -0,0 +1,408 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "
# 0
def byte_val() -> byte
{
return 0x42;
}

# 1
def short_val() -> short
{
return 0x4242;
}

# 2
def int_val() -> int
{
return 0x42424242;
}

# 3
def long_val() -> long
{
return 0x4242_4242_4242_4242;
}

# 4
def ssize_t_val() -> ssize_t
{
return 0x42424242;
}

# 5
def byte_to_short() -> short
{
return byte_val();
}

# 6
def byte_to_int() -> int
{
return byte_val();
}

# 7
def byte_to_long() -> long
{
return byte_val();
}

# 8
def byte_to_ssize_t() -> ssize_t
{
return byte_val();
}

# 9
def short_to_byte() -> byte
{
return short_val();
}

# 10
def short_to_int() -> int
{
return short_val();
}

# 11
def short_to_long() -> long
{
return short_val();
}

# 12
def short_to_ssize_t() -> ssize_t
{
return short_val();
}

# 13
def int_to_byte() -> byte
{
return int_val();
}

# 14
def int_to_short() -> short
{
return int_val();
}

# 15
def int_to_long() -> long
{
return int_val();
}

# 16
def int_to_ssize_t() -> ssize_t
{
return int_val();
}

# 17
def long_to_byte() -> byte
{
return long_val();
}

# 18
def long_to_short() -> short
{
return long_val();
}

# 19
def long_to_int() -> int
{
return long_val();
}

# 20
def long_to_ssize_t() -> ssize_t
{
return long_val();
}

# 21
def ssize_t_to_byte() -> byte
{
return ssize_t_val();
}

# 22
def ssize_t_to_short() -> short
{
return ssize_t_val();
}

# 23
def ssize_t_to_int() -> int
{
return ssize_t_val();
}

# 24
def ssize_t_to_long() -> long
{
return ssize_t_val();
}

# 25
def ubyte_val() -> ubyte
{
return 0x42;
}

# 26
def ushort_val() -> ushort
{
return 0x4242;
}

# 27
def uint_val() -> uint
{
return 0x42424242;
}

# 28
def ulong_val() -> ulong
{
return 0x4242_4242_4242_4242;
}

# 29
def size_t_val() -> size_t
{
return 0x42424242;
}

# 30
def ubyte_to_ushort() -> ushort
{
return ubyte_val();
}

# 31
def ubyte_to_uint() -> uint
{
return ubyte_val();
}

# 32
def ubyte_to_ulong() -> ulong
{
return ubyte_val();
}

# 33
def ubyte_to_size_t() -> size_t
{
return ubyte_val();
}

# 34
def ushort_to_ubyte() -> ubyte
{
return ushort_val();
}

# 35
def ushort_to_uint() -> uint
{
return ushort_val();
}

# 36
def ushort_to_ulong() -> ulong
{
return ushort_val();
}

# 37
def ushort_to_size_t() -> size_t
{
return ushort_val();
}

# 38
def uint_to_ubyte() -> ubyte
{
return uint_val();
}

# 39
def uint_to_ushort() -> ushort
{
return uint_val();
}

# 40
def uint_to_ulong() -> ulong
{
return uint_val();
}

# 41
def uint_to_size_t() -> size_t
{
return uint_val();
}

# 42
def ulong_to_ubyte() -> ubyte
{
return ulong_val();
}

# 43
def ulong_to_ushort() -> ushort
{
return ulong_val();
}

# 44
def ulong_to_uint() -> uint
{
return ulong_val();
}

# 45
def ulong_to_size_t() -> size_t
{
return ulong_val();
}

# 46
def size_t_to_ubyte() -> ubyte
{
return size_t_val();
}

# 47
def size_t_to_ushort() -> ushort
{
return size_t_val();
}

# 48
def size_t_to_int() -> int
{
return size_t_val();
}

# 49
def size_t_to_ulong() -> ulong
{
return size_t_val();
}

# 50
def main() -> int
{
return int_val();
}
";
    struct Expected
    {
        string name;
        p_token_t token;
    }
    Expected[] expected = [
        Expected("byte_val", TOKEN_byte),
        Expected("short_val", TOKEN_short),
        Expected("int_val", TOKEN_int),
        Expected("long_val", TOKEN_long),
        Expected("ssize_t_val", TOKEN_ssize_t),
        Expected("byte_to_short", TOKEN_short),
        Expected("byte_to_int", TOKEN_int),
        Expected("byte_to_long", TOKEN_long),
        Expected("byte_to_ssize_t", TOKEN_ssize_t),
        Expected("short_to_byte", TOKEN_byte),
        Expected("short_to_int", TOKEN_int),
        Expected("short_to_long", TOKEN_long),
        Expected("short_to_ssize_t", TOKEN_ssize_t),
        Expected("int_to_byte", TOKEN_byte),
        Expected("int_to_short", TOKEN_short),
        Expected("int_to_long", TOKEN_long),
        Expected("int_to_ssize_t", TOKEN_ssize_t),
        Expected("long_to_byte", TOKEN_byte),
        Expected("long_to_short", TOKEN_short),
        Expected("long_to_int", TOKEN_int),
        Expected("long_to_ssize_t", TOKEN_ssize_t),
        Expected("ssize_t_to_byte", TOKEN_byte),
        Expected("ssize_t_to_short", TOKEN_short),
        Expected("ssize_t_to_int", TOKEN_int),
        Expected("ssize_t_to_long", TOKEN_long),
        Expected("ubyte_val", TOKEN_ubyte),
        Expected("ushort_val", TOKEN_ushort),
        Expected("uint_val", TOKEN_uint),
        Expected("ulong_val", TOKEN_ulong),
        Expected("size_t_val", TOKEN_size_t),
        Expected("ubyte_to_ushort", TOKEN_ushort),
        Expected("ubyte_to_uint", TOKEN_uint),
        Expected("ubyte_to_ulong", TOKEN_ulong),
        Expected("ubyte_to_size_t", TOKEN_size_t),
        Expected("ushort_to_ubyte", TOKEN_ubyte),
        Expected("ushort_to_uint", TOKEN_uint),
        Expected("ushort_to_ulong", TOKEN_ulong),
        Expected("ushort_to_size_t", TOKEN_size_t),
        Expected("uint_to_ubyte", TOKEN_ubyte),
        Expected("uint_to_ushort", TOKEN_ushort),
        Expected("uint_to_ulong", TOKEN_ulong),
        Expected("uint_to_size_t", TOKEN_size_t),
        Expected("ulong_to_ubyte", TOKEN_ubyte),
        Expected("ulong_to_ushort", TOKEN_ushort),
        Expected("ulong_to_uint", TOKEN_uint),
        Expected("ulong_to_size_t", TOKEN_size_t),
        Expected("size_t_to_ubyte", TOKEN_ubyte),
        Expected("size_t_to_ushort", TOKEN_ushort),
        Expected("size_t_to_int", TOKEN_int),
        Expected("size_t_to_ulong", TOKEN_ulong),
        Expected("main", TOKEN_int),
    ];
    p_context_t context;
    p_context_init(&context, input);
    size_t result = p_parse(&context);
    assert_eq(P_SUCCESS, result);
    PModule * pmod = p_result(&context);
    PModuleItems * pmis = pmod.pModuleItems;
    PFunctionDefinition *[] pfds;
    while (pmis !is null)
    {
        PModuleItem * pmi = pmis.pModuleItem;
        if (pmi is null)
        {
            stderr.writeln("pmi is null!!!?");
            assert(0);
        }
        PFunctionDefinition * pfd = pmi.pFunctionDefinition;
        if (pfd !is null)
        {
            pfds = [pfd] ~ pfds;
        }
        pmis = pmis.pModuleItems;
    }
    assert_eq(51, pfds.length);
    for (size_t i = 0; i < pfds.length; i++)
    {
        if ((expected[i].name != pfds[i].name.pvalue.s) ||
            (expected[i].token != pfds[i].returntype.pType.pTypeBase.pToken1.token))
        {
            stderr.writeln("Index ", i, ": expected ", expected[i].name, "/", expected[i].token, ", got ", pfds[i].name.pvalue.s, "/", pfds[i].returntype.pType.pTypeBase.pToken1.token);
        }
    }
}

@@ -11,74 +11,74 @@ int main()
 assert(p_parse(&context) == P_SUCCESS);
 Start * start = p_result(&context);

-assert_eq(0, start->pT1->pToken->position.row);
+assert_eq(1, start->pT1->pToken->position.row);
-assert_eq(0, start->pT1->pToken->position.col);
+assert_eq(1, start->pT1->pToken->position.col);
-assert_eq(0, start->pT1->pToken->end_position.row);
+assert_eq(1, start->pT1->pToken->end_position.row);
-assert_eq(0, start->pT1->pToken->end_position.col);
+assert_eq(1, start->pT1->pToken->end_position.col);
-assert_eq(0, start->pT1->position.row);
+assert_eq(1, start->pT1->position.row);
-assert_eq(0, start->pT1->position.col);
+assert_eq(1, start->pT1->position.col);
-assert_eq(0, start->pT1->end_position.row);
+assert_eq(1, start->pT1->end_position.row);
-assert_eq(0, start->pT1->end_position.col);
+assert_eq(1, start->pT1->end_position.col);

-assert_eq(0, start->pT2->pToken->position.row);
+assert_eq(1, start->pT2->pToken->position.row);
-assert_eq(1, start->pT2->pToken->position.col);
+assert_eq(2, start->pT2->pToken->position.col);
-assert_eq(0, start->pT2->pToken->end_position.row);
+assert_eq(1, start->pT2->pToken->end_position.row);
-assert_eq(2, start->pT2->pToken->end_position.col);
+assert_eq(3, start->pT2->pToken->end_position.col);
-assert_eq(0, start->pT2->position.row);
+assert_eq(1, start->pT2->position.row);
-assert_eq(1, start->pT2->position.col);
+assert_eq(2, start->pT2->position.col);
-assert_eq(0, start->pT2->end_position.row);
+assert_eq(1, start->pT2->end_position.row);
-assert_eq(2, start->pT2->end_position.col);
+assert_eq(3, start->pT2->end_position.col);

-assert_eq(0, start->pT3->pToken->position.row);
+assert_eq(1, start->pT3->pToken->position.row);
-assert_eq(3, start->pT3->pToken->position.col);
+assert_eq(4, start->pT3->pToken->position.col);
-assert_eq(0, start->pT3->pToken->end_position.row);
+assert_eq(1, start->pT3->pToken->end_position.row);
-assert_eq(5, start->pT3->pToken->end_position.col);
+assert_eq(6, start->pT3->pToken->end_position.col);
-assert_eq(0, start->pT3->position.row);
+assert_eq(1, start->pT3->position.row);
-assert_eq(3, start->pT3->position.col);
+assert_eq(4, start->pT3->position.col);
-assert_eq(0, start->pT3->end_position.row);
+assert_eq(1, start->pT3->end_position.row);
-assert_eq(5, start->pT3->end_position.col);
+assert_eq(6, start->pT3->end_position.col);

-assert_eq(0, start->position.row);
+assert_eq(1, start->position.row);
-assert_eq(0, start->position.col);
+assert_eq(1, start->position.col);
-assert_eq(0, start->end_position.row);
+assert_eq(1, start->end_position.row);
-assert_eq(5, start->end_position.col);
+assert_eq(6, start->end_position.col);

 input = "\n\n bb\nc\ncc\n\n a";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(2, start->pT1->pToken->position.row);
+assert_eq(3, start->pT1->pToken->position.row);
-assert_eq(2, start->pT1->pToken->position.col);
+assert_eq(3, start->pT1->pToken->position.col);
-assert_eq(2, start->pT1->pToken->end_position.row);
+assert_eq(3, start->pT1->pToken->end_position.row);
-assert_eq(3, start->pT1->pToken->end_position.col);
+assert_eq(4, start->pT1->pToken->end_position.col);
-assert_eq(2, start->pT1->position.row);
+assert_eq(3, start->pT1->position.row);
-assert_eq(2, start->pT1->position.col);
+assert_eq(3, start->pT1->position.col);
-assert_eq(2, start->pT1->end_position.row);
+assert_eq(3, start->pT1->end_position.row);
-assert_eq(3, start->pT1->end_position.col);
+assert_eq(4, start->pT1->end_position.col);

-assert_eq(3, start->pT2->pToken->position.row);
+assert_eq(4, start->pT2->pToken->position.row);
-assert_eq(0, start->pT2->pToken->position.col);
+assert_eq(1, start->pT2->pToken->position.col);
-assert_eq(4, start->pT2->pToken->end_position.row);
+assert_eq(5, start->pT2->pToken->end_position.row);
-assert_eq(1, start->pT2->pToken->end_position.col);
+assert_eq(2, start->pT2->pToken->end_position.col);
-assert_eq(3, start->pT2->position.row);
+assert_eq(4, start->pT2->position.row);
-assert_eq(0, start->pT2->position.col);
+assert_eq(1, start->pT2->position.col);
-assert_eq(4, start->pT2->end_position.row);
+assert_eq(5, start->pT2->end_position.row);
-assert_eq(1, start->pT2->end_position.col);
+assert_eq(2, start->pT2->end_position.col);

-assert_eq(6, start->pT3->pToken->position.row);
+assert_eq(7, start->pT3->pToken->position.row);
-assert_eq(5, start->pT3->pToken->position.col);
+assert_eq(6, start->pT3->pToken->position.col);
-assert_eq(6, start->pT3->pToken->end_position.row);
+assert_eq(7, start->pT3->pToken->end_position.row);
-assert_eq(5, start->pT3->pToken->end_position.col);
+assert_eq(6, start->pT3->pToken->end_position.col);
-assert_eq(6, start->pT3->position.row);
+assert_eq(7, start->pT3->position.row);
-assert_eq(5, start->pT3->position.col);
+assert_eq(6, start->pT3->position.col);
-assert_eq(6, start->pT3->end_position.row);
+assert_eq(7, start->pT3->end_position.row);
-assert_eq(5, start->pT3->end_position.col);
+assert_eq(6, start->pT3->end_position.col);

-assert_eq(2, start->position.row);
+assert_eq(3, start->position.row);
-assert_eq(2, start->position.col);
+assert_eq(3, start->position.col);
-assert_eq(6, start->end_position.row);
+assert_eq(7, start->end_position.row);
-assert_eq(5, start->end_position.col);
+assert_eq(6, start->end_position.col);

 return 0;
 }

@@ -15,72 +15,72 @@ unittest
 assert(p_parse(&context) == P_SUCCESS);
 Start * start = p_result(&context);

-assert_eq(0, start.pT1.pToken.position.row);
+assert_eq(1, start.pT1.pToken.position.row);
-assert_eq(0, start.pT1.pToken.position.col);
+assert_eq(1, start.pT1.pToken.position.col);
-assert_eq(0, start.pT1.pToken.end_position.row);
+assert_eq(1, start.pT1.pToken.end_position.row);
-assert_eq(0, start.pT1.pToken.end_position.col);
+assert_eq(1, start.pT1.pToken.end_position.col);
-assert_eq(0, start.pT1.position.row);
+assert_eq(1, start.pT1.position.row);
-assert_eq(0, start.pT1.position.col);
+assert_eq(1, start.pT1.position.col);
-assert_eq(0, start.pT1.end_position.row);
+assert_eq(1, start.pT1.end_position.row);
-assert_eq(0, start.pT1.end_position.col);
+assert_eq(1, start.pT1.end_position.col);

-assert_eq(0, start.pT2.pToken.position.row);
+assert_eq(1, start.pT2.pToken.position.row);
-assert_eq(1, start.pT2.pToken.position.col);
+assert_eq(2, start.pT2.pToken.position.col);
-assert_eq(0, start.pT2.pToken.end_position.row);
+assert_eq(1, start.pT2.pToken.end_position.row);
-assert_eq(2, start.pT2.pToken.end_position.col);
+assert_eq(3, start.pT2.pToken.end_position.col);
-assert_eq(0, start.pT2.position.row);
+assert_eq(1, start.pT2.position.row);
-assert_eq(1, start.pT2.position.col);
+assert_eq(2, start.pT2.position.col);
-assert_eq(0, start.pT2.end_position.row);
+assert_eq(1, start.pT2.end_position.row);
-assert_eq(2, start.pT2.end_position.col);
+assert_eq(3, start.pT2.end_position.col);

-assert_eq(0, start.pT3.pToken.position.row);
+assert_eq(1, start.pT3.pToken.position.row);
-assert_eq(3, start.pT3.pToken.position.col);
+assert_eq(4, start.pT3.pToken.position.col);
-assert_eq(0, start.pT3.pToken.end_position.row);
+assert_eq(1, start.pT3.pToken.end_position.row);
-assert_eq(5, start.pT3.pToken.end_position.col);
+assert_eq(6, start.pT3.pToken.end_position.col);
-assert_eq(0, start.pT3.position.row);
+assert_eq(1, start.pT3.position.row);
-assert_eq(3, start.pT3.position.col);
+assert_eq(4, start.pT3.position.col);
-assert_eq(0, start.pT3.end_position.row);
+assert_eq(1, start.pT3.end_position.row);
-assert_eq(5, start.pT3.end_position.col);
+assert_eq(6, start.pT3.end_position.col);

-assert_eq(0, start.position.row);
+assert_eq(1, start.position.row);
-assert_eq(0, start.position.col);
+assert_eq(1, start.position.col);
-assert_eq(0, start.end_position.row);
+assert_eq(1, start.end_position.row);
-assert_eq(5, start.end_position.col);
+assert_eq(6, start.end_position.col);

 input = "\n\n bb\nc\ncc\n\n a";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_SUCCESS);
 start = p_result(&context);

-assert_eq(2, start.pT1.pToken.position.row);
+assert_eq(3, start.pT1.pToken.position.row);
-assert_eq(2, start.pT1.pToken.position.col);
+assert_eq(3, start.pT1.pToken.position.col);
-assert_eq(2, start.pT1.pToken.end_position.row);
+assert_eq(3, start.pT1.pToken.end_position.row);
-assert_eq(3, start.pT1.pToken.end_position.col);
+assert_eq(4, start.pT1.pToken.end_position.col);
-assert_eq(2, start.pT1.position.row);
+assert_eq(3, start.pT1.position.row);
-assert_eq(2, start.pT1.position.col);
+assert_eq(3, start.pT1.position.col);
-assert_eq(2, start.pT1.end_position.row);
+assert_eq(3, start.pT1.end_position.row);
-assert_eq(3, start.pT1.end_position.col);
+assert_eq(4, start.pT1.end_position.col);

-assert_eq(3, start.pT2.pToken.position.row);
+assert_eq(4, start.pT2.pToken.position.row);
-assert_eq(0, start.pT2.pToken.position.col);
+assert_eq(1, start.pT2.pToken.position.col);
-assert_eq(4, start.pT2.pToken.end_position.row);
+assert_eq(5, start.pT2.pToken.end_position.row);
-assert_eq(1, start.pT2.pToken.end_position.col);
+assert_eq(2, start.pT2.pToken.end_position.col);
-assert_eq(3, start.pT2.position.row);
+assert_eq(4, start.pT2.position.row);
-assert_eq(0, start.pT2.position.col);
+assert_eq(1, start.pT2.position.col);
-assert_eq(4, start.pT2.end_position.row);
+assert_eq(5, start.pT2.end_position.row);
-assert_eq(1, start.pT2.end_position.col);
+assert_eq(2, start.pT2.end_position.col);

-assert_eq(6, start.pT3.pToken.position.row);
+assert_eq(7, start.pT3.pToken.position.row);
-assert_eq(5, start.pT3.pToken.position.col);
+assert_eq(6, start.pT3.pToken.position.col);
-assert_eq(6, start.pT3.pToken.end_position.row);
+assert_eq(7, start.pT3.pToken.end_position.row);
-assert_eq(5, start.pT3.pToken.end_position.col);
+assert_eq(6, start.pT3.pToken.end_position.col);
-assert_eq(6, start.pT3.position.row);
+assert_eq(7, start.pT3.position.row);
-assert_eq(5, start.pT3.position.col);
+assert_eq(6, start.pT3.position.col);
-assert_eq(6, start.pT3.end_position.row);
+assert_eq(7, start.pT3.end_position.row);
-assert_eq(5, start.pT3.end_position.col);
+assert_eq(6, start.pT3.end_position.col);

-assert_eq(2, start.position.row);
+assert_eq(3, start.position.row);
-assert_eq(2, start.position.col);
+assert_eq(3, start.position.col);
-assert_eq(6, start.end_position.row);
+assert_eq(7, start.end_position.row);
-assert_eq(5, start.end_position.col);
+assert_eq(6, start.end_position.col);
 }

@@ -12,28 +12,28 @@ int main()
 input = "a\n123\na a";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context).row == 2);
+assert(p_position(&context).row == 3);
-assert(p_position(&context).col == 3);
+assert(p_position(&context).col == 4);
 assert(p_token(&context) == TOKEN_a);

 input = "12";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context).row == 0);
+assert(p_position(&context).row == 1);
-assert(p_position(&context).col == 0);
+assert(p_position(&context).col == 1);
 assert(p_token(&context) == TOKEN_num);

 input = "a 12\n\nab";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_INPUT);
-assert(p_position(&context).row == 2);
+assert(p_position(&context).row == 3);
-assert(p_position(&context).col == 1);
+assert(p_position(&context).col == 2);

 input = "a 12\n\na\n\n77\na \xAA";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_DECODE_ERROR);
-assert(p_position(&context).row == 5);
+assert(p_position(&context).row == 6);
-assert(p_position(&context).col == 4);
+assert(p_position(&context).col == 5);

 assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
 assert(strcmp(p_token_names[TOKEN_num], "num") == 0);

@@ -16,24 +16,24 @@ unittest
 input = "a\n123\na a";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context) == p_position_t(2, 3));
+assert(p_position(&context) == p_position_t(3, 4));
 assert(p_token(&context) == TOKEN_a);

 input = "12";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context) == p_position_t(0, 0));
+assert(p_position(&context) == p_position_t(1, 1));
 assert(p_token(&context) == TOKEN_num);

 input = "a 12\n\nab";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_INPUT);
-assert(p_position(&context) == p_position_t(2, 1));
+assert(p_position(&context) == p_position_t(3, 2));

 input = "a 12\n\na\n\n77\na \xAA";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_DECODE_ERROR);
-assert(p_position(&context) == p_position_t(5, 4));
+assert(p_position(&context) == p_position_t(6, 5));

 assert(p_token_names[TOKEN_a] == "a");
 assert(p_token_names[TOKEN_num] == "num");
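Both hunks above adjust the expected error coordinates to the same 1-based convention. A short sketch of the error-reporting pattern these tests exercise, using only the `p_parse` return codes, `p_position()`, `p_token()`, and `p_token_names[]` seen above; the header name and diagnostic wording are illustrative assumptions:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "testparser.h" /* assumed name of the generated parser header */

static int parse_or_report(char const * input)
{
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    size_t result = p_parse(&context);
    if (result == P_SUCCESS)
        return 0;
    /* p_position() now yields a 1-based row/col, printable as-is. */
    p_position_t pos = p_position(&context);
    if (result == P_UNEXPECTED_TOKEN)
        fprintf(stderr, "%u:%u: unexpected token %s\n",
                (unsigned)pos.row, (unsigned)pos.col,
                p_token_names[p_token(&context)]);
    else
        fprintf(stderr, "%u:%u: lexical or decode error\n",
                (unsigned)pos.row, (unsigned)pos.col);
    return 1;
}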

@@ -41,68 +41,68 @@ int main()
 p_context_t context;
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 0u);
+assert(token_info.position.row == 1u);
-assert(token_info.position.col == 0u);
+assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 0u);
+assert(token_info.end_position.col == 1u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 0u);
+assert(token_info.position.row == 1u);
-assert(token_info.position.col == 2u);
+assert(token_info.position.col == 3u);
-assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 2u);
+assert(token_info.end_position.col == 3u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 0u);
+assert(token_info.position.row == 1u);
-assert(token_info.position.col == 4u);
+assert(token_info.position.col == 5u);
-assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 4u);
+assert(token_info.end_position.col == 5u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 0u);
+assert(token_info.position.row == 1u);
-assert(token_info.position.col == 6u);
+assert(token_info.position.col == 7u);
-assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 6u);
+assert(token_info.end_position.col == 7u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_times);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
+assert(token_info.position.row == 2u);
-assert(token_info.position.col == 0u);
+assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 2u);
+assert(token_info.end_position.col == 3u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
+assert(token_info.position.row == 2u);
-assert(token_info.position.col == 4u);
+assert(token_info.position.col == 5u);
-assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 4u);
+assert(token_info.end_position.col == 5u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
+assert(token_info.position.row == 2u);
-assert(token_info.position.col == 6u);
+assert(token_info.position.col == 7u);
-assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 8u);
+assert(token_info.end_position.col == 9u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
+assert(token_info.position.row == 2u);
-assert(token_info.position.col == 9u);
+assert(token_info.position.col == 10u);
-assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 9u);
+assert(token_info.end_position.col == 10u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);

 p_context_init(&context, (uint8_t const *)"", 0u);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 0u);
+assert(token_info.position.row == 1u);
-assert(token_info.position.col == 0u);
+assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 0u);
+assert(token_info.end_position.col == 1u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);

@@ -47,23 +47,23 @@ unittest
 p_context_t context;
 p_context_init(&context, input);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 2), p_position_t(0, 2), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(1, 3), p_position_t(1, 3), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 4), p_position_t(0, 4), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 5), p_position_t(1, 5), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 6), p_position_t(0, 6), 1, TOKEN_times));
+assert(token_info == p_token_info_t(p_position_t(1, 7), p_position_t(1, 7), 1, TOKEN_times));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 0), p_position_t(1, 2), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(2, 1), p_position_t(2, 3), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 4), p_position_t(1, 4), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(2, 5), p_position_t(2, 5), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 6), p_position_t(1, 8), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(2, 7), p_position_t(2, 9), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 9), p_position_t(1, 9), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(2, 10), p_position_t(2, 10), 0, TOKEN___EOF));

 p_context_init(&context, "");
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 0, TOKEN___EOF));
 }
spec/test_lexer_multiple_modes.c (Normal file, 20 lines)
@@ -0,0 +1,20 @@
+#include "testparser.h"
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+
+int main()
+{
+    char const * input = "abc.def";
+    p_context_t context;
+    p_context_init(&context, (uint8_t const *)input, strlen(input));
+    assert(p_parse(&context) == P_SUCCESS);
+    printf("pass1\n");
+
+    input = "abc . abc";
+    p_context_init(&context, (uint8_t const *)input, strlen(input));
+    assert(p_parse(&context) == P_SUCCESS);
+    printf("pass2\n");
+
+    return 0;
+}
spec/test_lexer_multiple_modes.d (Normal file, 21 lines)
@@ -0,0 +1,21 @@
+import testparser;
+import std.stdio;
+
+int main()
+{
+    return 0;
+}
+
+unittest
+{
+    string input = `abc.def`;
+    p_context_t context;
+    p_context_init(&context, input);
+    assert(p_parse(&context) == P_SUCCESS);
+    writeln("pass1");
+
+    input = `abc . abc`;
+    p_context_init(&context, input);
+    assert(p_parse(&context) == P_SUCCESS);
+    writeln("pass2");
+}
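Both new tests follow the same pattern: initialize a context over a byte buffer, parse, and assert success, once for each input the multi-mode lexer must accept. Where more inputs accumulate, the pattern table-izes naturally; a minimal sketch (the inputs array and loop are illustrative, not part of the committed tests):

#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
    /* The two inputs from the committed test, driven in a loop. */
    char const * inputs[] = {"abc.def", "abc . abc"};
    for (size_t i = 0u; i < sizeof(inputs) / sizeof(inputs[0]); i++)
    {
        p_context_t context;
        p_context_init(&context, (uint8_t const *)inputs[i], strlen(inputs[i]));
        assert(p_parse(&context) == P_SUCCESS);
    }
    return 0;
}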
spec/test_named_optional_rule_component_ast.c (Normal file, 45 lines)
@@ -0,0 +1,45 @@
+#include "testparser.h"
+#include <assert.h>
+#include <string.h>
+#include "testutils.h"
+
+int main()
+{
+    char const * input = "b";
+    p_context_t context;
+    p_context_init(&context, (uint8_t const *)input, strlen(input));
+    assert(p_parse(&context) == P_SUCCESS);
+    Start * start = p_result(&context);
+    assert(start->a == NULL);
+    assert(start->pToken2 != NULL);
+    assert_eq(TOKEN_b, start->pToken2->token);
+    assert(start->pR3 == NULL);
+    assert(start->pR == NULL);
+    assert(start->r == NULL);
+
+    input = "abcd";
+    p_context_init(&context, (uint8_t const *)input, strlen(input));
+    assert(p_parse(&context) == P_SUCCESS);
+    start = p_result(&context);
+    assert(start->a != NULL);
+    assert_eq(TOKEN_a, start->pToken1->token);
+    assert(start->pToken2 != NULL);
+    assert(start->pR3 != NULL);
+    assert(start->pR != NULL);
+    assert(start->r != NULL);
+    assert(start->pR == start->pR3);
+    assert(start->pR == start->r);
+    assert_eq(TOKEN_c, start->pR->pToken1->token);
+
+    input = "bdc";
+    p_context_init(&context, (uint8_t const *)input, strlen(input));
+    assert(p_parse(&context) == P_SUCCESS);
+    start = p_result(&context);
+    assert(start->a == NULL);
+    assert(start->pToken2 != NULL);
+    assert(start->r != NULL);
+    assert_eq(TOKEN_d, start->pR->pToken1->token);
+
+    return 0;
+}
+
spec/test_named_optional_rule_component_ast.d (Normal file, 46 lines)
@@ -0,0 +1,46 @@
+import testparser;
+import std.stdio;
+import testutils;
+
+int main()
+{
+    return 0;
+}
+
+unittest
+{
+    string input = "b";
+    p_context_t context;
+    p_context_init(&context, input);
+    assert(p_parse(&context) == P_SUCCESS);
+    Start * start = p_result(&context);
+    assert(start.pToken1 is null);
+    assert(start.pToken2 !is null);
+    assert_eq(TOKEN_b, start.pToken2.token);
+    assert(start.pR3 is null);
+    assert(start.pR is null);
+    assert(start.r is null);
+
+    input = "abcd";
+    p_context_init(&context, input);
+    assert(p_parse(&context) == P_SUCCESS);
+    start = p_result(&context);
+    assert(start.pToken1 != null);
+    assert_eq(TOKEN_a, start.pToken1.token);
+    assert(start.pToken2 != null);
+    assert(start.pR3 != null);
+    assert(start.pR != null);
+    assert(start.r != null);
+    assert(start.pR == start.pR3);
+    assert(start.pR == start.r);
+    assert_eq(TOKEN_c, start.pR.pToken1.token);
+
+    input = "bdc";
+    p_context_init(&context, input);
+    assert(p_parse(&context) == P_SUCCESS);
+    start = p_result(&context);
+    assert(start.pToken1 is null);
+    assert(start.pToken2 !is null);
+    assert(start.pR !is null);
+    assert_eq(TOKEN_d, start.pR.pToken1.token);
+}
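These two tests pin down the aliasing behavior of named optional rule components in the generated AST: the fields pR, pR3, and r all point at the same node when the optional component is present, and are all null when it is omitted. A minimal sketch of consumer code that follows from the assertions above (the describe_start helper is hypothetical; the field names are taken directly from the tests):

#include "testparser.h"
#include <stdio.h>

/* Hypothetical helper: inspect the AST fields the tests above assert on.
 * Since pR, pR3, and r alias the same node, checking any one of them
 * tells you whether the optional R component was parsed. */
static void describe_start(Start const * start)
{
    if (start->a != NULL)
    {
        printf("optional 'a' component present\n");
    }
    if (start->r != NULL)
    {
        /* start->r == start->pR == start->pR3 per the assertions above. */
        printf("optional R component present, first token id %u\n",
               (unsigned)start->pR->pToken1->token);
    }
}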
@@ -8,8 +8,8 @@ int main()
     p_context_t context;
     p_context_init(&context, (uint8_t const *)input, strlen(input));
     assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-    assert(p_position(&context).row == 0);
-    assert(p_position(&context).col == 1);
+    assert(p_position(&context).row == 1);
+    assert(p_position(&context).col == 2);
     assert(context.token == TOKEN___EOF);
 
     input = "a b";
@@ -12,7 +12,7 @@ unittest
     p_context_t context;
     p_context_init(&context, input);
     assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-    assert(p_position(&context) == p_position_t(0, 1));
+    assert(p_position(&context) == p_position_t(1, 2));
     assert(context.token == TOKEN___EOF);
 
     input = "a b";
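The final two hunks make the position reported for an unexpected token 1-based as well. A sketch of caller-side error handling using only the entry points visible in these diffs (p_context_init, p_parse, p_position, context.token); the parse_or_report wrapper and its message format are illustrative:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

/* Hypothetical wrapper: parse a string and report failures with the
 * now 1-based position from p_position(). */
static int parse_or_report(char const * input)
{
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) != P_SUCCESS)
    {
        /* p_position() points at the offending token; context.token
         * identifies it (TOKEN___EOF when the input ended early). */
        fprintf(stderr, "%s: unexpected token at %u:%u\n", input,
                (unsigned)p_position(&context).row,
                (unsigned)p_position(&context).col);
        return 1;
    }
    return 0;
}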