Compare commits

..

73 Commits

SHA1 Message Date
c24f323ff0 v1.5.1 2024-07-26 22:30:48 -04:00
fec2c28693 Only calculate lookahead tokens when needed - #28
Lookahead tokens are only needed if either:
(1) There is more than one rule that could be reduced in a given parser
state, or
(2) There are shift actions for a state and at least one rule that could
be reduced in the same state (to warn about shift/reduce conflicts).
2024-07-26 22:08:25 -04:00
61339aeae9 Avoid recalculating reduce_rules - #28 2024-07-26 21:36:41 -04:00
95b3dc6550 Cache ItemSet#next_symbols - #28 2024-07-25 20:33:15 -04:00
74d94fef72 Do not build ItemSet follow sets - #28 2024-07-25 20:02:00 -04:00
588c5e21c7 Cache ItemSet#leading_item_sets return values - #28 2024-07-25 10:42:43 -04:00
5f1c306273 Update CLI usage in README 2024-07-22 21:35:32 -04:00
343e8a7f9e v1.5.0 2024-07-22 21:23:38 -04:00
b3a134bf8d Update vim syntax to highlight "?" and field alias names 2024-07-22 20:39:59 -04:00
4a71dc74fb Update CHANGELOG for v1.5.0 2024-07-22 20:26:04 -04:00
a7348be95d Add rule field aliases - #24 2024-07-22 20:16:52 -04:00
9746b3f2bf Document position tracking fields in user guide - #27 2024-07-21 14:04:51 -04:00
c5b8fc28bd Move INVALID_POSITION from header to C source - #27 2024-07-21 13:39:34 -04:00
092fce61eb Test position validity for empty matching rules - #27 2024-07-21 13:39:30 -04:00
e647248e34 Track start and end position of rules in AST nodes - #27 2024-07-19 15:37:37 -04:00
f4ae1b8601 Add position fields to AST nodes (not populated yet) - #27 2024-07-19 14:34:50 -04:00
eae2e17f41 Test tracking token end positions when the token spans a newline - #27 2024-07-18 12:09:26 -04:00
87d6d29d60 Store token end position - #27 2024-07-18 12:03:44 -04:00
3aced70356 Show line numbers of rules upon conflict - close #23 2024-07-14 20:52:52 -04:00
2dd89445fc Add command line switch to output warnings to stderr - close #26 2024-07-14 15:36:07 -04:00
4ae5ab79b3 Warn on shift/reduce conflicts 2024-07-13 21:35:53 -04:00
69cc8fa67d Always compute lookahead tokens for reduce rules
Even if they won't be needed for the generated parser, they'll be useful
to detect shift/reduce conflicts.
2024-07-13 21:01:44 -04:00
7f3eb8f315 Calculate follow token set for an ItemSet 2024-07-13 20:48:28 -04:00
d76e12fea1 Rename "following" to "next" - #25
The term "following" could potentially imply an association with the
"follow set", however it was used in a non-closed manner.
2024-07-08 10:14:09 -04:00
911e9505b7 Track token position in AST Token node 2024-05-27 22:10:05 -04:00
aaeb0c4db1 Remove leftover TODO from earlier restructuring 2024-05-27 20:44:42 -04:00
fd89c5c6b3 Add Vim syntax highlighting files for Propane 2024-05-26 14:49:30 -04:00
1468946735 v1.4.0 2024-05-11 11:46:28 -04:00
2bccf3303e Update CHANGELOG 2024-05-09 17:38:18 -04:00
0d1ee74ca6 Give a better error message when a referenced ptype has not been declared 2024-05-09 17:35:27 -04:00
985b180f62 Update CHANGELOG 2024-05-09 11:56:44 -04:00
f3e4941ad8 Allow rule terms to be marked as optional 2024-05-09 11:56:13 -04:00
494afb7307 Allow specifying the start rule name 2024-05-05 12:39:00 -04:00
508dabe760 Update CHANGELOG for v1.4.0 2024-05-04 21:49:13 -04:00
153f9d28f8 Allow user to specify AST node prefix or suffix
Add ast_prefix and ast_suffix grammar statements.
2024-05-04 21:49:13 -04:00
d0f542cbd7 v1.3.0 2024-04-23 00:31:56 -04:00
786c78b635 Update CHANGELOG for v1.3.0 2024-04-23 00:21:28 -04:00
f0bd8d8663 Add documentation for AST generation mode - close #22 2024-04-23 00:15:19 -04:00
c7a18ef821 Add AST node field name with no suffix when unique - #22 2024-04-22 21:50:26 -04:00
cb06a56f81 Add AST generation - #22 2024-04-22 20:51:27 -04:00
2b28ef622d Add specs to fully cover cli.rb 2024-04-06 14:37:15 -04:00
19c32b58dc Fix README example grammar 2024-04-06 14:16:27 -04:00
3a8dcac55f v1.2.0 2024-04-02 21:42:33 -04:00
632ab2fe6f Update CHANGELOG for v1.2.0 2024-04-02 21:42:18 -04:00
3eaf0d3d49 allow one line user code blocks - close #21 2024-04-02 17:44:15 -04:00
918dc7b2bb fix generator hang when state transition cycle is present - close #20 2024-04-02 14:27:08 -04:00
5b2cbe53e6 Add backslash escape codes - close #19 2024-03-29 16:45:54 -04:00
1d1590dfda Add API to access unexpected token found - close #18 2024-03-29 15:58:56 -04:00
1c91dcd298 Add token_names API - close #17 2024-03-29 15:02:01 -04:00
5dfd62b756 Add D example to user guide for p_context_init() - close #16 2024-03-29 13:52:16 -04:00
fad7f4fb36 Allow user termination from lexer code blocks - close #15 2024-03-29 13:45:08 -04:00
d55c5e0080 Update CHANGELOG for v1.1.0 2024-01-07 17:48:47 -05:00
6c847c05b1 v1.1.0 2024-01-07 17:43:06 -05:00
a5800575c8 Document generated API in user guide - close #14 2024-01-05 20:47:22 -05:00
24af3590d1 Allow user to terminate the parser - close #13 2024-01-03 22:32:10 -05:00
92c76b74c8 Update license year 2024-01-03 20:05:46 -05:00
a032ac027c Compilation warning for unreachable statement - close #12 2023-10-21 16:04:15 -04:00
af5edaa762 Bump version to 1.0.0 2023-09-25 20:02:02 -04:00
81f15245f2 Update README with some user guide contents 2023-09-25 19:57:49 -04:00
d8aa72d516 Add all task 2023-09-25 19:51:47 -04:00
aabc8a5af5 Remove propane.sh 2023-09-25 19:51:41 -04:00
705e5d8ba9 Remove need for dspec directory 2023-09-25 19:50:13 -04:00
f152cd9da1 Turn on simplecov to measure code coverage 2023-09-25 19:38:55 -04:00
9a9315f7f9 Include asset file contents in distributable script 2023-09-25 19:12:37 -04:00
197f126109 Add Assets module to abstract accessing asset files 2023-09-25 16:16:20 -04:00
db6dc0e099 Add dspec task to test distributable script 2023-09-24 18:54:19 -04:00
547dbd3850 Add user guide section for parser rules 2023-09-24 18:45:01 -04:00
aff0102536 Document the prefix statement 2023-09-24 16:14:59 -04:00
1328a718ac Add user guide content for lexer 2023-09-24 16:07:43 -04:00
562c24ce9e Remove grammar "class" statement 2023-09-24 13:23:44 -04:00
c824ae9e5c User guide: document specifying parser value types 2023-08-26 21:15:33 -04:00
140b2d8350 User guide: document user code blocks 2023-08-24 09:40:08 -04:00
3c8794058f Add C backend - close #4 2023-08-24 09:40:01 -04:00
66 changed files with 4334 additions and 401 deletions

CHANGELOG.md

@@ -1,3 +1,62 @@
+## v1.5.1
+### Improvements
+- Improve performance (#28)
+
+## v1.5.0
+### New Features
+- Track start and end text positions for tokens and rules in AST node structures (#27)
+- Add warnings for shift/reduce conflicts to log file (#25)
+- Add -w command line switch to treat warnings as errors and output to stderr (#26)
+- Add rule field aliases (#24)
+### Improvements
+- Show line numbers of rules on conflict (#23)
+
+## v1.4.0
+### New Features
+- Allow user to specify AST node name prefix or suffix
+- Allow specifying the start rule name
+- Allow rule terms to be marked as optional
+### Improvements
+- Give a better error message when a referenced ptype has not been declared
+
+## v1.3.0
+### New Features
+- Add AST generation (#22)
+
+## v1.2.0
+### New Features
+- Allow one line user code blocks (#21)
+- Add backslash escape codes (#19)
+- Add API to access unexpected token found (#18)
+- Add token_names API (#17)
+- Add D example to user guide for p_context_init() (#16)
+- Allow user termination from lexer code blocks (#15)
+### Fixes
+- Fix generator hang when state transition cycle is present (#20)
+
+## v1.1.0
+### New Features
+- Add user parser terminations (#13)
+- Document generated parser API in user guide (#14)
+
 ## v1.0.0
 - Initial release

Gemfile

@ -5,3 +5,4 @@ gem "rspec"
gem "rdoc" gem "rdoc"
gem "redcarpet" gem "redcarpet"
gem "syntax" gem "syntax"
gem "simplecov"

Gemfile.lock

@@ -2,6 +2,7 @@ GEM
   remote: https://rubygems.org/
   specs:
     diff-lcs (1.5.0)
+    docile (1.4.0)
     psych (5.1.0)
       stringio
     rake (13.0.6)
@@ -21,6 +22,12 @@ GEM
       diff-lcs (>= 1.2.0, < 2.0)
       rspec-support (~> 3.12.0)
     rspec-support (3.12.1)
+    simplecov (0.22.0)
+      docile (~> 1.1)
+      simplecov-html (~> 0.11)
+      simplecov_json_formatter (~> 0.1)
+    simplecov-html (0.12.3)
+    simplecov_json_formatter (0.1.4)
     stringio (3.0.7)
     syntax (1.2.2)
@@ -32,6 +39,7 @@ DEPENDENCIES
   rdoc
   redcarpet
   rspec
+  simplecov
   syntax

 BUNDLED WITH

LICENSE

@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2010-2023 Josh Holtrop
+Copyright (c) 2010-2024 Josh Holtrop

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md

@@ -1,21 +1,104 @@
 # The Propane Parser Generator

-Propane is an LR Parser Generator (LPG) which:
+Propane is a LALR Parser Generator (LPG) which:

 * accepts LR(0), SLR, and LALR grammars
 * generates a built-in lexer to tokenize input
 * supports UTF-8 lexer inputs
-* generates a table-driven parser to parse input in linear time
+* generates a table-driven shift/reduce parser to parse input in linear time
+* targets C or D language outputs
+* optionally supports automatic full AST generation
 * is MIT-licensed
 * is distributable as a standalone Ruby script

 ## Installation

-TODO
+Propane is designed to be distributed as a stand-alone single file script that
+can be copied into and versioned in a project's source tree.
+The only requirement to run Propane is that the system has a Ruby interpreter
+installed.
+The latest release can be downloaded from [https://github.com/holtrop/propane/releases](https://github.com/holtrop/propane/releases).
+Simply copy the `propane` executable script into the desired location within
+the project to be built (typically the root of the repository) and mark it
+executable.

 ## Usage

-TODO: Write usage instructions here
+### Command Line Interface
+
+Propane is typically invoked from the command-line as `./propane`.
+
+```
+Usage: ./propane [options] <input-file> <output-file>
+Options:
+  -h, --help  Show this usage and exit.
+  --log LOG   Write log file. This will show all parser states and their
+              associated shifts and reduces. It can be helpful when
+              debugging a grammar.
+  --version   Show program version and exit.
+  -w          Treat warnings as errors. This option will treat shift/reduce
+              conflicts as fatal errors and will print them to stderr in
+              addition to the log file.
+```
+
+The user must specify the path to a Propane input grammar file and a path to an
+output file.
+The generated source code will be written to the output file.
+If a log file path is specified, Propane will write a log file containing
+detailed information about the parser states and transitions.
+
+### Propane Grammar File
+
+A Propane grammar file provides Propane with the patterns, tokens, grammar
+rules, and user code blocks from which to build the generated lexer and parser.
+
+Example grammar file:
+
+```
+<<
+import std.math;
+>>
+
+# Parser values are unsigned integers.
+ptype ulong;
+
+# A few basic arithmetic operators.
+token plus /\+/;
+token times /\*/;
+token power /\*\*/;
+token integer /\d+/ <<
+    ulong v;
+    foreach (c; match)
+    {
+        v *= 10;
+        v += (c - '0');
+    }
+    $$ = v;
+>>
+token lparen /\(/;
+token rparen /\)/;
+
+# Drop whitespace.
+drop /\s+/;
+
+Start -> E1 << $$ = $1; >>
+E1 -> E2 << $$ = $1; >>
+E1 -> E1 plus E2 << $$ = $1 + $3; >>
+E2 -> E3 << $$ = $1; >>
+E2 -> E2 times E3 << $$ = $1 * $3; >>
+E3 -> E4 << $$ = $1; >>
+E3 -> E3 power E4 <<
+    $$ = pow($1, $3);
+>>
+E4 -> integer << $$ = $1; >>
+E4 -> lparen E1 rparen << $$ = $2; >>
+```
+
+Grammar files can contain comment lines beginning with `#` which are ignored.
+White space in the grammar file is also ignored.
+It is convention to use the extension `.propane` for the Propane grammar file,
+however any file name is accepted by Propane.
+
+See [https://holtrop.github.io/propane/index.html](https://holtrop.github.io/propane/index.html) for the full User Guide.

 ## Development

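For reference, here is a minimal D driver for the README's example grammar above. This is a sketch, not part of the repository: it assumes the grammar was generated with the default `p_` prefix into a `parser.d` module compiled alongside this file (both file names are hypothetical), and that the D variant of `p_context_init()` takes the input as a string (the C declaration later in this diff takes a pointer and length).

```
import std.stdio;

int main()
{
    /* With the grammar above, 2 ** (3 + 4) evaluates to 128. */
    string input = "2 ** (3 + 4)";
    p_context_t context;
    p_context_init(&context, input);
    if (p_parse(&context) != P_SUCCESS)
    {
        /* p_position() reports the current input position (0-based). */
        stderr.writeln("parse error at ", p_position(&context).row + 1,
                       ":", p_position(&context).col + 1);
        return 1;
    }
    writeln(p_result(&context)); /* prints 128 */
    return 0;
}
```

Compiled together with the generated parser (e.g. `dmd main.d parser.d`), this prints `128`.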
Rakefile

@@ -1,5 +1,8 @@
+require "rake/clean"
 require "rspec/core/rake_task"

+CLEAN.include %w[spec/run gen .yardoc yard coverage dist]
+
 task :build_dist do
   sh "ruby rb/build_dist.rb"
 end
@@ -10,9 +13,20 @@ RSpec::Core::RakeTask.new(:spec, :example_pattern) do |task, args|
   end
 end

+# dspec task is useful to test the distributable release script, but is not
+# useful for coverage information.
+desc "Dist Specs"
+task :dspec, [:example_string] => :build_dist do |task, args|
+  ENV["dist_specs"] = "1"
+  Rake::Task["spec"].execute(args)
+  ENV.delete("dist_specs")
+end
+
 task :default => :spec

 desc "Build user guide"
 task :user_guide do
   system("ruby", "-Ilib", "rb/gen_user_guide.rb")
 end
+
+task :all => [:spec, :dspec, :user_guide]

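With these Rakefile changes, `rake dspec` builds and tests the distributable script, and the new `rake all` task chains the specs, dist specs, and user guide build in one invocation.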
assets/parser.c.erb

@@ -3,13 +3,22 @@
 #include <stdlib.h>
 #include <string.h>

+/**************************************************************************
+ * Public data
+ *************************************************************************/
+
+/** Token names. */
+const char * <%= @grammar.prefix %>token_names[] = {
+<% @grammar.tokens.each_with_index do |token, index| %>
+    "<%= token.name %>",
+<% end %>
+};
+
 /**************************************************************************
  * User code blocks
  *************************************************************************/

-<% @grammar.code_blocks.each do |code| %>
-<%= code %>
-<% end %>
+<%= @grammar.code_blocks.fetch("", "") %>

 /**************************************************************************
  * Private types
@@ -23,6 +32,7 @@
 #define P_UNEXPECTED_TOKEN 3u
 #define P_DROP 4u
 #define P_EOF 5u
+#define P_USER_TERMINATED 6u
 <% end %>

 /* An invalid ID value. */
@@ -216,7 +226,10 @@ typedef struct
     /** Number of bytes of input text used to match. */
     size_t length;

-    /** Input text position delta. */
+    /** Input text position delta to end of token. */
+    <%= @grammar.prefix %>position_t end_delta_position;
+
+    /** Input text position delta to next code point after token end. */
     <%= @grammar.prefix %>position_t delta_position;

     /** Accepting lexer state from the match. */
@@ -310,9 +323,12 @@ static lexer_state_id_t check_lexer_transition(uint32_t current_state, uint32_t
  *
  * @param context
  *     Lexer/parser context structure.
- * @param[out] out_token_info
- *     The lexed token information is stored here if the return value is
- *     P_SUCCESS.
+ * @param[out] out_match_info
+ *     The longest match information is stored here if the return value is
+ *     P_SUCCESS or P_DECODE_ERROR.
+ * @param[out] out_unexpected_input_length
+ *     The unexpected input length is stored here if the return value is
+ *     P_UNEXPECTED_INPUT.
  *
  * @reval P_SUCCESS
  *     A token was successfully lexed.
@@ -345,6 +361,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
             if (transition_state != INVALID_LEXER_STATE_ID)
             {
                 attempt_match.length += code_point_length;
+                attempt_match.end_delta_position = attempt_match.delta_position;
                 if (code_point == '\n')
                 {
                     attempt_match.delta_position.row++;
@@ -392,7 +409,6 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 /* Valid EOF return. */
                 return P_EOF;
             }
-            break;

         case P_DECODE_ERROR:
             /* If we see a decode error, we may be partially in the middle of
@@ -424,13 +440,14 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
  *     Input text does not match any lexer pattern.
  * @retval P_DROP
  *     A drop pattern was matched so the lexer should continue.
+ * @retval P_USER_TERMINATED
+ *     User code has requested to terminate the lexer.
  */
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
     <%= @grammar.prefix %>token_info_t token_info = {0};
     token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
-    *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
     size_t unexpected_input_length;
     size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@@ -443,6 +460,12 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
             uint8_t const * match = &context->input[context->input_index];
             <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                 match_info.accepting_state->code_id, match, match_info.length, &token_info);
+            /* A TERMINATE_TOKEN_ID return code from lexer_user_code() means
+             * that the user code is requesting to terminate the lexer. */
+            if (user_code_token == TERMINATE_TOKEN_ID)
+            {
+                return P_USER_TERMINATED;
+            }
             /* An invalid token returned from lexer_user_code() means that the
              * user code did not explicitly return a token. So only override
              * the token to return if the user code does explicitly return a
@@ -471,11 +494,22 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
             }
             token_info.token = token_to_accept;
             token_info.length = match_info.length;
+            if (match_info.end_delta_position.row != 0u)
+            {
+                token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
+                token_info.end_position.col = match_info.end_delta_position.col;
+            }
+            else
+            {
+                token_info.end_position.row = token_info.position.row;
+                token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
+            }
             *out_token_info = token_info;
             return P_SUCCESS;

         case P_EOF:
             token_info.token = TOKEN___EOF;
+            token_info.end_position = token_info.position;
             *out_token_info = token_info;
             return P_SUCCESS;

@@ -513,6 +547,8 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
  *     Input text does not match any lexer pattern.
+ * @retval P_USER_TERMINATED
+ *     User code has requested to terminate the lexer.
  */
 size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
@@ -530,6 +566,9 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
  * Parser
  *************************************************************************/

+/** Invalid position value. */
+#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0xFFFFFFFFu, 0xFFFFFFFFu}
+
 /** Reduce ID type. */
 typedef <%= get_type_for(@parser.reduce_table.size) %> reduce_id_t;
@@ -589,6 +628,25 @@ typedef struct
      * reduce action.
      */
     parser_state_id_t n_states;
+
+<% if @grammar.ast %>
+    /**
+     * Map of rule components to rule set child fields.
+     */
+    uint16_t const * rule_set_node_field_index_map;
+
+    /**
+     * Number of rule set AST node fields.
+     */
+    uint16_t rule_set_node_field_array_size;
+
+    /**
+     * Whether this rule was a generated optional rule that matched the
+     * optional target. In this case, propagate the matched target node up
+     * instead of making a new node for this rule.
+     */
+    bool propagate_optional_target;
+<% end %>
 } reduce_t;

 /** Parser state entry. */
@@ -619,19 +677,50 @@ typedef struct
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;
+
+<% if @grammar.ast %>
+    /** AST node. */
+    void * ast_node;
+<% end %>
 } state_value_t;

+/** Common AST node structure. */
+typedef struct
+{
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+    void * fields[];
+} ASTNode;
+
 /** Parser shift table. */
 static const shift_t parser_shift_table[] = {
 <% @parser.shift_table.each do |shift| %>
-    {<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u},
+    {<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u},
 <% end %>
 };

+<% if @grammar.ast %>
+<% @grammar.rules.each do |rule| %>
+<% unless rule.flat_rule_set_node_field_index_map? %>
+const uint16_t r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map[<%= rule.rule_set_node_field_index_map.size %>] = {<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>};
+<% end %>
+<% end %>
+<% end %>
+
 /** Parser reduce table. */
 static const reduce_t parser_reduce_table[] = {
 <% @parser.reduce_table.each do |reduce| %>
-    {<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u},
+    {<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
+<% if @grammar.ast %>
+<% if reduce[:rule].flat_rule_set_node_field_index_map? %>
+    , NULL
+<% else %>
+    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+<% end %>
+    , <%= reduce[:rule].rule_set.ast_fields.size %>
+    , <%= reduce[:propagate_optional_target] %>
+<% end %>
+    },
 <% end %>
 };
@@ -735,17 +824,19 @@ static void state_values_stack_free(state_values_stack_t * stack)
     free(stack->entries);
 }

+<% unless @grammar.ast %>
 /**
  * Execute user code associated with a parser rule.
  *
  * @param rule The ID of the rule.
  *
- * @return Parse value.
+ * @retval P_SUCCESS
+ *     Continue parsing.
+ * @retval P_USER_TERMINATED
+ *     User requested to terminate parsing.
  */
-static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states)
+static size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states, <%= @grammar.prefix %>context_t * context)
 {
-    <%= @grammar.prefix %>value_t _pvalue = {0};
     switch (rule)
     {
 <% @grammar.rules.each do |rule| %>
@@ -758,8 +849,9 @@ static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_value
         default: break;
     }

-    return _pvalue;
+    return P_SUCCESS;
 }
+<% end %>

 /**
  * Check if the parser should shift to a new state.
@@ -821,7 +913,7 @@ static size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token)
  *     can be accessed with <%= @grammar.prefix %>result().
  * @retval P_UNEXPECTED_TOKEN
  *     An unexpected token was encountered that does not match any grammar rule.
- *     The value context->token holds the unexpected token.
+ *     The function p_token(&context) can be used to get the unexpected token.
  * @reval P_DECODE_ERROR
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
@@ -833,7 +925,11 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_values_stack_t statevalues;
     size_t reduced_rule_set = INVALID_ID;
+<% if @grammar.ast %>
+    void * reduced_parser_node;
+<% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
+<% end %>
     state_values_stack_init(&statevalues);
     state_values_stack_push(&statevalues);
     size_t result;
@@ -860,7 +956,11 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
             {
                 /* Successful parse. */
+<% if @grammar.ast %>
+                context->parse_result = (<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)state_values_stack_index(&statevalues, -1)->ast_node;
+<% else %>
                 context->parse_result = state_values_stack_index(&statevalues, -1)->pvalue;
+<% end %>
                 result = P_SUCCESS;
                 break;
             }
@@ -873,15 +973,28 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-                token = INVALID_TOKEN_ID;
+<% if @grammar.ast %>
+                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = malloc(sizeof(<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>));
+                token_ast_node->position = token_info.position;
+                token_ast_node->end_position = token_info.end_position;
+                token_ast_node->token = token;
+                token_ast_node->pvalue = token_info.pvalue;
+                state_values_stack_index(&statevalues, -1)->ast_node = token_ast_node;
+<% else %>
                 state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
+<% end %>
+                token = INVALID_TOKEN_ID;
             }
             else
             {
                 /* We shifted a RuleSet. */
+<% if @grammar.ast %>
+                state_values_stack_index(&statevalues, -1)->ast_node = reduced_parser_node;
+<% else %>
                 state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result = {0};
                 reduced_parser_value = new_parse_result;
+<% end %>
                 reduced_rule_set = INVALID_ID;
             }
             continue;
@@ -891,7 +1004,63 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
-            reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states);
+<% if @grammar.ast %>
+            if (parser_reduce_table[reduce_index].propagate_optional_target)
+            {
+                reduced_parser_node = state_values_stack_index(&statevalues, -1)->ast_node;
+            }
+            else if (parser_reduce_table[reduce_index].n_states > 0)
+            {
+                size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
+                ASTNode * node = (ASTNode *)malloc(sizeof(ASTNode) + n_fields * sizeof(void *));
+                node->position = INVALID_POSITION;
+                node->end_position = INVALID_POSITION;
+                for (size_t i = 0; i < n_fields; i++)
+                {
+                    node->fields[i] = NULL;
+                }
+                if (parser_reduce_table[reduce_index].rule_set_node_field_index_map == NULL)
+                {
+                    for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
+                    {
+                        node->fields[i] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
+                    }
+                }
+                else
+                {
+                    for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
+                    {
+                        node->fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
+                    }
+                }
+                bool position_found = false;
+                for (size_t i = 0; i < n_fields; i++)
+                {
+                    ASTNode * child = (ASTNode *)node->fields[i];
+                    if ((child != NULL) && <%= @grammar.prefix %>position_valid(child->position))
+                    {
+                        if (!position_found)
+                        {
+                            node->position = child->position;
+                            position_found = true;
+                        }
+                        node->end_position = child->end_position;
+                    }
+                }
+                reduced_parser_node = node;
+            }
+            else
+            {
+                reduced_parser_node = NULL;
+            }
+<% else %>
+            <%= @grammar.prefix %>value_t reduced_parser_value2 = {0};
+            if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
+            {
+                return P_USER_TERMINATED;
+            }
+            reduced_parser_value = reduced_parser_value2;
+<% end %>
             reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
             state_values_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
             continue;
@@ -919,9 +1088,17 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
  *
  * @return Parse result value.
  */
+<% if @grammar.ast %>
+<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% else %>
 <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% end %>
 {
+<% if @grammar.ast %>
+    return context->parse_result;
+<% else %>
     return context->parse_result.v_<%= start_rule_type[0] %>;
+<% end %>
 }

 /**
@@ -936,3 +1113,26 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
 {
     return context->text_position;
 }
+
+/**
+ * Get the user terminate code.
+ *
+ * @param context
+ *     Lexer/parser context structure.
+ *
+ * @return User terminate code.
+ */
+size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context)
+{
+    return context->user_terminate_code;
+}
+
+/**
+ * Get the parse token.
+ *
+ * @return Parse token.
+ */
+<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context)
+{
+    return context->token;
+}

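A concrete trace of the end-position bookkeeping added above: lexing a token `ab\ncd` that starts at row 3, column 10 (positions are 0-based), `end_delta_position` finishes as row 1, column 1, so the token's `end_position` becomes row 3 + 1 = 4 and column 1, the absolute position of the final `d` (the newline resets the column, making the delta's column absolute). A token containing no newline instead keeps `end_position.row` equal to its starting row and adds the column delta to the starting column.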
assets/parser.d.erb

@@ -8,13 +8,13 @@
 module <%= @grammar.modulename %>;
 <% end %>

+import core.stdc.stdlib : malloc;
+
 /**************************************************************************
  * User code blocks
  *************************************************************************/

-<% @grammar.code_blocks.each do |code| %>
-<%= code %>
-<% end %>
+<%= @grammar.code_blocks.fetch("", "") %>

 /**************************************************************************
  * Public types
@@ -29,10 +29,11 @@
     <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN,
     <%= @grammar.prefix.upcase %>DROP,
     <%= @grammar.prefix.upcase %>EOF,
+    <%= @grammar.prefix.upcase %>USER_TERMINATED,
 }

 /** Token type. */
-public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.invalid_token_id) %>;
+public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.terminate_token_id) %>;

 /** Token IDs. */
 public enum : <%= @grammar.prefix %>token_t
@@ -44,21 +45,14 @@ public enum : <%= @grammar.prefix %>token_t
 <% end %>
 <% end %>
     INVALID_TOKEN_ID = <%= @grammar.invalid_token_id %>,
+    TERMINATE_TOKEN_ID = <%= @grammar.terminate_token_id %>,
 }

 /** Code point type. */
 public alias <%= @grammar.prefix %>code_point_t = uint;

-/** Parser values type(s). */
-public union <%= @grammar.prefix %>value_t
-{
-<% @grammar.ptypes.each do |name, typestring| %>
-    <%= typestring %> v_<%= name %>;
-<% end %>
-}
-
 /**
- * A structure to keep track of parser position.
+ * A structure to keep track of input position.
  *
  * This is useful for reporting errors, etc...
  */
@@ -69,14 +63,79 @@ public struct <%= @grammar.prefix %>position_t
     /** Input text column (0-based). */
     uint col;

+    /** Invalid position value. */
+    enum INVALID = <%= @grammar.prefix %>position_t(0xFFFF_FFFF, 0xFFFF_FFFF);
+
+    /** Return whether the position is valid. */
+    public @property bool valid()
+    {
+        return row != 0xFFFF_FFFFu;
+    }
 }

+<% if @grammar.ast %>
+/** Parser values type. */
+public alias <%= @grammar.prefix %>value_t = <%= @grammar.ptype %>;
+<% else %>
+/** Parser values type(s). */
+public union <%= @grammar.prefix %>value_t
+{
+<% @grammar.ptypes.each do |name, typestring| %>
+    <%= typestring %> v_<%= name %>;
+<% end %>
+}
+<% end %>
+
+<% if @grammar.ast %>
+/** Common AST node structure. */
+private struct ASTNode
+{
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+    void *[0] fields;
+}
+
+/** AST node types. @{ */
+public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
+{
+    /* ASTNode fields must be present in the same order here. */
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+    <%= @grammar.prefix %>token_t token;
+    <%= @grammar.prefix %>value_t pvalue;
+}
+
+<% @parser.rule_sets.each do |name, rule_set| %>
+<% next if name.start_with?("$") %>
+<% next if rule_set.optional? %>
+public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
+{
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+<% rule_set.ast_fields.each do |fields| %>
+    union
+    {
+<% fields.each do |field_name, type| %>
+        <%= type %> * <%= field_name %>;
+<% end %>
+    }
+<% end %>
+}
+<% end %>
+/** @} */
+<% end %>
+
 /** Lexed token information. */
 public struct <%= @grammar.prefix %>token_info_t
 {
-    /** Text position where the token was found. */
+    /** Text position of first code point in token. */
     <%= @grammar.prefix %>position_t position;

+    /** Text position of last code point in token. */
+    <%= @grammar.prefix %>position_t end_position;
+
     /** Number of input bytes used by the token. */
     size_t length;
@@ -112,10 +171,17 @@ public struct <%= @grammar.prefix %>context_t
     /* Parser context data. */

     /** Parse result value. */
+<% if @grammar.ast %>
+    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
+<% else %>
     <%= @grammar.prefix %>value_t parse_result;
+<% end %>

     /** Unexpected token received. */
     <%= @grammar.prefix %>token_t token;
+
+    /** User terminate code. */
+    size_t user_terminate_code;
 }

 /**************************************************************************
@@ -143,6 +209,7 @@ private enum : size_t
     P_UNEXPECTED_TOKEN,
     P_DROP,
     P_EOF,
+    P_USER_TERMINATED,
 }
 <% end %>
@@ -332,7 +399,10 @@ private struct lexer_match_info_t
     /** Number of bytes of input text used to match. */
     size_t length;

-    /** Input text position delta. */
+    /** Input text position delta to end of token. */
+    <%= @grammar.prefix %>position_t end_delta_position;
+
+    /** Input text position delta to next code point after token end. */
     <%= @grammar.prefix %>position_t delta_position;

     /** Accepting lexer state from the match. */
@@ -424,9 +494,12 @@ private lexer_state_id_t check_lexer_transition(uint current_state, uint code_po
  *
  * @param context
  *     Lexer/parser context structure.
- * @param[out] out_token_info
- *     The lexed token information is stored here if the return value is
- *     P_SUCCESS.
+ * @param[out] out_match_info
+ *     The longest match information is stored here if the return value is
+ *     P_SUCCESS or P_DECODE_ERROR.
+ * @param[out] out_unexpected_input_length
+ *     The unexpected input length is stored here if the return value is
+ *     P_UNEXPECTED_INPUT.
  *
  * @reval P_SUCCESS
  *     A token was successfully lexed.
@@ -457,6 +530,7 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
             if (transition_state != INVALID_LEXER_STATE_ID)
             {
                 attempt_match.length += code_point_length;
+                attempt_match.end_delta_position = attempt_match.delta_position;
                 if (code_point == '\n')
                 {
                     attempt_match.delta_position.row++;
@@ -504,7 +578,6 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 /* Valid EOF return. */
                 return P_EOF;
             }
-            break;

         case P_DECODE_ERROR:
             /* If we see a decode error, we may be partially in the middle of
@@ -536,13 +609,14 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
  *     Input text does not match any lexer pattern.
  * @retval P_DROP
  *     A drop pattern was matched so the lexer should continue.
+ * @retval P_USER_TERMINATED
+ *     User code has requested to terminate the lexer.
  */
 private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
     <%= @grammar.prefix %>token_info_t token_info;
     token_info.position = context.text_position;
     token_info.token = INVALID_TOKEN_ID;
-    *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
     size_t unexpected_input_length;
     size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@@ -555,6 +629,12 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
             string match = context.input[context.input_index..(context.input_index + match_info.length)];
             <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                 match_info.accepting_state.code_id, match, &token_info);
+            /* A TERMINATE_TOKEN_ID return code from lexer_user_code() means
+             * that the user code is requesting to terminate the lexer. */
+            if (user_code_token == TERMINATE_TOKEN_ID)
+            {
+                return P_USER_TERMINATED;
+            }
             /* An invalid token returned from lexer_user_code() means that the
              * user code did not explicitly return a token. So only override
              * the token to return if the user code does explicitly return a
@@ -583,11 +663,22 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
             }
             token_info.token = token_to_accept;
             token_info.length = match_info.length;
+            if (match_info.end_delta_position.row != 0u)
+            {
+                token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
+                token_info.end_position.col = match_info.end_delta_position.col;
+            }
+            else
+            {
+                token_info.end_position.row = token_info.position.row;
+                token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
+            }
             *out_token_info = token_info;
             return P_SUCCESS;

         case P_EOF:
             token_info.token = TOKEN___EOF;
+            token_info.end_position = token_info.position;
             *out_token_info = token_info;
             return P_SUCCESS;

@@ -625,6 +716,8 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
  *     Input text does not match any lexer pattern.
+ * @retval P_USER_TERMINATED
+ *     User code has requested to terminate the lexer.
  */
 public size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
@@ -701,6 +794,25 @@ private struct reduce_t
      * reduce action.
      */
     parser_state_id_t n_states;
+
+<% if @grammar.ast %>
+    /**
+     * Map of rule components to rule set child fields.
+     */
+    immutable(ushort) * rule_set_node_field_index_map;
+
+    /**
+     * Number of rule set AST node fields.
+     */
+    ushort rule_set_node_field_array_size;
+
+    /**
+     * Whether this rule was a generated optional rule that matched the
+     * optional target. In this case, propagate the matched target node up
+     * instead of making a new node for this rule.
+     */
+    bool propagate_optional_target;
+<% end %>
 }

 /** Parser state entry. */
@@ -732,6 +844,11 @@ private struct state_value_t
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;

+<% if @grammar.ast %>
+    /** AST node. */
+    void * ast_node;
+<% end %>
+
     this(size_t state_id)
     {
         this.state_id = state_id;
@@ -741,14 +858,32 @@ private struct state_value_t
 /** Parser shift table. */
 private immutable shift_t[] parser_shift_table = [
 <% @parser.shift_table.each do |shift| %>
-    shift_t(<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u),
+    shift_t(<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u),
 <% end %>
 ];

+<% if @grammar.ast %>
+<% @grammar.rules.each do |rule| %>
+<% unless rule.flat_rule_set_node_field_index_map? %>
+immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map = [<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>];
+<% end %>
+<% end %>
+<% end %>
+
 /** Parser reduce table. */
 private immutable reduce_t[] parser_reduce_table = [
 <% @parser.reduce_table.each do |reduce| %>
-    reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u),
+    reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u
+<% if @grammar.ast %>
+<% if reduce[:rule].flat_rule_set_node_field_index_map? %>
+    , null
+<% else %>
+    , &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0]
+<% end %>
+    , <%= reduce[:rule].rule_set.ast_fields.size %>
+    , <%= reduce[:propagate_optional_target] %>
+<% end %>
+    ),
 <% end %>
 ];
@@ -759,17 +894,19 @@ private immutable parser_state_t[] parser_state_table = [
 <% end %>
 ];

+<% unless @grammar.ast %>
 /**
  * Execute user code associated with a parser rule.
  *
  * @param rule The ID of the rule.
  *
- * @return Parse value.
+ * @retval P_SUCCESS
+ *     Continue parsing.
+ * @retval P_USER_TERMINATED
+ *     User requested to terminate parsing.
  */
-private <%= @grammar.prefix %>value_t parser_user_code(uint rule, state_value_t[] statevalues, uint n_states)
+private size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint rule, state_value_t[] statevalues, uint n_states, <%= @grammar.prefix %>context_t * context)
 {
-    <%= @grammar.prefix %>value_t _pvalue;
     switch (rule)
     {
 <% @grammar.rules.each do |rule| %>
@@ -782,8 +919,9 @@ private <%= @grammar.prefix %>value_t parser_user_code(uint rule, state_value_t[
         default: break;
     }

-    return _pvalue;
+    return P_SUCCESS;
 }
+<% end %>

 /**
  * Check if the parser should shift to a new state.
@@ -845,7 +983,7 @@ private size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token
  *     can be accessed with <%= @grammar.prefix %>result().
  * @retval P_UNEXPECTED_TOKEN
  *     An unexpected token was encountered that does not match any grammar rule.
- *     The value context.token holds the unexpected token.
+ *     The function p_token(&context) can be used to get the unexpected token.
  * @reval P_DECODE_ERROR
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
@@ -857,7 +995,11 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_value_t[] statevalues = new state_value_t[](1);
     size_t reduced_rule_set = INVALID_ID;
+<% if @grammar.ast %>
+    void * reduced_parser_node;
+<% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
+<% end %>
     for (;;)
     {
         if (token == INVALID_TOKEN_ID)
@@ -880,7 +1022,11 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
             {
                 /* Successful parse. */
+<% if @grammar.ast %>
+                context.parse_result = cast(<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)statevalues[$-1].ast_node;
+<% else %>
                 context.parse_result = statevalues[$-1].pvalue;
+<% end %>
                 return P_SUCCESS;
             }
         }
@@ -891,15 +1037,24 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-                token = INVALID_TOKEN_ID;
+<% if @grammar.ast %>
+                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token_info.position, token_info.end_position, token, token_info.pvalue);
+                statevalues[$-1].ast_node = token_ast_node;
+<% else %>
                 statevalues[$-1].pvalue = token_info.pvalue;
+<% end %>
+                token = INVALID_TOKEN_ID;
             }
             else
             {
                 /* We shifted a RuleSet. */
+<% if @grammar.ast %>
+                statevalues[$-1].ast_node = reduced_parser_node;
+<% else %>
                 statevalues[$-1].pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result;
                 reduced_parser_value = new_parse_result;
+<% end %>
                 reduced_rule_set = INVALID_ID;
             }
             continue;
@@ -909,7 +1064,63 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
-            reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states);
+<% if @grammar.ast %>
+            if (parser_reduce_table[reduce_index].propagate_optional_target)
+            {
+                reduced_parser_node = statevalues[$ - 1].ast_node;
+            }
+            else if (parser_reduce_table[reduce_index].n_states > 0)
+            {
+                size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
+                ASTNode * node = cast(ASTNode *)malloc(ASTNode.sizeof + n_fields * (void *).sizeof);
+                node.position = <%= @grammar.prefix %>position_t.INVALID;
+                node.end_position = <%= @grammar.prefix %>position_t.INVALID;
+                foreach (i; 0..n_fields)
+                {
+                    node.fields[i] = null;
+                }
+                if (parser_reduce_table[reduce_index].rule_set_node_field_index_map is null)
+                {
+                    foreach (i; 0..parser_reduce_table[reduce_index].n_states)
+                    {
+                        node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
+                    }
+                }
+                else
+                {
+                    foreach (i; 0..parser_reduce_table[reduce_index].n_states)
+                    {
+                        node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
+                    }
+                }
+                bool position_found = false;
+                foreach (i; 0..n_fields)
+                {
+                    ASTNode * child = cast(ASTNode *)node.fields[i];
+                    if (child && child.position.valid)
+                    {
+                        if (!position_found)
+                        {
+                            node.position = child.position;
+                            position_found = true;
+                        }
+                        node.end_position = child.end_position;
+                    }
+                }
+                reduced_parser_node = node;
+            }
+            else
+            {
+                reduced_parser_node = null;
+            }
+<% else %>
+            <%= @grammar.prefix %>value_t reduced_parser_value2;
+            if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
+            {
+                return P_USER_TERMINATED;
+            }
+            reduced_parser_value = reduced_parser_value2;
+<% end %>
             reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
             statevalues.length -= parser_reduce_table[reduce_index].n_states;
             continue;
@@ -934,9 +1145,17 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
  *
  * @return Parse result value.
  */
+<% if @grammar.ast %>
+public <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% else %>
 public <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
+<% end %>
 {
+<% if @grammar.ast %>
+    return context.parse_result;
+<% else %>
     return context.parse_result.v_<%= start_rule_type[0] %>;
+<% end %>
 }

 /**
@@ -951,3 +1170,26 @@ public <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @gram
 {
     return context.text_position;
 }
+
+/**
+ * Get the user terminate code.
+ *
+ * @param context
+ *     Lexer/parser context structure.
+ *
+ * @return User terminate code.
+ */
+public size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context)
+{
+    return context.user_terminate_code;
+}
+
+/**
+ * Get the parse token.
+ *
+ * @return Parse token.
+ */
+public <%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context)
+{
+    return context.token;
+}

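Both templates surface the new user-termination support through the `P_USER_TERMINATED` result code and the `p_user_terminate_code()` accessor added at the end of each file. A caller-side check could look like the following sketch (D, assuming the default `p_` prefix for the generated API):

```
import std.stdio;

/* Run the parser and report how it ended; a sketch assuming the
 * default p_ prefix for the generated API. */
void run_parser(p_context_t * context)
{
    size_t result = p_parse(context);
    if (result == P_USER_TERMINATED)
    {
        /* Lexer or parser user code requested termination. */
        writeln("user terminated, code ", p_user_terminate_code(context));
    }
    else if (result != P_SUCCESS)
    {
        writeln("parse failed with result ", result);
    }
}
```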
assets/parser.h.erb

@@ -20,9 +20,10 @@
 #define <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN 3u
 #define <%= @grammar.prefix.upcase %>DROP 4u
 #define <%= @grammar.prefix.upcase %>EOF 5u
+#define <%= @grammar.prefix.upcase %>USER_TERMINATED 6u

 /** Token type. */
-typedef <%= get_type_for(@grammar.invalid_token_id) %> <%= @grammar.prefix %>token_t;
+typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>token_t;

 /** Token IDs. */
 <% @grammar.tokens.each_with_index do |token, index| %>
@@ -32,20 +33,13 @@ typedef <%= get_type_for(@grammar.invalid_token_id) %> <%= @grammar.prefix %>tok
 <% end %>
 <% end %>
 #define INVALID_TOKEN_ID <%= @grammar.invalid_token_id %>u
+#define TERMINATE_TOKEN_ID <%= @grammar.terminate_token_id %>u

 /** Code point type. */
 typedef uint32_t <%= @grammar.prefix %>code_point_t;

-/** Parser values type(s). */
-typedef union
-{
-<% @grammar.ptypes.each do |name, typestring| %>
-    <%= typestring %> v_<%= name %>;
-<% end %>
-} <%= @grammar.prefix %>value_t;
-
 /**
- * A structure to keep track of parser position.
+ * A structure to keep track of input position.
  *
  * This is useful for reporting errors, etc...
  */
@@ -58,12 +52,72 @@ typedef struct
     uint32_t col;
 } <%= @grammar.prefix %>position_t;

+/** Return whether the position is valid. */
+#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0xFFFFFFFFu)
+
+/** User header code blocks. */
+<%= @grammar.code_blocks.fetch("header", "") %>
+
+<% if @grammar.ast %>
+/** Parser values type. */
+typedef <%= @grammar.ptype %> <%= @grammar.prefix %>value_t;
+<% else %>
+/** Parser values type(s). */
+typedef union
+{
+<% @grammar.ptypes.each do |name, typestring| %>
+    <%= typestring %> v_<%= name %>;
+<% end %>
+} <%= @grammar.prefix %>value_t;
+<% end %>
+
+<% if @grammar.ast %>
+/** AST node types. @{ */
+typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
+{
+    /* ASTNode fields must be present in the same order here. */
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+    <%= @grammar.prefix %>token_t token;
+    <%= @grammar.prefix %>value_t pvalue;
+} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;
+
+<% @parser.rule_sets.each do |name, rule_set| %>
+<% next if name.start_with?("$") %>
+<% next if rule_set.optional? %>
+struct <%= name %>;
+<% end %>
+
+<% @parser.rule_sets.each do |name, rule_set| %>
+<% next if name.start_with?("$") %>
+<% next if rule_set.optional? %>
+typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
+{
+    <%= @grammar.prefix %>position_t position;
+    <%= @grammar.prefix %>position_t end_position;
+<% rule_set.ast_fields.each do |fields| %>
+    union
+    {
+<% fields.each do |field_name, type| %>
+        struct <%= type %> * <%= field_name %>;
+<% end %>
+    };
+<% end %>
+} <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>;
+<% end %>
+/** @} */
+<% end %>
+
 /** Lexed token information. */
 typedef struct
 {
-    /** Text position where the token was found. */
+    /** Text position of first code point in token. */
     <%= @grammar.prefix %>position_t position;

+    /** Text position of last code point in token. */
+    <%= @grammar.prefix %>position_t end_position;
+
     /** Number of input bytes used by the token. */
     size_t length;
@@ -102,12 +156,26 @@ typedef struct
     /* Parser context data. */

     /** Parse result value. */
+<% if @grammar.ast %>
+    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
+<% else %>
     <%= @grammar.prefix %>value_t parse_result;
+<% end %>

     /** Unexpected token received. */
     <%= @grammar.prefix %>token_t token;
+
+    /** User terminate code. */
+    size_t user_terminate_code;
 } <%= @grammar.prefix %>context_t;

+/**************************************************************************
+ * Public data
+ *************************************************************************/
+
+/** Token names. */
+extern const char * <%= @grammar.prefix %>token_names[];
+
 void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length);

 size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t input_length,
@@ -117,6 +185,14 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
 size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context);

+<% if @grammar.ast %>
+<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% else %>
<%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context); <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% end %>
<%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context); <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context);
size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context);
<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context);
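A small usage sketch of the position-validity macro defined above (the `testparser.h` header name follows the specs later in this diff; a node's position may be left invalid when its rule matched no input):

#include <stdio.h>
#include "testparser.h"

static void report_position(p_position_t position)
{
    if (p_position_valid(position))
    {
        /* row and col are the uint32_t fields of p_position_t. */
        printf("at row %u, col %u\n", (unsigned)position.row, (unsigned)position.col);
    }
    else
    {
        printf("position not set\n");
    }
}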

File diff suppressed because it is too large


@ -0,0 +1 @@
au BufNewFile,BufRead *.propane set filetype=propane


@ -0,0 +1,33 @@
" Vim syntax file for Propane
" Language: propane
" Maintainer: Josh Holtrop
" URL: https://github.com/holtrop/propane
if exists("b:current_syntax")
finish
endif
if !exists("b:propane_subtype")
let b:propane_subtype = "d"
endif
exe "syn include @propaneTarget syntax/".b:propane_subtype.".vim"
syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contains=@propaneTarget keepend
syn match propaneComment "#.*"
syn match propaneOperator "->"
syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
syn match propaneFieldOperator ":" contained
syn match propaneOperator "?"
syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid
syn region propaneRegex start="/" end="/" skip="\\/"
hi def link propaneComment Comment
hi def link propaneKeyword Keyword
hi def link propaneRegex String
hi def link propaneOperator Operator
hi def link propaneFieldOperator Operator
hi def link propaneDelimiter Delimiter
hi def link propaneFieldAlias Identifier


@ -1,6 +1,7 @@
require "erb" require "erb"
require "set" require "set"
require "stringio" require "stringio"
require_relative "propane/assets"
require_relative "propane/cli" require_relative "propane/cli"
require_relative "propane/code_point_range" require_relative "propane/code_point_range"
require_relative "propane/fa" require_relative "propane/fa"
@ -30,10 +31,10 @@ class Propane
class << self class << self
def run(input_file, output_file, log_file) def run(input_file, output_file, log_file, options)
begin begin
grammar = Grammar.new(File.read(input_file)) grammar = Grammar.new(File.read(input_file))
generator = Generator.new(grammar, output_file, log_file) generator = Generator.new(grammar, output_file, log_file, options)
generator.generate generator.generate
rescue Error => e rescue Error => e
$stderr.puts e.message $stderr.puts e.message

lib/propane/assets.rb Normal file

@ -0,0 +1,10 @@
class Propane
module Assets
class << self
def get(name)
path = File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/#{name}")
File.binread(path)
end
end
end
end
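A minimal sketch of the intended use, mirroring Generator#generate further down in this diff (the asset name must match a file shipped under assets/):

require "erb"
require "propane"

template = Propane::Assets.get("parser.d.erb")
erb = ERB.new(template, trim_mode: "<>")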


@ -4,15 +4,21 @@ class Propane
USAGE = <<EOF USAGE = <<EOF
Usage: #{$0} [options] <input-file> <output-file> Usage: #{$0} [options] <input-file> <output-file>
Options: Options:
--log LOG Write log file -h, --help Show this usage and exit.
--version Show program version and exit --log LOG Write log file. This will show all parser states and their
-h, --help Show this usage and exit associated shifts and reduces. It can be helpful when
debugging a grammar.
--version Show program version and exit.
-w Treat warnings as errors. This option will treat shift/reduce
conflicts as fatal errors and will print them to stderr in
addition to the log file.
EOF EOF
class << self class << self
def run(args) def run(args)
params = [] params = []
options = {}
log_file = nil log_file = nil
i = 0 i = 0
while i < args.size while i < args.size
@ -24,11 +30,13 @@ EOF
log_file = args[i] log_file = args[i]
end end
when "--version" when "--version"
puts "propane v#{VERSION}" puts "propane version #{VERSION}"
return 0 return 0
when "-h", "--help" when "-h", "--help"
puts USAGE puts USAGE
return 0 return 0
when "-w"
options[:warnings_as_errors] = true
when /^-/ when /^-/
$stderr.puts "Error: unknown option #{arg}" $stderr.puts "Error: unknown option #{arg}"
return 1 return 1
@ -45,7 +53,7 @@ EOF
$stderr.puts "Error: cannot read #{params[0]}" $stderr.puts "Error: cannot read #{params[0]}"
return 2 return 2
end end
Propane.run(*params, log_file) Propane.run(*params, log_file, options)
end end
end end


@ -2,7 +2,7 @@ class Propane
class Generator class Generator
def initialize(grammar, output_file, log_file) def initialize(grammar, output_file, log_file, options)
@grammar = grammar @grammar = grammar
@output_file = output_file @output_file = output_file
if log_file if log_file
@ -10,13 +10,13 @@ class Propane
else else
@log = StringIO.new @log = StringIO.new
end end
@classname = @grammar.classname || File.basename(output_file).sub(%r{[^a-zA-Z0-9].*}, "").capitalize
@language = @language =
if output_file =~ /\.([a-z]+)$/ if output_file =~ /\.([a-z]+)$/
$1 $1
else else
"d" "d"
end end
@options = options
process_grammar! process_grammar!
end end
@ -26,7 +26,8 @@ class Propane
extensions += %w[h] extensions += %w[h]
end end
extensions.each do |extension| extensions.each do |extension|
erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.#{extension}.erb")), trim_mode: "<>") template = Assets.get("parser.#{extension}.erb")
erb = ERB.new(template, trim_mode: "<>")
output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}") output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
result = erb.result(binding.clone) result = erb.result(binding.clone)
File.open(output_file, "wb") do |fh| File.open(output_file, "wb") do |fh|
@ -51,6 +52,7 @@ class Propane
unless found_default unless found_default
raise Error.new("No patterns found for default mode") raise Error.new("No patterns found for default mode")
end end
check_ptypes!
# Add EOF token. # Add EOF token.
@grammar.tokens << Token.new("$EOF", nil, nil) @grammar.tokens << Token.new("$EOF", nil, nil)
tokens_by_name = {} tokens_by_name = {}
@ -66,11 +68,14 @@ class Propane
tokens_by_name[token.name] = token tokens_by_name[token.name] = token
end end
# Check for user start rule. # Check for user start rule.
unless @grammar.rules.find {|rule| rule.name == "Start"} unless @grammar.rules.find {|rule| rule.name == @grammar.start_rule}
raise Error.new("Start rule not found") raise Error.new("Start rule `#{@grammar.start_rule}` not found")
end end
# Add "real" start rule. # Add "real" start rule.
@grammar.rules.unshift(Rule.new("$Start", ["Start", "$EOF"], nil, nil, nil)) @grammar.rules.unshift(Rule.new("$Start", [@grammar.start_rule, "$EOF"], nil, nil, nil))
# Generate and add rules for optional components.
generate_optional_component_rules!(tokens_by_name)
# Build rule sets.
rule_sets = {} rule_sets = {}
rule_set_id = @grammar.tokens.size rule_set_id = @grammar.tokens.size
@grammar.rules.each_with_index do |rule, rule_id| @grammar.rules.each_with_index do |rule, rule_id|
@ -119,10 +124,55 @@ class Propane
end end
end end
determine_possibly_empty_rulesets!(rule_sets) determine_possibly_empty_rulesets!(rule_sets)
rule_sets.each do |name, rule_set|
rule_set.finalize(@grammar)
end
# Generate the lexer. # Generate the lexer.
@lexer = Lexer.new(@grammar) @lexer = Lexer.new(@grammar)
# Generate the parser. # Generate the parser.
@parser = Parser.new(@grammar, rule_sets, @log) @parser = Parser.new(@grammar, rule_sets, @log, @options)
end
# Check that any referenced ptypes have been defined.
def check_ptypes!
(@grammar.patterns + @grammar.tokens + @grammar.rules).each do |potor|
if potor.ptypename
unless @grammar.ptypes.include?(potor.ptypename)
raise Error.new("Error: Line #{potor.line_number}: ptype #{potor.ptypename} not declared. Declare with `ptype` statement.")
end
end
end
end
# Generate and add rules for any optional components.
def generate_optional_component_rules!(tokens_by_name)
optional_rules_added = Set.new
@grammar.rules.each do |rule|
rule.components.each do |component|
if component =~ /^(.*)\?$/
c = $1
unless optional_rules_added.include?(component)
# Create two rules for the optional component: one empty and
# one just matching the component.
# We need to find the ptypename for the optional component in
# order to copy it to the generated rules.
if tokens_by_name[c]
# The optional component is a token.
ptypename = tokens_by_name[c].ptypename
else
# The optional component must be a rule, so find any instance
# of that rule that specifies a ptypename.
ptypename = @grammar.rules.reduce(nil) do |result, rule|
rule.name == c && rule.ptypename ? rule.ptypename : result
end
end
@grammar.rules << Rule.new(component, [], nil, ptypename, rule.line_number)
@grammar.rules << Rule.new(component, [c], "$$ = $1;\n", ptypename, rule.line_number)
optional_rules_added << component
end
end
end
end
end end
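# Illustration (hypothetical grammar): given a rule
#
#   Value -> lbracket Elements? rbracket;
#
# the loop above appends two rules for the "Elements?" component, reusing
# any ptypename found for "Elements":
#
#   Rule.new("Elements?", [],           nil,          ptypename, line_number)
#   Rule.new("Elements?", ["Elements"], "$$ = $1;\n", ptypename, line_number)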
# Determine which grammar rules could expand to empty sequences. # Determine which grammar rules could expand to empty sequences.
@ -198,10 +248,25 @@ class Propane
code = code.gsub(/\$token\(([$\w]+)\)/) do |match| code = code.gsub(/\$token\(([$\w]+)\)/) do |match|
"TOKEN_#{Token.code_name($1)}" "TOKEN_#{Token.code_name($1)}"
end end
code = code.gsub(/\$terminate\((.*)\);/) do |match|
user_terminate_code = $1
retval = rule ? "#{@grammar.prefix.upcase}USER_TERMINATED" : "TERMINATE_TOKEN_ID"
case @language
when "c"
"context->user_terminate_code = (#{user_terminate_code}); return #{retval};"
when "d"
"context.user_terminate_code = (#{user_terminate_code}); return #{retval};"
end
end
if parser if parser
code = code.gsub(/\$\$/) do |match| code = code.gsub(/\$\$/) do |match|
case @language
when "c"
"_pvalue->v_#{rule.ptypename}"
when "d"
"_pvalue.v_#{rule.ptypename}" "_pvalue.v_#{rule.ptypename}"
end end
end
code = code.gsub(/\$(\d+)/) do |match| code = code.gsub(/\$(\d+)/) do |match|
index = $1.to_i index = $1.to_i
case @language case @language
@ -211,8 +276,29 @@ class Propane
"statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}" "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
end end
end end
code = code.gsub(/\$\{(\w+)\}/) do |match|
aliasname = $1
if index = rule.aliases[aliasname]
case @language
when "c"
"state_values_stack_index(statevalues, -(int)n_states + #{index})->pvalue.v_#{rule.components[index].ptypename}"
when "d"
"statevalues[$-n_states+#{index}].pvalue.v_#{rule.components[index].ptypename}"
end
else
raise Error.new("Field alias '#{aliasname}' not found")
end
end
else else
code = code.gsub(/\$\$/) do |match| code = code.gsub(/\$\$/) do |match|
if @grammar.ast
case @language
when "c"
"out_token_info->pvalue"
when "d"
"out_token_info.pvalue"
end
else
case @language case @language
when "c" when "c"
"out_token_info->pvalue.v_#{pattern.ptypename}" "out_token_info->pvalue.v_#{pattern.ptypename}"
@ -220,6 +306,7 @@ class Propane
"out_token_info.pvalue.v_#{pattern.ptypename}" "out_token_info.pvalue.v_#{pattern.ptypename}"
end end
end end
end
code = code.gsub(/\$mode\(([a-zA-Z_][a-zA-Z_0-9]*)\)/) do |match| code = code.gsub(/\$mode\(([a-zA-Z_][a-zA-Z_0-9]*)\)/) do |match|
mode_name = $1 mode_name = $1
mode_id = @lexer.mode_id(mode_name) mode_id = @lexer.mode_id(mode_name)
@ -243,7 +330,7 @@ class Propane
# Start rule parser value type name and type string. # Start rule parser value type name and type string.
def start_rule_type def start_rule_type
start_rule = @grammar.rules.find do |rule| start_rule = @grammar.rules.find do |rule|
rule.name == "Start" rule.name == @grammar.start_rule
end end
[start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]] [start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]]
end end
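To make the substitutions above concrete, a standalone sketch for the D backend, using a hypothetical rule `Expr -> Expr:lhs plus Expr:rhs` with a `ulong` ptype:

aliases    = { "lhs" => 0, "rhs" => 2 }
ptypenames = ["ulong", nil, "ulong"]  # ptypename of each rule component

code = "$$ = ${lhs} + ${rhs};"
code = code.gsub(/\$\$/) { "_pvalue.v_ulong" }
code = code.gsub(/\$\{(\w+)\}/) do
  index = aliases.fetch($1)
  "statevalues[$-n_states+#{index}].pvalue.v_#{ptypenames[index]}"
end
# => "_pvalue.v_ulong = statevalues[$-n_states+0].pvalue.v_ulong +
#     statevalues[$-n_states+2].pvalue.v_ulong;"

code = "$terminate(42);"
code = code.gsub(/\$terminate\((.*)\);/) do
  "context.user_terminate_code = (#{$1}); return P_USER_TERMINATED;"
end
# => "context.user_terminate_code = (42); return P_USER_TERMINATED;"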


@ -5,10 +5,13 @@ class Propane
# Reserve identifiers beginning with a double-underscore for internal use. # Reserve identifiers beginning with a double-underscore for internal use.
IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/ IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/
attr_reader :classname attr_reader :ast
attr_reader :ast_prefix
attr_reader :ast_suffix
attr_reader :modulename attr_reader :modulename
attr_reader :patterns attr_reader :patterns
attr_reader :rules attr_reader :rules
attr_reader :start_rule
attr_reader :tokens attr_reader :tokens
attr_reader :code_blocks attr_reader :code_blocks
attr_reader :ptypes attr_reader :ptypes
@ -16,15 +19,19 @@ class Propane
def initialize(input) def initialize(input)
@patterns = [] @patterns = []
@start_rule = "Start"
@tokens = [] @tokens = []
@rules = [] @rules = []
@code_blocks = [] @code_blocks = {}
@line_number = 1 @line_number = 1
@next_line_number = @line_number @next_line_number = @line_number
@mode = nil @mode = nil
@input = input.gsub("\r\n", "\n") @input = input.gsub("\r\n", "\n")
@ptypes = {"default" => "void *"} @ptypes = {"default" => "void *"}
@prefix = "p_" @prefix = "p_"
@ast = false
@ast_prefix = ""
@ast_suffix = ""
parse_grammar! parse_grammar!
end end
@ -36,6 +43,10 @@ class Propane
@tokens.size @tokens.size
end end
def terminate_token_id
@tokens.size + 1
end
private private
def parse_grammar! def parse_grammar!
@ -48,10 +59,13 @@ class Propane
if parse_white_space! if parse_white_space!
elsif parse_comment_line! elsif parse_comment_line!
elsif @mode.nil? && parse_mode_label! elsif @mode.nil? && parse_mode_label!
elsif parse_ast_statement!
elsif parse_ast_prefix_statement!
elsif parse_ast_suffix_statement!
elsif parse_module_statement! elsif parse_module_statement!
elsif parse_class_statement!
elsif parse_ptype_statement! elsif parse_ptype_statement!
elsif parse_pattern_statement! elsif parse_pattern_statement!
elsif parse_start_statement!
elsif parse_token_statement! elsif parse_token_statement!
elsif parse_tokenid_statement! elsif parse_tokenid_statement!
elsif parse_drop_statement! elsif parse_drop_statement!
@ -80,6 +94,24 @@ class Propane
consume!(/#.*\n/) consume!(/#.*\n/)
end end
def parse_ast_statement!
if consume!(/ast\s*;/)
@ast = true
end
end
def parse_ast_prefix_statement!
if md = consume!(/ast_prefix\s+(\w+)\s*;/)
@ast_prefix = md[1]
end
end
def parse_ast_suffix_statement!
if md = consume!(/ast_suffix\s+(\w+)\s*;/)
@ast_suffix = md[1]
end
end
def parse_module_statement! def parse_module_statement!
if consume!(/module\s+/) if consume!(/module\s+/)
md = consume!(/([\w.]+)\s*/, "expected module name") md = consume!(/([\w.]+)\s*/, "expected module name")
@ -90,20 +122,13 @@ class Propane
end end
end end
def parse_class_statement!
if consume!(/class\s+/)
md = consume!(/([\w.]+)\s*/, "expected class name")
@classname = md[1]
consume!(/;/, "expected `;'")
@mode = nil
true
end
end
def parse_ptype_statement! def parse_ptype_statement!
if consume!(/ptype\s+/) if consume!(/ptype\s+/)
name = "default" name = "default"
if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/) if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
name = md[1] name = md[1]
end end
md = consume!(/([^;]+);/, "expected parser result type expression") md = consume!(/([^;]+);/, "expected parser result type expression")
@ -116,12 +141,15 @@ class Propane
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name") md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
name = md[1] name = md[1]
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/) if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1] ptypename = md[1]
end end
pattern = parse_pattern! || name pattern = parse_pattern! || name
consume!(/\s+/) consume!(/\s+/)
unless code = parse_code_block! unless code = parse_code_block!
consume!(/;/, "expected pattern or `;' or code block") consume!(/;/, "expected `;' or code block")
end end
token = Token.new(name, ptypename, @line_number) token = Token.new(name, ptypename, @line_number)
@tokens << token @tokens << token
@ -137,6 +165,9 @@ class Propane
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name") md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
name = md[1] name = md[1]
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/) if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1] ptypename = md[1]
end end
consume!(/;/, "expected `;'"); consume!(/;/, "expected `;'");
@ -164,10 +195,17 @@ class Propane
def parse_rule_statement! def parse_rule_statement!
if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/) if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/)
rule_name, ptypename = *md[1, 2] rule_name, ptypename = *md[1, 2]
md = consume!(/((?:#{IDENTIFIER_REGEX}\s*)*)\s*/, "expected rule component list") if @ast && ptypename
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
md = consume!(/((?:#{IDENTIFIER_REGEX}(?::#{IDENTIFIER_REGEX})?\??\s*)*)\s*/, "expected rule component list")
components = md[1].strip.split(/\s+/) components = md[1].strip.split(/\s+/)
if @ast
consume!(/;/, "expected `;'")
else
unless code = parse_code_block! unless code = parse_code_block!
consume!(/;/, "expected pattern or `;' or code block") consume!(/;/, "expected `;' or code block")
end
end end
@rules << Rule.new(rule_name, components, code, ptypename, @line_number) @rules << Rule.new(rule_name, components, code, ptypename, @line_number)
@mode = nil @mode = nil
@ -179,6 +217,9 @@ class Propane
if pattern = parse_pattern! if pattern = parse_pattern!
consume!(/\s+/) consume!(/\s+/)
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/) if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
if @ast
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
ptypename = md[1] ptypename = md[1]
end end
unless code = parse_code_block! unless code = parse_code_block!
@ -190,9 +231,22 @@ class Propane
end end
end end
def parse_start_statement!
if md = consume!(/start\s+(\w+)\s*;/)
@start_rule = md[1]
end
end
def parse_code_block_statement! def parse_code_block_statement!
if code = parse_code_block! if md = consume!(/<<([a-z]*)(.*?)>>\n/m)
@code_blocks << code name, code = md[1..2]
code.sub!(/\A\n/, "")
code += "\n" unless code.end_with?("\n")
if @code_blocks[name]
@code_blocks[name] += code
else
@code_blocks[name] = code
end
@mode = nil @mode = nil
true true
end end
@ -225,8 +279,11 @@ class Propane
end end
def parse_code_block! def parse_code_block!
if md = consume!(/<<\n(.*?)^>>\n/m) if md = consume!(/<<(.*?)>>\n/m)
md[1] code = md[1]
code.sub!(/\A\n/, "")
code += "\n" unless code.end_with?("\n")
code
end end
end end
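A standalone sketch of the named code block merging handled above, applying the same regex to a toy grammar excerpt (the trailing-newline normalization is elided):

src = <<'GRAMMAR'
<<header
#include "json_types.h"
>>
<<header
#include "testutils.h"
>>
GRAMMAR

code_blocks = {}
src.scan(/<<([a-z]*)(.*?)>>\n/m) do |name, code|
  code = code.sub(/\A\n/, "")
  code_blocks[name] = (code_blocks[name] || "") + code
end
code_blocks  # => {"header" => "#include \"json_types.h\"\n#include \"testutils.h\"\n"}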


@ -7,12 +7,14 @@ class Propane
attr_reader :reduce_table attr_reader :reduce_table
attr_reader :rule_sets attr_reader :rule_sets
def initialize(grammar, rule_sets, log) def initialize(grammar, rule_sets, log, options)
@grammar = grammar @grammar = grammar
@rule_sets = rule_sets @rule_sets = rule_sets
@log = log @log = log
@item_sets = [] @item_sets = []
@item_sets_set = {} @item_sets_set = {}
@warnings = Set.new
@options = options
start_item = Item.new(grammar.rules.first, 0) start_item = Item.new(grammar.rules.first, 0)
eval_item_sets = Set[ItemSet.new([start_item])] eval_item_sets = Set[ItemSet.new([start_item])]
@ -23,10 +25,10 @@ class Propane
item_set.id = @item_sets.size item_set.id = @item_sets.size
@item_sets << item_set @item_sets << item_set
@item_sets_set[item_set] = item_set @item_sets_set[item_set] = item_set
item_set.following_symbols.each do |following_symbol| item_set.next_symbols.each do |next_symbol|
unless following_symbol.name == "$EOF" unless next_symbol.name == "$EOF"
following_set = item_set.build_following_item_set(following_symbol) next_item_set = item_set.build_next_item_set(next_symbol)
eval_item_sets << following_set eval_item_sets << next_item_set
end end
end end
end end
@ -37,8 +39,11 @@ class Propane
end end
build_reduce_actions! build_reduce_actions!
write_log!
build_tables! build_tables!
write_log!
if @warnings.size > 0 && @options[:warnings_as_errors]
raise Error.new("Fatal errors (-w):\n" + @warnings.join("\n"))
end
end end
private private
@ -48,27 +53,37 @@ class Propane
@shift_table = [] @shift_table = []
@reduce_table = [] @reduce_table = []
@item_sets.each do |item_set| @item_sets.each do |item_set|
shift_entries = item_set.following_symbols.map do |following_symbol| shift_entries = item_set.next_symbols.map do |next_symbol|
state_id = state_id =
if following_symbol.name == "$EOF" if next_symbol.name == "$EOF"
0 0
else else
item_set.following_item_set[following_symbol].id item_set.next_item_set[next_symbol].id
end end
{ {
symbol_id: following_symbol.id, symbol: next_symbol,
state_id: state_id, state_id: state_id,
} }
end end
unless item_set.reduce_rules.empty?
shift_entries.each do |shift_entry|
token = shift_entry[:symbol]
if get_lookahead_reduce_actions_for_item_set(item_set).include?(token)
rule = item_set.reduce_actions[token]
@warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
end
end
end
reduce_entries = reduce_entries =
case ra = item_set.reduce_actions if rule = item_set.reduce_rule
when Rule [{token_id: @grammar.invalid_token_id, rule_id: rule.id, rule: rule,
[{token_id: @grammar.invalid_token_id, rule_id: ra.id, rule_set_id: rule.rule_set.id, n_states: rule.components.size,
rule_set_id: ra.rule_set.id, n_states: ra.components.size}] propagate_optional_target: rule.optional? && rule.components.size == 1}]
when Hash elsif reduce_actions = item_set.reduce_actions
ra.map do |token, rule| reduce_actions.map do |token, rule|
{token_id: token.id, rule_id: rule.id, {token_id: token.id, rule_id: rule.id, rule: rule,
rule_set_id: rule.rule_set.id, n_states: rule.components.size} rule_set_id: rule.rule_set.id, n_states: rule.components.size,
propagate_optional_target: rule.optional? && rule.components.size == 1}
end end
else else
[] []
@ -85,11 +100,11 @@ class Propane
end end
def process_item_set(item_set) def process_item_set(item_set)
item_set.following_symbols.each do |following_symbol| item_set.next_symbols.each do |next_symbol|
unless following_symbol.name == "$EOF" unless next_symbol.name == "$EOF"
following_set = @item_sets_set[item_set.build_following_item_set(following_symbol)] next_item_set = @item_sets_set[item_set.build_next_item_set(next_symbol)]
item_set.following_item_set[following_symbol] = following_set item_set.next_item_set[next_symbol] = next_item_set
following_set.in_sets << item_set next_item_set.in_sets << item_set
end end
end end
end end
@ -99,7 +114,7 @@ class Propane
# @return [void] # @return [void]
def build_reduce_actions! def build_reduce_actions!
@item_sets.each do |item_set| @item_sets.each do |item_set|
item_set.reduce_actions = build_reduce_actions_for_item_set(item_set) build_reduce_actions_for_item_set(item_set)
end end
end end
@ -108,38 +123,55 @@ class Propane
# @param item_set [ItemSet] # @param item_set [ItemSet]
# ItemSet (parser state) # ItemSet (parser state)
# #
# @return [nil, Rule, Hash] # @return [void]
# If no reduce actions are possible for the given item set, nil.
# If only one reduce action is possible for the given item set, the Rule
# to reduce.
# Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
def build_reduce_actions_for_item_set(item_set) def build_reduce_actions_for_item_set(item_set)
# To build the reduce actions, we start by looking at any # To build the reduce actions, we start by looking at any
# "complete" items, i.e., items where the parse position is at the # "complete" items, i.e., items where the parse position is at the
# end of a rule. These are the only rules that are candidates for # end of a rule. These are the only rules that are candidates for
# reduction in the current ItemSet. # reduction in the current ItemSet.
reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule)) item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
# If there are no rules to reduce for this ItemSet, we're done here. if item_set.reduce_rules.size == 1
return nil if reduce_rules.size == 0 item_set.reduce_rule = item_set.reduce_rules.first
end
# If there is exactly one rule to reduce for this ItemSet, then do not if item_set.reduce_rules.size > 1
# figure out the lookaheads; just reduce it. # Force item_set.reduce_actions to be built to store the lookahead
return reduce_rules.first if reduce_rules.size == 1 # tokens for the possible reduce rules if there is more than one.
get_lookahead_reduce_actions_for_item_set(item_set)
end
end
# Otherwise, we have more than one possible rule to reduce. # Get the reduce actions for a single item set (parser state).
#
# @param item_set [ItemSet]
# ItemSet (parser state)
#
# @return [Hash]
# Mapping of lookahead Tokens to the Rules to reduce.
def get_lookahead_reduce_actions_for_item_set(item_set)
item_set.reduce_actions ||= build_lookahead_reduce_actions_for_item_set(item_set)
end
# Build the reduce actions for a single item set (parser state).
#
# @param item_set [ItemSet]
# ItemSet (parser state)
#
# @return [Hash]
# Mapping of lookahead Tokens to the Rules to reduce.
def build_lookahead_reduce_actions_for_item_set(item_set)
# We will be looking for all possible tokens that can follow instances of # We will be looking for all possible tokens that can follow instances of
# these rules. Rather than looking through the entire grammar for the # these rules. Rather than looking through the entire grammar for the
# possible following tokens, we will only look in the item sets leading # possible following tokens, we will only look in the item sets leading
# up to this one. This restriction gives us a more precise lookahead set, # up to this one. This restriction gives us a more precise lookahead set,
# and allows us to parse LALR grammars. # and allows us to parse LALR grammars.
item_sets = item_set.leading_item_sets item_sets = Set[item_set] + item_set.leading_item_sets
reduce_rules.reduce({}) do |reduce_actions, reduce_rule| item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets) lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
lookahead_tokens_for_rule.each do |lookahead_token| lookahead_tokens_for_rule.each do |lookahead_token|
if existing_reduce_rule = reduce_actions[lookahead_token] if existing_reduce_rule = reduce_actions[lookahead_token]
raise Error.new("Error: reduce/reduce conflict between rule #{existing_reduce_rule.id} (#{existing_reduce_rule.name}) and rule #{reduce_rule.id} (#{reduce_rule.name})") raise Error.new("Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number})")
end end
reduce_actions[lookahead_token] = reduce_rule reduce_actions[lookahead_token] = reduce_rule
end end
@ -181,9 +213,9 @@ class Propane
# tokens to form the lookahead token set. # tokens to form the lookahead token set.
item_sets.each do |item_set| item_sets.each do |item_set|
item_set.items.each do |item| item_set.items.each do |item|
if item.following_symbol == rule_set if item.next_symbol == rule_set
(1..).each do |offset| (1..).each do |offset|
case symbol = item.following_symbol(offset) case symbol = item.next_symbol(offset)
when nil when nil
rule_set = item.rule.rule_set rule_set = item.rule.rule_set
unless checked_rule_sets.include?(rule_set) unless checked_rule_sets.include?(rule_set)
@ -240,20 +272,26 @@ class Propane
@log.puts @log.puts
@log.puts " Incoming states: #{incoming_ids.join(", ")}" @log.puts " Incoming states: #{incoming_ids.join(", ")}"
@log.puts " Outgoing states:" @log.puts " Outgoing states:"
item_set.following_item_set.each do |following_symbol, following_item_set| item_set.next_item_set.each do |next_symbol, next_item_set|
@log.puts " #{following_symbol.name} => #{following_item_set.id}" @log.puts " #{next_symbol.name} => #{next_item_set.id}"
end end
@log.puts @log.puts
@log.puts " Reduce actions:" @log.puts " Reduce actions:"
case item_set.reduce_actions if item_set.reduce_rule
when Rule @log.puts " * => rule #{item_set.reduce_rule.id}, rule set #{@rule_sets[item_set.reduce_rule.name].id} (#{item_set.reduce_rule.name})"
@log.puts " * => rule #{item_set.reduce_actions.id}, rule set #{@rule_sets[item_set.reduce_actions.name].id} (#{item_set.reduce_actions.name})" elsif item_set.reduce_actions
when Hash
item_set.reduce_actions.each do |token, rule| item_set.reduce_actions.each do |token, rule|
@log.puts " lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}" @log.puts " lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}"
end end
end end
end end
if @warnings.size > 0
@log.puts
@log.puts "Warnings:"
@warnings.each do |warning|
@log.puts " #{warning}"
end
end
end end
end end
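The new shift/reduce check boils down to intersecting each state's shift symbols with its reduce lookaheads; a toy sketch with hypothetical state, token, and rule names:

shift_symbols     = %w[else end]
reduce_lookaheads = { "else" => ["IfStmt", 12] }  # token name => rule name, line

shift_symbols.each do |token|
  rule = reduce_lookaheads[token]
  next unless rule
  puts "Shift/Reduce conflict (state 7) between token #{token} " \
       "and rule #{rule[0]} (defined on line #{rule[1]})"
end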


@ -56,7 +56,7 @@ class Propane
# Return the set of Items obtained by "closing" the current item. # Return the set of Items obtained by "closing" the current item.
# #
# If the following symbol for the current item is another Rule name, then # If the next symbol for the current item is another Rule name, then
# this method will return all Items for that Rule with a position of 0. # this method will return all Items for that Rule with a position of 0.
# Otherwise, an empty Array is returned. # Otherwise, an empty Array is returned.
# #
@ -81,17 +81,17 @@ class Propane
@position == @rule.components.size @position == @rule.components.size
end end
# Get the following symbol for the Item. # Get the next symbol for the Item.
# #
# That is, the symbol which follows the parse position marker in the # That is, the symbol which is after the parse position marker in the
# current Item. # current Item.
# #
# @param offset [Integer] # @param offset [Integer]
# Offset from current parse position to examine. # Offset from current parse position to examine.
# #
# @return [Token, RuleSet, nil] # @return [Token, RuleSet, nil]
# Following symbol for the Item. # Next symbol for the Item.
def following_symbol(offset = 0) def next_symbol(offset = 0)
@rule.components[@position + offset] @rule.components[@position + offset]
end end
@ -108,25 +108,25 @@ class Propane
end end
end end
# Get whether this Item is followed by the provided symbol. # Get whether this Item's next symbol is the given symbol.
# #
# @param symbol [Token, RuleSet] # @param symbol [Token, RuleSet]
# Symbol to query. # Symbol to query.
# #
# @return [Boolean] # @return [Boolean]
# Whether this Item is followed by the provided symbol. # Whether this Item's next symbol is the given symbol.
def followed_by?(symbol) def next_symbol?(symbol)
following_symbol == symbol next_symbol == symbol
end end
# Get the following item for this Item. # Get the next item for this Item.
# #
# That is, the Item formed by moving the parse position marker one place # That is, the Item formed by moving the parse position marker one place
# forward from its position in this Item. # forward from its position in this Item.
# #
# @return [Item] # @return [Item]
# The following item for this Item. # The next item for this Item.
def following_item def next_item
Item.new(@rule, @position + 1) Item.new(@rule, @position + 1)
end end


@ -2,7 +2,7 @@ class Propane
class Parser class Parser
# Represent a parser "item set", which is a set of possible items that the # Represent a parser "item set", which is a set of possible items that the
# parser could currently be parsing. # parser could currently be parsing. This is equivalent to a parser state.
class ItemSet class ItemSet
# @return [Set<Item>] # @return [Set<Item>]
@ -14,15 +14,24 @@ class Propane
attr_accessor :id attr_accessor :id
# @return [Hash] # @return [Hash]
# Maps a following symbol to its ItemSet. # Maps a next symbol to its ItemSet.
attr_reader :following_item_set attr_reader :next_item_set
# @return [Set<ItemSet>] # @return [Set<ItemSet>]
# ItemSets leading to this item set. # ItemSets leading to this item set.
attr_reader :in_sets attr_reader :in_sets
# @return [nil, Rule, Hash] # @return [nil, Rule]
# Reduce actions, mapping lookahead tokens to rules. # Rule to reduce if there is only one possibility.
attr_accessor :reduce_rule
# @return [Set<Rule>]
# Set of rules that could be reduced in this parser state.
attr_accessor :reduce_rules
# @return [nil, Hash]
# Reduce actions, mapping lookahead tokens to rules, if there is
# more than one rule that could be reduced.
attr_accessor :reduce_actions attr_accessor :reduce_actions
# Build an ItemSet. # Build an ItemSet.
@ -31,28 +40,28 @@ class Propane
# Items in this ItemSet. # Items in this ItemSet.
def initialize(items) def initialize(items)
@items = Set.new(items) @items = Set.new(items)
@following_item_set = {} @next_item_set = {}
@in_sets = Set.new @in_sets = Set.new
close! close!
end end
# Get the set of following symbols for all Items in this ItemSet. # Get the set of next symbols for all Items in this ItemSet.
# #
# @return [Set<Token, RuleSet>] # @return [Set<Token, RuleSet>]
# Set of following symbols for all Items in this ItemSet. # Set of next symbols for all Items in this ItemSet.
def following_symbols def next_symbols
Set.new(@items.map(&:following_symbol).compact) @_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
end end
# Build a following ItemSet for the given following symbol. # Build a next ItemSet for the given next symbol.
# #
# @param symbol [Token, RuleSet] # @param symbol [Token, RuleSet]
# Following symbol to build the following ItemSet for. # Next symbol to build the next ItemSet for.
# #
# @return [ItemSet] # @return [ItemSet]
# Following ItemSet for the given following symbol. # Next ItemSet for the given next symbol.
def build_following_item_set(symbol) def build_next_item_set(symbol)
ItemSet.new(items_followed_by(symbol).map(&:following_item)) ItemSet.new(items_with_next(symbol).map(&:next_item))
end end
# Hash function. # Hash function.
@ -87,13 +96,26 @@ class Propane
# Set of ItemSets that lead to this ItemSet. # Set of ItemSets that lead to this ItemSet.
# #
# This set includes this ItemSet.
#
# @return [Set<ItemSet>] # @return [Set<ItemSet>]
# Set of all ItemSets that lead up to this ItemSet. # Set of all ItemSets that lead up to this ItemSet.
def leading_item_sets def leading_item_sets
@in_sets.reduce(Set[self]) do |result, item_set| @_leading_item_sets ||=
result + item_set.leading_item_sets begin
result = Set.new
eval_sets = Set[self]
evaled = Set.new
while eval_sets.size > 0
eval_set = eval_sets.first
eval_sets.delete(eval_set)
evaled << eval_set
eval_set.in_sets.each do |in_set|
result << in_set
unless evaled.include?(in_set)
eval_sets << in_set
end
end
end
result
end end
end end
@ -127,16 +149,16 @@ class Propane
end end
end end
# Get the Items followed by the given following symbol. # Get the Items with the given next symbol.
# #
# @param symbol [Token, RuleSet] # @param symbol [Token, RuleSet]
# Following symbol. # Next symbol.
# #
# @return [Array<Item>] # @return [Array<Item>]
# Items followed by the given following symbol. # Items with the given next symbol.
def items_followed_by(symbol) def items_with_next(symbol)
@items.select do |item| @items.select do |item|
item.followed_by?(symbol) item.next_symbol?(symbol)
end end
end end
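A self-contained sketch of the worklist traversal used by leading_item_sets above, with integers standing in for ItemSets; the `evaled` set keeps the cycle at state 2 from looping:

require "set"

in_sets = { 0 => [], 1 => [0], 2 => [1, 2] }  # state 2 loops back to itself

def leading(in_sets, id)
  result = Set.new
  eval_sets = Set[id]
  evaled = Set.new
  while eval_sets.size > 0
    eval_set = eval_sets.first
    eval_sets.delete(eval_set)
    evaled << eval_set
    in_sets[eval_set].each do |in_set|
      result << in_set
      eval_sets << in_set unless evaled.include?(in_set)
    end
  end
  result
end

leading(in_sets, 2).to_a.sort  # => [0, 1, 2]; state 2 appears only via the cycle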


@ -134,8 +134,18 @@ class Propane
else else
c = @pattern.slice!(0) c = @pattern.slice!(0)
case c case c
when "a"
CharacterRangeUnit.new("\a", "\a")
when "b"
CharacterRangeUnit.new("\b", "\b")
when "d" when "d"
CharacterRangeUnit.new("0", "9") CharacterRangeUnit.new("0", "9")
when "f"
CharacterRangeUnit.new("\f", "\f")
when "n"
CharacterRangeUnit.new("\n", "\n")
when "r"
CharacterRangeUnit.new("\r", "\r")
when "s" when "s"
ccu = CharacterClassUnit.new ccu = CharacterClassUnit.new
ccu << CharacterRangeUnit.new(" ") ccu << CharacterRangeUnit.new(" ")
@ -145,6 +155,10 @@ class Propane
ccu << CharacterRangeUnit.new("\f") ccu << CharacterRangeUnit.new("\f")
ccu << CharacterRangeUnit.new("\v") ccu << CharacterRangeUnit.new("\v")
ccu ccu
when "t"
CharacterRangeUnit.new("\t", "\t")
when "v"
CharacterRangeUnit.new("\v", "\v")
else else
CharacterRangeUnit.new(c) CharacterRangeUnit.new(c)
end end
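For reference, the single-character escapes now recognized in patterns, as plain Ruby (`\d` and `\s` still expand to character classes as shown above):

ESCAPES = {
  "a" => "\a", "b" => "\b", "f" => "\f", "n" => "\n",
  "r" => "\r", "t" => "\t", "v" => "\v",
}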


@ -6,6 +6,10 @@ class Propane
# Rule components. # Rule components.
attr_reader :components attr_reader :components
# @return [Hash]
# Field aliases.
attr_reader :aliases
# @return [String] # @return [String]
# User code associated with the rule. # User code associated with the rule.
attr_reader :code attr_reader :code
@ -30,6 +34,11 @@ class Propane
# The RuleSet that this Rule is a part of. # The RuleSet that this Rule is a part of.
attr_accessor :rule_set attr_accessor :rule_set
# @return [Array<Integer>]
# Map this rule's components to their positions in the parent RuleSet's
# node field pointer array. This is used for AST construction.
attr_accessor :rule_set_node_field_index_map
# Construct a Rule. # Construct a Rule.
# #
# @param name [String] # @param name [String]
@ -44,7 +53,20 @@ class Propane
# Line number where the rule was defined in the input grammar. # Line number where the rule was defined in the input grammar.
def initialize(name, components, code, ptypename, line_number) def initialize(name, components, code, ptypename, line_number)
@name = name @name = name
@components = components @aliases = {}
@components = components.each_with_index.map do |component, i|
if component =~ /(\S+):(\S+)/
c, aliasname = $1, $2
if @aliases[aliasname]
raise Error.new("Error: duplicate field alias `#{aliasname}` for rule #{name} defined on line #{line_number}")
end
@aliases[aliasname] = i
c
else
component
end
end
@rule_set_node_field_index_map = components.map {0}
@code = code @code = code
@ptypename = ptypename @ptypename = ptypename
@line_number = line_number @line_number = line_number
@ -60,6 +82,14 @@ class Propane
@components.empty? @components.empty?
end end
# Return whether this is an optional Rule.
#
# @return [Boolean]
# Whether this is an optional Rule.
def optional?
@name.end_with?("?")
end
# Represent the Rule as a String. # Represent the Rule as a String.
# #
# @return [String] # @return [String]
@ -68,6 +98,17 @@ class Propane
"#{@name} -> #{@components.map(&:name).join(" ")}" "#{@name} -> #{@components.map(&:name).join(" ")}"
end end
# Check whether the rule set node field index map is just a 1:1 mapping.
#
# @return [Boolean]
# Boolean indicating whether the rule set node field index map is just a
# 1:1 mapping.
def flat_rule_set_node_field_index_map?
@rule_set_node_field_index_map.each_with_index.all? do |v, i|
v == i
end
end
end end
end end
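A quick standalone check of the component/alias split performed in the constructor above, on a hypothetical rule `Expr -> Expr:lhs plus Expr:rhs`:

aliases = {}
components = %w[Expr:lhs plus Expr:rhs].each_with_index.map do |component, i|
  if component =~ /(\S+):(\S+)/
    aliases[$2] = i
    $1
  else
    component
  end
end
components  # => ["Expr", "plus", "Expr"]
aliases     # => {"lhs" => 0, "rhs" => 2}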


@ -1,7 +1,12 @@
class Propane class Propane
# A RuleSet collects all grammar rules of the same name.
class RuleSet class RuleSet
# @return [Array<Hash>]
# AST fields.
attr_reader :ast_fields
# @return [Integer] # @return [Integer]
# ID of the RuleSet. # ID of the RuleSet.
attr_reader :id attr_reader :id
@ -51,6 +56,24 @@ class Propane
@could_be_empty @could_be_empty
end end
# Return whether this is an optional RuleSet.
#
# @return [Boolean]
# Whether this is an optional RuleSet.
def optional?
@name.end_with?("?")
end
# For optional rule sets, return the underlying component that is optional.
def option_target
@rules.each do |rule|
if rule.components.size > 0
return rule.components[0]
end
end
raise "Optional rule target not found"
end
# Build the start token set for the RuleSet. # Build the start token set for the RuleSet.
# #
# @return [Set<Token>] # @return [Set<Token>]
@ -75,6 +98,72 @@ class Propane
@_start_token_set @_start_token_set
end end
# Finalize a RuleSet after adding all Rules to it.
def finalize(grammar)
if grammar.ast
build_ast_fields(grammar)
end
end
private
# Build the set of AST fields for this RuleSet.
#
# This is an Array of Hashes. Each entry in the Array corresponds to a
# field location in the AST node. The entry is a Hash with one or more
# keys. It always has the field name with a positional suffix as a key. It
# may also have the field name without the positional suffix, if that field
# only exists in one position across all Rules in the RuleSet, plus any
# user-specified field aliases merged in for that position.
#
# @return [void]
def build_ast_fields(grammar)
field_ast_node_indexes = {}
field_indexes_across_all_rules = {}
@ast_fields = []
@rules.each do |rule|
rule.components.each_with_index do |component, i|
if component.is_a?(RuleSet) && component.optional?
component = component.option_target
end
if component.is_a?(Token)
node_name = "Token"
else
node_name = component.name
end
struct_name = "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
field_name = "p#{node_name}#{i + 1}"
unless field_ast_node_indexes[field_name]
field_ast_node_indexes[field_name] = @ast_fields.size
@ast_fields << {field_name => struct_name}
end
field_indexes_across_all_rules[node_name] ||= Set.new
field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
end
end
field_indexes_across_all_rules.each do |node_name, indexes_across_all_rules|
if indexes_across_all_rules.size == 1
# If this field was only seen in one position across all rules,
# then add an alias to the positional field name that does not
# include the position.
@ast_fields[indexes_across_all_rules.first]["p#{node_name}"] =
"#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
end
end
# Now merge in the field aliases as given by the user in the
# grammar.
field_aliases = {}
@rules.each do |rule|
rule.aliases.each do |alias_name, index|
if field_aliases[alias_name] && field_aliases[alias_name] != index
raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}`")
end
field_aliases[alias_name] = index
@ast_fields[index][alias_name] = @ast_fields[index].first[1]
end
end
end
end end
end end
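A condensed sketch of the resulting field layout for a hypothetical RuleSet with rules `Pair -> string colon Value` and `Pair -> string` (prefix/suffix handling elided); `Value` occurs at a single position, so it also gets the unsuffixed alias:

require "set"

rules = [["Token", "Token", "Value"],  # node types for: string colon Value
         ["Token"]]                    # node types for: string

field_indexes = {}
fields = []
positions = Hash.new { |h, k| h[k] = Set.new }
rules.each do |components|
  components.each_with_index do |node, i|
    name = "p#{node}#{i + 1}"
    field_indexes[name] ||= (fields << { name => node }).size - 1
    positions[node] << field_indexes[name]
  end
end
positions.each do |node, idxs|
  fields[idxs.first]["p#{node}"] = node if idxs.size == 1
end
fields  # => [{"pToken1"=>"Token"}, {"pToken2"=>"Token"},
        #     {"pValue3"=>"Value", "pValue"=>"Value"}]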


@ -1,3 +1,3 @@
class Propane class Propane
VERSION = "0.1.0" VERSION = "1.5.1"
end end


@ -1,2 +0,0 @@
#!/bin/sh
exec bundle exec ruby -Ilib bin/propane "$@"


@ -1,5 +1,6 @@
#!/usr/bin/env ruby #!/usr/bin/env ruby
require "erb"
require "fileutils" require "fileutils"
require "digest/md5" require "digest/md5"
@ -13,6 +14,24 @@ START_FILE = "bin/#{PROG_NAME}"
LIB_DIR = "lib" LIB_DIR = "lib"
DIST = "dist" DIST = "dist"
ASSETS_TEMPLATE = <<EOF
class Propane
module Assets
class << self
def get(name)
case name
<% Dir.glob("assets/*").each do |asset_file| %>
when <%= File.basename(asset_file).inspect %>
<%= File.binread(asset_file).inspect %>
<% end %>
end
end
end
end
end
EOF
assets_module = ERB.new(ASSETS_TEMPLATE, trim_mode: "<>").result
files_processed = {} files_processed = {}
combined_file = [] combined_file = []
@ -25,8 +44,12 @@ combine_files = lambda do |file|
if File.exist?(path) if File.exist?(path)
unless files_processed[path] unless files_processed[path]
files_processed[path] = true files_processed[path] = true
if require_name == "propane/assets"
combined_file << assets_module
else
combine_files[path] combine_files[path]
end end
end
else else
raise "require path #{path.inspect} not found" raise "require path #{path.inspect} not found"
end end

spec/json_parser.c.propane Normal file

@ -0,0 +1,183 @@
<<header
#include "json_types.h"
#include "testutils.h"
>>
<<
#include "math.h"
#include <stdbool.h>
static str_t string_value;
>>
ptype JSONValue *;
drop /\s+/;
token lbrace /\{/;
token rbrace /\}/;
token lbracket /\[/;
token rbracket /\]/;
token comma /,/;
token colon /:/;
token number /-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?/ <<
double n = 0.0;
bool negative = false;
size_t i = 0u;
if (match[i] == '-')
{
negative = true;
i++;
}
while ('0' <= match[i] && match[i] <= '9')
{
n *= 10.0;
n += (match[i] - '0');
i++;
}
if (match[i] == '.')
{
i++;
double mult = 0.1;
while ('0' <= match[i] && match[i] <= '9')
{
n += mult * (match[i] - '0');
mult /= 10.0;
i++;
}
}
if (match[i] == 'e' || match[i] == 'E')
{
bool exp_negative = false;
i++;
if (match[i] == '-')
{
exp_negative = true;
i++;
}
else if (match[i] == '+')
{
i++;
}
long exp = 0;
while ('0' <= match[i] && match[i] <= '9')
{
exp *= 10;
exp += (match[i] - '0');
i++;
}
if (exp_negative)
{
exp = -exp;
}
n *= pow(10.0, exp); /* scale mantissa by 10^exp */
}
if (negative)
{
n = -n;
}
$$ = JSONValue_new(JSON_NUMBER);
$$->number = n;
>>
token true <<
$$ = JSONValue_new(JSON_TRUE);
>>
token false <<
$$ = JSONValue_new(JSON_FALSE);
>>
token null <<
$$ = JSONValue_new(JSON_NULL);
>>
/"/ <<
$mode(string);
str_init(&string_value, "");
>>
string: token string /"/ <<
$$ = JSONValue_new(JSON_STRING);
$$->string = string_value;
$mode(default);
>>
string: /\\"/ <<
str_append(&string_value, "\"");
>>
string: /\\\\/ <<
str_append(&string_value, "\\");
>>
string: /\\\// <<
str_append(&string_value, "/");
>>
string: /\\b/ <<
str_append(&string_value, "\b");
>>
string: /\\f/ <<
str_append(&string_value, "\f");
>>
string: /\\n/ <<
str_append(&string_value, "\n");
>>
string: /\\r/ <<
str_append(&string_value, "\r");
>>
string: /\\t/ <<
str_append(&string_value, "\t");
>>
string: /\\u[0-9a-fA-F]{4}/ <<
/* Not actually going to encode the code point for this example... */
char s[] = {'{', match[2], match[3], match[4], match[5], '}', 0};
str_append(&string_value, s);
>>
string: /[^\\]/ <<
char s[] = {match[0], 0};
str_append(&string_value, s);
>>
Start -> Value <<
$$ = $1;
>>
Value -> string <<
$$ = $1;
>>
Value -> number <<
$$ = $1;
>>
Value -> Object <<
$$ = $1;
>>
Value -> Array <<
$$ = $1;
>>
Value -> true <<
$$ = $1;
>>
Value -> false <<
$$ = $1;
>>
Value -> null <<
$$ = $1;
>>
Object -> lbrace rbrace <<
$$ = JSONObject_new();
>>
Object -> lbrace KeyValues rbrace <<
$$ = $2;
>>
KeyValues -> KeyValue <<
$$ = $1;
>>
KeyValues -> KeyValues comma KeyValue <<
JSONObject_append($1, $3->object.entries[0].name, $3->object.entries[0].value);
$$ = $1;
>>
KeyValue -> string colon Value <<
$$ = JSONObject_new();
JSONObject_append($$, str_cstr(&$1->string), $3);
>>
Array -> lbracket rbracket <<
$$ = JSONArray_new();
>>
Array -> lbracket Values rbracket <<
$$ = $2;
>>
Values -> Value <<
$$ = $1;
>>
Values -> Values comma Value <<
JSONArray_append($1, $3);
$$ = $1;
>>

spec/json_types.c Normal file

@ -0,0 +1,64 @@
#include "json_types.h"
#include <string.h>
#include <stdlib.h>
#include "testutils.h"
JSONValue * JSONValue_new(size_t id)
{
JSONValue * jv = calloc(1, sizeof(JSONValue));
jv->id = id;
return jv;
}
JSONValue * JSONObject_new(void)
{
JSONValue * jv = JSONValue_new(JSON_OBJECT);
jv->object.size = 0u;
return jv;
}
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value)
{
size_t const size = object->object.size;
for (size_t i = 0u; i < size; i++)
{
if (strcmp(name, object->object.entries[i].name) == 0)
{
object->object.entries[i].value = value;
return;
}
}
size_t const new_size = size + 1;
void * new_entries = malloc(sizeof(object->object.entries[0]) * new_size);
if (size > 0)
{
memcpy(new_entries, object->object.entries, size * sizeof(object->object.entries[0]));
free(object->object.entries);
}
object->object.entries = new_entries;
object->object.entries[size].name = name;
object->object.entries[size].value = value;
object->object.size = new_size;
}
JSONValue * JSONArray_new(void)
{
JSONValue * jv = JSONValue_new(JSON_ARRAY);
jv->array.size = 0u;
return jv;
}
void JSONArray_append(JSONValue * array, JSONValue * value)
{
size_t const size = array->array.size;
size_t const new_size = size + 1;
JSONValue ** new_entries = malloc(sizeof(JSONValue *) * new_size);
if (array->array.size > 0)
{
memcpy(new_entries, array->array.entries, sizeof(JSONValue *) * size);
free(array->array.entries);
}
array->array.entries = new_entries;
array->array.entries[size] = value;
array->array.size = new_size;
}

spec/json_types.h Normal file

@ -0,0 +1,46 @@
#pragma once
#include <stddef.h>
#include "testutils.h"
#define JSON_OBJECT 0u
#define JSON_ARRAY 1u
#define JSON_NUMBER 2u
#define JSON_STRING 3u
#define JSON_TRUE 4u
#define JSON_FALSE 5u
#define JSON_NULL 6u
typedef struct JSONValue_s
{
size_t id;
union
{
struct
{
size_t size;
struct
{
char const * name;
struct JSONValue_s * value;
} * entries;
} object;
struct
{
size_t size;
struct JSONValue_s ** entries;
} array;
double number;
str_t string;
};
} JSONValue;
JSONValue * JSONValue_new(size_t id);
JSONValue * JSONObject_new(void);
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value);
JSONValue * JSONArray_new(void);
void JSONArray_append(JSONValue * array, JSONValue * value);


@ -5,7 +5,6 @@ class Propane
# Comment line # Comment line
module a.b; module a.b;
class Foobar;
ptype XYZ * ; ptype XYZ * ;
token while; token while;
@ -30,7 +29,6 @@ B -> <<
>> >>
EOF EOF
grammar = Grammar.new(input) grammar = Grammar.new(input)
expect(grammar.classname).to eq "Foobar"
expect(grammar.modulename).to eq "a.b" expect(grammar.modulename).to eq "a.b"
expect(grammar.ptype).to eq "XYZ *" expect(grammar.ptype).to eq "XYZ *"
expect(grammar.ptypes).to eq("default" => "XYZ *") expect(grammar.ptypes).to eq("default" => "XYZ *")
@ -38,44 +36,44 @@ EOF
o = grammar.tokens.find {|token| token.name == "while"} o = grammar.tokens.find {|token| token.name == "while"}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.line_number).to eq 7 expect(o.line_number).to eq 6
o = grammar.patterns.find {|pattern| pattern.token == o} o = grammar.patterns.find {|pattern| pattern.token == o}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.pattern).to eq "while" expect(o.pattern).to eq "while"
expect(o.line_number).to eq 7 expect(o.line_number).to eq 6
expect(o.code).to be_nil expect(o.code).to be_nil
o = grammar.tokens.find {|token| token.name == "id"} o = grammar.tokens.find {|token| token.name == "id"}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.line_number).to eq 10 expect(o.line_number).to eq 9
o = grammar.patterns.find {|pattern| pattern.token == o} o = grammar.patterns.find {|pattern| pattern.token == o}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.pattern).to eq "[a-zA-Z_][a-zA-Z_0-9]*" expect(o.pattern).to eq "[a-zA-Z_][a-zA-Z_0-9]*"
expect(o.line_number).to eq 10 expect(o.line_number).to eq 9
expect(o.code).to be_nil expect(o.code).to be_nil
o = grammar.tokens.find {|token| token.name == "token_with_code"} o = grammar.tokens.find {|token| token.name == "token_with_code"}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.line_number).to eq 12 expect(o.line_number).to eq 11
o = grammar.patterns.find {|pattern| pattern.token == o} o = grammar.patterns.find {|pattern| pattern.token == o}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.pattern).to eq "token_with_code" expect(o.pattern).to eq "token_with_code"
expect(o.line_number).to eq 12 expect(o.line_number).to eq 11
expect(o.code).to eq "Code for the token\n" expect(o.code).to eq "Code for the token\n"
o = grammar.tokens.find {|token| token.name == "token_with_no_pattern"} o = grammar.tokens.find {|token| token.name == "token_with_no_pattern"}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.line_number).to eq 16 expect(o.line_number).to eq 15
o = grammar.patterns.find {|pattern| pattern.token == o} o = grammar.patterns.find {|pattern| pattern.token == o}
expect(o).to be_nil expect(o).to be_nil
o = grammar.patterns.find {|pattern| pattern.pattern == "\\s+"} o = grammar.patterns.find {|pattern| pattern.pattern == "\\s+"}
expect(o).to_not be_nil expect(o).to_not be_nil
expect(o.line_number).to eq 18 expect(o.line_number).to eq 17
expect(o.token).to be_nil expect(o.token).to be_nil
expect(o.code).to be_nil expect(o.code).to be_nil
@ -84,19 +82,19 @@ EOF
o = grammar.rules[0] o = grammar.rules[0]
expect(o.name).to eq "A" expect(o.name).to eq "A"
expect(o.components).to eq %w[B] expect(o.components).to eq %w[B]
expect(o.line_number).to eq 20 expect(o.line_number).to eq 19
expect(o.code).to eq " a = 42;\n" expect(o.code).to eq " a = 42;\n"
o = grammar.rules[1] o = grammar.rules[1]
expect(o.name).to eq "B" expect(o.name).to eq "B"
expect(o.components).to eq %w[C while id] expect(o.components).to eq %w[C while id]
expect(o.line_number).to eq 23 expect(o.line_number).to eq 22
expect(o.code).to be_nil expect(o.code).to be_nil
o = grammar.rules[2] o = grammar.rules[2]
expect(o.name).to eq "B" expect(o.name).to eq "B"
expect(o.components).to eq [] expect(o.components).to eq []
expect(o.line_number).to eq 24 expect(o.line_number).to eq 23
expect(o.code).to eq " b = 0;\n" expect(o.code).to eq " b = 0;\n"
end end

File diff suppressed because it is too large


@ -1,11 +1,27 @@
require "bundler/setup" unless ENV["dist_specs"]
require "propane" require "bundler/setup"
require "simplecov"
RSpec.configure do |config| SimpleCov.start do
add_filter "/spec/"
add_filter "/.bundle/"
if ENV["partial_specs"]
command_name "RSpec-partial"
else
command_name "RSpec"
end
project_name "Propane"
merge_timeout 3600
end
RSpec.configure do |config|
# Enable flags like --only-failures and --next-failure # Enable flags like --only-failures and --next-failure
config.example_status_persistence_file_path = ".rspec_status" config.example_status_persistence_file_path = ".rspec_status"
config.expect_with :rspec do |c| config.expect_with :rspec do |c|
c.syntax = :expect c.syntax = :expect
end end
end
end end
require "propane"

spec/test_ast.c (new file, 55 lines)

@@ -0,0 +1,55 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
Start * start = p_result(&context);
assert(start->pItems1 != NULL);
assert(start->pItems != NULL);
Items * items = start->pItems;
assert(items->pItem != NULL);
assert(items->pItem->pToken1 != NULL);
assert_eq(TOKEN_a, items->pItem->pToken1->token);
assert_eq(11, items->pItem->pToken1->pvalue);
assert(items->pItemsMore != NULL);
ItemsMore * itemsmore = items->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore != NULL);
itemsmore = itemsmore->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore == NULL);
input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems == NULL);
input = "2 1";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems != NULL);
assert(start->pItems->pItem != NULL);
assert(start->pItems->pItem->pDual != NULL);
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
assert(start->pItems->pItem->pDual->pOne2 != NULL);
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
assert(start->pItems->pItem->pDual->pOne1 == NULL);
return 0;
}
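
Distilled from the test above, a minimal sketch of the AST-mode calling sequence. All names (testparser.h, p_context_init, p_parse, p_result, the generated Start/Items node types) are taken from the test itself; the grammar that generates them is not shown here, so treat this as an illustration under those assumptions rather than a complete program for any particular grammar:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "a, ((b)), b";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        /* p_result() returns the root AST node; child nodes are reached
         * through generated pointer fields, which are NULL when absent. */
        Start * start = p_result(&context);
        if (start->pItems != NULL && start->pItems->pItem->pToken1 != NULL)
        {
            printf("first item token: %d\n", (int)start->pItems->pItem->pToken1->token);
        }
    }
    return 0;
}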

spec/test_ast.d (new file, 57 lines)

@@ -0,0 +1,57 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
Start * start = p_result(&context);
assert(start.pItems1 !is null);
assert(start.pItems !is null);
Items * items = start.pItems;
assert(items.pItem !is null);
assert(items.pItem.pToken1 !is null);
assert_eq(TOKEN_a, items.pItem.pToken1.token);
assert_eq(11, items.pItem.pToken1.pvalue);
assert(items.pItemsMore !is null);
ItemsMore * itemsmore = items.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore !is null);
itemsmore = itemsmore.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore is null);
input = "";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems is null);
input = "2 1";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems !is null);
assert(start.pItems.pItem !is null);
assert(start.pItems.pItem.pDual !is null);
assert(start.pItems.pItem.pDual.pTwo1 !is null);
assert(start.pItems.pItem.pDual.pOne2 !is null);
assert(start.pItems.pItem.pDual.pTwo2 is null);
assert(start.pItems.pItem.pDual.pOne1 is null);
}

@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(TOKEN_a, start->first->pToken->token);
assert_eq(TOKEN_b, start->second->pToken->token);
assert_eq(TOKEN_c, start->third->pToken->token);
return 0;
}

@@ -0,0 +1,21 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(TOKEN_a, start.first.pToken.token);
assert_eq(TOKEN_b, start.second.pToken.token);
assert_eq(TOKEN_c, start.third.pToken.token);
}

@@ -0,0 +1,102 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(1, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(2, start->pT1->pA->position.row);
assert_eq(2, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(7, start->pT1->pA->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(7, start->pT1->end_position.col);
assert_eq(1, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(7, start->end_position.col);
input = "a\nbb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(1, start->pT1->pA->end_position.row);
assert_eq(1, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(1, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(1, start->end_position.row);
assert_eq(1, start->end_position.col);
input = "a\nc\nc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(0, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(0, start->end_position.col);
input = "a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(!p_position_valid(start->pT1->pA->position));
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(0, start->end_position.col);
return 0;
}
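
The assertions above pin down the position-tracking contract: every AST node carries zero-based start and end positions, and a node that matched no input has an invalid position. A hedged sketch of consuming those fields (same API as the test; the unsigned casts are only for portable printing, since the exact field types are not shown here):

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "a\nbb";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        Start * start = p_result(&context);
        /* Check validity before using a position; empty matches are invalid. */
        if (p_position_valid(start->position))
        {
            printf("start spans (%u,%u)..(%u,%u)\n",
                   (unsigned)start->position.row, (unsigned)start->position.col,
                   (unsigned)start->end_position.row, (unsigned)start->end_position.col);
        }
    }
    return 0;
}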

@@ -0,0 +1,104 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(1, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(2, start.pT1.pA.position.row);
assert_eq(2, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(7, start.pT1.pA.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(7, start.pT1.end_position.col);
assert_eq(1, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(7, start.end_position.col);
input = "a\nbb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(1, start.pT1.pA.end_position.row);
assert_eq(1, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(1, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(1, start.end_position.row);
assert_eq(1, start.end_position.col);
input = "a\nc\nc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(0, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(0, start.end_position.col);
input = "a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(!start.pT1.pA.position.valid);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(0, start.end_position.col);
}

spec/test_ast_ps.c (new file, 55 lines)

@@ -0,0 +1,55 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
PStartS * start = p_result(&context);
assert(start->pItems1 != NULL);
assert(start->pItems != NULL);
PItemsS * items = start->pItems;
assert(items->pItem != NULL);
assert(items->pItem->pToken1 != NULL);
assert_eq(TOKEN_a, items->pItem->pToken1->token);
assert_eq(11, items->pItem->pToken1->pvalue);
assert(items->pItemsMore != NULL);
PItemsMoreS * itemsmore = items->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore != NULL);
itemsmore = itemsmore->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore == NULL);
input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems == NULL);
input = "2 1";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems != NULL);
assert(start->pItems->pItem != NULL);
assert(start->pItems->pItem->pDual != NULL);
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
assert(start->pItems->pItem->pDual->pOne2 != NULL);
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
assert(start->pItems->pItem->pDual->pOne1 == NULL);
return 0;
}

spec/test_ast_ps.d (new file, 57 lines)

@@ -0,0 +1,57 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
PStartS * start = p_result(&context);
assert(start.pItems1 !is null);
assert(start.pItems !is null);
PItemsS * items = start.pItems;
assert(items.pItem !is null);
assert(items.pItem.pToken1 !is null);
assert_eq(TOKEN_a, items.pItem.pToken1.token);
assert_eq(11, items.pItem.pToken1.pvalue);
assert(items.pItemsMore !is null);
PItemsMoreS * itemsmore = items.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore !is null);
itemsmore = itemsmore.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore is null);
input = "";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems is null);
input = "2 1";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems !is null);
assert(start.pItems.pItem !is null);
assert(start.pItems.pItem.pDual !is null);
assert(start.pItems.pItem.pDual.pTwo1 !is null);
assert(start.pItems.pItem.pDual.pOne2 !is null);
assert(start.pItems.pItem.pDual.pTwo2 is null);
assert(start.pItems.pItem.pDual.pOne1 is null);
}
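
The _ps variants repeat the AST tests with renamed node types (PStartS, PItemsS, PItemsMoreS), which appears to exercise the ast_prefix/ast_suffix grammar statements; only the generated type names change, not the calling sequence. A minimal C illustration, assuming a grammar configured with prefix "P" and suffix "S":

#include "testparser.h"
#include <string.h>

int main(void)
{
    char const * input = "";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        /* With an AST prefix "P" and suffix "S" configured in the grammar,
         * the generated root node type is PStartS rather than Start. */
        PStartS * start = p_result(&context);
        (void)start;
    }
    return 0;
}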

@@ -0,0 +1,84 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "abbccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->pT2->pToken->position.row);
assert_eq(1, start->pT2->pToken->position.col);
assert_eq(0, start->pT2->pToken->end_position.row);
assert_eq(2, start->pT2->pToken->end_position.col);
assert_eq(0, start->pT2->position.row);
assert_eq(1, start->pT2->position.col);
assert_eq(0, start->pT2->end_position.row);
assert_eq(2, start->pT2->end_position.col);
assert_eq(0, start->pT3->pToken->position.row);
assert_eq(3, start->pT3->pToken->position.col);
assert_eq(0, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(0, start->pT3->position.row);
assert_eq(3, start->pT3->position.col);
assert_eq(0, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(5, start->end_position.col);
input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(2, start->pT1->pToken->position.row);
assert_eq(2, start->pT1->pToken->position.col);
assert_eq(2, start->pT1->pToken->end_position.row);
assert_eq(3, start->pT1->pToken->end_position.col);
assert_eq(2, start->pT1->position.row);
assert_eq(2, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(3, start->pT1->end_position.col);
assert_eq(3, start->pT2->pToken->position.row);
assert_eq(0, start->pT2->pToken->position.col);
assert_eq(4, start->pT2->pToken->end_position.row);
assert_eq(1, start->pT2->pToken->end_position.col);
assert_eq(3, start->pT2->position.row);
assert_eq(0, start->pT2->position.col);
assert_eq(4, start->pT2->end_position.row);
assert_eq(1, start->pT2->end_position.col);
assert_eq(6, start->pT3->pToken->position.row);
assert_eq(5, start->pT3->pToken->position.col);
assert_eq(6, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(6, start->pT3->position.row);
assert_eq(5, start->pT3->position.col);
assert_eq(6, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);
assert_eq(2, start->position.row);
assert_eq(2, start->position.col);
assert_eq(6, start->end_position.row);
assert_eq(5, start->end_position.col);
return 0;
}

@@ -0,0 +1,86 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "abbccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.pT2.pToken.position.row);
assert_eq(1, start.pT2.pToken.position.col);
assert_eq(0, start.pT2.pToken.end_position.row);
assert_eq(2, start.pT2.pToken.end_position.col);
assert_eq(0, start.pT2.position.row);
assert_eq(1, start.pT2.position.col);
assert_eq(0, start.pT2.end_position.row);
assert_eq(2, start.pT2.end_position.col);
assert_eq(0, start.pT3.pToken.position.row);
assert_eq(3, start.pT3.pToken.position.col);
assert_eq(0, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(0, start.pT3.position.row);
assert_eq(3, start.pT3.position.col);
assert_eq(0, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(5, start.end_position.col);
input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(2, start.pT1.pToken.position.row);
assert_eq(2, start.pT1.pToken.position.col);
assert_eq(2, start.pT1.pToken.end_position.row);
assert_eq(3, start.pT1.pToken.end_position.col);
assert_eq(2, start.pT1.position.row);
assert_eq(2, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(3, start.pT1.end_position.col);
assert_eq(3, start.pT2.pToken.position.row);
assert_eq(0, start.pT2.pToken.position.col);
assert_eq(4, start.pT2.pToken.end_position.row);
assert_eq(1, start.pT2.pToken.end_position.col);
assert_eq(3, start.pT2.position.row);
assert_eq(0, start.pT2.position.col);
assert_eq(4, start.pT2.end_position.row);
assert_eq(1, start.pT2.end_position.col);
assert_eq(6, start.pT3.pToken.position.row);
assert_eq(5, start.pT3.pToken.position.col);
assert_eq(6, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(6, start.pT3.position.row);
assert_eq(5, start.pT3.position.col);
assert_eq(6, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);
assert_eq(2, start.position.row);
assert_eq(2, start.position.col);
assert_eq(6, start.end_position.row);
assert_eq(5, start.end_position.col);
}

@@ -14,14 +14,14 @@ int main()
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
 assert(p_position(&context).row == 2);
 assert(p_position(&context).col == 3);
-assert(context.token == TOKEN_a);
+assert(p_token(&context) == TOKEN_a);
 input = "12";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
 assert(p_position(&context).row == 0);
 assert(p_position(&context).col == 0);
-assert(context.token == TOKEN_num);
+assert(p_token(&context) == TOKEN_num);
 input = "a 12\n\nab";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
@@ -35,5 +35,8 @@ int main()
 assert(p_position(&context).row == 5);
 assert(p_position(&context).col == 4);
+assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
+assert(strcmp(p_token_names[TOKEN_num], "num") == 0);
 return 0;
 }
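
This diff replaces direct access to context.token with the p_token() accessor and adds a p_token_names table mapping token ids to their grammar names. Together they make a readable diagnostic one call away; a short sketch built only from functions shown above:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "12";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_UNEXPECTED_TOKEN)
    {
        /* p_token() reports the offending token; p_token_names maps the
         * token id to its grammar name for the error message. */
        fprintf(stderr, "unexpected %s at %u:%u\n",
                p_token_names[p_token(&context)],
                (unsigned)p_position(&context).row,
                (unsigned)p_position(&context).col);
    }
    return 0;
}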

@@ -17,13 +17,13 @@ unittest
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
 assert(p_position(&context) == p_position_t(2, 3));
-assert(context.token == TOKEN_a);
+assert(p_token(&context) == TOKEN_a);
 input = "12";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
 assert(p_position(&context) == p_position_t(0, 0));
-assert(context.token == TOKEN_num);
+assert(p_token(&context) == TOKEN_num);
 input = "a 12\n\nab";
 p_context_init(&context, input);
@@ -34,4 +34,7 @@ unittest
 p_context_init(&context, input);
 assert(p_parse(&context) == P_DECODE_ERROR);
 assert(p_position(&context) == p_position_t(5, 4));
+assert(p_token_names[TOKEN_a] == "a");
+assert(p_token_names[TOKEN_num] == "num");
 }

spec/test_field_aliases.c (new file, 13 lines)

@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}

spec/test_field_aliases.d (new file, 15 lines)

@@ -0,0 +1,15 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}

@@ -43,41 +43,57 @@ int main()
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 0u);
 assert(token_info.position.col == 0u);
+assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.col == 0u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 0u);
 assert(token_info.position.col == 2u);
+assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.col == 2u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 0u);
 assert(token_info.position.col == 4u);
+assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.col == 4u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 0u);
 assert(token_info.position.col == 6u);
+assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.col == 6u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_times);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 1u);
 assert(token_info.position.col == 0u);
+assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.col == 2u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 1u);
 assert(token_info.position.col == 4u);
+assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.col == 4u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 1u);
 assert(token_info.position.col == 6u);
+assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.col == 8u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 1u);
 assert(token_info.position.col == 9u);
+assert(token_info.end_position.row == 1u);
+assert(token_info.end_position.col == 9u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);
@@ -85,6 +101,8 @@ int main()
 assert(p_lex(&context, &token_info) == P_SUCCESS);
 assert(token_info.position.row == 0u);
 assert(token_info.position.col == 0u);
+assert(token_info.end_position.row == 0u);
+assert(token_info.end_position.col == 0u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);
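
These lexer tests show p_token_info_t gaining an end_position alongside the existing position, length, and token fields. A hedged sketch of driving the lexer standalone with p_lex(); the "1 + 2" input assumes the same int/plus token definitions the test grammar evidently uses:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "1 + 2";
    p_context_t context;
    p_token_info_t token_info;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    /* Pull tokens until end of input; token_info carries both the start
     * (position) and end (end_position) of each token. */
    while (p_lex(&context, &token_info) == P_SUCCESS)
    {
        printf("token %u at %u:%u..%u:%u, length %u\n",
               (unsigned)token_info.token,
               (unsigned)token_info.position.row, (unsigned)token_info.position.col,
               (unsigned)token_info.end_position.row, (unsigned)token_info.end_position.col,
               (unsigned)token_info.length);
        if (token_info.token == TOKEN___EOF)
            break;
    }
    return 0;
}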

@@ -47,23 +47,23 @@ unittest
 p_context_t context;
 p_context_init(&context, input);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 0), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 2), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(0, 2), p_position_t(0, 2), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 4), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(0, 4), p_position_t(0, 4), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 6), 1, TOKEN_times));
+assert(token_info == p_token_info_t(p_position_t(0, 6), p_position_t(0, 6), 1, TOKEN_times));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 0), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 0), p_position_t(1, 2), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 4), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(1, 4), p_position_t(1, 4), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 6), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 6), p_position_t(1, 8), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 9), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(1, 9), p_position_t(1, 9), 0, TOKEN___EOF));
 p_context_init(&context, "");
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(0, 0), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 0, TOKEN___EOF));
 }

@@ -0,0 +1,13 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
int main()
{
char const * input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}

@@ -0,0 +1,15 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}

@@ -0,0 +1,22 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
input = "abdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}

@@ -0,0 +1,23 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
input = "abdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}

@@ -0,0 +1,42 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);
input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->pR == start->pR3);
assert_eq(TOKEN_c, start->pR->pToken1->token);
input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert(start->pR != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);
return 0;
}
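
This test encodes the convention for optional rule terms: the generated field for an unmatched optional term is simply NULL, and pR appears to be an alias for the same node as its positional field pR3 when both are present. A minimal check in the same style:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "b";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        Start * start = p_result(&context);
        /* Optional terms map to pointer fields that are NULL when the
         * term was not present in the input. */
        if (start->pToken1 == NULL)
            printf("optional leading token omitted\n");
    }
    return 0;
}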

@@ -0,0 +1,43 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);
input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 !is null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 !is null);
assert(start.pR3 !is null);
assert(start.pR !is null);
assert(start.pR is start.pR3);
assert_eq(TOKEN_c, start.pR.pToken1.token);
input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);
}

spec/test_parsing_json.c (new file, 56 lines)

@@ -0,0 +1,56 @@
#include "testparser.h"
#include "json_types.h"
#include <string.h>
#include <assert.h>
#include "testutils.h"
int main()
{
char const * input = "";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
input = "{}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_OBJECT);
input = "[]";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_ARRAY);
input = "-45.6";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == -45.6);
input = "2E-2";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == 0.02);
input = "{\"hi\":true}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
JSONValue * o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(1, o->object.size);
assert(strcmp(o->object.entries[0].name, "hi") == 0);
assert(o->object.entries[0].value->id == JSON_TRUE);
input = "{\"ff\": false, \"nn\": null}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(2, o->object.size);
assert(strcmp(o->object.entries[0].name, "ff") == 0);
assert(o->object.entries[0].value->id == JSON_FALSE);
assert(strcmp(o->object.entries[1].name, "nn") == 0);
assert(o->object.entries[1].value->id == JSON_NULL);
return 0;
}
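
The JSON test exercises a user-defined parse result rather than generated AST nodes: p_result() yields a JSONValue discriminated by its id field. A compact sketch of inspecting one; JSONValue, JSON_OBJECT, and the object/entries layout come from the spec's json_types.h, which is assumed to be on the include path:

#include "testparser.h"
#include "json_types.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "{\"hi\":true}";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_SUCCESS)
    {
        JSONValue * value = p_result(&context);
        /* The user-defined result type discriminates on its id field. */
        if (value->id == JSON_OBJECT && value->object.size > 0)
            printf("first key: %s\n", value->object.entries[0].name);
    }
    return 0;
}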

spec/test_start_rule.c (new file, 9 lines)

@@ -0,0 +1,9 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
return 0;
}

spec/test_start_rule.d (new file, 8 lines)

@@ -0,0 +1,8 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}

@@ -0,0 +1,17 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "hi";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
assert(top->pToken != NULL);
assert_eq(TOKEN_hi, top->pToken->token);
return 0;
}

@@ -0,0 +1,19 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "hi";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
assert(top.pToken !is null);
assert_eq(TOKEN_hi, top.pToken.token);
}

@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
int main()
{
char const * input = "aacc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
input = "abc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 4200);
return 0;
}
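
Both user-terminate tests follow the same pattern: user code aborts the parse, p_parse() returns P_USER_TERMINATED, and p_user_terminate_code() recovers the code the user supplied (4200 and 8675309 above). A minimal handler sketch:

#include "testparser.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    char const * input = "abc";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) == P_USER_TERMINATED)
    {
        /* The terminate code is whatever value the user's rule code set. */
        printf("terminated with code %u\n", (unsigned)p_user_terminate_code(&context));
    }
    return 0;
}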

@@ -0,0 +1,20 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "aacc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
input = "abc";
p_context_init(&context, input);
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 4200);
}

@@ -0,0 +1,19 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
input = "b";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 8675309);
return 0;
}

@@ -0,0 +1,20 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
input = "b";
p_context_init(&context, input);
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 8675309);
}

@@ -1,6 +1,9 @@
 #include <stdio.h>
 #include <assert.h>
 #include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include "testutils.h"
 void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
 {
@@ -10,3 +13,26 @@ void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
 assert(false);
 }
 }
+void str_init(str_t * str, char const * cs)
+{
+  size_t length = strlen(cs);
+  str->cs = malloc(length + 1u);
+  strcpy(str->cs, cs);
+}
+void str_append(str_t * str, char const * cs)
+{
+  size_t length = strlen(str->cs);
+  size_t length2 = strlen(cs);
+  char * new_cs = malloc(length + length2 + 1u);
+  memcpy(new_cs, str->cs, length);
+  strcpy(&new_cs[length], cs);
+  free(str->cs);
+  str->cs = new_cs;
+}
+void str_free(str_t * str)
+{
+  free(str->cs);
+}

@@ -5,3 +5,15 @@ void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
 #define assert_eq(expected, actual) \
   assert_eq_size_t_i(expected, actual, __FILE__, __LINE__)
+typedef struct
+{
+  char * cs;
+} str_t;
+void str_init(str_t * str, char const * cs);
+void str_append(str_t * str, char const * cs);
+void str_free(str_t * str);
+static inline char * str_cstr(str_t * str)
+{
+  return str->cs;
+}
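
The new str_t helper is a small owning C string for the test suite: str_init allocates and copies, str_append grows the buffer, str_free releases it, and str_cstr exposes the raw pointer. Usage follows directly from the declarations above:

#include <stdio.h>
#include "testutils.h"

int main(void)
{
    str_t str;
    str_init(&str, "hello");      /* allocates and copies */
    str_append(&str, ", world");  /* reallocates, appends, frees the old buffer */
    printf("%s\n", str_cstr(&str));
    str_free(&str);               /* releases the owned buffer */
    return 0;
}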