Compare commits

master..v1.4.0

No commits in common. "master" and "v1.4.0" have entirely different histories.

29 changed files with 193 additions and 1166 deletions

View File

@ -1,22 +1,3 @@
## v1.5.1
### Improvements
- Improve performance (#28)
## v1.5.0
### New Features
- Track start and end text positions for tokens and rules in AST node structures (#27)
- Add warnings for shift/reduce conflicts to log file (#25)
- Add -w command line switch to treat warnings as errors and output to stderr (#26)
- Add rule field aliases (#24)
### Improvements
- Show line numbers of rules on conflict (#23)
## v1.4.0
### New Features

View File

@ -31,14 +31,9 @@ Propane is typically invoked from the command-line as `./propane`.
Usage: ./propane [options] <input-file> <output-file>
Options:
-h, --help Show this usage and exit.
--log LOG Write log file. This will show all parser states and their
associated shifts and reduces. It can be helpful when
debugging a grammar.
--version Show program version and exit.
-w Treat warnings as errors. This option will treat shift/reduce
conflicts as fatal errors and will print them to stderr in
addition to the log file.
--log LOG Write log file
--version Show program version and exit
-h, --help Show this usage and exit
The user must specify the path to a Propane input grammar file and a path to an
output file.
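For example, a typical invocation (the file names here are purely illustrative) generates a D parser from a grammar and writes a log file:

```
./propane mygrammar.propane myparser.d --log myparser.log
```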

View File

@ -226,10 +226,7 @@ typedef struct
/** Number of bytes of input text used to match. */
size_t length;
/** Input text position delta to end of token. */
<%= @grammar.prefix %>position_t end_delta_position;
/** Input text position delta to next code point after token end. */
/** Input text position delta. */
<%= @grammar.prefix %>position_t delta_position;
/** Accepting lexer state from the match. */
@ -361,7 +358,6 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
if (transition_state != INVALID_LEXER_STATE_ID)
{
attempt_match.length += code_point_length;
attempt_match.end_delta_position = attempt_match.delta_position;
if (code_point == '\n')
{
attempt_match.delta_position.row++;
@ -448,6 +444,7 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
<%= @grammar.prefix %>token_info_t token_info = {0};
token_info.position = context->text_position;
token_info.token = INVALID_TOKEN_ID;
*out_token_info = token_info; // TODO: remove
lexer_match_info_t match_info;
size_t unexpected_input_length;
size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@ -494,22 +491,11 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
}
token_info.token = token_to_accept;
token_info.length = match_info.length;
if (match_info.end_delta_position.row != 0u)
{
token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
token_info.end_position.col = match_info.end_delta_position.col;
}
else
{
token_info.end_position.row = token_info.position.row;
token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
}
*out_token_info = token_info;
return P_SUCCESS;
case P_EOF:
token_info.token = TOKEN___EOF;
token_info.end_position = token_info.position;
*out_token_info = token_info;
return P_SUCCESS;
@ -566,9 +552,6 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
* Parser
*************************************************************************/
/** Invalid position value. */
#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0xFFFFFFFFu, 0xFFFFFFFFu}
/** Reduce ID type. */
typedef <%= get_type_for(@parser.reduce_table.size) %> reduce_id_t;
@ -684,18 +667,10 @@ typedef struct
<% end %>
} state_value_t;
/** Common AST node structure. */
typedef struct
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
void * fields[];
} ASTNode;
/** Parser shift table. */
static const shift_t parser_shift_table[] = {
<% @parser.shift_table.each do |shift| %>
{<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u},
{<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u},
<% end %>
};
@ -975,8 +950,6 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
/* We shifted a token, mark it consumed. */
<% if @grammar.ast %>
<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = malloc(sizeof(<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>));
token_ast_node->position = token_info.position;
token_ast_node->end_position = token_info.end_position;
token_ast_node->token = token;
token_ast_node->pvalue = token_info.pvalue;
state_values_stack_index(&statevalues, -1)->ast_node = token_ast_node;
@ -1011,43 +984,22 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
}
else if (parser_reduce_table[reduce_index].n_states > 0)
{
size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
ASTNode * node = (ASTNode *)malloc(sizeof(ASTNode) + n_fields * sizeof(void *));
node->position = INVALID_POSITION;
node->end_position = INVALID_POSITION;
for (size_t i = 0; i < n_fields; i++)
{
node->fields[i] = NULL;
}
void ** node_fields = calloc(parser_reduce_table[reduce_index].rule_set_node_field_array_size, sizeof(void *));
if (parser_reduce_table[reduce_index].rule_set_node_field_index_map == NULL)
{
for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
{
node->fields[i] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
node_fields[i] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
}
}
else
{
for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
{
node->fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
node_fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
}
}
bool position_found = false;
for (size_t i = 0; i < n_fields; i++)
{
ASTNode * child = (ASTNode *)node->fields[i];
if ((child != NULL) && <%= @grammar.prefix %>position_valid(child->position))
{
if (!position_found)
{
node->position = child->position;
position_found = true;
}
node->end_position = child->end_position;
}
}
reduced_parser_node = node;
reduced_parser_node = node_fields;
}
else
{

View File

@ -8,8 +8,6 @@
module <%= @grammar.modulename %>;
<% end %>
import core.stdc.stdlib : malloc;
/**************************************************************************
* User code blocks
*************************************************************************/
@ -51,29 +49,6 @@ public enum : <%= @grammar.prefix %>token_t
/** Code point type. */
public alias <%= @grammar.prefix %>code_point_t = uint;
/**
* A structure to keep track of input position.
*
* This is useful for reporting errors, etc...
*/
public struct <%= @grammar.prefix %>position_t
{
/** Input text row (0-based). */
uint row;
/** Input text column (0-based). */
uint col;
/** Invalid position value. */
enum INVALID = <%= @grammar.prefix %>position_t(0xFFFF_FFFF, 0xFFFF_FFFF);
/** Return whether the position is valid. */
public @property bool valid()
{
return row != 0xFFFF_FFFFu;
}
}
<% if @grammar.ast %>
/** Parser values type. */
public alias <%= @grammar.prefix %>value_t = <%= @grammar.ptype %>;
@ -88,20 +63,9 @@ public union <%= @grammar.prefix %>value_t
<% end %>
<% if @grammar.ast %>
/** Common AST node structure. */
private struct ASTNode
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
void *[0] fields;
}
/** AST node types. @{ */
public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
{
/* ASTNode fields must be present in the same order here. */
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<%= @grammar.prefix %>token_t token;
<%= @grammar.prefix %>value_t pvalue;
}
@ -111,8 +75,6 @@ public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
<% next if rule_set.optional? %>
public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<% rule_set.ast_fields.each do |fields| %>
union
{
@ -127,15 +89,26 @@ public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
/** @} */
<% end %>
/**
* A structure to keep track of parser position.
*
* This is useful for reporting errors, etc...
*/
public struct <%= @grammar.prefix %>position_t
{
/** Input text row (0-based). */
uint row;
/** Input text column (0-based). */
uint col;
}
/** Lexed token information. */
public struct <%= @grammar.prefix %>token_info_t
{
/** Text position of first code point in token. */
/** Text position where the token was found. */
<%= @grammar.prefix %>position_t position;
/** Text position of last code point in token. */
<%= @grammar.prefix %>position_t end_position;
/** Number of input bytes used by the token. */
size_t length;
@ -399,10 +372,7 @@ private struct lexer_match_info_t
/** Number of bytes of input text used to match. */
size_t length;
/** Input text position delta to end of token. */
<%= @grammar.prefix %>position_t end_delta_position;
/** Input text position delta to next code point after token end. */
/** Input text position delta. */
<%= @grammar.prefix %>position_t delta_position;
/** Accepting lexer state from the match. */
@ -530,7 +500,6 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
if (transition_state != INVALID_LEXER_STATE_ID)
{
attempt_match.length += code_point_length;
attempt_match.end_delta_position = attempt_match.delta_position;
if (code_point == '\n')
{
attempt_match.delta_position.row++;
@ -617,6 +586,7 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
<%= @grammar.prefix %>token_info_t token_info;
token_info.position = context.text_position;
token_info.token = INVALID_TOKEN_ID;
*out_token_info = token_info; // TODO: remove
lexer_match_info_t match_info;
size_t unexpected_input_length;
size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@ -663,22 +633,11 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
}
token_info.token = token_to_accept;
token_info.length = match_info.length;
if (match_info.end_delta_position.row != 0u)
{
token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
token_info.end_position.col = match_info.end_delta_position.col;
}
else
{
token_info.end_position.row = token_info.position.row;
token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
}
*out_token_info = token_info;
return P_SUCCESS;
case P_EOF:
token_info.token = TOKEN___EOF;
token_info.end_position = token_info.position;
*out_token_info = token_info;
return P_SUCCESS;
@ -858,7 +817,7 @@ private struct state_value_t
/** Parser shift table. */
private immutable shift_t[] parser_shift_table = [
<% @parser.shift_table.each do |shift| %>
shift_t(<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u),
shift_t(<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u),
<% end %>
];
@ -1038,7 +997,7 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
{
/* We shifted a token, mark it consumed. */
<% if @grammar.ast %>
<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token_info.position, token_info.end_position, token, token_info.pvalue);
<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token, token_info.pvalue);
statevalues[$-1].ast_node = token_ast_node;
<% else %>
statevalues[$-1].pvalue = token_info.pvalue;
@ -1071,43 +1030,26 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
}
else if (parser_reduce_table[reduce_index].n_states > 0)
{
size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
ASTNode * node = cast(ASTNode *)malloc(ASTNode.sizeof + n_fields * (void *).sizeof);
node.position = <%= @grammar.prefix %>position_t.INVALID;
node.end_position = <%= @grammar.prefix %>position_t.INVALID;
foreach (i; 0..n_fields)
void *[] node_fields = new void *[parser_reduce_table[reduce_index].rule_set_node_field_array_size];
foreach (i; 0..parser_reduce_table[reduce_index].rule_set_node_field_array_size)
{
node.fields[i] = null;
node_fields[i] = null;
}
if (parser_reduce_table[reduce_index].rule_set_node_field_index_map is null)
{
foreach (i; 0..parser_reduce_table[reduce_index].n_states)
{
node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
node_fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
}
}
else
{
foreach (i; 0..parser_reduce_table[reduce_index].n_states)
{
node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
node_fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
}
}
bool position_found = false;
foreach (i; 0..n_fields)
{
ASTNode * child = cast(ASTNode *)node.fields[i];
if (child && child.position.valid)
{
if (!position_found)
{
node.position = child.position;
position_found = true;
}
node.end_position = child.end_position;
}
}
reduced_parser_node = node;
reduced_parser_node = node_fields.ptr;
}
else
{

View File

@ -38,23 +38,6 @@ typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>t
/** Code point type. */
typedef uint32_t <%= @grammar.prefix %>code_point_t;
/**
* A structure to keep track of input position.
*
* This is useful for reporting errors, etc...
*/
typedef struct
{
/** Input text row (0-based). */
uint32_t row;
/** Input text column (0-based). */
uint32_t col;
} <%= @grammar.prefix %>position_t;
/** Return whether the position is valid. */
#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0xFFFFFFFFu)
/** User header code blocks. */
<%= @grammar.code_blocks.fetch("header", "") %>
@ -75,9 +58,6 @@ typedef union
/** AST node types. @{ */
typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
{
/* ASTNode fields must be present in the same order here. */
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<%= @grammar.prefix %>token_t token;
<%= @grammar.prefix %>value_t pvalue;
} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;
@ -93,8 +73,6 @@ struct <%= name %>;
<% next if rule_set.optional? %>
typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
{
<%= @grammar.prefix %>position_t position;
<%= @grammar.prefix %>position_t end_position;
<% rule_set.ast_fields.each do |fields| %>
union
{
@ -109,15 +87,26 @@ typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
/** @} */
<% end %>
/**
* A structure to keep track of parser position.
*
* This is useful for reporting errors, etc...
*/
typedef struct
{
/** Input text row (0-based). */
uint32_t row;
/** Input text column (0-based). */
uint32_t col;
} <%= @grammar.prefix %>position_t;
/** Lexed token information. */
typedef struct
{
/** Text position of first code point in token. */
/** Text position where the token was found. */
<%= @grammar.prefix %>position_t position;
/** Text position of last code point in token. */
<%= @grammar.prefix %>position_t end_position;
/** Number of input bytes used by the token. */
size_t length;

View File

@ -15,7 +15,6 @@ Propane is a LALR Parser Generator (LPG) which:
* generates a table-driven shift/reduce parser to parse input in linear time
* targets C or D language outputs
* optionally supports automatic full AST generation
* tracks input text start and end positions for all matched tokens/rules
* is MIT-licensed
* is distributable as a standalone Ruby script
@ -36,14 +35,9 @@ Propane is typically invoked from the command-line as `./propane`.
Usage: ./propane [options] <input-file> <output-file>
Options:
-h, --help Show this usage and exit.
--log LOG Write log file. This will show all parser states and their
associated shifts and reduces. It can be helpful when
debugging a grammar.
--version Show program version and exit.
-w Treat warnings as errors. This option will treat shift/reduce
conflicts as fatal errors and will print them to stderr in
addition to the log file.
--log LOG Write log file
--version Show program version and exit
-h, --help Show this usage and exit
The user must specify the path to a Propane input grammar file and a path to an
output file.
@ -234,15 +228,15 @@ drop /\\s+/;
Start -> Items;
Items -> Item:item ItemsMore;
Items -> Item ItemsMore;
Items -> ;
ItemsMore -> comma Item:item ItemsMore;
ItemsMore -> comma Item ItemsMore;
ItemsMore -> ;
Item -> a;
Item -> b;
Item -> lparen Item:item rparen;
Item -> lparen Item rparen;
Item -> Dual;
Dual -> One Two;
@ -263,24 +257,24 @@ Start * start = p_result(&context);
assert(start.pItems1 !is null);
assert(start.pItems !is null);
Items * items = start.pItems;
assert(items.item !is null);
assert(items.item.pToken1 !is null);
assert_eq(TOKEN_a, items.item.pToken1.token);
assert_eq(11, items.item.pToken1.pvalue);
assert(items.pItem !is null);
assert(items.pItem.pToken1 !is null);
assert_eq(TOKEN_a, items.pItem.pToken1.token);
assert_eq(11, items.pItem.pToken1.pvalue);
assert(items.pItemsMore !is null);
ItemsMore * itemsmore = items.pItemsMore;
assert(itemsmore.item !is null);
assert(itemsmore.item.item !is null);
assert(itemsmore.item.item.item !is null);
assert(itemsmore.item.item.item.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.item.item.item.pToken1.token);
assert_eq(22, itemsmore.item.item.item.pToken1.pvalue);
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore !is null);
itemsmore = itemsmore.pItemsMore;
assert(itemsmore.item !is null);
assert(itemsmore.item.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.item.pToken1.token);
assert_eq(22, itemsmore.item.pToken1.pvalue);
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore is null);
```
@ -508,7 +502,7 @@ tokenid str;
mystringvalue = "";
$mode(string);
>>
string: /[^"]+/ << mystringvalue ~= match; >>
string: /[^"]+/ << mystringvalue += match; >>
string: /"/ <<
$mode(default);
return $token(str);
@ -607,10 +601,6 @@ This can be changed with the `start` statement.
The grammar file must define a rule with the name of the start rule name which
will be used as the top-level starting rule that the parser attempts to reduce.
Rule statements are composed of the name of the rule, a `->` token, the fields
defining the rule pattern that must be matched, and a terminating semicolon or
user code block.
Example:
```
@ -639,13 +629,9 @@ E4 -> lparen E1 rparen << $$ = $2; >>
This example uses the default start rule name of `Start`.
A parser rule has zero or more fields on the right side of its definition.
Each of these fields is either a token name or a rule name.
A field can optionally be followed by a `:` and then a field alias name.
If present, the field alias name is used to refer to the field value in user
code blocks, or if AST mode is active, the field alias name is used as the
field name in the generated AST node structure.
A field can be immediately followed by a `?` character to signify that it is
A parser rule has zero or more terms on the right side of its definition.
Each of these terms is either a token name or a rule name.
A term can be immediately followed by a `?` character to signify that it is
optional.
Another example:
@ -655,16 +641,14 @@ token private;
token int;
token ident /[a-zA-Z_][a-zA-Z_0-9]*/;
token semicolon /;/;
IntegerDeclaration -> Visibility? int ident:name semicolon;
IntegerDeclaration -> Visibility? int ident semicolon;
Visibility -> public;
Visibility -> private;
```
In a parser rule code block, parser values for the right side fields are
accessible as `$1` for the first field's parser value, `$2` for the second
field's parser value, etc...
For the `IntegerDeclaration` rule, the third field value can also be referred
to as `${name}`.
In a parser rule code block, parser values for the right side terms are
accessible as `$1` for the first term's parser value, `$2` for the second
term's parser value, etc...
The `$$` symbol accesses the output parser value for this rule.
The above examples demonstrate how the parser values for the rule components
can be used to produce the parser value for the accepted rule.
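As a minimal sketch of this mechanism (the token and rule names are assumed rather than taken from a complete grammar), a rule's user code block can combine its components' parser values into its own value:

```
E1 -> E1 plus E2 << $$ = $1 + $3; >>
E1 -> E2 << $$ = $1; >>
```

Here `$1` and `$3` are the parser values of the first and third right-side components, and the value assigned to `$$` becomes the parser value of the reduced `E1`.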
@ -778,13 +762,6 @@ A pointer to this instance is passed to the generated functions.
The `p_position_t` structure contains two fields `row` and `col`.
These fields contain the 0-based row and column describing a parser position.
For D targets, the `p_position_t` structure can be checked for validity by
querying the `valid` property.
For C targets, the `p_position_t` structure can be checked for validity by
calling `p_position_valid(pos)` where `pos` is a `p_position_t` structure
instance.
### AST Node Types
If AST generation mode is enabled, a structure type for each rule will be
@ -795,26 +772,13 @@ AST node which refers to a raw parser token rather than a composite rule.
#### AST Node Fields
All AST nodes have a `position` field specifying the text position of the
beginning of the matched token or rule, and an `end_position` field specifying
the text position of the end of the matched token or rule.
Each of these fields is an instance of the `p_position_t` structure.
A `Token` node will always have a valid `position` and `end_position`.
A rule node may not have valid positions if the rule allows for an empty match.
In this case the `position` structure should be checked for validity before
using it.
For C targets this can be accomplished with
`if (p_position_valid(node->position))` and for D targets this can be
accomplished with `if (node.position.valid)`.
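Expanded into a small C-target sketch (the `node` pointer and the printing are only illustrative):

```
if (p_position_valid(node->position))
{
    printf("matched at row %u, col %u\n",
           (unsigned)node->position.row, (unsigned)node->position.col);
}
```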
A `Token` node has the following additional fields:
A `Token` node has two fields:
* `token` which specifies which token was parsed (one of `TOKEN_*`)
* `pvalue` which specifies the parser value for the token. If a lexer user
code block assigned a value to `$$`, that value is stored here.
AST node structures for rules contain generated fields based on the
The other generated AST node structures have fields generated based on the
right hand side components specified for all rules of a given name.
In this example:
@ -838,7 +802,7 @@ The `Items` structure will have fields:
If a rule can be empty (for example in the second `Items` rule above), then
an instance of a pointer to that rule's generated AST node will be null if the
parser matches the empty rule pattern.
parser matches the empty rule definition.
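In user code this means checking the pointer before following it; a short D-target sketch using the `Items`/`ItemsMore` structures from the example above:

```
Items * items = start.pItems;
if (items.pItemsMore !is null)
{
    /* ItemsMore matched its non-empty rule, so more Items follow */
}
```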
The non-positional AST node field pointer will not be generated if there are
multiple positions in which an instance of the node it points to could be
@ -859,19 +823,6 @@ If the first rule is matched, then `pOne1` and `pTwo2` will be non-null while
`pTwo1` and `pOne2` will be null.
If the second rule is matched instead, then the opposite would be the case.
If a field alias is present in a rule definition, an additional field will be
generated in the AST node with the field alias name.
For example:
```
Exp -> Exp:left plus ExpB:right;
```
In the generated `Exp` structure, the fields `pExp`, `pExp1`, and `left` will
all point to the same child node (an instance of the `Exp` structure), and the
fields `pExpB`, `pExpB3`, and `right` will all point to the same child node
(an instance of the `ExpB` structure).
##> Functions
### `p_context_init`
@ -908,24 +859,6 @@ p_context_init(&context, input, input_length);
size_t result = p_parse(&context);
```
### `p_position_valid`
The `p_position_valid()` function is only generated for C targets.
It is used to determine whether a `p_position_t` structure is valid.
Example:
```
if (p_position_valid(node->position))
{
....
}
```
For D targets, rather than using `p_position_valid()`, the `valid` property
function of the `p_position_t` structure can be queried
(e.g. `if (node.position.valid)`).
### `p_result`
The `p_result()` function can be used to retrieve the final parse value after

View File

@ -1 +0,0 @@
au BufNewFile,BufRead *.propane set filetype=propane

View File

@ -1,33 +0,0 @@
" Vim syntax file for Propane
" Language: propane
" Maintainer: Josh Holtrop
" URL: https://github.com/holtrop/propane
if exists("b:current_syntax")
finish
endif
if !exists("b:propane_subtype")
let b:propane_subtype = "d"
endif
exe "syn include @propaneTarget syntax/".b:propane_subtype.".vim"
syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contains=@propaneTarget keepend
syn match propaneComment "#.*"
syn match propaneOperator "->"
syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
syn match propaneFieldOperator ":" contained
syn match propaneOperator "?"
syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid
syn region propaneRegex start="/" end="/" skip="\\/"
hi def link propaneComment Comment
hi def link propaneKeyword Keyword
hi def link propaneRegex String
hi def link propaneOperator Operator
hi def link propaneFieldOperator Operator
hi def link propaneDelimiter Delimiter
hi def link propaneFieldAlias Identifier

View File

@ -31,10 +31,10 @@ class Propane
class << self
def run(input_file, output_file, log_file, options)
def run(input_file, output_file, log_file)
begin
grammar = Grammar.new(File.read(input_file))
generator = Generator.new(grammar, output_file, log_file, options)
generator = Generator.new(grammar, output_file, log_file)
generator.generate
rescue Error => e
$stderr.puts e.message

View File

@ -4,21 +4,15 @@ class Propane
USAGE = <<EOF
Usage: #{$0} [options] <input-file> <output-file>
Options:
-h, --help Show this usage and exit.
--log LOG Write log file. This will show all parser states and their
associated shifts and reduces. It can be helpful when
debugging a grammar.
--version Show program version and exit.
-w Treat warnings as errors. This option will treat shift/reduce
conflicts as fatal errors and will print them to stderr in
addition to the log file.
--log LOG Write log file
--version Show program version and exit
-h, --help Show this usage and exit
EOF
class << self
def run(args)
params = []
options = {}
log_file = nil
i = 0
while i < args.size
@ -35,8 +29,6 @@ EOF
when "-h", "--help"
puts USAGE
return 0
when "-w"
options[:warnings_as_errors] = true
when /^-/
$stderr.puts "Error: unknown option #{arg}"
return 1
@ -53,7 +45,7 @@ EOF
$stderr.puts "Error: cannot read #{params[0]}"
return 2
end
Propane.run(*params, log_file, options)
Propane.run(*params, log_file)
end
end

View File

@ -2,7 +2,7 @@ class Propane
class Generator
def initialize(grammar, output_file, log_file, options)
def initialize(grammar, output_file, log_file)
@grammar = grammar
@output_file = output_file
if log_file
@ -16,7 +16,6 @@ class Propane
else
"d"
end
@options = options
process_grammar!
end
@ -130,7 +129,7 @@ class Propane
# Generate the lexer.
@lexer = Lexer.new(@grammar)
# Generate the parser.
@parser = Parser.new(@grammar, rule_sets, @log, @options)
@parser = Parser.new(@grammar, rule_sets, @log)
end
# Check that any referenced ptypes have been defined.
@ -276,19 +275,6 @@ class Propane
"statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
end
end
code = code.gsub(/\$\{(\w+)\}/) do |match|
aliasname = $1
if index = rule.aliases[aliasname]
case @language
when "c"
"state_values_stack_index(statevalues, -(int)n_states + #{index})->pvalue.v_#{rule.components[index].ptypename}"
when "d"
"statevalues[$-n_states+#{index}].pvalue.v_#{rule.components[index].ptypename}"
end
else
raise Error.new("Field alias '#{aliasname}' not found")
end
end
else
code = code.gsub(/\$\$/) do |match|
if @grammar.ast

View File

@ -198,7 +198,7 @@ class Propane
if @ast && ptypename
raise Error.new("Multiple ptypes are unsupported in AST mode")
end
md = consume!(/((?:#{IDENTIFIER_REGEX}(?::#{IDENTIFIER_REGEX})?\??\s*)*)\s*/, "expected rule component list")
md = consume!(/((?:#{IDENTIFIER_REGEX}\??\s*)*)\s*/, "expected rule component list")
components = md[1].strip.split(/\s+/)
if @ast
consume!(/;/, "expected `;'")

View File

@ -7,14 +7,12 @@ class Propane
attr_reader :reduce_table
attr_reader :rule_sets
def initialize(grammar, rule_sets, log, options)
def initialize(grammar, rule_sets, log)
@grammar = grammar
@rule_sets = rule_sets
@log = log
@item_sets = []
@item_sets_set = {}
@warnings = Set.new
@options = options
start_item = Item.new(grammar.rules.first, 0)
eval_item_sets = Set[ItemSet.new([start_item])]
@ -25,10 +23,10 @@ class Propane
item_set.id = @item_sets.size
@item_sets << item_set
@item_sets_set[item_set] = item_set
item_set.next_symbols.each do |next_symbol|
unless next_symbol.name == "$EOF"
next_item_set = item_set.build_next_item_set(next_symbol)
eval_item_sets << next_item_set
item_set.following_symbols.each do |following_symbol|
unless following_symbol.name == "$EOF"
following_set = item_set.build_following_item_set(following_symbol)
eval_item_sets << following_set
end
end
end
@ -39,11 +37,8 @@ class Propane
end
build_reduce_actions!
build_tables!
write_log!
if @warnings.size > 0 && @options[:warnings_as_errors]
raise Error.new("Fatal errors (-w):\n" + @warnings.join("\n"))
end
build_tables!
end
private
@ -53,34 +48,26 @@ class Propane
@shift_table = []
@reduce_table = []
@item_sets.each do |item_set|
shift_entries = item_set.next_symbols.map do |next_symbol|
shift_entries = item_set.following_symbols.map do |following_symbol|
state_id =
if next_symbol.name == "$EOF"
if following_symbol.name == "$EOF"
0
else
item_set.next_item_set[next_symbol].id
item_set.following_item_set[following_symbol].id
end
{
symbol: next_symbol,
symbol_id: following_symbol.id,
state_id: state_id,
}
end
unless item_set.reduce_rules.empty?
shift_entries.each do |shift_entry|
token = shift_entry[:symbol]
if get_lookahead_reduce_actions_for_item_set(item_set).include?(token)
rule = item_set.reduce_actions[token]
@warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
end
end
end
reduce_entries =
if rule = item_set.reduce_rule
[{token_id: @grammar.invalid_token_id, rule_id: rule.id, rule: rule,
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
propagate_optional_target: rule.optional? && rule.components.size == 1}]
elsif reduce_actions = item_set.reduce_actions
reduce_actions.map do |token, rule|
case ra = item_set.reduce_actions
when Rule
[{token_id: @grammar.invalid_token_id, rule_id: ra.id, rule: ra,
rule_set_id: ra.rule_set.id, n_states: ra.components.size,
propagate_optional_target: ra.optional? && ra.components.size == 1}]
when Hash
ra.map do |token, rule|
{token_id: token.id, rule_id: rule.id, rule: rule,
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
propagate_optional_target: rule.optional? && rule.components.size == 1}
@ -100,11 +87,11 @@ class Propane
end
def process_item_set(item_set)
item_set.next_symbols.each do |next_symbol|
unless next_symbol.name == "$EOF"
next_item_set = @item_sets_set[item_set.build_next_item_set(next_symbol)]
item_set.next_item_set[next_symbol] = next_item_set
next_item_set.in_sets << item_set
item_set.following_symbols.each do |following_symbol|
unless following_symbol.name == "$EOF"
following_set = @item_sets_set[item_set.build_following_item_set(following_symbol)]
item_set.following_item_set[following_symbol] = following_set
following_set.in_sets << item_set
end
end
end
@ -114,7 +101,7 @@ class Propane
# @return [void]
def build_reduce_actions!
@item_sets.each do |item_set|
build_reduce_actions_for_item_set(item_set)
item_set.reduce_actions = build_reduce_actions_for_item_set(item_set)
end
end
@ -123,55 +110,38 @@ class Propane
# @param item_set [ItemSet]
# ItemSet (parser state)
#
# @return [void]
# @return [nil, Rule, Hash]
# If no reduce actions are possible for the given item set, nil.
# If only one reduce action is possible for the given item set, the Rule
# to reduce.
# Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
def build_reduce_actions_for_item_set(item_set)
# To build the reduce actions, we start by looking at any
# "complete" items, i.e., items where the parse position is at the
# end of a rule. These are the only rules that are candidates for
# reduction in the current ItemSet.
item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
if item_set.reduce_rules.size == 1
item_set.reduce_rule = item_set.reduce_rules.first
end
# If there are no rules to reduce for this ItemSet, we're done here.
return nil if reduce_rules.size == 0
if item_set.reduce_rules.size > 1
# Force item_set.reduce_actions to be built to store the lookahead
# tokens for the possible reduce rules if there is more than one.
get_lookahead_reduce_actions_for_item_set(item_set)
end
end
# If there is exactly one rule to reduce for this ItemSet, then do not
# figure out the lookaheads; just reduce it.
return reduce_rules.first if reduce_rules.size == 1
# Get the reduce actions for a single item set (parser state).
#
# @param item_set [ItemSet]
# ItemSet (parser state)
#
# @return [Hash]
# Mapping of lookahead Tokens to the Rules to reduce.
def get_lookahead_reduce_actions_for_item_set(item_set)
item_set.reduce_actions ||= build_lookahead_reduce_actions_for_item_set(item_set)
end
# Otherwise, we have more than one possible rule to reduce.
# Build the reduce actions for a single item set (parser state).
#
# @param item_set [ItemSet]
# ItemSet (parser state)
#
# @return [Hash]
# Mapping of lookahead Tokens to the Rules to reduce.
def build_lookahead_reduce_actions_for_item_set(item_set)
# We will be looking for all possible tokens that can follow instances of
# these rules. Rather than looking through the entire grammar for the
# possible following tokens, we will only look in the item sets leading
# up to this one. This restriction gives us a more precise lookahead set,
# and allows us to parse LALR grammars.
item_sets = Set[item_set] + item_set.leading_item_sets
item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
lookahead_tokens_for_rule.each do |lookahead_token|
if existing_reduce_rule = reduce_actions[lookahead_token]
raise Error.new("Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number})")
raise Error.new("Error: reduce/reduce conflict between rule #{existing_reduce_rule.id} (#{existing_reduce_rule.name}) and rule #{reduce_rule.id} (#{reduce_rule.name})")
end
reduce_actions[lookahead_token] = reduce_rule
end
@ -213,9 +183,9 @@ class Propane
# tokens to form the lookahead token set.
item_sets.each do |item_set|
item_set.items.each do |item|
if item.next_symbol == rule_set
if item.following_symbol == rule_set
(1..).each do |offset|
case symbol = item.next_symbol(offset)
case symbol = item.following_symbol(offset)
when nil
rule_set = item.rule.rule_set
unless checked_rule_sets.include?(rule_set)
@ -272,26 +242,20 @@ class Propane
@log.puts
@log.puts " Incoming states: #{incoming_ids.join(", ")}"
@log.puts " Outgoing states:"
item_set.next_item_set.each do |next_symbol, next_item_set|
@log.puts " #{next_symbol.name} => #{next_item_set.id}"
item_set.following_item_set.each do |following_symbol, following_item_set|
@log.puts " #{following_symbol.name} => #{following_item_set.id}"
end
@log.puts
@log.puts " Reduce actions:"
if item_set.reduce_rule
@log.puts " * => rule #{item_set.reduce_rule.id}, rule set #{@rule_sets[item_set.reduce_rule.name].id} (#{item_set.reduce_rule.name})"
elsif item_set.reduce_actions
case item_set.reduce_actions
when Rule
@log.puts " * => rule #{item_set.reduce_actions.id}, rule set #{@rule_sets[item_set.reduce_actions.name].id} (#{item_set.reduce_actions.name})"
when Hash
item_set.reduce_actions.each do |token, rule|
@log.puts " lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}"
end
end
end
if @warnings.size > 0
@log.puts
@log.puts "Warnings:"
@warnings.each do |warning|
@log.puts " #{warning}"
end
end
end
end

View File

@ -56,7 +56,7 @@ class Propane
# Return the set of Items obtained by "closing" the current item.
#
# If the next symbol for the current item is another Rule name, then
# If the following symbol for the current item is another Rule name, then
# this method will return all Items for that Rule with a position of 0.
# Otherwise, an empty Array is returned.
#
@ -81,17 +81,17 @@ class Propane
@position == @rule.components.size
end
# Get the next symbol for the Item.
# Get the following symbol for the Item.
#
# That is, the symbol which is after the parse position marker in the
# That is, the symbol which follows the parse position marker in the
# current Item.
#
# @param offset [Integer]
# Offset from current parse position to examine.
#
# @return [Token, RuleSet, nil]
# Next symbol for the Item.
def next_symbol(offset = 0)
# Following symbol for the Item.
def following_symbol(offset = 0)
@rule.components[@position + offset]
end
@ -108,25 +108,25 @@ class Propane
end
end
# Get whether this Item's next symbol is the given symbol.
# Get whether this Item is followed by the provided symbol.
#
# @param symbol [Token, RuleSet]
# Symbol to query.
#
# @return [Boolean]
# Whether this Item's next symbol is the given symbol.
def next_symbol?(symbol)
next_symbol == symbol
# Whether this Item is followed by the provided symbol.
def followed_by?(symbol)
following_symbol == symbol
end
# Get the next item for this Item.
# Get the following item for this Item.
#
# That is, the Item formed by moving the parse position marker one place
# forward from its position in this Item.
#
# @return [Item]
# The next item for this Item.
def next_item
# The following item for this Item.
def following_item
Item.new(@rule, @position + 1)
end

View File

@ -2,7 +2,7 @@ class Propane
class Parser
# Represent a parser "item set", which is a set of possible items that the
# parser could currently be parsing. This is equivalent to a parser state.
# parser could currently be parsing.
class ItemSet
# @return [Set<Item>]
@ -14,24 +14,15 @@ class Propane
attr_accessor :id
# @return [Hash]
# Maps a next symbol to its ItemSet.
attr_reader :next_item_set
# Maps a following symbol to its ItemSet.
attr_reader :following_item_set
# @return [Set<ItemSet>]
# ItemSets leading to this item set.
attr_reader :in_sets
# @return [nil, Rule]
# Rule to reduce if there is only one possibility.
attr_accessor :reduce_rule
# @return [Set<Rule>]
# Set of rules that could be reduced in this parser state.
attr_accessor :reduce_rules
# @return [nil, Hash]
# Reduce actions, mapping lookahead tokens to rules, if there is
# more than one rule that could be reduced.
# @return [nil, Rule, Hash]
# Reduce actions, mapping lookahead tokens to rules.
attr_accessor :reduce_actions
# Build an ItemSet.
@ -40,28 +31,28 @@ class Propane
# Items in this ItemSet.
def initialize(items)
@items = Set.new(items)
@next_item_set = {}
@following_item_set = {}
@in_sets = Set.new
close!
end
# Get the set of next symbols for all Items in this ItemSet.
# Get the set of following symbols for all Items in this ItemSet.
#
# @return [Set<Token, RuleSet>]
# Set of next symbols for all Items in this ItemSet.
def next_symbols
@_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
# Set of following symbols for all Items in this ItemSet.
def following_symbols
Set.new(@items.map(&:following_symbol).compact)
end
# Build a next ItemSet for the given next symbol.
# Build a following ItemSet for the given following symbol.
#
# @param symbol [Token, RuleSet]
# Next symbol to build the next ItemSet for.
# Following symbol to build the following ItemSet for.
#
# @return [ItemSet]
# Next ItemSet for the given next symbol.
def build_next_item_set(symbol)
ItemSet.new(items_with_next(symbol).map(&:next_item))
# Following ItemSet for the given following symbol.
def build_following_item_set(symbol)
ItemSet.new(items_followed_by(symbol).map(&:following_item))
end
# Hash function.
@ -99,8 +90,6 @@ class Propane
# @return [Set<ItemSet>]
# Set of all ItemSets that lead up to this ItemSet.
def leading_item_sets
@_leading_item_sets ||=
begin
result = Set.new
eval_sets = Set[self]
evaled = Set.new
@ -117,7 +106,6 @@ class Propane
end
result
end
end
# Represent the ItemSet as a String.
#
@ -149,16 +137,16 @@ class Propane
end
end
# Get the Items with the given next symbol.
# Get the Items followed by the given following symbol.
#
# @param symbol [Token, RuleSet]
# Next symbol.
# Following symbol.
#
# @return [Array<Item>]
# Items with the given next symbol.
def items_with_next(symbol)
# Items followed by the given following symbol.
def items_followed_by(symbol)
@items.select do |item|
item.next_symbol?(symbol)
item.followed_by?(symbol)
end
end

View File

@ -6,10 +6,6 @@ class Propane
# Rule components.
attr_reader :components
# @return [Hash]
# Field aliases.
attr_reader :aliases
# @return [String]
# User code associated with the rule.
attr_reader :code
@ -53,19 +49,7 @@ class Propane
# Line number where the rule was defined in the input grammar.
def initialize(name, components, code, ptypename, line_number)
@name = name
@aliases = {}
@components = components.each_with_index.map do |component, i|
if component =~ /(\S+):(\S+)/
c, aliasname = $1, $2
if @aliases[aliasname]
raise Error.new("Error: duplicate field alias `#{aliasname}` for rule #{name} defined on line #{line_number}")
end
@aliases[aliasname] = i
c
else
component
end
end
@components = components
@rule_set_node_field_index_map = components.map {0}
@code = code
@ptypename = ptypename

View File

@ -100,10 +100,8 @@ class Propane
# Finalize a RuleSet after adding all Rules to it.
def finalize(grammar)
if grammar.ast
build_ast_fields(grammar)
end
end
private
@ -150,18 +148,6 @@ class Propane
"#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
end
end
# Now merge in the field aliases as given by the user in the
# grammar.
field_aliases = {}
@rules.each do |rule|
rule.aliases.each do |alias_name, index|
if field_aliases[alias_name] && field_aliases[alias_name] != index
raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}`")
end
field_aliases[alias_name] = index
@ast_fields[index][alias_name] = @ast_fields[index].first[1]
end
end
end
end

View File

@ -1,3 +1,3 @@
class Propane
VERSION = "1.5.1"
VERSION = "1.4.0"
end

View File

@ -54,7 +54,6 @@ EOF
else
command += %W[spec/run/testparser#{options[:name]}.propane spec/run/testparser#{options[:name]}.#{options[:language]} --log spec/run/testparser#{options[:name]}.log]
end
command += (options[:extra_args] || [])
if (options[:capture])
stdout, stderr, status = Open3.capture3(*command)
Results.new(stdout, stderr, status)
@ -185,70 +184,6 @@ EOF
expect(results.status).to_not eq 0
end
it "warns on shift/reduce conflicts" do
write_grammar <<EOF
token a;
token b;
Start -> As? b?;
As -> a As2?;
As2 -> b a As2?;
EOF
results = run_propane(capture: true)
expect(results.stderr).to eq ""
expect(results.status).to eq 0
expect(File.binread("spec/run/testparser.log")).to match %r{Shift/Reduce conflict \(state \d+\) between token b and rule As2\? \(defined on line 4\)}
end
it "errors on shift/reduce conflicts with -w" do
write_grammar <<EOF
token a;
token b;
Start -> As? b?;
As -> a As2?;
As2 -> b a As2?;
EOF
results = run_propane(extra_args: %w[-w], capture: true)
expect(results.stderr).to match %r{Shift/Reduce conflict \(state \d+\) between token b and rule As2\? \(defined on line 4\)}m
expect(results.status).to_not eq 0
expect(File.binread("spec/run/testparser.log")).to match %r{Shift/Reduce conflict \(state \d+\) between token b and rule As2\? \(defined on line 4\)}
end
it "errors on duplicate field aliases in a rule" do
write_grammar <<EOF
token a;
token b;
Start -> a:foo b:foo;
EOF
results = run_propane(extra_args: %w[-w], capture: true)
expect(results.stderr).to match %r{Error: duplicate field alias `foo` for rule Start defined on line 3}
expect(results.status).to_not eq 0
end
it "errors when an alias is in different positions for different rules in a rule set when AST mode is enabled" do
write_grammar <<EOF
ast;
token a;
token b;
Start -> a:foo b;
Start -> b b:foo;
EOF
results = run_propane(extra_args: %w[-w], capture: true)
expect(results.stderr).to match %r{Error: conflicting AST node field positions for alias `foo`}
expect(results.status).to_not eq 0
end
it "does not error when an alias is in different positions for different rules in a rule set when AST mode is not enabled" do
write_grammar <<EOF
token a;
token b;
Start -> a:foo b;
Start -> b b:foo;
EOF
results = run_propane(extra_args: %w[-w], capture: true)
expect(results.stderr).to eq ""
expect(results.status).to eq 0
end
%w[d c].each do |language|
context "#{language.upcase} language" do
@ -688,7 +623,7 @@ F -> e;
EOF
results = run_propane(capture: true, language: language)
expect(results.status).to_not eq 0
expect(results.stderr).to match %r{Error: reduce/reduce conflict \(state \d+\) between rule E#\d+ \(defined on line 10\) and rule F#\d+ \(defined on line 11\)}
expect(results.stderr).to match %r{reduce/reduce conflict.*\(E\).*\(F\)}
end
it "provides matched text to user code blocks" do
@ -1116,110 +1051,6 @@ EOF
expect(results.stderr).to eq ""
expect(results.status).to eq 0
end
it "stores token and rule positions in AST nodes" do
write_grammar <<EOF
ast;
token a;
token bb;
token c /c(.|\\n)*c/;
drop /\\s+/;
Start -> T T T;
T -> a;
T -> bb;
T -> c;
EOF
run_propane(language: language)
compile("spec/test_ast_token_positions.#{language}", language: language)
results = run_test
expect(results.stderr).to eq ""
expect(results.status).to eq 0
end
it "stores invalid positions for empty rule matches" do
write_grammar <<EOF
ast;
token a;
token bb;
token c /c(.|\\n)*c/;
drop /\\s+/;
Start -> T Start;
Start -> ;
T -> a A;
A -> bb? c?;
EOF
run_propane(language: language)
compile("spec/test_ast_invalid_positions.#{language}", language: language)
results = run_test
expect(results.stderr).to eq ""
expect(results.status).to eq 0
end
it "allows specifying field aliases in AST mode" do
write_grammar <<EOF
ast;
token a;
token b;
token c;
drop /\\s+/;
Start -> T:first T:second T:third;
T -> a;
T -> b;
T -> c;
EOF
run_propane(language: language)
compile("spec/test_ast_field_aliases.#{language}", language: language)
results = run_test
expect(results.stderr).to eq ""
expect(results.status).to eq 0
end
it "allows specifying field aliases when AST mode is not enabled" do
if language == "d"
write_grammar <<EOF
<<
import std.stdio;
>>
ptype string;
token id /[a-zA-Z_][a-zA-Z0-9_]*/ <<
$$ = match;
>>
drop /\\s+/;
Start -> id:first id:second <<
writeln("first is ", ${first});
writeln("second is ", ${second});
>>
EOF
else
write_grammar <<EOF
<<
#include <stdio.h>
#include <string.h>
>>
ptype char const *;
token id /[a-zA-Z_][a-zA-Z0-9_]*/ <<
char * s = malloc(match_length + 1);
strncpy(s, (char const *)match, match_length);
s[match_length] = 0;
$$ = s;
>>
drop /\\s+/;
Start -> id:first id:second <<
printf("first is %s\\n", ${first});
printf("second is %s\\n", ${second});
>>
EOF
end
run_propane(language: language)
compile("spec/test_field_aliases.#{language}", language: language)
results = run_test
expect(results.stderr).to eq ""
expect(results.status).to eq 0
expect(results.stdout).to match /first is foo1.*second is bar2/m
end
end
end
end

View File

@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(TOKEN_a, start->first->pToken->token);
assert_eq(TOKEN_b, start->second->pToken->token);
assert_eq(TOKEN_c, start->third->pToken->token);
return 0;
}

View File

@ -1,21 +0,0 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(TOKEN_a, start.first.pToken.token);
assert_eq(TOKEN_b, start.second.pToken.token);
assert_eq(TOKEN_c, start.third.pToken.token);
}

View File

@ -1,102 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(1, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(2, start->pT1->pA->position.row);
assert_eq(2, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(7, start->pT1->pA->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(7, start->pT1->end_position.col);
assert_eq(1, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(7, start->end_position.col);
input = "a\nbb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(1, start->pT1->pA->end_position.row);
assert_eq(1, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(1, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(1, start->end_position.row);
assert_eq(1, start->end_position.col);
input = "a\nc\nc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(p_position_valid(start->pT1->pA->position));
assert_eq(1, start->pT1->pA->position.row);
assert_eq(0, start->pT1->pA->position.col);
assert_eq(2, start->pT1->pA->end_position.row);
assert_eq(0, start->pT1->pA->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(2, start->end_position.row);
assert_eq(0, start->end_position.col);
input = "a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert(!p_position_valid(start->pT1->pA->position));
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(0, start->end_position.col);
return 0;
}

View File

@ -1,104 +0,0 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "\na\n bb ccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(1, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(2, start.pT1.pA.position.row);
assert_eq(2, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(7, start.pT1.pA.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(7, start.pT1.end_position.col);
assert_eq(1, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(7, start.end_position.col);
input = "a\nbb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(1, start.pT1.pA.end_position.row);
assert_eq(1, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(1, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(1, start.end_position.row);
assert_eq(1, start.end_position.col);
input = "a\nc\nc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(start.pT1.pA.position.valid);
assert_eq(1, start.pT1.pA.position.row);
assert_eq(0, start.pT1.pA.position.col);
assert_eq(2, start.pT1.pA.end_position.row);
assert_eq(0, start.pT1.pA.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(2, start.end_position.row);
assert_eq(0, start.end_position.col);
input = "a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert(!start.pT1.pA.position.valid);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(0, start.end_position.col);
}

View File

@ -1,84 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "abbccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(0, start->pT1->pToken->position.row);
assert_eq(0, start->pT1->pToken->position.col);
assert_eq(0, start->pT1->pToken->end_position.row);
assert_eq(0, start->pT1->pToken->end_position.col);
assert_eq(0, start->pT1->position.row);
assert_eq(0, start->pT1->position.col);
assert_eq(0, start->pT1->end_position.row);
assert_eq(0, start->pT1->end_position.col);
assert_eq(0, start->pT2->pToken->position.row);
assert_eq(1, start->pT2->pToken->position.col);
assert_eq(0, start->pT2->pToken->end_position.row);
assert_eq(2, start->pT2->pToken->end_position.col);
assert_eq(0, start->pT2->position.row);
assert_eq(1, start->pT2->position.col);
assert_eq(0, start->pT2->end_position.row);
assert_eq(2, start->pT2->end_position.col);
assert_eq(0, start->pT3->pToken->position.row);
assert_eq(3, start->pT3->pToken->position.col);
assert_eq(0, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(0, start->pT3->position.row);
assert_eq(3, start->pT3->position.col);
assert_eq(0, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);
assert_eq(0, start->position.row);
assert_eq(0, start->position.col);
assert_eq(0, start->end_position.row);
assert_eq(5, start->end_position.col);
input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(2, start->pT1->pToken->position.row);
assert_eq(2, start->pT1->pToken->position.col);
assert_eq(2, start->pT1->pToken->end_position.row);
assert_eq(3, start->pT1->pToken->end_position.col);
assert_eq(2, start->pT1->position.row);
assert_eq(2, start->pT1->position.col);
assert_eq(2, start->pT1->end_position.row);
assert_eq(3, start->pT1->end_position.col);
assert_eq(3, start->pT2->pToken->position.row);
assert_eq(0, start->pT2->pToken->position.col);
assert_eq(4, start->pT2->pToken->end_position.row);
assert_eq(1, start->pT2->pToken->end_position.col);
assert_eq(3, start->pT2->position.row);
assert_eq(0, start->pT2->position.col);
assert_eq(4, start->pT2->end_position.row);
assert_eq(1, start->pT2->end_position.col);
assert_eq(6, start->pT3->pToken->position.row);
assert_eq(5, start->pT3->pToken->position.col);
assert_eq(6, start->pT3->pToken->end_position.row);
assert_eq(5, start->pT3->pToken->end_position.col);
assert_eq(6, start->pT3->position.row);
assert_eq(5, start->pT3->position.col);
assert_eq(6, start->pT3->end_position.row);
assert_eq(5, start->pT3->end_position.col);
assert_eq(2, start->position.row);
assert_eq(2, start->position.col);
assert_eq(6, start->end_position.row);
assert_eq(5, start->end_position.col);
return 0;
}

@ -1,86 +0,0 @@
import testparser;
import std.stdio;
import testutils;
int main()
{
return 0;
}
unittest
{
string input = "abbccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert_eq(0, start.pT1.pToken.position.row);
assert_eq(0, start.pT1.pToken.position.col);
assert_eq(0, start.pT1.pToken.end_position.row);
assert_eq(0, start.pT1.pToken.end_position.col);
assert_eq(0, start.pT1.position.row);
assert_eq(0, start.pT1.position.col);
assert_eq(0, start.pT1.end_position.row);
assert_eq(0, start.pT1.end_position.col);
assert_eq(0, start.pT2.pToken.position.row);
assert_eq(1, start.pT2.pToken.position.col);
assert_eq(0, start.pT2.pToken.end_position.row);
assert_eq(2, start.pT2.pToken.end_position.col);
assert_eq(0, start.pT2.position.row);
assert_eq(1, start.pT2.position.col);
assert_eq(0, start.pT2.end_position.row);
assert_eq(2, start.pT2.end_position.col);
assert_eq(0, start.pT3.pToken.position.row);
assert_eq(3, start.pT3.pToken.position.col);
assert_eq(0, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(0, start.pT3.position.row);
assert_eq(3, start.pT3.position.col);
assert_eq(0, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);
assert_eq(0, start.position.row);
assert_eq(0, start.position.col);
assert_eq(0, start.end_position.row);
assert_eq(5, start.end_position.col);
input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert_eq(2, start.pT1.pToken.position.row);
assert_eq(2, start.pT1.pToken.position.col);
assert_eq(2, start.pT1.pToken.end_position.row);
assert_eq(3, start.pT1.pToken.end_position.col);
assert_eq(2, start.pT1.position.row);
assert_eq(2, start.pT1.position.col);
assert_eq(2, start.pT1.end_position.row);
assert_eq(3, start.pT1.end_position.col);
assert_eq(3, start.pT2.pToken.position.row);
assert_eq(0, start.pT2.pToken.position.col);
assert_eq(4, start.pT2.pToken.end_position.row);
assert_eq(1, start.pT2.pToken.end_position.col);
assert_eq(3, start.pT2.position.row);
assert_eq(0, start.pT2.position.col);
assert_eq(4, start.pT2.end_position.row);
assert_eq(1, start.pT2.end_position.col);
assert_eq(6, start.pT3.pToken.position.row);
assert_eq(5, start.pT3.pToken.position.col);
assert_eq(6, start.pT3.pToken.end_position.row);
assert_eq(5, start.pT3.pToken.end_position.col);
assert_eq(6, start.pT3.position.row);
assert_eq(5, start.pT3.position.col);
assert_eq(6, start.pT3.end_position.row);
assert_eq(5, start.pT3.end_position.col);
assert_eq(2, start.position.row);
assert_eq(2, start.position.col);
assert_eq(6, start.end_position.row);
assert_eq(5, start.end_position.col);
}

@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"
int main()
{
char const * input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}

@ -1,15 +0,0 @@
import testparser;
import std.stdio;
int main()
{
return 0;
}
unittest
{
string input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}

@ -43,57 +43,41 @@ int main()
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 0u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 2u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 2u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 4u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 4u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 6u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 6u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_times);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 2u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 4u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 4u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 6u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 8u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 9u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 9u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);
@ -101,8 +85,6 @@ int main()
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 0u);
assert(token_info.position.col == 0u);
assert(token_info.end_position.row == 0u);
assert(token_info.end_position.col == 0u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);

@ -47,23 +47,23 @@ unittest
p_context_t context;
p_context_init(&context, input);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 1, TOKEN_int));
assert(token_info == p_token_info_t(p_position_t(0, 0), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 2), p_position_t(0, 2), 1, TOKEN_plus));
assert(token_info == p_token_info_t(p_position_t(0, 2), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 4), p_position_t(0, 4), 1, TOKEN_int));
assert(token_info == p_token_info_t(p_position_t(0, 4), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 6), p_position_t(0, 6), 1, TOKEN_times));
assert(token_info == p_token_info_t(p_position_t(0, 6), 1, TOKEN_times));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 0), p_position_t(1, 2), 3, TOKEN_int));
assert(token_info == p_token_info_t(p_position_t(1, 0), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 4), p_position_t(1, 4), 1, TOKEN_plus));
assert(token_info == p_token_info_t(p_position_t(1, 4), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 6), p_position_t(1, 8), 3, TOKEN_int));
assert(token_info == p_token_info_t(p_position_t(1, 6), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 9), p_position_t(1, 9), 0, TOKEN___EOF));
assert(token_info == p_token_info_t(p_position_t(1, 9), 0, TOKEN___EOF));
p_context_init(&context, "");
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(0, 0), p_position_t(0, 0), 0, TOKEN___EOF));
assert(token_info == p_token_info_t(p_position_t(0, 0), 0, TOKEN___EOF));
}
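The equality checks above drive p_lex one token at a time. For context, here is a minimal sketch of the same lexer API driven in a loop; it assumes only the names already used in this file (p_context_init, p_lex, p_token_info_t, TOKEN___EOF), and the helper name lexAll is purely illustrative:

import testparser;
import std.stdio;

// Minimal sketch (editorial, not part of the test suite): lex an entire
// input, printing each token's start position and length, and stop at the
// end-of-input token.
void lexAll(string input)
{
    p_context_t context;
    p_context_init(&context, input);
    p_token_info_t token_info;
    for (;;)
    {
        assert(p_lex(&context, &token_info) == P_SUCCESS);
        writefln("token %s at %s:%s, length %s", token_info.token,
                 token_info.position.row, token_info.position.col,
                 token_info.length);
        if (token_info.token == TOKEN___EOF)
        {
            break;
        }
    }
}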