Compare commits

14 commits:

| SHA1 |
|---|
| 06af7fbe3b |
| 9760da4df4 |
| 35acdde09f |
| aef5367378 |
| 36213d9e9c |
| 7b1d903b00 |
| 59e8e0a095 |
| dace12310a |
| c7185edef0 |
| 9d2b3be20b |
| 2b515e1a7a |
| 4ffdea07bb |
| cdb6294f1f |
| 48b4033ef2 |
.github/workflows/run-tests.yml (vendored): 38 changes

@@ -1,38 +0,0 @@
-name: Run Propane Tests
-
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-
-jobs:
-  test:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest]
-        ruby-version: ['3.4']
-
-    steps:
-      - name: Install dependencies (Linux)
-        if: runner.os == 'Linux'
-        run: sudo apt-get update && sudo apt-get install -y gcc gdc ldc
-
-      - name: Install dependencies (macOS)
-        if: runner.os == 'macOS'
-        run: brew install gcc ldc
-
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: ${{ matrix.ruby-version }}
-
-      - name: Install dependencies
-        run: bundle install
-
-      - name: Run tests
-        run: rake all
CHANGELOG.md: 119 changes

@@ -1,122 +1,3 @@
-## v2.3.0
-
-### New Features
-
-- Add \D, \S, \w, \W special character classes
-
-### Improvements
-
-- Include line numbers for pattern errors
-- Improve performance in a few places
-- Parallelize parser table generation on Linux hosts
-- Add github workflow to run unit tests
-
-### Fixes
-
-- Fix a couple clang warnings for C backend
-- Fix C backend not fully initializing pvalues when multiple ptypes are used with different sizes.
-- Fix some user guide examples
-
-## v2.2.1
-
-### Fixes
-
-- Fix GC issue for D backend when AST is enabled (#36)
-
-## v2.2.0
-
-### Improvements
-
-- Allow multiple lexer modes to be specified for a lexer pattern (#35)
-- Document p_decode_code_point() API function (#34)
-
-## v2.1.1
-
-### Fixes
-
-- Field aliases for AST node fields could alias incorrect field when multiple rule alternatives present for one rule set (#33)
-
-## v2.1.0
-
-### Improvements
-
-- Report rule name and line number for conflicting AST node field positions errors (#32)
-
-## v2.0.0
-
-### Improvements
-
-- Log conflicting rules on reduce/reduce conflict (#31)
-- Use 1-based row and column values for position values (#30)
-
-### Fixes
-
-- Fix named optional rules (#29)
-
-### Upgrading
-
-- Adjust all uses of p_position_t row and col values to expect 1-based instead
-  of 0-based values.
-
-## v1.5.1
-
-### Improvements
-
-- Improve performance (#28)
-
-## v1.5.0
-
-### New Features
-
-- Track start and end text positions for tokens and rules in AST node structures (#27)
-- Add warnings for shift/reduce conflicts to log file (#25)
-- Add -w command line switch to treat warnings as errors and output to stderr (#26)
-- Add rule field aliases (#24)
-
-### Improvements
-
-- Show line numbers of rules on conflict (#23)
-
-## v1.4.0
-
-### New Features
-
-- Allow user to specify AST node name prefix or suffix
-- Allow specifying the start rule name
-- Allow rule terms to be marked as optional
-
-### Improvements
-
-- Give a better error message when a referenced ptype has not been declared
-
-## v1.3.0
-
-### New Features
-
-- Add AST generation (#22)
-
-## v1.2.0
-
-### New Features
-
-- Allow one line user code blocks (#21)
-- Add backslash escape codes (#19)
-- Add API to access unexpected token found (#18)
-- Add token_names API (#17)
-- Add D example to user guide for p_context_init() (#16)
-- Allow user termination from lexer code blocks (#15)
-
-### Fixes
-
-- Fix generator hang when state transition cycle is present (#20)
-
-## v1.1.0
-
-### New Features
-
-- Add user parser terminations (#13)
-- Document generated parser API in user guide (#14)
-
 ## v1.0.0
 
 - Initial release
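Note: the v2.0.0 "Upgrading" entry above is the kind of change that is easy to get wrong at call sites. A minimal sketch of an error report after the switch to 1-based positions, assuming the default `p_` prefix and a `p_position_t` shaped like the one in the templates later in this diff (the exact field types are an assumption):

```c
#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the generated position type (default p_ prefix). */
typedef struct
{
    uint32_t row; /* 1-based since v2.0.0; 0 can serve as "invalid" */
    uint32_t col;
} p_position_t;

static void report_error(p_position_t pos, const char * message)
{
    /* Rows and columns are already 1-based, so they print directly,
     * without the pre-v2.0.0 "+ 1" adjustment. */
    fprintf(stderr, "error at line %u, column %u: %s\n",
            pos.row, pos.col, message);
}
```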
Gemfile: 2 changes

@@ -1,9 +1,7 @@
 source "https://rubygems.org"
 
-gem "base64"
 gem "rake"
 gem "rspec"
 gem "rdoc"
 gem "redcarpet"
 gem "syntax"
-gem "simplecov"
Gemfile.lock: 48 changes

@@ -1,51 +1,37 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    base64 (0.3.0)
-    date (3.4.1)
-    diff-lcs (1.6.2)
-    docile (1.4.1)
-    erb (5.0.2)
-    psych (5.2.6)
-      date
+    diff-lcs (1.5.0)
+    psych (5.1.0)
       stringio
-    rake (13.3.0)
+    rake (13.0.6)
-    rdoc (6.14.2)
-      erb
+    rdoc (6.5.0)
       psych (>= 4.0.0)
-    redcarpet (3.6.1)
+    redcarpet (3.6.0)
-    rspec (3.13.1)
+    rspec (3.12.0)
-      rspec-core (~> 3.13.0)
+      rspec-core (~> 3.12.0)
-      rspec-expectations (~> 3.13.0)
+      rspec-expectations (~> 3.12.0)
-      rspec-mocks (~> 3.13.0)
+      rspec-mocks (~> 3.12.0)
-    rspec-core (3.13.5)
+    rspec-core (3.12.2)
-      rspec-support (~> 3.13.0)
+      rspec-support (~> 3.12.0)
-    rspec-expectations (3.13.5)
+    rspec-expectations (3.12.3)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.13.0)
+      rspec-support (~> 3.12.0)
-    rspec-mocks (3.13.5)
+    rspec-mocks (3.12.6)
       diff-lcs (>= 1.2.0, < 2.0)
-      rspec-support (~> 3.13.0)
+      rspec-support (~> 3.12.0)
-    rspec-support (3.13.4)
+    rspec-support (3.12.1)
-    simplecov (0.22.0)
-      docile (~> 1.1)
-      simplecov-html (~> 0.11)
-      simplecov_json_formatter (~> 0.1)
-    simplecov-html (0.13.2)
-    simplecov_json_formatter (0.1.4)
-    stringio (3.1.7)
+    stringio (3.0.7)
     syntax (1.2.2)
 
 PLATFORMS
   ruby
 
 DEPENDENCIES
-  base64
   rake
   rdoc
   redcarpet
   rspec
-  simplecov
   syntax
 
 BUNDLED WITH
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2010-2024 Josh Holtrop
+Copyright (c) 2010-2023 Josh Holtrop
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md: 91 changes

@@ -1,104 +1,21 @@
 # The Propane Parser Generator
 
-Propane is a LALR Parser Generator (LPG) which:
+Propane is an LR Parser Generator (LPG) which:
 
 * accepts LR(0), SLR, and LALR grammars
 * generates a built-in lexer to tokenize input
 * supports UTF-8 lexer inputs
-* generates a table-driven shift/reduce parser to parse input in linear time
-* targets C or D language outputs
-* optionally supports automatic full AST generation
+* generates a table-driven parser to parse input in linear time
 * is MIT-licensed
 * is distributable as a standalone Ruby script
 
 ## Installation
 
-Propane is designed to be distributed as a stand-alone single file script that
-can be copied into and versioned in a project's source tree.
-The only requirement to run Propane is that the system has a Ruby interpreter
-installed.
-The latest release can be downloaded from [https://github.com/holtrop/propane/releases](https://github.com/holtrop/propane/releases).
-
-Simply copy the `propane` executable script into the desired location within
-the project to be built (typically the root of the repository) and mark it
-executable.
+TODO
 
 ## Usage
 
-### Command Line Interface
-
-Propane is typically invoked from the command-line as `./propane`.
-
-    Usage: ./propane [options] <input-file> <output-file>
-    Options:
-      -h, --help  Show this usage and exit.
-      --log LOG   Write log file. This will show all parser states and their
-                  associated shifts and reduces. It can be helpful when
-                  debugging a grammar.
-      --version   Show program version and exit.
-      -w          Treat warnings as errors. This option will treat shift/reduce
-                  conflicts as fatal errors and will print them to stderr in
-                  addition to the log file.
-
-The user must specify the path to a Propane input grammar file and a path to an
-output file.
-The generated source code will be written to the output file.
-If a log file path is specified, Propane will write a log file containing
-detailed information about the parser states and transitions.
-
-### Propane Grammar File
-
-A Propane grammar file provides Propane with the patterns, tokens, grammar
-rules, and user code blocks from which to build the generated lexer and parser.
-
-Example grammar file:
-
-```
-<<
-import std.math;
->>
-
-# Parser values are unsigned integers.
-ptype ulong;
-
-# A few basic arithmetic operators.
-token plus /\+/;
-token times /\*/;
-token power /\*\*/;
-token integer /\d+/ <<
-  ulong v;
-  foreach (c; match)
-  {
-    v *= 10;
-    v += (c - '0');
-  }
-  $$ = v;
->>
-token lparen /\(/;
-token rparen /\)/;
-# Drop whitespace.
-drop /\s+/;
-
-Start -> E1 << $$ = $1; >>
-E1 -> E2 << $$ = $1; >>
-E1 -> E1 plus E2 << $$ = $1 + $3; >>
-E2 -> E3 << $$ = $1; >>
-E2 -> E2 times E3 << $$ = $1 * $3; >>
-E3 -> E4 << $$ = $1; >>
-E3 -> E3 power E4 <<
-  $$ = pow($1, $3);
->>
-E4 -> integer << $$ = $1; >>
-E4 -> lparen E1 rparen << $$ = $2; >>
-```
-
-Grammar files can contain comment lines beginning with `#` which are ignored.
-White space in the grammar file is also ignored.
-
-It is convention to use the extension `.propane` for the Propane grammar file,
-however any file name is accepted by Propane.
-
-See [https://holtrop.github.io/propane/index.html](https://holtrop.github.io/propane/index.html) for the full User Guide.
-
+TODO: Write usage instructions here
 
 ## Development
 
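Note: the removed Usage section describes the generated API only indirectly. For orientation, here is a minimal sketch of driving a generated parser from C. It assumes the default `p_` symbol prefix, a generated header name of `parser.h`, and a grammar whose start rule yields an integer result as in the example grammar above; the result-code spelling `P_SUCCESS` is also an assumption, not taken verbatim from this diff:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "parser.h" /* assumed name for the Propane-generated header */

int main(void)
{
    const char * input = "1 + 2 * 3";

    /* Initialize the combined lexer/parser context over the input
     * buffer, then run the table-driven parse. */
    p_context_t context;
    p_context_init(&context, (const uint8_t *)input, strlen(input));

    if (p_parse(&context) == P_SUCCESS)
    {
        /* p_result() returns the start rule's parse value. */
        printf("result: %lu\n", (unsigned long)p_result(&context));
        return 0;
    }
    fprintf(stderr, "parse error\n");
    return 1;
}
```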
Rakefile: 14 changes

@@ -1,8 +1,5 @@
-require "rake/clean"
 require "rspec/core/rake_task"
 
-CLEAN.include %w[spec/run gen .yardoc yard coverage dist]
-
 task :build_dist do
   sh "ruby rb/build_dist.rb"
 end
@@ -13,20 +10,9 @@ RSpec::Core::RakeTask.new(:spec, :example_pattern) do |task, args|
   end
 end
 
-# dspec task is useful to test the distributable release script, but is not
-# useful for coverage information.
-desc "Dist Specs"
-task :dspec, [:example_string] => :build_dist do |task, args|
-  ENV["dist_specs"] = "1"
-  Rake::Task["spec"].execute(args)
-  ENV.delete("dist_specs")
-end
-
 task :default => :spec
 
 desc "Build user guide"
 task :user_guide do
   system("ruby", "-Ilib", "rb/gen_user_guide.rb")
 end
 
-task :all => [:spec, :dspec, :user_guide]
@@ -3,22 +3,13 @@
 #include <stdlib.h>
 #include <string.h>
 
-/**************************************************************************
- * Public data
- *************************************************************************/
-
-/** Token names. */
-const char * <%= @grammar.prefix %>token_names[] = {
-<% @grammar.tokens.each_with_index do |token, index| %>
-    "<%= token.name %>",
-<% end %>
-};
-
 /**************************************************************************
  * User code blocks
  *************************************************************************/
 
-<%= @grammar.code_blocks.fetch("", "") %>
+<% @grammar.code_blocks.each do |code| %>
+<%= code %>
+<% end %>
 
 /**************************************************************************
  * Private types
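Note: the token_names table on the - side (it post-dates the older revision shown on the + side) provides one printable name per token ID. A sketch of how a caller might use it for error reporting; the accessor names (`p_token`, `p_position`) and the indexing of `p_token_names` by token ID are assumptions based on the default `p_` prefix and the doc comments elsewhere in this diff:

```c
#include <stdio.h>

/* Sketch: error reporting via the generated token_names table.
 * p_context_t, p_parse, p_token, p_position, and p_token_names are
 * assumed to come from the generated parser (default "p_" prefix). */
static int parse_and_report(p_context_t * context)
{
    size_t result = p_parse(context);
    if (result == P_UNEXPECTED_TOKEN)
    {
        p_position_t pos = p_position(context);
        fprintf(stderr, "%u:%u: unexpected token '%s'\n",
                pos.row, pos.col, p_token_names[p_token(context)]);
    }
    return result == P_SUCCESS ? 0 : 1;
}
```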
@@ -32,7 +23,6 @@ const char * <%= @grammar.prefix %>token_names[] = {
 #define P_UNEXPECTED_TOKEN 3u
 #define P_DROP 4u
 #define P_EOF 5u
-#define P_USER_TERMINATED 6u
 <% end %>
 
 /* An invalid ID value. */
@@ -55,14 +45,11 @@ const char * <%= @grammar.prefix %>token_names[] = {
 void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length)
 {
     /* New default-initialized context structure. */
-    <%= @grammar.prefix %>context_t newcontext;
-    memset(&newcontext, 0, sizeof(newcontext));
+    <%= @grammar.prefix %>context_t newcontext = {0};
 
     /* Lexer initialization. */
     newcontext.input = input;
     newcontext.input_length = input_length;
-    newcontext.text_position.row = 1u;
-    newcontext.text_position.col = 1u;
     newcontext.mode = <%= @lexer.mode_id("default") %>;
 
     /* Copy to the user's context structure. */
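Note: the change from `= {0}` to an explicit memset() mirrors the v2.3.0 changelog fix about pvalues not being fully initialized. `= {0}` on a struct or union only guarantees initialization of the first named member, so when a value type is a union of differently sized ptypes, trailing bytes of larger members can remain indeterminate. A self-contained illustration of the distinction (the types here are hypothetical, not from this diff):

```c
#include <string.h>

/* A union of two differently sized "ptypes". */
typedef union
{
    char small;      /* first member: 1 byte */
    double large[4]; /* larger member: 32 bytes */
} value_t;

void init_examples(void)
{
    /* Initializes the *first member* only; the C standard does not
     * require the remaining bytes of the union to be zeroed. */
    value_t a = {0};

    /* Zeroes every byte of the object, regardless of which member is
     * read later. */
    value_t b;
    memset(&b, 0, sizeof(b));

    (void)a; (void)b;
}
```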
@@ -229,10 +216,7 @@ typedef struct
     /** Number of bytes of input text used to match. */
     size_t length;
 
-    /** Input text position delta to end of token. */
-    <%= @grammar.prefix %>position_t end_delta_position;
-
-    /** Input text position delta to next code point after token end. */
+    /** Input text position delta. */
     <%= @grammar.prefix %>position_t delta_position;
 
     /** Accepting lexer state from the match. */
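Note: the end_delta_position field added on the - side feeds the token end-position computation removed further down in attempt_lex_token(). The row test there encodes a subtle rule: a zero row delta means the token ends on its starting line, so the column delta is relative to the start column; a nonzero row delta means the column delta is already the absolute column on the final line. A self-contained sketch of that arithmetic, using the same logic as the removed code:

```c
#include <stdint.h>

typedef struct
{
    uint32_t row;
    uint32_t col;
} position_t;

/* Compute a token's end position from its start position and the
 * row/col delta accumulated while scanning the match. */
static position_t end_position(position_t start, position_t end_delta)
{
    position_t end;
    if (end_delta.row != 0u)
    {
        /* Token spans newlines: column restarts on the last line. */
        end.row = start.row + end_delta.row;
        end.col = end_delta.col;
    }
    else
    {
        /* Token stays on one line: offset from the start column. */
        end.row = start.row;
        end.col = start.col + end_delta.col;
    }
    return end;
}
```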
@@ -326,12 +310,9 @@ static lexer_state_id_t check_lexer_transition(uint32_t current_state, uint32_t
  *
  * @param context
  *        Lexer/parser context structure.
- * @param[out] out_match_info
- *        The longest match information is stored here if the return value is
- *        P_SUCCESS or P_DECODE_ERROR.
- * @param[out] out_unexpected_input_length
- *        The unexpected input length is stored here if the return value is
- *        P_UNEXPECTED_INPUT.
+ * @param[out] out_token_info
+ *        The lexed token information is stored here if the return value is
+ *        P_SUCCESS.
  *
  * @reval P_SUCCESS
  *        A token was successfully lexed.
@@ -345,10 +326,8 @@ static lexer_state_id_t check_lexer_transition(uint32_t current_state, uint32_t
 static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
     lexer_match_info_t * out_match_info, size_t * out_unexpected_input_length)
 {
-    lexer_match_info_t longest_match;
-    memset(&longest_match, 0, sizeof(longest_match));
-    lexer_match_info_t attempt_match;
-    memset(&attempt_match, 0, sizeof(attempt_match));
+    lexer_match_info_t longest_match = {0};
+    lexer_match_info_t attempt_match = {0};
     *out_match_info = longest_match;
     uint32_t current_state = lexer_mode_table[context->mode].state_table_offset;
     for (;;)
@@ -362,16 +341,14 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
         switch (result)
         {
             case P_SUCCESS:
-            {
                 lexer_state_id_t transition_state = check_lexer_transition(current_state, code_point);
                 if (transition_state != INVALID_LEXER_STATE_ID)
                 {
                     attempt_match.length += code_point_length;
-                    attempt_match.end_delta_position = attempt_match.delta_position;
                     if (code_point == '\n')
                     {
                         attempt_match.delta_position.row++;
-                        attempt_match.delta_position.col = 1u;
+                        attempt_match.delta_position.col = 0u;
                     }
                     else
                     {
@@ -394,7 +371,6 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                     *out_unexpected_input_length = attempt_match.length + code_point_length;
                     return P_UNEXPECTED_INPUT;
                 }
-            }
             break;
 
             case P_EOF:
@@ -416,6 +392,7 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 /* Valid EOF return. */
                 return P_EOF;
             }
+            break;
 
             case P_DECODE_ERROR:
                 /* If we see a decode error, we may be partially in the middle of
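Note: the braces added around the P_SUCCESS case body (with the stray break absorbed into them) are the standard C idiom for declaring a variable such as transition_state inside a switch case: a case label cannot be immediately followed by a declaration in older C dialects, and the braces also confine the variable's scope to that case. A tiny standalone illustration:

```c
/* Sketch: why a case body that declares variables gets its own braces. */
static int classify(int result)
{
    switch (result)
    {
        case 0:
        {
            /* Legal here because the braces open a compound statement;
             * the name is scoped to this case only. */
            int transition = result + 1;
            return transition;
        }
        default:
            return -1;
    }
}
```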
@@ -447,34 +424,25 @@ static size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
  *        Input text does not match any lexer pattern.
  * @retval P_DROP
  *        A drop pattern was matched so the lexer should continue.
- * @retval P_USER_TERMINATED
- *        User code has requested to terminate the lexer.
  */
 static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
-    <%= @grammar.prefix %>token_info_t token_info;
-    memset(&token_info, 0, sizeof(token_info));
+    <%= @grammar.prefix %>token_info_t token_info = {0};
     token_info.position = context->text_position;
     token_info.token = INVALID_TOKEN_ID;
+    *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
     size_t unexpected_input_length;
     size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
     switch (result)
     {
         case P_SUCCESS:
-        {
             <%= @grammar.prefix %>token_t token_to_accept = match_info.accepting_state->token;
             if (match_info.accepting_state->code_id != INVALID_USER_CODE_ID)
             {
                 uint8_t const * match = &context->input[context->input_index];
                 <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                     match_info.accepting_state->code_id, match, match_info.length, &token_info);
-                /* A TERMINATE_TOKEN_ID return code from lexer_user_code() means
-                 * that the user code is requesting to terminate the lexer. */
-                if (user_code_token == TERMINATE_TOKEN_ID)
-                {
-                    return P_USER_TERMINATED;
-                }
                 /* An invalid token returned from lexer_user_code() means that the
                  * user code did not explicitly return a token. So only override
                  * the token to return if the user code does explicitly return a
@@ -503,23 +471,11 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
             }
             token_info.token = token_to_accept;
             token_info.length = match_info.length;
-            if (match_info.end_delta_position.row != 0u)
-            {
-                token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
-                token_info.end_position.col = match_info.end_delta_position.col;
-            }
-            else
-            {
-                token_info.end_position.row = token_info.position.row;
-                token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
-            }
             *out_token_info = token_info;
-        }
             return P_SUCCESS;
 
         case P_EOF:
             token_info.token = TOKEN___EOF;
-            token_info.end_position = token_info.position;
             *out_token_info = token_info;
             return P_SUCCESS;
 
@@ -557,8 +513,6 @@ static size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @
  *        The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
  *        Input text does not match any lexer pattern.
- * @retval P_USER_TERMINATED
- *        User code has requested to terminate the lexer.
  */
 size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
@@ -576,9 +530,6 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=
  * Parser
  *************************************************************************/
 
-/** Invalid position value. */
-#define INVALID_POSITION (<%= @grammar.prefix %>position_t){0u, 0u}
-
 /** Reduce ID type. */
 typedef <%= get_type_for(@parser.reduce_table.size) %> reduce_id_t;
 
@@ -638,25 +589,6 @@ typedef struct
  *   reduce action.
  */
     parser_state_id_t n_states;
-<% if @grammar.ast %>
-
-    /**
-     * Map of rule components to rule set child fields.
-     */
-    uint16_t const * rule_set_node_field_index_map;
-
-    /**
-     * Number of rule set AST node fields.
-     */
-    uint16_t rule_set_node_field_array_size;
-
-    /**
-     * Whether this rule was a generated optional rule that matched the
-     * optional target. In this case, propagate the matched target node up
-     * instead of making a new node for this rule.
-     */
-    bool propagate_optional_target;
-<% end %>
 } reduce_t;
 
 /** Parser state entry. */
@@ -687,55 +619,19 @@ typedef struct
 
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;
 
-<% if @grammar.ast %>
-    /** AST node. */
-    void * ast_node;
-<% end %>
 } state_value_t;
 
-/** Common AST node structure. */
-typedef struct
-{
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-    void * fields[];
-} ASTNode;
-
 /** Parser shift table. */
 static const shift_t parser_shift_table[] = {
 <% @parser.shift_table.each do |shift| %>
-    {<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u},
+    {<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u},
 <% end %>
 };
 
-<% if @grammar.ast %>
-<% @grammar.rules.each do |rule| %>
-<% unless rule.flat_rule_set_node_field_index_map? %>
-const uint16_t r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map[<%= rule.rule_set_node_field_index_map.size %>] = {<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>};
-<% end %>
-<% end %>
-<% end %>
-
 /** Parser reduce table. */
 static const reduce_t parser_reduce_table[] = {
 <% @parser.reduce_table.each do |reduce| %>
-    {
-        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
-        <%= reduce[:rule_id] %>u, /* Rule ID */
-        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
-<% if @grammar.ast %>
-        <%= reduce[:n_states] %>u, /* Number of states */
-<% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-        NULL, /* No rule set node field index map (flat map) */
-<% else %>
-        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
-<% end %>
-        <%= reduce[:rule].rule_set.ast_fields.size %>, /* Number of AST fields */
-        <%= reduce[:propagate_optional_target] %>}, /* Propagate optional target? */
-<% else %>
-        <%= reduce[:n_states] %>u},
-<% end %>
+    {<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u},
 <% end %>
 };
 
@@ -839,19 +735,17 @@ static void state_values_stack_free(state_values_stack_t * stack)
     free(stack->entries);
 }
 
-<% unless @grammar.ast %>
 /**
  * Execute user code associated with a parser rule.
  *
  * @param rule The ID of the rule.
  *
- * @retval P_SUCCESS
- *        Continue parsing.
- * @retval P_USER_TERMINATED
- *        User requested to terminate parsing.
+ * @return Parse value.
  */
-static size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states, <%= @grammar.prefix %>context_t * context)
+static <%= @grammar.prefix %>value_t parser_user_code(uint32_t rule, state_values_stack_t * statevalues, uint32_t n_states)
 {
+    <%= @grammar.prefix %>value_t _pvalue = {0};
 
     switch (rule)
     {
 <% @grammar.rules.each do |rule| %>
@@ -864,9 +758,8 @@ static size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint32_t
         default: break;
     }
 
-    return P_SUCCESS;
+    return _pvalue;
 }
-<% end %>
 
 /**
  * Check if the parser should shift to a new state.
@@ -928,7 +821,7 @@ static size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token)
  *        can be accessed with <%= @grammar.prefix %>result().
  * @retval P_UNEXPECTED_TOKEN
  *        An unexpected token was encountered that does not match any grammar rule.
- *        The function p_token(&context) can be used to get the unexpected token.
+ *        The value context->token holds the unexpected token.
  * @reval P_DECODE_ERROR
  *        The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
@@ -940,11 +833,7 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_values_stack_t statevalues;
     size_t reduced_rule_set = INVALID_ID;
-<% if @grammar.ast %>
-    void * reduced_parser_node;
-<% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
-<% end %>
     state_values_stack_init(&statevalues);
     state_values_stack_push(&statevalues);
     size_t result;
@@ -971,11 +860,7 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
         if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
         {
             /* Successful parse. */
-<% if @grammar.ast %>
-            context->parse_result = (<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)state_values_stack_index(&statevalues, -1)->ast_node;
-<% else %>
             context->parse_result = state_values_stack_index(&statevalues, -1)->pvalue;
-<% end %>
             result = P_SUCCESS;
             break;
         }
@@ -988,29 +873,15 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-<% if @grammar.ast %>
-                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = malloc(sizeof(<%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>));
-                token_ast_node->position = token_info.position;
-                token_ast_node->end_position = token_info.end_position;
-                token_ast_node->token = token;
-                token_ast_node->pvalue = token_info.pvalue;
-                state_values_stack_index(&statevalues, -1)->ast_node = token_ast_node;
-<% else %>
-                state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
-<% end %>
                 token = INVALID_TOKEN_ID;
+                state_values_stack_index(&statevalues, -1)->pvalue = token_info.pvalue;
             }
             else
             {
                 /* We shifted a RuleSet. */
-<% if @grammar.ast %>
-                state_values_stack_index(&statevalues, -1)->ast_node = reduced_parser_node;
-<% else %>
                 state_values_stack_index(&statevalues, -1)->pvalue = reduced_parser_value;
-                <%= @grammar.prefix %>value_t new_parse_result;
-                memset(&new_parse_result, 0, sizeof(new_parse_result));
+                <%= @grammar.prefix %>value_t new_parse_result = {0};
                 reduced_parser_value = new_parse_result;
-<% end %>
                 reduced_rule_set = INVALID_ID;
             }
             continue;
@@ -1020,64 +891,7 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
             if (reduce_index != INVALID_ID)
             {
                 /* We have something to reduce. */
-<% if @grammar.ast %>
-                if (parser_reduce_table[reduce_index].propagate_optional_target)
-                {
-                    reduced_parser_node = state_values_stack_index(&statevalues, -1)->ast_node;
-                }
-                else if (parser_reduce_table[reduce_index].n_states > 0)
-                {
-                    size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
-                    ASTNode * node = (ASTNode *)malloc(sizeof(ASTNode) + n_fields * sizeof(void *));
-                    node->position = INVALID_POSITION;
-                    node->end_position = INVALID_POSITION;
-                    for (size_t i = 0; i < n_fields; i++)
-                    {
-                        node->fields[i] = NULL;
-                    }
-                    if (parser_reduce_table[reduce_index].rule_set_node_field_index_map == NULL)
-                    {
-                        for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
-                        {
-                            node->fields[i] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
-                        }
-                    }
-                    else
-                    {
-                        for (size_t i = 0; i < parser_reduce_table[reduce_index].n_states; i++)
-                        {
-                            node->fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = state_values_stack_index(&statevalues, -(int)parser_reduce_table[reduce_index].n_states + (int)i)->ast_node;
-                        }
-                    }
-                    bool position_found = false;
-                    for (size_t i = 0; i < n_fields; i++)
-                    {
-                        ASTNode * child = (ASTNode *)node->fields[i];
-                        if ((child != NULL) && <%= @grammar.prefix %>position_valid(child->position))
-                        {
-                            if (!position_found)
-                            {
-                                node->position = child->position;
-                                position_found = true;
-                            }
-                            node->end_position = child->end_position;
-                        }
-                    }
-                    reduced_parser_node = node;
-                }
-                else
-                {
-                    reduced_parser_node = NULL;
-                }
-<% else %>
-                <%= @grammar.prefix %>value_t reduced_parser_value2;
-                memset(&reduced_parser_value2, 0, sizeof(reduced_parser_value2));
-                if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
-                {
-                    return P_USER_TERMINATED;
-                }
-                reduced_parser_value = reduced_parser_value2;
-<% end %>
+                reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, &statevalues, parser_reduce_table[reduce_index].n_states);
                 reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
                 state_values_stack_pop(&statevalues, parser_reduce_table[reduce_index].n_states);
                 continue;
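Note: the removed AST construction code relies on a C99 flexible array member: ASTNode ends in `void * fields[];`, so a single malloc sizes the node header plus exactly n child slots. A self-contained sketch of that allocation pattern, with simplified header fields rather than the template's full definitions:

```c
#include <stdlib.h>

typedef struct
{
    int id;          /* stand-in for the node header fields */
    void * fields[]; /* C99 flexible array member: sized at malloc time */
} ASTNode;

static ASTNode * ast_node_new(int id, size_t n_fields)
{
    /* One allocation covers the header plus n_fields child pointers. */
    ASTNode * node = malloc(sizeof(ASTNode) + n_fields * sizeof(void *));
    if (node != NULL)
    {
        node->id = id;
        for (size_t i = 0; i < n_fields; i++)
        {
            node->fields[i] = NULL;
        }
    }
    return node;
}
```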
@@ -1105,17 +919,9 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
  *
  * @return Parse result value.
  */
-<% if @grammar.ast %>
-<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% else %>
 <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% end %>
 {
-<% if @grammar.ast %>
-    return context->parse_result;
-<% else %>
     return context->parse_result.v_<%= start_rule_type[0] %>;
-<% end %>
 }
 
 /**
@@ -1130,26 +936,3 @@ size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context)
 {
     return context->text_position;
 }
-
-/**
- * Get the user terminate code.
- *
- * @param context
- *        Lexer/parser context structure.
- *
- * @return User terminate code.
- */
-size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context)
-{
-    return context->user_terminate_code;
-}
-
-/**
- * Get the parse token.
- *
- * @return Parse token.
- */
-<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context)
-{
-    return context->token;
-}
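Note: the p_user_terminate_code() accessor removed above pairs with the user-termination machinery on the - side of this diff (TERMINATE_TOKEN_ID, P_USER_TERMINATED). A sketch of the intended flow from the caller's perspective; the `p_` prefix and the exact way user code sets the terminate code are assumptions based on the removed code and the v1.1.0/v1.2.0 changelog entries:

```c
#include <stdio.h>

/* Sketch: handling user-requested termination (assumed p_ prefix).
 * A lexer or parser user code block can request termination with a
 * code; p_parse() then returns P_USER_TERMINATED and the code is
 * retrievable from the context. */
static int run_parser(p_context_t * context)
{
    size_t result = p_parse(context);
    if (result == P_USER_TERMINATED)
    {
        fprintf(stderr, "terminated by user code (code %zu)\n",
                p_user_terminate_code(context));
        return 2;
    }
    return result == P_SUCCESS ? 0 : 1;
}
```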
@@ -8,14 +8,13 @@
 module <%= @grammar.modulename %>;
 <% end %>
 
-import core.memory;
-import core.stdc.stdlib : malloc;
-
 /**************************************************************************
  * User code blocks
  *************************************************************************/
 
-<%= @grammar.code_blocks.fetch("", "") %>
+<% @grammar.code_blocks.each do |code| %>
+<%= code %>
+<% end %>
 
 /**************************************************************************
  * Public types
@@ -30,11 +29,10 @@ public enum : size_t
     <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN,
     <%= @grammar.prefix.upcase %>DROP,
     <%= @grammar.prefix.upcase %>EOF,
-    <%= @grammar.prefix.upcase %>USER_TERMINATED,
 }
 
 /** Token type. */
-public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.terminate_token_id) %>;
+public alias <%= @grammar.prefix %>token_t = <%= get_type_for(@grammar.invalid_token_id) %>;
 
 /** Token IDs. */
 public enum : <%= @grammar.prefix %>token_t
@@ -46,14 +44,21 @@ public enum : <%= @grammar.prefix %>token_t
 <% end %>
 <% end %>
     INVALID_TOKEN_ID = <%= @grammar.invalid_token_id %>,
-    TERMINATE_TOKEN_ID = <%= @grammar.terminate_token_id %>,
 }
 
 /** Code point type. */
 public alias <%= @grammar.prefix %>code_point_t = uint;
 
+/** Parser values type(s). */
+public union <%= @grammar.prefix %>value_t
+{
+<% @grammar.ptypes.each do |name, typestring| %>
+    <%= typestring %> v_<%= name %>;
+<% end %>
+}
+
 /**
- * A structure to keep track of input position.
+ * A structure to keep track of parser position.
  *
  * This is useful for reporting errors, etc...
  */
@@ -64,79 +69,14 @@ public struct <%= @grammar.prefix %>position_t
 
     /** Input text column (0-based). */
     uint col;
-
-    /** Invalid position value. */
-    enum INVALID = <%= @grammar.prefix %>position_t(0u, 0u);
-
-    /** Return whether the position is valid. */
-    public @property bool valid()
-    {
-        return row != 0u;
-    }
 }
-
-<% if @grammar.ast %>
-/** Parser values type. */
-public alias <%= @grammar.prefix %>value_t = <%= @grammar.ptype %>;
-<% else %>
-/** Parser values type(s). */
-public union <%= @grammar.prefix %>value_t
-{
-<% @grammar.ptypes.each do |name, typestring| %>
-    <%= typestring %> v_<%= name %>;
-<% end %>
-}
-<% end %>
-
-<% if @grammar.ast %>
-/** Common AST node structure. */
-private struct ASTNode
-{
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-    void *[0] fields;
-}
-
-/** AST node types. @{ */
-public struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
-{
-    /* ASTNode fields must be present in the same order here. */
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-    <%= @grammar.prefix %>token_t token;
-    <%= @grammar.prefix %>value_t pvalue;
-}
-
-<% @parser.rule_sets.each do |name, rule_set| %>
-<% next if name.start_with?("$") %>
-<% next if rule_set.optional? %>
-public struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
-{
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-<% rule_set.ast_fields.each do |fields| %>
-    union
-    {
-<% fields.each do |field_name, type| %>
-        <%= type %> * <%= field_name %>;
-<% end %>
-    }
-<% end %>
-}
-
-<% end %>
-/** @} */
-<% end %>
-
 /** Lexed token information. */
 public struct <%= @grammar.prefix %>token_info_t
 {
-    /** Text position of first code point in token. */
+    /** Text position where the token was found. */
     <%= @grammar.prefix %>position_t position;
 
-    /** Text position of last code point in token. */
-    <%= @grammar.prefix %>position_t end_position;
-
     /** Number of input bytes used by the token. */
     size_t length;
 
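Note: the removed INVALID/valid members encode the same sentinel convention as the C template's INVALID_POSITION macro elsewhere in this diff: because rows are 1-based (since v2.0.0), a row of 0 can never occur in a real position and therefore marks "no position". A C-flavored sketch of that check; the names mirror the templates but are assumptions here:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct
{
    uint32_t row; /* 1-based; 0 is reserved as the invalid sentinel */
    uint32_t col;
} p_position_t;

#define INVALID_POSITION (p_position_t){0u, 0u}

/* Rows are 1-based, so a zero row reliably means "invalid". */
static bool p_position_valid(p_position_t position)
{
    return position.row != 0u;
}
```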
@@ -172,17 +112,10 @@ public struct <%= @grammar.prefix %>context_t
     /* Parser context data. */
 
     /** Parse result value. */
-<% if @grammar.ast %>
-    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
-<% else %>
     <%= @grammar.prefix %>value_t parse_result;
-<% end %>
 
     /** Unexpected token received. */
     <%= @grammar.prefix %>token_t token;
 
-    /** User terminate code. */
-    size_t user_terminate_code;
 }
 
 /**************************************************************************
@@ -210,7 +143,6 @@ private enum : size_t
     P_UNEXPECTED_TOKEN,
     P_DROP,
     P_EOF,
-    P_USER_TERMINATED,
 }
 <% end %>
 
@@ -236,8 +168,6 @@ public void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t *
 
     /* Lexer initialization. */
     newcontext.input = input;
-    newcontext.text_position.row = 1u;
-    newcontext.text_position.col = 1u;
     newcontext.mode = <%= @lexer.mode_id("default") %>;
 
     /* Copy to the user's context structure. */
@@ -402,10 +332,7 @@ private struct lexer_match_info_t
     /** Number of bytes of input text used to match. */
     size_t length;
 
-    /** Input text position delta to end of token. */
-    <%= @grammar.prefix %>position_t end_delta_position;
-
-    /** Input text position delta to next code point after token end. */
+    /** Input text position delta. */
     <%= @grammar.prefix %>position_t delta_position;
 
     /** Accepting lexer state from the match. */
@@ -497,12 +424,9 @@ private lexer_state_id_t check_lexer_transition(uint current_state, uint code_po
  *
  * @param context
  *        Lexer/parser context structure.
- * @param[out] out_match_info
- *        The longest match information is stored here if the return value is
- *        P_SUCCESS or P_DECODE_ERROR.
- * @param[out] out_unexpected_input_length
- *        The unexpected input length is stored here if the return value is
- *        P_UNEXPECTED_INPUT.
+ * @param[out] out_token_info
+ *        The lexed token information is stored here if the return value is
+ *        P_SUCCESS.
  *
  * @reval P_SUCCESS
  *        A token was successfully lexed.
@@ -533,11 +457,10 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
             if (transition_state != INVALID_LEXER_STATE_ID)
             {
                 attempt_match.length += code_point_length;
-                attempt_match.end_delta_position = attempt_match.delta_position;
                 if (code_point == '\n')
                 {
                     attempt_match.delta_position.row++;
-                    attempt_match.delta_position.col = 1u;
+                    attempt_match.delta_position.col = 0u;
                 }
                 else
                 {
@@ -581,6 +504,7 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
                 /* Valid EOF return. */
                 return P_EOF;
             }
+            break;
 
         case P_DECODE_ERROR:
             /* If we see a decode error, we may be partially in the middle of
@@ -612,14 +536,13 @@ private size_t find_longest_match(<%= @grammar.prefix %>context_t * context,
  *        Input text does not match any lexer pattern.
  * @retval P_DROP
  *        A drop pattern was matched so the lexer should continue.
- * @retval P_USER_TERMINATED
- *        User code has requested to terminate the lexer.
  */
 private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
     <%= @grammar.prefix %>token_info_t token_info;
     token_info.position = context.text_position;
     token_info.token = INVALID_TOKEN_ID;
+    *out_token_info = token_info; // TODO: remove
     lexer_match_info_t match_info;
     size_t unexpected_input_length;
     size_t result = find_longest_match(context, &match_info, &unexpected_input_length);
@@ -632,12 +555,6 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
             string match = context.input[context.input_index..(context.input_index + match_info.length)];
             <%= @grammar.prefix %>token_t user_code_token = lexer_user_code(context,
                 match_info.accepting_state.code_id, match, &token_info);
-            /* A TERMINATE_TOKEN_ID return code from lexer_user_code() means
-             * that the user code is requesting to terminate the lexer. */
-            if (user_code_token == TERMINATE_TOKEN_ID)
-            {
-                return P_USER_TERMINATED;
-            }
             /* An invalid token returned from lexer_user_code() means that the
              * user code did not explicitly return a token. So only override
              * the token to return if the user code does explicitly return a
@@ -666,22 +583,11 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
             }
             token_info.token = token_to_accept;
             token_info.length = match_info.length;
-            if (match_info.end_delta_position.row != 0u)
-            {
-                token_info.end_position.row = token_info.position.row + match_info.end_delta_position.row;
-                token_info.end_position.col = match_info.end_delta_position.col;
-            }
-            else
-            {
-                token_info.end_position.row = token_info.position.row;
-                token_info.end_position.col = token_info.position.col + match_info.end_delta_position.col;
-            }
             *out_token_info = token_info;
             return P_SUCCESS;
 
         case P_EOF:
             token_info.token = TOKEN___EOF;
-            token_info.end_position = token_info.position;
             *out_token_info = token_info;
             return P_SUCCESS;
 
@@ -719,8 +625,6 @@ private size_t attempt_lex_token(<%= @grammar.prefix %>context_t * context, <%=
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
  *     Input text does not match any lexer pattern.
- * @retval P_USER_TERMINATED
- *     User code has requested to terminate the lexer.
  */
 public size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info)
 {
@@ -797,25 +701,6 @@ private struct reduce_t
      * reduce action.
      */
     parser_state_id_t n_states;
-<% if @grammar.ast %>
-
-    /**
-     * Map of rule components to rule set child fields.
-     */
-    immutable(ushort) * rule_set_node_field_index_map;
-
-    /**
-     * Number of rule set AST node fields.
-     */
-    ushort rule_set_node_field_array_size;
-
-    /**
-     * Whether this rule was a generated optional rule that matched the
-     * optional target. In this case, propagate the matched target node up
-     * instead of making a new node for this rule.
-     */
-    bool propagate_optional_target;
-<% end %>
 }

 /** Parser state entry. */
@@ -847,11 +732,6 @@ private struct state_value_t
     /** Parser value from this state. */
     <%= @grammar.prefix %>value_t pvalue;

-<% if @grammar.ast %>
-    /** AST node. */
-    void * ast_node;
-<% end %>
-
     this(size_t state_id)
     {
         this.state_id = state_id;
@@ -861,37 +741,14 @@ private struct state_value_t
 /** Parser shift table. */
 private immutable shift_t[] parser_shift_table = [
 <% @parser.shift_table.each do |shift| %>
-    shift_t(<%= shift[:symbol].id %>u, <%= shift[:state_id] %>u),
+    shift_t(<%= shift[:symbol_id] %>u, <%= shift[:state_id] %>u),
 <% end %>
 ];

-<% if @grammar.ast %>
-<% @grammar.rules.each do |rule| %>
-<% unless rule.flat_rule_set_node_field_index_map? %>
-immutable ushort[<%= rule.rule_set_node_field_index_map.size %>] r_<%= rule.name.gsub("$", "_") %><%= rule.id %>_node_field_index_map = [<%= rule.rule_set_node_field_index_map.map {|v| v.to_s}.join(", ") %>];
-<% end %>
-<% end %>
-<% end %>
-
 /** Parser reduce table. */
 private immutable reduce_t[] parser_reduce_table = [
 <% @parser.reduce_table.each do |reduce| %>
-    reduce_t(
-        <%= reduce[:token_id] %>u, /* Token: <%= reduce[:token] ? reduce[:token].name : "(any)" %> */
-        <%= reduce[:rule_id] %>u, /* Rule ID */
-        <%= reduce[:rule_set_id] %>u, /* Rule set ID (<%= reduce[:rule].rule_set.name %>) */
-<% if @grammar.ast %>
-        <%= reduce[:n_states] %>u, /* Number of states */
-<% if reduce[:rule].flat_rule_set_node_field_index_map? %>
-        null, /* No rule set node field index map (flat map) */
-<% else %>
-        &r_<%= reduce[:rule].name.gsub("$", "_") %><%= reduce[:rule].id %>_node_field_index_map[0], /* Rule set node field index map */
-<% end %>
-        <%= reduce[:rule].rule_set.ast_fields.size %>, /* Number of AST fields */
-        <%= reduce[:propagate_optional_target] %>), /* Propagate optional target? */
-<% else %>
-        <%= reduce[:n_states] %>u), /* Number of states */
-<% end %>
+    reduce_t(<%= reduce[:token_id] %>u, <%= reduce[:rule_id] %>u, <%= reduce[:rule_set_id] %>u, <%= reduce[:n_states] %>u),
 <% end %>
 ];

@@ -902,19 +759,17 @@ private immutable parser_state_t[] parser_state_table = [
 <% end %>
 ];

-<% unless @grammar.ast %>
 /**
  * Execute user code associated with a parser rule.
  *
  * @param rule The ID of the rule.
  *
- * @retval P_SUCCESS
- *     Continue parsing.
- * @retval P_USER_TERMINATED
- *     User requested to terminate parsing.
+ * @return Parse value.
  */
-private size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint rule, state_value_t[] statevalues, uint n_states, <%= @grammar.prefix %>context_t * context)
+private <%= @grammar.prefix %>value_t parser_user_code(uint rule, state_value_t[] statevalues, uint n_states)
 {
+    <%= @grammar.prefix %>value_t _pvalue;
+
     switch (rule)
     {
 <% @grammar.rules.each do |rule| %>
@@ -927,9 +782,8 @@ private size_t parser_user_code(<%= @grammar.prefix %>value_t * _pvalue, uint ru
     default: break;
     }

-    return P_SUCCESS;
+    return _pvalue;
 }
-<% end %>

 /**
  * Check if the parser should shift to a new state.
@@ -991,7 +845,7 @@ private size_t check_reduce(size_t state_id, <%= @grammar.prefix %>token_t token
  *     can be accessed with <%= @grammar.prefix %>result().
  * @retval P_UNEXPECTED_TOKEN
  *     An unexpected token was encountered that does not match any grammar rule.
- *     The function p_token(&context) can be used to get the unexpected token.
+ *     The value context.token holds the unexpected token.
  * @reval P_DECODE_ERROR
  *     The decoder encountered invalid text encoding.
  * @reval P_UNEXPECTED_INPUT
@@ -1003,11 +857,7 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
     <%= @grammar.prefix %>token_t token = INVALID_TOKEN_ID;
     state_value_t[] statevalues = new state_value_t[](1);
     size_t reduced_rule_set = INVALID_ID;
-<% if @grammar.ast %>
-    void * reduced_parser_node;
-<% else %>
     <%= @grammar.prefix %>value_t reduced_parser_value;
-<% end %>
     for (;;)
     {
         if (token == INVALID_TOKEN_ID)
@@ -1030,11 +880,7 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             if ((shift_state != INVALID_ID) && (token == TOKEN___EOF))
             {
                 /* Successful parse. */
-<% if @grammar.ast %>
-                context.parse_result = cast(<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> *)statevalues[$-1].ast_node;
-<% else %>
                 context.parse_result = statevalues[$-1].pvalue;
-<% end %>
                 return P_SUCCESS;
             }
         }
@@ -1045,24 +891,15 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
             if (reduced_rule_set == INVALID_ID)
             {
                 /* We shifted a token, mark it consumed. */
-<% if @grammar.ast %>
-                <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %> * token_ast_node = new <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>(token_info.position, token_info.end_position, token, token_info.pvalue);
-                statevalues[$-1].ast_node = token_ast_node;
-<% else %>
-                statevalues[$-1].pvalue = token_info.pvalue;
-<% end %>
                 token = INVALID_TOKEN_ID;
+                statevalues[$-1].pvalue = token_info.pvalue;
             }
             else
             {
                 /* We shifted a RuleSet. */
-<% if @grammar.ast %>
-                statevalues[$-1].ast_node = reduced_parser_node;
-<% else %>
                 statevalues[$-1].pvalue = reduced_parser_value;
                 <%= @grammar.prefix %>value_t new_parse_result;
                 reduced_parser_value = new_parse_result;
-<% end %>
                 reduced_rule_set = INVALID_ID;
             }
             continue;
@@ -1072,65 +909,7 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
         if (reduce_index != INVALID_ID)
         {
             /* We have something to reduce. */
-<% if @grammar.ast %>
-            if (parser_reduce_table[reduce_index].propagate_optional_target)
-            {
-                reduced_parser_node = statevalues[$ - 1].ast_node;
-            }
-            else if (parser_reduce_table[reduce_index].n_states > 0)
-            {
-                size_t n_fields = parser_reduce_table[reduce_index].rule_set_node_field_array_size;
-                size_t node_size = ASTNode.sizeof + n_fields * (void *).sizeof;
-                ASTNode * node = cast(ASTNode *)malloc(node_size);
-                GC.addRange(node, node_size);
-                node.position = <%= @grammar.prefix %>position_t.INVALID;
-                node.end_position = <%= @grammar.prefix %>position_t.INVALID;
-                foreach (i; 0..n_fields)
-                {
-                    node.fields[i] = null;
-                }
-                if (parser_reduce_table[reduce_index].rule_set_node_field_index_map is null)
-                {
-                    foreach (i; 0..parser_reduce_table[reduce_index].n_states)
-                    {
-                        node.fields[i] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
-                    }
-                }
-                else
-                {
-                    foreach (i; 0..parser_reduce_table[reduce_index].n_states)
-                    {
-                        node.fields[parser_reduce_table[reduce_index].rule_set_node_field_index_map[i]] = statevalues[$ - parser_reduce_table[reduce_index].n_states + i].ast_node;
-                    }
-                }
-                bool position_found = false;
-                foreach (i; 0..n_fields)
-                {
-                    ASTNode * child = cast(ASTNode *)node.fields[i];
-                    if (child && child.position.valid)
-                    {
-                        if (!position_found)
-                        {
-                            node.position = child.position;
-                            position_found = true;
-                        }
-                        node.end_position = child.end_position;
-                    }
-                }
-                reduced_parser_node = node;
-            }
-            else
-            {
-                reduced_parser_node = null;
-            }
-<% else %>
-            <%= @grammar.prefix %>value_t reduced_parser_value2;
-            if (parser_user_code(&reduced_parser_value2, parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states, context) == P_USER_TERMINATED)
-            {
-                return P_USER_TERMINATED;
-            }
-            reduced_parser_value = reduced_parser_value2;
-<% end %>
+            reduced_parser_value = parser_user_code(parser_reduce_table[reduce_index].rule, statevalues, parser_reduce_table[reduce_index].n_states);
             reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
             statevalues.length -= parser_reduce_table[reduce_index].n_states;
             continue;
@@ -1155,17 +934,9 @@ public size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * cont
  *
  * @return Parse result value.
  */
-<% if @grammar.ast %>
-public <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% else %>
 public <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context)
-<% end %>
 {
-<% if @grammar.ast %>
-    return context.parse_result;
-<% else %>
     return context.parse_result.v_<%= start_rule_type[0] %>;
-<% end %>
 }

 /**
@@ -1180,26 +951,3 @@ public <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @gram
 {
     return context.text_position;
 }
-
-/**
- * Get the user terminate code.
- *
- * @param context
- *     Lexer/parser context structure.
- *
- * @return User terminate code.
- */
-public size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context)
-{
-    return context.user_terminate_code;
-}
-
-/**
- * Get the parse token.
- *
- * @return Parse token.
- */
-public <%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context)
-{
-    return context.token;
-}
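The left-hand side of the template above (apparently the D parser template, `assets/parser.d.erb`) carries a user-termination path that the right-hand side lacks: user code can request termination, the generated `lex`/`parse` functions return `P_USER_TERMINATED`, and the code passed by the user is retrievable afterwards via `p_user_terminate_code()`. A minimal sketch of how a caller would observe this, assuming the default `p_` prefix and a D-side `context_init` taking the input text (the exact init signature is not part of these hunks):

```d
import std.stdio;

void main()
{
    // Hypothetical generated module; names follow the default "p_" prefix.
    string input = "1 + 2"; // placeholder input
    p_context_t context;
    p_context_init(&context, input);

    size_t result = p_parse(&context);
    if (result == P_USER_TERMINATED)
    {
        // Set by `$terminate(<code>);` inside lexer or rule user code,
        // per the generator substitution removed later in this diff.
        writeln("user terminate code: ", p_user_terminate_code(&context));
    }
    else if (result == P_SUCCESS)
    {
        writeln("result: ", p_result(&context));
    }
}
```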
@@ -20,10 +20,9 @@
 #define <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN 3u
 #define <%= @grammar.prefix.upcase %>DROP 4u
 #define <%= @grammar.prefix.upcase %>EOF 5u
-#define <%= @grammar.prefix.upcase %>USER_TERMINATED 6u

 /** Token type. */
-typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>token_t;
+typedef <%= get_type_for(@grammar.invalid_token_id) %> <%= @grammar.prefix %>token_t;

 /** Token IDs. */
 <% @grammar.tokens.each_with_index do |token, index| %>
@@ -33,13 +32,20 @@ typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>t
 <% end %>
 <% end %>
 #define INVALID_TOKEN_ID <%= @grammar.invalid_token_id %>u
-#define TERMINATE_TOKEN_ID <%= @grammar.terminate_token_id %>u

 /** Code point type. */
 typedef uint32_t <%= @grammar.prefix %>code_point_t;

+/** Parser values type(s). */
+typedef union
+{
+<% @grammar.ptypes.each do |name, typestring| %>
+    <%= typestring %> v_<%= name %>;
+<% end %>
+} <%= @grammar.prefix %>value_t;
+
 /**
- * A structure to keep track of input position.
+ * A structure to keep track of parser position.
  *
  * This is useful for reporting errors, etc...
  */
@@ -52,72 +58,12 @@ typedef struct
     uint32_t col;
 } <%= @grammar.prefix %>position_t;

-/** Return whether the position is valid. */
-#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0u)
-
-/** User header code blocks. */
-<%= @grammar.code_blocks.fetch("header", "") %>
-
-<% if @grammar.ast %>
-/** Parser values type. */
-typedef <%= @grammar.ptype %> <%= @grammar.prefix %>value_t;
-<% else %>
-/** Parser values type(s). */
-typedef union
-{
-<% @grammar.ptypes.each do |name, typestring| %>
-    <%= typestring %> v_<%= name %>;
-<% end %>
-} <%= @grammar.prefix %>value_t;
-<% end %>
-
-<% if @grammar.ast %>
-/** AST node types. @{ */
-typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
-{
-    /* ASTNode fields must be present in the same order here. */
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-    <%= @grammar.prefix %>token_t token;
-    <%= @grammar.prefix %>value_t pvalue;
-} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;
-
-<% @parser.rule_sets.each do |name, rule_set| %>
-<% next if name.start_with?("$") %>
-<% next if rule_set.optional? %>
-struct <%= name %>;
-<% end %>
-
-<% @parser.rule_sets.each do |name, rule_set| %>
-<% next if name.start_with?("$") %>
-<% next if rule_set.optional? %>
-typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
-{
-    <%= @grammar.prefix %>position_t position;
-    <%= @grammar.prefix %>position_t end_position;
-<% rule_set.ast_fields.each do |fields| %>
-    union
-    {
-<% fields.each do |field_name, type| %>
-        struct <%= type %> * <%= field_name %>;
-<% end %>
-    };
-<% end %>
-} <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>;
-
-<% end %>
-/** @} */
-<% end %>
-
 /** Lexed token information. */
 typedef struct
 {
-    /** Text position of first code point in token. */
+    /** Text position where the token was found. */
     <%= @grammar.prefix %>position_t position;

-    /** Text position of last code point in token. */
-    <%= @grammar.prefix %>position_t end_position;
-
     /** Number of input bytes used by the token. */
     size_t length;

@@ -156,26 +102,12 @@ typedef struct
     /* Parser context data. */

     /** Parse result value. */
-<% if @grammar.ast %>
-    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
-<% else %>
     <%= @grammar.prefix %>value_t parse_result;
-<% end %>

     /** Unexpected token received. */
     <%= @grammar.prefix %>token_t token;

-    /** User terminate code. */
-    size_t user_terminate_code;
 } <%= @grammar.prefix %>context_t;

-/**************************************************************************
- * Public data
- *************************************************************************/
-
-/** Token names. */
-extern const char * <%= @grammar.prefix %>token_names[];
-
 void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length);

 size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t input_length,
@@ -185,14 +117,6 @@ size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%=

 size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context);

-<% if @grammar.ast %>
-<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
-<% else %>
 <%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
-<% end %>

 <%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context);

-size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context);
-
-<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context);
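Both the D and C templates expose the same token-information shape: a start `position`, an `end_position` (present only on the left-hand side of this diff), the byte `length` the token consumed, and the `token` ID. A sketch of a plain tokenize loop against the generated D API, under the same hypothetical naming assumptions as the earlier example:

```d
import std.stdio;

void tokenize(string input)
{
    p_context_t context;
    p_context_init(&context, input);

    p_token_info_t token_info;
    while (p_lex(&context, &token_info) == P_SUCCESS)
    {
        if (token_info.token == TOKEN___EOF)
            break;
        // position marks the token's first code point; length counts the
        // input bytes it consumed (field meanings per the header above).
        writefln("token %d at row %d col %d, %d byte(s)",
                token_info.token,
                token_info.position.row, token_info.position.col,
                token_info.length);
    }
}
```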
1079 doc/user_guide.md
File diff suppressed because it is too large
@@ -1 +0,0 @@
-au BufNewFile,BufRead *.propane set filetype=propane
@@ -1,33 +0,0 @@
-" Vim syntax file for Propane
-" Language: propane
-" Maintainer: Josh Holtrop
-" URL: https://github.com/holtrop/propane
-
-if exists("b:current_syntax")
-  finish
-endif
-
-if !exists("b:propane_subtype")
-  let b:propane_subtype = "d"
-endif
-
-exe "syn include @propaneTarget syntax/".b:propane_subtype.".vim"
-
-syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contains=@propaneTarget keepend
-
-syn match propaneComment "#.*"
-syn match propaneOperator "->"
-syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
-syn match propaneFieldOperator ":" contained
-syn match propaneOperator "?"
-syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid
-
-syn region propaneRegex start="/" end="/" skip="\v\\\\|\\/"
-
-hi def link propaneComment Comment
-hi def link propaneKeyword Keyword
-hi def link propaneRegex String
-hi def link propaneOperator Operator
-hi def link propaneFieldOperator Operator
-hi def link propaneDelimiter Delimiter
-hi def link propaneFieldAlias Identifier
@@ -1,7 +1,6 @@
 require "erb"
 require "set"
 require "stringio"
-require_relative "propane/assets"
 require_relative "propane/cli"
 require_relative "propane/code_point_range"
 require_relative "propane/fa"
@@ -31,10 +30,10 @@ class Propane

   class << self

-    def run(input_file, output_file, log_file, options)
+    def run(input_file, output_file, log_file)
       begin
         grammar = Grammar.new(File.read(input_file))
-        generator = Generator.new(grammar, output_file, log_file, options)
+        generator = Generator.new(grammar, output_file, log_file)
         generator.generate
       rescue Error => e
         $stderr.puts e.message
@@ -1,10 +0,0 @@
-class Propane
-  module Assets
-    class << self
-      def get(name)
-        path = File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/#{name}")
-        File.binread(path)
-      end
-    end
-  end
-end
@@ -4,21 +4,15 @@ class Propane
     USAGE = <<EOF
 Usage: #{$0} [options] <input-file> <output-file>
 Options:
-  -h, --help  Show this usage and exit.
-  --log LOG   Write log file. This will show all parser states and their
-              associated shifts and reduces. It can be helpful when
-              debugging a grammar.
-  --version   Show program version and exit.
-  -w          Treat warnings as errors. This option will treat shift/reduce
-              conflicts as fatal errors and will print them to stderr in
-              addition to the log file.
+  --log LOG   Write log file
+  --version   Show program version and exit
+  -h, --help  Show this usage and exit
 EOF

     class << self

       def run(args)
         params = []
-        options = {}
         log_file = nil
         i = 0
         while i < args.size
@@ -30,13 +24,11 @@ EOF
               log_file = args[i]
             end
           when "--version"
-            puts "propane version #{VERSION}"
+            puts "propane v#{VERSION}"
             return 0
           when "-h", "--help"
             puts USAGE
             return 0
-          when "-w"
-            options[:warnings_as_errors] = true
           when /^-/
             $stderr.puts "Error: unknown option #{arg}"
             return 1
@@ -53,7 +45,7 @@ EOF
           $stderr.puts "Error: cannot read #{params[0]}"
           return 2
         end
-        Propane.run(*params, log_file, options)
+        Propane.run(*params, log_file)
       end

     end
@@ -2,7 +2,7 @@ class Propane

   class Generator

-    def initialize(grammar, output_file, log_file, options)
+    def initialize(grammar, output_file, log_file)
       @grammar = grammar
       @output_file = output_file
       if log_file
@@ -10,13 +10,13 @@ class Propane
       else
         @log = StringIO.new
       end
+      @classname = @grammar.classname || File.basename(output_file).sub(%r{[^a-zA-Z0-9].*}, "").capitalize
       @language =
         if output_file =~ /\.([a-z]+)$/
           $1
         else
           "d"
         end
-      @options = options
       process_grammar!
     end

@@ -26,8 +26,7 @@ class Propane
         extensions += %w[h]
       end
       extensions.each do |extension|
-        template = Assets.get("parser.#{extension}.erb")
-        erb = ERB.new(template, trim_mode: "<>")
+        erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.#{extension}.erb")), trim_mode: "<>")
         output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
         result = erb.result(binding.clone)
         File.open(output_file, "wb") do |fh|
@@ -43,8 +42,8 @@ class Propane
       # Assign default pattern mode to patterns without a mode assigned.
       found_default = false
       @grammar.patterns.each do |pattern|
-        if pattern.modes.empty?
-          pattern.modes << "default"
+        if pattern.mode.nil?
+          pattern.mode = "default"
           found_default = true
         end
         pattern.ptypename ||= "default"
@@ -52,7 +51,6 @@ class Propane
       unless found_default
         raise Error.new("No patterns found for default mode")
       end
-      check_ptypes!
       # Add EOF token.
       @grammar.tokens << Token.new("$EOF", nil, nil)
       tokens_by_name = {}
@@ -68,14 +66,11 @@ class Propane
         tokens_by_name[token.name] = token
       end
       # Check for user start rule.
-      unless @grammar.rules.find {|rule| rule.name == @grammar.start_rule}
-        raise Error.new("Start rule `#{@grammar.start_rule}` not found")
+      unless @grammar.rules.find {|rule| rule.name == "Start"}
+        raise Error.new("Start rule not found")
       end
       # Add "real" start rule.
-      @grammar.rules.unshift(Rule.new("$Start", [@grammar.start_rule, "$EOF"], nil, nil, nil))
-      # Generate and add rules for optional components.
-      generate_optional_component_rules!(tokens_by_name)
-      # Build rule sets.
+      @grammar.rules.unshift(Rule.new("$Start", ["Start", "$EOF"], nil, nil, nil))
       rule_sets = {}
       rule_set_id = @grammar.tokens.size
       @grammar.rules.each_with_index do |rule, rule_id|
@@ -124,55 +119,10 @@ class Propane
         end
       end
       determine_possibly_empty_rulesets!(rule_sets)
-      rule_sets.each do |name, rule_set|
-        rule_set.finalize(@grammar)
-      end
       # Generate the lexer.
       @lexer = Lexer.new(@grammar)
       # Generate the parser.
-      @parser = Parser.new(@grammar, rule_sets, @log, @options)
-    end
-
-    # Check that any referenced ptypes have been defined.
-    def check_ptypes!
-      (@grammar.patterns + @grammar.tokens + @grammar.rules).each do |potor|
-        if potor.ptypename
-          unless @grammar.ptypes.include?(potor.ptypename)
-            raise Error.new("Error: Line #{potor.line_number}: ptype #{potor.ptypename} not declared. Declare with `ptype` statement.")
-          end
-        end
-      end
-    end
-
-    # Generate and add rules for any optional components.
-    def generate_optional_component_rules!(tokens_by_name)
-      optional_rules_added = Set.new
-      @grammar.rules.each do |rule|
-        rule.components.each do |component|
-          if component =~ /^(.*)\?$/
-            c = $1
-            unless optional_rules_added.include?(component)
-              # Create two rules for the optional component: one empty and
-              # one just matching the component.
-              # We need to find the ptypename for the optional component in
-              # order to copy it to the generated rules.
-              if tokens_by_name[c]
-                # The optional component is a token.
-                ptypename = tokens_by_name[c].ptypename
-              else
-                # The optional component must be a rule, so find any instance
-                # of that rule that specifies a ptypename.
-                ptypename = @grammar.rules.reduce(nil) do |result, rule|
-                  rule.name == c && rule.ptypename ? rule.ptypename : result
-                end
-              end
-              @grammar.rules << Rule.new(component, [], nil, ptypename, rule.line_number)
-              @grammar.rules << Rule.new(component, [c], "$$ = $1;\n", ptypename, rule.line_number)
-              optional_rules_added << component
-            end
-          end
-        end
-      end
+      @parser = Parser.new(@grammar, rule_sets, @log)
     end

     # Determine which grammar rules could expand to empty sequences.
@@ -248,25 +198,10 @@ class Propane
       code = code.gsub(/\$token\(([$\w]+)\)/) do |match|
         "TOKEN_#{Token.code_name($1)}"
       end
-      code = code.gsub(/\$terminate\((.*)\);/) do |match|
-        user_terminate_code = $1
-        retval = rule ? "P_USER_TERMINATED" : "TERMINATE_TOKEN_ID"
-        case @language
-        when "c"
-          "context->user_terminate_code = (#{user_terminate_code}); return #{retval};"
-        when "d"
-          "context.user_terminate_code = (#{user_terminate_code}); return #{retval};"
-        end
-      end
       if parser
         code = code.gsub(/\$\$/) do |match|
-          case @language
-          when "c"
-            "_pvalue->v_#{rule.ptypename}"
-          when "d"
           "_pvalue.v_#{rule.ptypename}"
         end
-        end
         code = code.gsub(/\$(\d+)/) do |match|
           index = $1.to_i
           case @language
@@ -276,29 +211,8 @@ class Propane
           "statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
           end
         end
-        code = code.gsub(/\$\{(\w+)\}/) do |match|
-          aliasname = $1
-          if index = rule.aliases[aliasname]
-            case @language
-            when "c"
-              "state_values_stack_index(statevalues, -(int)n_states + #{index})->pvalue.v_#{rule.components[index].ptypename}"
-            when "d"
-              "statevalues[$-n_states+#{index}].pvalue.v_#{rule.components[index].ptypename}"
-            end
-          else
-            raise Error.new("Field alias '#{aliasname}' not found")
-          end
-        end
       else
         code = code.gsub(/\$\$/) do |match|
-          if @grammar.ast
-            case @language
-            when "c"
-              "out_token_info->pvalue"
-            when "d"
-              "out_token_info.pvalue"
-            end
-          else
           case @language
           when "c"
             "out_token_info->pvalue.v_#{pattern.ptypename}"
@@ -306,7 +220,6 @@ class Propane
             "out_token_info.pvalue.v_#{pattern.ptypename}"
           end
         end
-        end
       code = code.gsub(/\$mode\(([a-zA-Z_][a-zA-Z_0-9]*)\)/) do |match|
         mode_name = $1
         mode_id = @lexer.mode_id(mode_name)
@@ -330,7 +243,7 @@ class Propane
     # Start rule parser value type name and type string.
     def start_rule_type
       start_rule = @grammar.rules.find do |rule|
-        rule.name == @grammar.start_rule
+        rule.name == "Start"
       end
       [start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]]
     end
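The substitutions removed above document the code-generation contract for rule user code in the D backend: `$$` becomes `_pvalue.v_<ptypename>`, `$n` becomes `statevalues[$-1-n_states+n].pvalue.v_<ptype>`, and a field alias `${name}` becomes `statevalues[$-n_states+index].pvalue.v_<ptype>`. Applying those strings by hand to a hypothetical rule gives a sketch like the following (the rule ID, ptype name, and grammar rule are invented for illustration):

```d
// Grammar rule (hypothetical):
//   Expr (value) -> Expr:lhs plus Expr:rhs << $$ = ${lhs} + ${rhs}; >>
//
// After substitution, the corresponding case body inside the generated
// parser_user_code() would read roughly:
case 7: // hypothetical rule ID
    _pvalue.v_value = statevalues[$-n_states+0].pvalue.v_value
                    + statevalues[$-n_states+2].pvalue.v_value;
    break;
```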
@@ -5,13 +5,10 @@ class Propane
     # Reserve identifiers beginning with a double-underscore for internal use.
     IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/

-    attr_reader :ast
-    attr_reader :ast_prefix
-    attr_reader :ast_suffix
+    attr_reader :classname
     attr_reader :modulename
     attr_reader :patterns
     attr_reader :rules
-    attr_reader :start_rule
     attr_reader :tokens
     attr_reader :code_blocks
     attr_reader :ptypes
@@ -19,19 +16,15 @@ class Propane

     def initialize(input)
       @patterns = []
-      @start_rule = "Start"
       @tokens = []
       @rules = []
-      @code_blocks = {}
+      @code_blocks = []
       @line_number = 1
       @next_line_number = @line_number
-      @modeline = nil
+      @mode = nil
       @input = input.gsub("\r\n", "\n")
       @ptypes = {"default" => "void *"}
       @prefix = "p_"
-      @ast = false
-      @ast_prefix = ""
-      @ast_suffix = ""
       parse_grammar!
     end

@@ -43,10 +36,6 @@ class Propane
       @tokens.size
     end

-    def terminate_token_id
-      @tokens.size + 1
-    end
-
     private

     def parse_grammar!
@@ -58,14 +47,11 @@ class Propane
     def parse_statement!
       if parse_white_space!
       elsif parse_comment_line!
-      elsif @modeline.nil? && parse_mode_label!
-      elsif parse_ast_statement!
-      elsif parse_ast_prefix_statement!
-      elsif parse_ast_suffix_statement!
+      elsif @mode.nil? && parse_mode_label!
       elsif parse_module_statement!
+      elsif parse_class_statement!
       elsif parse_ptype_statement!
       elsif parse_pattern_statement!
-      elsif parse_start_statement!
       elsif parse_token_statement!
       elsif parse_tokenid_statement!
       elsif parse_drop_statement!
@@ -81,8 +67,8 @@ class Propane
     end

     def parse_mode_label!
-      if md = consume!(/(#{IDENTIFIER_REGEX}(?:\s*,\s*#{IDENTIFIER_REGEX})*)\s*:/)
-        @modeline = md[1]
+      if md = consume!(/(#{IDENTIFIER_REGEX})\s*:/)
+        @mode = md[1]
       end
     end

@@ -94,30 +80,22 @@ class Propane
       consume!(/#.*\n/)
     end

-    def parse_ast_statement!
-      if consume!(/ast\s*;/)
-        @ast = true
-      end
-    end
-
-    def parse_ast_prefix_statement!
-      if md = consume!(/ast_prefix\s+(\w+)\s*;/)
-        @ast_prefix = md[1]
-      end
-    end
-
-    def parse_ast_suffix_statement!
-      if md = consume!(/ast_suffix\s+(\w+)\s*;/)
-        @ast_suffix = md[1]
-      end
-    end
-
     def parse_module_statement!
       if consume!(/module\s+/)
         md = consume!(/([\w.]+)\s*/, "expected module name")
         @modulename = md[1]
         consume!(/;/, "expected `;'")
-        @modeline = nil
+        @mode = nil
+        true
+      end
+    end
+
+    def parse_class_statement!
+      if consume!(/class\s+/)
+        md = consume!(/([\w.]+)\s*/, "expected class name")
+        @classname = md[1]
+        consume!(/;/, "expected `;'")
+        @mode = nil
         true
       end
     end
@@ -126,9 +104,6 @@ class Propane
       if consume!(/ptype\s+/)
         name = "default"
         if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
-          end
           name = md[1]
         end
         md = consume!(/([^;]+);/, "expected parser result type expression")
@@ -141,21 +116,18 @@ class Propane
         md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
         name = md[1]
         if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
-          end
           ptypename = md[1]
         end
         pattern = parse_pattern! || name
         consume!(/\s+/)
         unless code = parse_code_block!
-          consume!(/;/, "expected `;' or code block")
+          consume!(/;/, "expected pattern or `;' or code block")
         end
         token = Token.new(name, ptypename, @line_number)
         @tokens << token
-        pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
+        pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
         @patterns << pattern
-        @modeline = nil
+        @mode = nil
         true
       end
     end
@@ -165,15 +137,12 @@ class Propane
         md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
         name = md[1]
         if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
-          end
           ptypename = md[1]
         end
         consume!(/;/, "expected `;'");
         token = Token.new(name, ptypename, @line_number)
         @tokens << token
-        @modeline = nil
+        @mode = nil
         true
       end
     end
@@ -186,8 +155,8 @@ class Propane
         end
         consume!(/\s+/)
         consume!(/;/, "expected `;'")
-        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, modes: get_modes_from_modeline)
-        @modeline = nil
+        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, mode: @mode)
+        @mode = nil
         true
       end
     end
@@ -195,20 +164,13 @@ class Propane
     def parse_rule_statement!
       if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/)
         rule_name, ptypename = *md[1, 2]
-        if @ast && ptypename
-          raise Error.new("Multiple ptypes are unsupported in AST mode")
-        end
-        md = consume!(/((?:#{IDENTIFIER_REGEX}\??(?::#{IDENTIFIER_REGEX})?\s*)*)\s*/, "expected rule component list")
+        md = consume!(/((?:#{IDENTIFIER_REGEX}\s*)*)\s*/, "expected rule component list")
         components = md[1].strip.split(/\s+/)
-        if @ast
-          consume!(/;/, "expected `;'")
-        else
         unless code = parse_code_block!
-          consume!(/;/, "expected `;' or code block")
+          consume!(/;/, "expected pattern or `;' or code block")
-        end
         end
         @rules << Rule.new(rule_name, components, code, ptypename, @line_number)
-        @modeline = nil
+        @mode = nil
         true
       end
     end
@@ -217,37 +179,21 @@ class Propane
       if pattern = parse_pattern!
         consume!(/\s+/)
         if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
-          if @ast
-            raise Error.new("Multiple ptypes are unsupported in AST mode")
-          end
           ptypename = md[1]
         end
         unless code = parse_code_block!
           raise Error.new("Line #{@line_number}: expected code block to follow pattern")
         end
-        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
-        @modeline = nil
+        @patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
+        @mode = nil
         true
       end
     end

-    def parse_start_statement!
-      if md = consume!(/start\s+(\w+)\s*;/)
-        @start_rule = md[1]
-      end
-    end
-
     def parse_code_block_statement!
-      if md = consume!(/<<([a-z]*)(.*?)>>\n/m)
-        name, code = md[1..2]
-        code.sub!(/\A\n/, "")
-        code += "\n" unless code.end_with?("\n")
-        if @code_blocks[name]
-          @code_blocks[name] += code
-        else
-          @code_blocks[name] = code
-        end
-        @modeline = nil
+      if code = parse_code_block!
+        @code_blocks << code
+        @mode = nil
         true
       end
     end
@@ -279,11 +225,8 @@ class Propane
     end

     def parse_code_block!
-      if md = consume!(/<<(.*?)>>\n/m)
-        code = md[1]
-        code.sub!(/\A\n/, "")
-        code += "\n" unless code.end_with?("\n")
-        code
+      if md = consume!(/<<\n(.*?)^>>\n/m)
+        md[1]
       end
     end

@@ -315,14 +258,6 @@ class Propane
       end
     end

-    def get_modes_from_modeline
-      if @modeline
-        Set[*@modeline.split(",").map(&:strip)]
-      else
-        Set.new
-      end
-    end
-
   end

 end
@@ -26,14 +26,8 @@ class Propane
     private

     def build_tables!
-      modenames = @grammar.patterns.reduce(Set.new) do |result, pattern|
-        result + pattern.modes
-      end
-      @modes = modenames.reduce({}) do |result, modename|
-        result[modename] = @grammar.patterns.select do |pattern|
-          pattern.modes.include?(modename)
-        end
-        result
+      @modes = @grammar.patterns.group_by do |pattern|
+        pattern.mode
       end.transform_values do |patterns|
         {dfa: DFA.new(patterns)}
       end
|||||||
@ -7,15 +7,12 @@ class Propane
|
|||||||
attr_reader :reduce_table
|
attr_reader :reduce_table
|
||||||
attr_reader :rule_sets
|
attr_reader :rule_sets
|
||||||
|
|
||||||
def initialize(grammar, rule_sets, log, options)
|
def initialize(grammar, rule_sets, log)
|
||||||
@grammar = grammar
|
@grammar = grammar
|
||||||
@rule_sets = rule_sets
|
@rule_sets = rule_sets
|
||||||
@log = log
|
@log = log
|
||||||
@item_sets = []
|
@item_sets = []
|
||||||
@item_sets_set = {}
|
@item_sets_set = {}
|
||||||
@warnings = Set.new
|
|
||||||
@errors = Set.new
|
|
||||||
@options = options
|
|
||||||
start_item = Item.new(grammar.rules.first, 0)
|
start_item = Item.new(grammar.rules.first, 0)
|
||||||
eval_item_sets = Set[ItemSet.new([start_item])]
|
eval_item_sets = Set[ItemSet.new([start_item])]
|
||||||
|
|
||||||
@@ -26,10 +23,10 @@ class Propane
         item_set.id = @item_sets.size
         @item_sets << item_set
         @item_sets_set[item_set] = item_set
-        item_set.next_symbols.each do |next_symbol|
-          unless next_symbol.name == "$EOF"
-            next_item_set = item_set.build_next_item_set(next_symbol)
-            eval_item_sets << next_item_set
+        item_set.following_symbols.each do |following_symbol|
+          unless following_symbol.name == "$EOF"
+            following_set = item_set.build_following_item_set(following_symbol)
+            eval_item_sets << following_set
           end
         end
       end
@@ -40,21 +37,8 @@ class Propane
       end

       build_reduce_actions!
-      build_tables!
       write_log!
-      errormessage = ""
-      if @errors.size > 0
-        errormessage += @errors.join("\n")
-      end
-      if @warnings.size > 0 && @options[:warnings_as_errors]
-        if errormessage != ""
-          errormessage += "\n"
-        end
-        errormessage += "Fatal errors (-w):\n" + @warnings.join("\n")
-      end
-      if errormessage != ""
-        raise Error.new(errormessage)
-      end
+      build_tables!
     end

     private
@@ -64,47 +48,48 @@ class Propane
       @shift_table = []
       @reduce_table = []
       @item_sets.each do |item_set|
-        unless item_set.reduce_rules.empty?
-          item_set.shift_entries.each do |shift_entry|
-            token = shift_entry[:symbol]
-            if item_set.reduce_actions
-              if rule = item_set.reduce_actions[token]
-                @warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
-              end
-            end
+        shift_entries = item_set.following_symbols.map do |following_symbol|
+          state_id =
+            if following_symbol.name == "$EOF"
+              0
+            else
+              item_set.following_item_set[following_symbol].id
            end
+          {
+            symbol_id: following_symbol.id,
+            state_id: state_id,
+          }
         end
         reduce_entries =
-          if rule = item_set.reduce_rule
-            [{token_id: @grammar.invalid_token_id, rule_id: rule.id, rule: rule,
-              rule_set_id: rule.rule_set.id, n_states: rule.components.size,
-              propagate_optional_target: rule.optional? && rule.components.size == 1}]
-          elsif reduce_actions = item_set.reduce_actions
-            reduce_actions.map do |token, rule|
-              {token: token, token_id: token.id, rule_id: rule.id, rule: rule,
-               rule_set_id: rule.rule_set.id, n_states: rule.components.size,
-               propagate_optional_target: rule.optional? && rule.components.size == 1}
+          case ra = item_set.reduce_actions
+          when Rule
+            [{token_id: @grammar.invalid_token_id, rule_id: ra.id,
+              rule_set_id: ra.rule_set.id, n_states: ra.components.size}]
+          when Hash
+            ra.map do |token, rule|
+              {token_id: token.id, rule_id: rule.id,
+               rule_set_id: rule.rule_set.id, n_states: rule.components.size}
            end
          else
            []
          end
         @state_table << {
           shift_index: @shift_table.size,
-          n_shifts: item_set.shift_entries.size,
+          n_shifts: shift_entries.size,
           reduce_index: @reduce_table.size,
           n_reduces: reduce_entries.size,
         }
-        @shift_table += item_set.shift_entries
+        @shift_table += shift_entries
         @reduce_table += reduce_entries
       end
     end

     def process_item_set(item_set)
-      item_set.next_symbols.each do |next_symbol|
-        unless next_symbol.name == "$EOF"
-          next_item_set = @item_sets_set[item_set.build_next_item_set(next_symbol)]
-          item_set.next_item_set[next_symbol] = next_item_set
-          next_item_set.in_sets << item_set
+      item_set.following_symbols.each do |following_symbol|
+        unless following_symbol.name == "$EOF"
+          following_set = @item_sets_set[item_set.build_following_item_set(following_symbol)]
+          item_set.following_item_set[following_symbol] = following_set
+          following_set.in_sets << item_set
         end
       end
     end
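Each hash emitted into `@shift_table`/`@reduce_table` above becomes one `shift_t`/`reduce_t` row in the generated D tables shown earlier in this diff, and the generated parse loop consumes a `reduce_t` row roughly as follows. This is a paraphrase assembled from the template fragments in this diff, not a verbatim excerpt:

```d
// check_reduce() selects a reduce_t row for the current state and
// lookahead token; INVALID_TOKEN_ID entries match any lookahead.
size_t reduce_index = check_reduce(state_id, token);
if (reduce_index != INVALID_ID)
{
    // Run the rule's user code over the top n_states stack entries,
    // then pop them and shift the reduced rule set in their place.
    reduced_parser_value = parser_user_code(
        parser_reduce_table[reduce_index].rule,
        statevalues,
        parser_reduce_table[reduce_index].n_states);
    reduced_rule_set = parser_reduce_table[reduce_index].rule_set;
    statevalues.length -= parser_reduce_table[reduce_index].n_states;
}
```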
@@ -114,109 +99,7 @@ class Propane
     # @return [void]
     def build_reduce_actions!
       @item_sets.each do |item_set|
-        build_shift_entries(item_set)
-        build_reduce_actions_for_item_set(item_set)
+        item_set.reduce_actions = build_reduce_actions_for_item_set(item_set)
       end
-      item_sets_to_process = @item_sets.select do |item_set|
-        # We need lookahead reduce actions if:
-        # 1) There is more than one possible rule to reduce. In this case the
-        #    lookahead token can help choose which rule to reduce.
-        # 2) There is at least one shift action and one reduce action for
-        #    this item set. In this case the lookahead reduce actions are
-        #    needed to test for a Shift/Reduce conflict.
-        item_set.reduce_rules.size > 1 ||
-          (item_set.reduce_rules.size > 0 && item_set.shift_entries.size > 0)
-      end
-      if RbConfig::CONFIG["host_os"] =~ /linux/
-        item_sets_by_id = {}
-        item_sets_to_process.each do |item_set|
-          item_sets_by_id[item_set.object_id] = item_set
-        end
-        tokens_by_id = {}
-        @grammar.tokens.each do |token|
-          tokens_by_id[token.object_id] = token
-        end
-        rules_by_id = {}
-        @grammar.rules.each do |rule|
-          rules_by_id[rule.object_id] = rule
-        end
-        n_threads = Util.determine_n_threads
-        semaphore = Mutex.new
-        queue = Queue.new
-        threads = {}
-        n_threads.times do
-          piper, pipew = IO.pipe
-          thread = Thread.new do
-            loop do
-              item_set = nil
-              semaphore.synchronize do
-                item_set = item_sets_to_process.slice!(0)
-              end
-              break if item_set.nil?
-              fork do
-                piper.close
-                build_lookahead_reduce_actions_for_item_set(item_set, pipew)
-              end
-            end
-            queue.push(Thread.current)
-          end
-          threads[thread] = [piper, pipew]
-        end
-        until threads.empty?
-          thread = queue.pop
-          piper, pipew = threads[thread]
-          pipew.close
-          thread_txt = piper.read
-          thread_txt.each_line do |line|
-            if line.start_with?("RA,")
-              parts = line.split(",")
-              item_set_id, token_id, rule_id = parts[1..3].map(&:to_i)
-              item_set = item_sets_by_id[item_set_id]
-              unless item_set
-                raise "Internal error: could not find item set from thread"
-              end
-              token = tokens_by_id[token_id]
-              unless token
-                raise "Internal error: could not find token from thread"
-              end
-              rule = rules_by_id[rule_id]
-              unless rule
-                raise "Internal error: could not find rule from thread"
-              end
-              item_set.reduce_actions ||= {}
-              item_set.reduce_actions[token] = rule
-            elsif line.start_with?("Error: ")
-              @errors << line.chomp
-            else
-              raise "Internal error: unhandled thread line #{line}"
-            end
-          end
-          thread.join
-          threads.delete(thread)
-        end
-      else
-        # Fall back to single threaded algorithm.
-        item_sets_to_process.each do |item_set|
-          item_set.reduce_actions = build_lookahead_reduce_actions_for_item_set(item_set)
-        end
-      end
     end
-
-    # Build the shift entries for a single item set.
-    #
-    # @return [void]
-    def build_shift_entries(item_set)
-      item_set.shift_entries = item_set.next_symbols.map do |next_symbol|
-        state_id =
-          if next_symbol.name == "$EOF"
-            0
-          else
-            item_set.next_item_set[next_symbol].id
-          end
-        {
-          symbol: next_symbol,
-          state_id: state_id,
-        }
-      end
-    end
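The Linux-only branch removed above parallelizes the lookahead computation by forking a child process per item set and streaming "RA," result lines back to the parent over a pipe, so each child works in its own address space. A minimal sketch of that fork-and-pipe pattern (fork is available on Linux and macOS, not Windows), using a toy squaring job in place of the real table builder:

    jobs = [1, 2, 3, 4]
    results = jobs.map do |job|
      piper, pipew = IO.pipe
      pid = fork do
        piper.close           # child keeps only the write end
        pipew.puts(job * job) # child reports its result as a line of text
        pipew.close
      end
      pipew.close             # parent keeps only the read end
      value = piper.read.to_i # read returns once the child closes its end
      Process.wait(pid)
      value
    end
    # results => [1, 4, 9, 16]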
@@ -225,45 +108,40 @@ class Propane
     # @param item_set [ItemSet]
     #   ItemSet (parser state)
     #
-    # @return [void]
+    # @return [nil, Rule, Hash]
+    #   If no reduce actions are possible for the given item set, nil.
+    #   If only one reduce action is possible for the given item set, the Rule
+    #   to reduce.
+    #   Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
     def build_reduce_actions_for_item_set(item_set)
       # To build the reduce actions, we start by looking at any
       # "complete" items, i.e., items where the parse position is at the
       # end of a rule. These are the only rules that are candidates for
       # reduction in the current ItemSet.
-      item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
+      reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))

-      if item_set.reduce_rules.size == 1
-        item_set.reduce_rule = item_set.reduce_rules.first
-      end
-    end
+      # If there are no rules to reduce for this ItemSet, we're done here.
+      return nil if reduce_rules.size == 0

-    # Build the reduce actions for a single item set (parser state).
-    #
-    # @param item_set [ItemSet]
-    #   ItemSet (parser state)
-    # @param fh [File]
-    #   Output file handle for multiprocessing mode.
-    #
-    # @return [Hash]
-    #   Mapping of lookahead Tokens to the Rules to reduce.
-    def build_lookahead_reduce_actions_for_item_set(item_set, fh = nil)
+      # If there is exactly one rule to reduce for this ItemSet, then do not
+      # figure out the lookaheads; just reduce it.
+      return reduce_rules.first if reduce_rules.size == 1
+
+      # Otherwise, we have more than one possible rule to reduce.
+
       # We will be looking for all possible tokens that can follow instances of
       # these rules. Rather than looking through the entire grammar for the
       # possible following tokens, we will only look in the item sets leading
       # up to this one. This restriction gives us a more precise lookahead set,
       # and allows us to parse LALR grammars.
-      item_sets = Set[item_set] + item_set.leading_item_sets
-      item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
+      item_sets = item_set.leading_item_sets
+      reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
         lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
         lookahead_tokens_for_rule.each do |lookahead_token|
           if existing_reduce_rule = reduce_actions[lookahead_token]
-            error = "Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number}) for lookahead token #{lookahead_token}"
-            @errors << error
-            fh.puts(error) if fh
+            raise Error.new("Error: reduce/reduce conflict between rule #{existing_reduce_rule.id} (#{existing_reduce_rule.name}) and rule #{reduce_rule.id} (#{reduce_rule.name})")
           end
           reduce_actions[lookahead_token] = reduce_rule
-          fh.puts "RA,#{item_set.object_id},#{lookahead_token.object_id},#{reduce_rule.object_id}" if fh
         end
         reduce_actions
       end
@@ -303,14 +181,13 @@ class Propane
       # tokens to form the lookahead token set.
       item_sets.each do |item_set|
         item_set.items.each do |item|
-          if item.next_symbol == rule_set
+          if item.following_symbol == rule_set
             (1..).each do |offset|
-              case symbol = item.next_symbol(offset)
+              case symbol = item.following_symbol(offset)
               when nil
                 rule_set = item.rule.rule_set
                 unless checked_rule_sets.include?(rule_set)
                   rule_sets_to_check_after << rule_set
-                  checked_rule_sets << rule_set
                 end
                 break
               when Token
@@ -363,26 +240,20 @@ class Propane
         @log.puts
         @log.puts "  Incoming states: #{incoming_ids.join(", ")}"
         @log.puts "  Outgoing states:"
-        item_set.next_item_set.each do |next_symbol, next_item_set|
-          @log.puts "    #{next_symbol.name} => #{next_item_set.id}"
+        item_set.following_item_set.each do |following_symbol, following_item_set|
+          @log.puts "    #{following_symbol.name} => #{following_item_set.id}"
         end
         @log.puts
         @log.puts "  Reduce actions:"
-        if item_set.reduce_rule
-          @log.puts "    * => rule #{item_set.reduce_rule.id}, rule set #{@rule_sets[item_set.reduce_rule.name].id} (#{item_set.reduce_rule.name})"
-        elsif item_set.reduce_actions
+        case item_set.reduce_actions
+        when Rule
+          @log.puts "    * => rule #{item_set.reduce_actions.id}, rule set #{@rule_sets[item_set.reduce_actions.name].id} (#{item_set.reduce_actions.name})"
+        when Hash
           item_set.reduce_actions.each do |token, rule|
             @log.puts "    lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}"
           end
         end
       end
-      if @warnings.size > 0
-        @log.puts
-        @log.puts "Warnings:"
-        @warnings.each do |warning|
-          @log.puts "  #{warning}"
-        end
-      end
     end

   end
@@ -22,7 +22,6 @@ class Propane
       def initialize(rule, position)
        @rule = rule
        @position = position
-        @_hash = [@rule, @position].hash
      end

      # Hash function.
@@ -30,7 +29,7 @@ class Propane
      # @return [Integer]
      #   Hash code.
      def hash
-        @_hash
+        [@rule, @position].hash
      end

      # Compare Item objects.
@@ -57,7 +56,7 @@ class Propane

      # Return the set of Items obtained by "closing" the current item.
      #
-      # If the next symbol for the current item is another Rule name, then
+      # If the following symbol for the current item is another Rule name, then
      # this method will return all Items for that Rule with a position of 0.
      # Otherwise, an empty Array is returned.
      #
@@ -82,17 +81,17 @@ class Propane
        @position == @rule.components.size
      end

-      # Get the next symbol for the Item.
+      # Get the following symbol for the Item.
      #
-      # That is, the symbol which is after the parse position marker in the
+      # That is, the symbol which follows the parse position marker in the
      # current Item.
      #
      # @param offset [Integer]
      #   Offset from current parse position to examine.
      #
      # @return [Token, RuleSet, nil]
-      #   Next symbol for the Item.
-      def next_symbol(offset = 0)
+      #   Following symbol for the Item.
+      def following_symbol(offset = 0)
        @rule.components[@position + offset]
      end

@@ -109,25 +108,25 @@ class Propane
        end
      end

-      # Get whether this Item's next symbol is the given symbol.
+      # Get whether this Item is followed by the provided symbol.
      #
      # @param symbol [Token, RuleSet]
      #   Symbol to query.
      #
      # @return [Boolean]
-      #   Whether this Item's next symbol is the given symbol.
-      def next_symbol?(symbol)
-        next_symbol == symbol
+      #   Whether this Item is followed by the provided symbol.
+      def followed_by?(symbol)
+        following_symbol == symbol
      end

-      # Get the next item for this Item.
+      # Get the following item for this Item.
      #
      # That is, the Item formed by moving the parse position marker one place
      # forward from its position in this Item.
      #
      # @return [Item]
-      #   The next item for this Item.
-      def next_item
+      #   The following item for this Item.
+      def following_item
        Item.new(@rule, @position + 1)
      end

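The @_hash change above trades a cached hash for a recomputed one. Caching is safe here because an Item never mutates after construction, and it pays off when many Items are used as Set members or Hash keys. A minimal sketch of the pattern on an unrelated value object:

    require "set"

    class Pair
      attr_reader :a, :b
      def initialize(a, b)
        @a = a
        @b = b
        @_hash = [a, b].hash # computed once; the object is immutable
      end
      def hash
        @_hash
      end
      def eql?(other)
        other.is_a?(Pair) && a == other.a && b == other.b
      end
    end

    Set[Pair.new(1, 2), Pair.new(1, 2)].size # => 1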
@@ -2,7 +2,7 @@ class Propane
  class Parser

    # Represent a parser "item set", which is a set of possible items that the
-    # parser could currently be parsing. This is equivalent to a parser state.
+    # parser could currently be parsing.
    class ItemSet

      # @return [Set<Item>]
@@ -14,58 +14,45 @@ class Propane
      attr_accessor :id

      # @return [Hash]
-      #   Maps a next symbol to its ItemSet.
-      attr_reader :next_item_set
+      #   Maps a following symbol to its ItemSet.
+      attr_reader :following_item_set

      # @return [Set<ItemSet>]
      #   ItemSets leading to this item set.
      attr_reader :in_sets

-      # @return [nil, Rule]
-      #   Rule to reduce if there is only one possibility.
-      attr_accessor :reduce_rule
-
-      # @return [Set<Rule>]
-      #   Set of rules that could be reduced in this parser state.
-      attr_accessor :reduce_rules
-
-      # @return [nil, Hash]
-      #   Reduce actions, mapping lookahead tokens to rules, if there is
-      #   more than one rule that could be reduced.
+      # @return [nil, Rule, Hash]
+      #   Reduce actions, mapping lookahead tokens to rules.
      attr_accessor :reduce_actions

-      # @return [Array<Hash>]
-      #   Shift table entries.
-      attr_accessor :shift_entries
-
      # Build an ItemSet.
      #
      # @param items [Array<Item>]
      #   Items in this ItemSet.
      def initialize(items)
        @items = Set.new(items)
-        @next_item_set = {}
+        @following_item_set = {}
        @in_sets = Set.new
        close!
      end

-      # Get the set of next symbols for all Items in this ItemSet.
+      # Get the set of following symbols for all Items in this ItemSet.
      #
      # @return [Set<Token, RuleSet>]
-      #   Set of next symbols for all Items in this ItemSet.
-      def next_symbols
-        @_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
+      #   Set of following symbols for all Items in this ItemSet.
+      def following_symbols
+        Set.new(@items.map(&:following_symbol).compact)
      end

-      # Build a next ItemSet for the given next symbol.
+      # Build a following ItemSet for the given following symbol.
      #
      # @param symbol [Token, RuleSet]
-      #   Next symbol to build the next ItemSet for.
+      #   Following symbol to build the following ItemSet for.
      #
      # @return [ItemSet]
-      #   Next ItemSet for the given next symbol.
-      def build_next_item_set(symbol)
-        ItemSet.new(items_with_next(symbol).map(&:next_item))
+      #   Following ItemSet for the given following symbol.
+      def build_following_item_set(symbol)
+        ItemSet.new(items_followed_by(symbol).map(&:following_item))
      end

      # Hash function.
@@ -100,26 +87,13 @@ class Propane

      # Set of ItemSets that lead to this ItemSet.
      #
+      # This set includes this ItemSet.
+      #
      # @return [Set<ItemSet>]
      #   Set of all ItemSets that lead up to this ItemSet.
      def leading_item_sets
-        @_leading_item_sets ||=
-          begin
-            result = Set.new
-            eval_sets = Set[self]
-            evaled = Set.new
-            while eval_sets.size > 0
-              eval_set = eval_sets.first
-              eval_sets.delete(eval_set)
-              evaled << eval_set
-              eval_set.in_sets.each do |in_set|
-                result << in_set
-                unless evaled.include?(in_set)
-                  eval_sets << in_set
-                end
-              end
-            end
-            result
-          end
+        @in_sets.reduce(Set[self]) do |result, item_set|
+          result + item_set.leading_item_sets
+        end
      end

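The two leading_item_sets bodies above differ in more than memoization. The recursive reduce version expands predecessors without tracking what it has already visited, so a cycle between item sets (state A leads to B and B back to A) can recurse without bound, while the worklist version marks each set as evaluated and visits it once. A minimal sketch of the cycle-safe worklist idea over a generic graph, assuming each node responds to a hypothetical preds method returning the nodes leading into it:

    require "set"

    # Collect every node reachable by walking predecessor edges from start.
    def all_predecessors(start)
      result = Set.new
      visited = Set.new
      worklist = [start]
      until worklist.empty?
        node = worklist.pop
        next if visited.include?(node)
        visited << node
        node.preds.each do |pred|
          result << pred
          worklist << pred # safe: visited nodes are skipped above
        end
      end
      result
    end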
@@ -153,16 +127,16 @@ class Propane
        end
      end

-      # Get the Items with the given next symbol.
+      # Get the Items followed by the given following symbol.
      #
      # @param symbol [Token, RuleSet]
-      #   Next symbol.
+      #   Following symbol.
      #
      # @return [Array<Item>]
-      #   Items with the given next symbol.
-      def items_with_next(symbol)
+      #   Items followed by the given following symbol.
+      def items_followed_by(symbol)
        @items.select do |item|
-          item.next_symbol?(symbol)
+          item.followed_by?(symbol)
        end
      end

@@ -26,9 +26,9 @@ class Propane
    #   Regex NFA for matching the pattern.
    attr_reader :nfa

-    # @return [Set]
-    #   Lexer modes for this pattern.
-    attr_accessor :modes
+    # @return [String, nil]
+    #   Lexer mode for this pattern.
+    attr_accessor :mode

    # @return [String, nil]
    #   Parser value type name.
@@ -46,16 +46,16 @@ class Propane
    #   Token to be returned by this pattern.
    # @option options [Integer, nil] :line_number
    #   Line number where the token was defined in the input grammar.
-    # @option options [String, nil] :modes
-    #   Lexer modes for this pattern.
+    # @option options [String, nil] :mode
+    #   Lexer mode for this pattern.
    def initialize(options)
      @code = options[:code]
      @pattern = options[:pattern]
      @token = options[:token]
      @line_number = options[:line_number]
-      @modes = options[:modes]
+      @mode = options[:mode]
      @ptypename = options[:ptypename]
-      regex = Regex.new(@pattern, @line_number)
+      regex = Regex.new(@pattern)
      regex.nfa.end_state.accepts = self
      @nfa = regex.nfa
    end
@@ -4,13 +4,12 @@ class Propane
    attr_reader :unit
    attr_reader :nfa

-    def initialize(pattern, line_number)
+    def initialize(pattern)
      @pattern = pattern.dup
-      @line_number = line_number
      @unit = parse_alternates
      @nfa = @unit.to_nfa
      if @pattern != ""
-        raise Error.new(%[Line #{@line_number}: unexpected "#{@pattern}" in pattern])
+        raise Error.new(%[Unexpected "#{@pattern}" in pattern])
      end
    end

@@ -42,7 +41,7 @@ class Propane
          mu = MultiplicityUnit.new(last_unit, min_count, max_count)
          au.replace_last!(mu)
        else
-          raise Error.new("Line #{@line_number}: #{c} follows nothing")
+          raise Error.new("#{c} follows nothing")
        end
      when "|"
        au.new_alternate!
@@ -60,7 +59,7 @@ class Propane
    def parse_group
      au = parse_alternates
      if @pattern[0] != ")"
-        raise Error.new("Line #{@line_number}: unterminated group in pattern")
+        raise Error.new("Unterminated group in pattern")
      end
      @pattern.slice!(0)
      au
@@ -71,7 +70,7 @@ class Propane
      index = 0
      loop do
        if @pattern == ""
-          raise Error.new("Line #{@line_number}: unterminated character class")
+          raise Error.new("Unterminated character class")
        end
        c = @pattern.slice!(0)
        if c == "]"
@@ -85,13 +84,13 @@ class Propane
        elsif c == "-" && @pattern[0] != "]"
          begin_cu = ccu.last_unit
          unless begin_cu.is_a?(CharacterRangeUnit) && begin_cu.code_point_range.size == 1
-            raise Error.new("Line #{@line_number}: character range must be between single characters")
+            raise Error.new("Character range must be between single characters")
          end
          if @pattern[0] == "\\"
            @pattern.slice!(0)
            end_cu = parse_backslash
            unless end_cu.is_a?(CharacterRangeUnit) && end_cu.code_point_range.size == 1
-              raise Error.new("Line #{@line_number}: character range must be between single characters")
+              raise Error.new("Character range must be between single characters")
            end
            max_code_point = end_cu.code_point
          else
@@ -117,7 +116,7 @@ class Propane
        elsif max_count.to_s != ""
          max_count = max_count.to_i
          if max_count < min_count
-            raise Error.new("Line #{@line_number}: maximum repetition count cannot be less than minimum repetition count")
+            raise Error.new("Maximum repetition count cannot be less than minimum repetition count")
          end
        else
          max_count = nil
@@ -125,33 +124,18 @@ class Propane
        @pattern = pattern
        [min_count, max_count]
      else
-        raise Error.new("Line #{@line_number}: unexpected match count following {")
+        raise Error.new("Unexpected match count at #{@pattern}")
      end
    end

    def parse_backslash
      if @pattern == ""
-        raise Error.new("Line #{@line_number}: error: unfollowed \\")
+        raise Error.new("Error: unfollowed \\")
      else
        c = @pattern.slice!(0)
        case c
-        when "a"
-          CharacterRangeUnit.new("\a")
-        when "b"
-          CharacterRangeUnit.new("\b")
        when "d"
          CharacterRangeUnit.new("0", "9")
-        when "D"
-          ccu = CharacterClassUnit.new
-          ccu << CharacterRangeUnit.new("0", "9")
-          ccu.negate = true
-          ccu
-        when "f"
-          CharacterRangeUnit.new("\f")
-        when "n"
-          CharacterRangeUnit.new("\n")
-        when "r"
-          CharacterRangeUnit.new("\r")
        when "s"
          ccu = CharacterClassUnit.new
          ccu << CharacterRangeUnit.new(" ")
@@ -161,35 +145,6 @@ class Propane
          ccu << CharacterRangeUnit.new("\f")
          ccu << CharacterRangeUnit.new("\v")
          ccu
-        when "S"
-          ccu = CharacterClassUnit.new
-          ccu << CharacterRangeUnit.new(" ")
-          ccu << CharacterRangeUnit.new("\t")
-          ccu << CharacterRangeUnit.new("\r")
-          ccu << CharacterRangeUnit.new("\n")
-          ccu << CharacterRangeUnit.new("\f")
-          ccu << CharacterRangeUnit.new("\v")
-          ccu.negate = true
-          ccu
-        when "t"
-          CharacterRangeUnit.new("\t")
-        when "v"
-          CharacterRangeUnit.new("\v")
-        when "w"
-          ccu = CharacterClassUnit.new
-          ccu << CharacterRangeUnit.new("_")
-          ccu << CharacterRangeUnit.new("0", "9")
-          ccu << CharacterRangeUnit.new("a", "z")
-          ccu << CharacterRangeUnit.new("A", "Z")
-          ccu
-        when "W"
-          ccu = CharacterClassUnit.new
-          ccu << CharacterRangeUnit.new("_")
-          ccu << CharacterRangeUnit.new("0", "9")
-          ccu << CharacterRangeUnit.new("a", "z")
-          ccu << CharacterRangeUnit.new("A", "Z")
-          ccu.negate = true
-          ccu
        else
          CharacterRangeUnit.new(c)
        end
@@ -92,20 +92,17 @@ class Propane
        @units = []
        @negate = false
      end
-      def method_missing(*args, &block)
-        @units.__send__(*args, &block)
+      def initialize
+        @units = []
+      end
+      def method_missing(*args)
+        @units.__send__(*args)
      end
      def <<(thing)
        if thing.is_a?(CharacterClassUnit)
-          if thing.negate
-            CodePointRange.invert_ranges(thing.map(&:code_point_range)).each do |cpr|
-              CharacterRangeUnit.new(cpr.first, cpr.last)
-            end
-          else
            thing.each do |ccu_unit|
              @units << ccu_unit
            end
-          end
        else
          @units << thing
        end
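The negated classes handled above (thing.negate) are flattened by inverting the member ranges over the code point space via CodePointRange.invert_ranges. A minimal sketch of that inversion, assuming sorted, non-overlapping [lo, hi] integer pairs as input:

    MAX_CODE_POINT = 0x10FFFF

    # Return the ranges NOT covered by the given sorted, disjoint ranges.
    def invert_ranges(ranges)
      inverted = []
      next_lo = 0
      ranges.each do |lo, hi|
        inverted << [next_lo, lo - 1] if lo > next_lo
        next_lo = hi + 1
      end
      inverted << [next_lo, MAX_CODE_POINT] if next_lo <= MAX_CODE_POINT
      inverted
    end

    invert_ranges([[0x30, 0x39]]) # => [[0, 0x2F], [0x3A, 0x10FFFF]], i.e. \D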
@@ -6,10 +6,6 @@ class Propane
    #   Rule components.
    attr_reader :components

-    # @return [Hash]
-    #   Field aliases.
-    attr_reader :aliases
-
    # @return [String]
    #   User code associated with the rule.
    attr_reader :code
@@ -34,11 +30,6 @@ class Propane
    #   The RuleSet that this Rule is a part of.
    attr_accessor :rule_set

-    # @return [Array<Integer>]
-    #   Map this rule's components to their positions in the parent RuleSet's
-    #   node field pointer array. This is used for AST construction.
-    attr_accessor :rule_set_node_field_index_map
-
    # Construct a Rule.
    #
    # @param name [String]
@@ -53,20 +44,7 @@ class Propane
    #   Line number where the rule was defined in the input grammar.
    def initialize(name, components, code, ptypename, line_number)
      @name = name
-      @aliases = {}
-      @components = components.each_with_index.map do |component, i|
-        if component =~ /(\S+):(\S+)/
-          c, aliasname = $1, $2
-          if @aliases[aliasname]
-            raise Error.new("Error: duplicate field alias `#{aliasname}` for rule #{name} defined on line #{line_number}")
-          end
-          @aliases[aliasname] = i
-          c
-        else
-          component
-        end
-      end
-      @rule_set_node_field_index_map = components.map {0}
+      @components = components
      @code = code
      @ptypename = ptypename
      @line_number = line_number
@@ -82,14 +60,6 @@ class Propane
      @components.empty?
    end

-    # Return whether this is an optional Rule.
-    #
-    # @return [Boolean]
-    #   Whether this is an optional Rule.
-    def optional?
-      @name.end_with?("?")
-    end
-
    # Represent the Rule as a String.
    #
    # @return [String]
@@ -98,17 +68,6 @@ class Propane
      "#{@name} -> #{@components.map(&:name).join(" ")}"
    end

-    # Check whether the rule set node field index map is just a 1:1 mapping.
-    #
-    # @return [Boolean]
-    #   Boolean indicating whether the rule set node field index map is just a
-    #   1:1 mapping.
-    def flat_rule_set_node_field_index_map?
-      @rule_set_node_field_index_map.each_with_index.all? do |v, i|
-        v == i
-      end
-    end
-
  end

end
@@ -1,12 +1,7 @@
class Propane

-  # A RuleSet collects all grammar rules of the same name.
  class RuleSet

-    # @return [Array<Hash>]
-    #   AST fields.
-    attr_reader :ast_fields
-
    # @return [Integer]
    #   ID of the RuleSet.
    attr_reader :id
@@ -56,24 +51,6 @@ class Propane
      @could_be_empty
    end

-    # Return whether this is an optional RuleSet.
-    #
-    # @return [Boolean]
-    #   Whether this is an optional RuleSet.
-    def optional?
-      @name.end_with?("?")
-    end
-
-    # For optional rule sets, return the underlying component that is optional.
-    def option_target
-      @rules.each do |rule|
-        if rule.components.size > 0
-          return rule.components[0]
-        end
-      end
-      raise "Optional rule target not found"
-    end
-
    # Build the start token set for the RuleSet.
    #
    # @return [Set<Token>]
@@ -98,72 +75,6 @@ class Propane
      @_start_token_set
    end

-    # Finalize a RuleSet after adding all Rules to it.
-    def finalize(grammar)
-      if grammar.ast
-        build_ast_fields(grammar)
-      end
-    end
-
-    private
-
-    # Build the set of AST fields for this RuleSet.
-    #
-    # This is an Array of Hashes. Each entry in the Array corresponds to a
-    # field location in the AST node. The entry is a Hash. It could have one or
-    # two keys. It will always have the field name with a positional suffix as
-    # a key. It may also have the field name without the positional suffix if
-    # that field only exists in one position across all Rules in the RuleSet.
-    #
-    # @return [void]
-    def build_ast_fields(grammar)
-      field_ast_node_indexes = {}
-      field_indexes_across_all_rules = {}
-      # Stores the index into @ast_fields by field alias name.
-      field_aliases = {}
-      @ast_fields = []
-      @rules.each do |rule|
-        rule.components.each_with_index do |component, i|
-          if component.is_a?(RuleSet) && component.optional?
-            component = component.option_target
-          end
-          if component.is_a?(Token)
-            node_name = "Token"
-          else
-            node_name = component.name
-          end
-          struct_name = "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
-          field_name = "p#{node_name}#{i + 1}"
-          unless field_ast_node_indexes[field_name]
-            field_ast_node_indexes[field_name] = @ast_fields.size
-            @ast_fields << {field_name => struct_name}
-          end
-          rule.aliases.each do |alias_name, index|
-            if index == i
-              alias_ast_fields_index = field_ast_node_indexes[field_name]
-              if field_aliases[alias_name] && field_aliases[alias_name] != alias_ast_fields_index
-                raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}` in rule #{rule.name} defined on line #{rule.line_number}")
-              end
-              field_aliases[alias_name] = alias_ast_fields_index
-              @ast_fields[alias_ast_fields_index][alias_name] = @ast_fields[alias_ast_fields_index].first[1]
-            end
-          end
-          field_indexes_across_all_rules[node_name] ||= Set.new
-          field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
-          rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
-        end
-      end
-      field_indexes_across_all_rules.each do |node_name, indexes_across_all_rules|
-        if indexes_across_all_rules.size == 1
-          # If this field was only seen in one position across all rules,
-          # then add an alias to the positional field name that does not
-          # include the position.
-          @ast_fields[indexes_across_all_rules.first]["p#{node_name}"] =
-            "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
-        end
-      end
-    end
-
  end

end
@@ -10,32 +10,6 @@ class Propane
      "#{s}\n* #{message} *\n#{s}\n"
    end

-    # Determine the number of threads to use.
-    #
-    # @return [Integer]
-    #   The number of threads to use.
-    def determine_n_threads
-      # Try to figure out how many threads are available on the host hardware.
-      begin
-        case RbConfig::CONFIG["host_os"]
-        when /linux/
-          return File.read("/proc/cpuinfo").scan(/^processor\s*:/).size
-        when /mswin|mingw|msys/
-          if `wmic cpu get NumberOfLogicalProcessors -value` =~ /NumberOfLogicalProcessors=(\d+)/
-            return $1.to_i
-          end
-        when /darwin/
-          if `sysctl -n hw.ncpu` =~ /(\d+)/
-            return $1.to_i
-          end
-        end
-      rescue
-      end
-
-      # If we can't figure it out, default to 4.
-      4
-    end
-
  end

end
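The removed determine_n_threads helper above probes /proc/cpuinfo, wmic, or sysctl depending on the host OS. On Ruby 2.2+ a portable alternative is Etc.nprocessors; a minimal sketch preserving the same default-to-4 fallback (note that Etc.nprocessors raises NotImplementedError, not a StandardError, on unsupported platforms):

    require "etc"

    def determine_n_threads
      Etc.nprocessors
    rescue StandardError, NotImplementedError
      4 # conservative default when the processor count is unavailable
    end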
@@ -1,3 +1,3 @@
class Propane
-  VERSION = "2.3.0"
+  VERSION = "0.1.0"
end
2	propane.sh	Executable file
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec bundle exec ruby -Ilib bin/propane "$@"
@@ -1,6 +1,5 @@
#!/usr/bin/env ruby

-require "erb"
require "fileutils"
require "digest/md5"

@@ -14,24 +13,6 @@ START_FILE = "bin/#{PROG_NAME}"
LIB_DIR = "lib"
DIST = "dist"

-ASSETS_TEMPLATE = <<EOF
-class Propane
-  module Assets
-    class << self
-      def get(name)
-        case name
-<% Dir.glob("assets/*").each do |asset_file| %>
-        when <%= File.basename(asset_file).inspect %>
-          <%= File.binread(asset_file).inspect %>
-<% end %>
-        end
-      end
-    end
-  end
-end
-EOF
-
-assets_module = ERB.new(ASSETS_TEMPLATE, trim_mode: "<>").result
-
files_processed = {}
combined_file = []

@@ -44,12 +25,8 @@ combine_files = lambda do |file|
  if File.exist?(path)
    unless files_processed[path]
      files_processed[path] = true
-      if require_name == "propane/assets"
-        combined_file << assets_module
-      else
        combine_files[path]
-      end
    end
  else
    raise "require path #{path.inspect} not found"
  end
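The ASSETS_TEMPLATE logic removed above packs every file under assets/ into the single distributed script: an ERB template is rendered at build time into a generated Ruby module that returns each asset's bytes by name. A minimal sketch of the same embed-at-build-time technique, assuming an assets/ directory relative to the build script:

    require "erb"

    TEMPLATE = <<EOF
    module Assets
      FILES = {
    <% Dir.glob("assets/*").sort.each do |path| %>
        <%= File.basename(path).inspect %> => <%= File.binread(path).inspect %>,
    <% end %>
      }
    end
    EOF

    # trim_mode "<>" drops the newlines left behind by the <% %> control lines.
    generated = ERB.new(TEMPLATE, trim_mode: "<>").result(binding)
    File.write("assets.rb", generated)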
@@ -1,151 +0,0 @@
-ast;
-ast_prefix P;
-
-<<header
-#include <stdio.h>
-#include <stdint.h>
-
-typedef union
-{
-    uint64_t i64;
-    const uint8_t * s;
-    double dou;
-} TokenVal;
->>
-
-ptype TokenVal;
-
-# Keywords.
-token byte;
-token def;
-token int;
-token long;
-token module;
-token return;
-token short;
-token size_t;
-token ssize_t;
-token ubyte;
-token uint;
-token ulong;
-token ushort;
-
-# Symbols.
-token arrow /->/;
-token comma /,/;
-token lbrace /\{/;
-token lparen /\(/;
-token rbrace /\}/;
-token rparen /\)/;
-token semicolon /;/;
-
-# Integer literals.
-token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
-    $$.i64 = 64u;
->>
-
-# Identifier.
-token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
-    $$.s = match;
-    $mode(default);
-    return $token(ident);
->>
-
-# Comments.
-drop /#.*/;
-
-# Whitespace.
-drop /[ \r\n]*/;
-
-start Module;
-
-# Assignment operators - right associative
-Expression -> Expression_Or:exp0;
-
-# Logical OR operator - left associative
-Expression_Or -> Expression_And:exp0;
-
-# Logical AND operator - left associative
-Expression_And -> Expression_Comp:exp0;
-
-# Equality operators - left associative
-Expression_Comp -> Expression_Relational:exp0;
-
-# Relational operators - left associative
-Expression_Relational -> Expression_REMatch:exp0;
-
-# Regular expression - left associative
-Expression_REMatch -> Expression_BinOr:exp0;
-
-# Binary OR operator - left associative
-Expression_BinOr -> Expression_Xor:exp0;
-
-# Binary XOR operator - left associative
-Expression_Xor -> Expression_BinAnd:exp0;
-
-# Binary AND operator - left associative
-Expression_BinAnd -> Expression_BitShift:exp0;
-
-# Bit shift operators - left associative
-Expression_BitShift -> Expression_Plus:exp0;
-
-# Add/subtract operators - left associative
-Expression_Plus -> Expression_Mul:exp0;
-
-# Multiplication/divide/modulus operators - left associative
-Expression_Mul -> Expression_Range:exp0;
-
-# Range construction operators - left associative
-Expression_Range -> Expression_UnaryPrefix:exp0;
-
-# Unary prefix operators
-Expression_UnaryPrefix -> Expression_Dot:exp0;
-
-# Postfix operators
-Expression_Dot -> Expression_Ident:exp0;
-Expression_Dot -> Expression_Dot:exp1 lparen rparen;
-
-# Literals, identifiers, and parenthesized expressions
-Expression_Ident -> Literal;
-Expression_Ident -> ident;
-
-FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
-
-FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
-FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
-
-FunctionReturnType -> arrow Type;
-
-Literal -> LiteralInteger;
-LiteralInteger -> hex_int_l;
-
-Module -> ModuleStatement? ModuleItems;
-
-ModuleItem -> FunctionDefinition;
-
-ModuleItems -> ;
-ModuleItems -> ModuleItems ModuleItem;
-
-ModulePath -> ident;
-
-ModuleStatement -> module ModulePath semicolon;
-
-ReturnStatement -> return Expression?:exp0 semicolon;
-
-Statements -> ;
-Statements -> Statements Statement;
-Statement -> Expression semicolon;
-Statement -> ReturnStatement;
-
-Type -> TypeBase;
-
-TypeBase -> byte;
-TypeBase -> ubyte;
-TypeBase -> short;
-TypeBase -> ushort;
-TypeBase -> int;
-TypeBase -> uint;
-TypeBase -> long;
-TypeBase -> ulong;
-TypeBase -> size_t;
-TypeBase -> ssize_t;
@@ -1,177 +0,0 @@
-ast;
-ast_prefix P;
-
-<<
-import std.bigint;
-
-private string stringvalue;
-
-union TokenVal
-{
-    BigInt bi;
-    string s;
-    double dou;
-}
->>
-
-ptype TokenVal;
-
-# Keywords.
-token byte;
-token def;
-token int;
-token long;
-token module;
-token return;
-token short;
-token size_t;
-token ssize_t;
-token ubyte;
-token uint;
-token ulong;
-token ushort;
-
-# Symbols.
-token arrow /->/;
-token comma /,/;
-token lbrace /\{/;
-token lparen /\(/;
-token rbrace /\}/;
-token rparen /\)/;
-token semicolon /;/;
-
-# Integer literals.
-token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
-    $$.bi = BigInt(match[0..3]);
-    foreach (c; match[3..$])
-    {
-        if (('0' <= c) && (c <= '9'))
-        {
-            $$.bi *= 0x10;
-            $$.bi += (c - '0');
-        }
-        if (('a' <= c) && (c <= 'f'))
-        {
-            $$.bi *= 0x10;
-            $$.bi += (c - 'a' + 10);
-        }
-        if (('A' <= c) && (c <= 'F'))
-        {
-            $$.bi *= 0x10;
-            $$.bi += (c - 'A' + 10);
-        }
-    }
->>
-
-# Identifier.
-token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
-    if (match[0] == '$')
-    {
-        $$.s = match[1..$];
-    }
-    else
-    {
-        $$.s = match;
-    }
-    $mode(default);
-    return $token(ident);
->>
-
-# Comments.
-drop /#.*/;
-
-# Whitespace.
-drop /[ \r\n]*/;
-
-start Module;
-
-# Assignment operators - right associative
-Expression -> Expression_Or:exp0;
-
-# Logical OR operator - left associative
-Expression_Or -> Expression_And:exp0;
-
-# Logical AND operator - left associative
-Expression_And -> Expression_Comp:exp0;
-
-# Equality operators - left associative
-Expression_Comp -> Expression_Relational:exp0;
-
-# Relational operators - left associative
-Expression_Relational -> Expression_REMatch:exp0;
-
-# Regular expression - left associative
-Expression_REMatch -> Expression_BinOr:exp0;
-
-# Binary OR operator - left associative
-Expression_BinOr -> Expression_Xor:exp0;
-
-# Binary XOR operator - left associative
-Expression_Xor -> Expression_BinAnd:exp0;
-
-# Binary AND operator - left associative
-Expression_BinAnd -> Expression_BitShift:exp0;
-
-# Bit shift operators - left associative
-Expression_BitShift -> Expression_Plus:exp0;
-
-# Add/subtract operators - left associative
-Expression_Plus -> Expression_Mul:exp0;
-
-# Multiplication/divide/modulus operators - left associative
-Expression_Mul -> Expression_Range:exp0;
-
-# Range construction operators - left associative
-Expression_Range -> Expression_UnaryPrefix:exp0;
-
-# Unary prefix operators
-Expression_UnaryPrefix -> Expression_Dot:exp0;
-
-# Postfix operators
-Expression_Dot -> Expression_Ident:exp0;
-Expression_Dot -> Expression_Dot:exp1 lparen rparen;
-
-# Literals, identifiers, and parenthesized expressions
-Expression_Ident -> Literal;
-Expression_Ident -> ident;
-
-FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
-
-FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
-FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
-
-FunctionReturnType -> arrow Type;
-
-Literal -> LiteralInteger;
-LiteralInteger -> hex_int_l;
-
-Module -> ModuleStatement? ModuleItems;
-
-ModuleItem -> FunctionDefinition;
-
-ModuleItems -> ;
-ModuleItems -> ModuleItems ModuleItem;
-
-ModulePath -> ident;
-
-ModuleStatement -> module ModulePath semicolon;
-
-ReturnStatement -> return Expression?:exp0 semicolon;
-
-Statements -> ;
-Statements -> Statements Statement;
-Statement -> Expression semicolon;
-Statement -> ReturnStatement;
-
-Type -> TypeBase;
-
-TypeBase -> byte;
-TypeBase -> ubyte;
-TypeBase -> short;
-TypeBase -> ushort;
-TypeBase -> int;
-TypeBase -> uint;
-TypeBase -> long;
-TypeBase -> ulong;
-TypeBase -> size_t;
-TypeBase -> ssize_t;
@@ -1,183 +0,0 @@
-<<header
-#include "json_types.h"
-#include "testutils.h"
->>
-<<
-#include "math.h"
-#include <stdbool.h>
-static str_t string_value;
->>
-
-ptype JSONValue *;
-
-drop /\s+/;
-token lbrace /\{/;
-token rbrace /\}/;
-token lbracket /\[/;
-token rbracket /\]/;
-token comma /,/;
-token colon /:/;
-token number /-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?/ <<
-    double n = 0.0;
-    bool negative = false;
-    size_t i = 0u;
-    if (match[i] == '-')
-    {
-        negative = true;
-        i++;
-    }
-    while ('0' <= match[i] && match[i] <= '9')
-    {
-        n *= 10.0;
-        n += (match[i] - '0');
-        i++;
-    }
-    if (match[i] == '.')
-    {
-        i++;
-        double mult = 0.1;
-        while ('0' <= match[i] && match[i] <= '9')
-        {
-            n += mult * (match[i] - '0');
-            mult /= 10.0;
-            i++;
-        }
-    }
-    if (match[i] == 'e' || match[i] == 'E')
-    {
-        bool exp_negative = false;
-        i++;
-        if (match[i] == '-')
-        {
-            exp_negative = true;
-            i++;
-        }
-        else if (match[i] == '+')
-        {
-            i++;
-        }
-        long exp = 0.0;
-        while ('0' <= match[i] && match[i] <= '9')
-        {
-            exp *= 10;
-            exp += (match[i] - '0');
-            i++;
-        }
-        if (exp_negative)
-        {
-            exp = -exp;
-        }
-        n = pow(n, exp);
-    }
-    if (negative)
-    {
-        n = -n;
-    }
-    $$ = JSONValue_new(JSON_NUMBER);
-    $$->number = n;
->>
-token true <<
-    $$ = JSONValue_new(JSON_TRUE);
->>
-token false <<
-    $$ = JSONValue_new(JSON_FALSE);
->>
-token null <<
-    $$ = JSONValue_new(JSON_NULL);
->>
-/"/ <<
-    $mode(string);
-    str_init(&string_value, "");
->>
-string: token string /"/ <<
-    $$ = JSONValue_new(JSON_STRING);
-    $$->string = string_value;
-    $mode(default);
->>
-string: /\\"/ <<
-    str_append(&string_value, "\"");
->>
-string: /\\\\/ <<
-    str_append(&string_value, "\\");
->>
-string: /\\\// <<
-    str_append(&string_value, "/");
->>
-string: /\\b/ <<
-    str_append(&string_value, "\b");
->>
-string: /\\f/ <<
-    str_append(&string_value, "\f");
->>
-string: /\\n/ <<
-    str_append(&string_value, "\n");
->>
-string: /\\r/ <<
-    str_append(&string_value, "\r");
->>
-string: /\\t/ <<
-    str_append(&string_value, "\t");
->>
-string: /\\u[0-9a-fA-F]{4}/ <<
-    /* Not actually going to encode the code point for this example... */
-    char s[] = {'{', match[2], match[3], match[4], match[5], '}', 0};
-    str_append(&string_value, s);
->>
-string: /[^\\]/ <<
-    char s[] = {match[0], 0};
-    str_append(&string_value, s);
->>
-Start -> Value <<
-    $$ = $1;
->>
-Value -> string <<
-    $$ = $1;
->>
-Value -> number <<
-    $$ = $1;
->>
-Value -> Object <<
-    $$ = $1;
->>
-Value -> Array <<
-    $$ = $1;
->>
-Value -> true <<
-    $$ = $1;
->>
-Value -> false <<
-    $$ = $1;
->>
-Value -> null <<
-    $$ = $1;
->>
-Object -> lbrace rbrace <<
-    $$ = JSONObject_new();
->>
-Object -> lbrace KeyValues rbrace <<
-    $$ = $2;
->>
-KeyValues -> KeyValue <<
-    $$ = $1;
->>
-KeyValues -> KeyValues comma KeyValue <<
-    JSONObject_append($1, $3->object.entries[0].name, $3->object.entries[0].value);
-    $$ = $1;
->>
-KeyValue -> string colon Value <<
-    $$ = JSONObject_new();
-    JSONObject_append($$, str_cstr(&$1->string), $3);
->>
-Array -> lbracket rbracket <<
-    $$ = JSONArray_new();
->>
-Array -> lbracket Values rbracket <<
-    $$ = $2;
->>
-Values -> Value <<
-    $$ = $1;
->>
-Values -> Values comma Value <<
-    JSONArray_append($1, $3);
-    $$ = $1;
->>
@@ -1,64 +0,0 @@
-#include "json_types.h"
-#include <string.h>
-#include <stdlib.h>
-#include "testutils.h"
-
-JSONValue * JSONValue_new(size_t id)
-{
-    JSONValue * jv = calloc(1, sizeof(JSONValue));
-    jv->id = id;
-    return jv;
-}
-
-JSONValue * JSONObject_new(void)
-{
-    JSONValue * jv = JSONValue_new(JSON_OBJECT);
-    jv->object.size = 0u;
-    return jv;
-}
-
-void JSONObject_append(JSONValue * object, char const * name, JSONValue * value)
-{
-    size_t const size = object->object.size;
-    for (size_t i = 0u; i < size; i++)
-    {
-        if (strcmp(name, object->object.entries[i].name) == 0)
-        {
-            object->object.entries[i].value = value;
-            return;
-        }
-    }
-    size_t const new_size = size + 1;
-    void * new_entries = malloc(sizeof(object->object.entries[0]) * new_size);
-    if (size > 0)
-    {
-        memcpy(new_entries, object->object.entries, size * sizeof(object->object.entries[0]));
-        free(object->object.entries);
-    }
-    object->object.entries = new_entries;
-    object->object.entries[size].name = name;
-    object->object.entries[size].value = value;
-    object->object.size = new_size;
-}
-
-JSONValue * JSONArray_new(void)
-{
-    JSONValue * jv = JSONValue_new(JSON_ARRAY);
-    jv->array.size = 0u;
-    return jv;
-}
-
-void JSONArray_append(JSONValue * array, JSONValue * value)
-{
-    size_t const size = array->array.size;
-    size_t const new_size = size + 1;
-    JSONValue ** new_entries = malloc(sizeof(JSONValue *) * new_size);
-    if (array->array.size > 0)
-    {
-        memcpy(new_entries, array->array.entries, sizeof(JSONValue *) * size);
-        free(array->array.entries);
-    }
-    array->array.entries = new_entries;
-    array->array.entries[size] = value;
-    array->array.size = new_size;
-}
@@ -1,46 +0,0 @@
#pragma once

#include <stddef.h>
#include "testutils.h"

#define JSON_OBJECT 0u
#define JSON_ARRAY 1u
#define JSON_NUMBER 2u
#define JSON_STRING 3u
#define JSON_TRUE 4u
#define JSON_FALSE 5u
#define JSON_NULL 6u

typedef struct JSONValue_s
{
    size_t id;
    union
    {
        struct
        {
            size_t size;
            struct
            {
                char const * name;
                struct JSONValue_s * value;
            } * entries;
        } object;
        struct
        {
            size_t size;
            struct JSONValue_s ** entries;
        } array;
        double number;
        str_t string;
    };
} JSONValue;

JSONValue * JSONValue_new(size_t id);

JSONValue * JSONObject_new(void);

void JSONObject_append(JSONValue * object, char const * name, JSONValue * value);

JSONValue * JSONArray_new(void);

void JSONArray_append(JSONValue * array, JSONValue * value);
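Taken together, the two json_types files above form the small AST helper library used by the JSON grammar actions earlier in this diff. As a quick orientation, here is a minimal usage sketch, assuming only what json_types.h declares; it is illustrative and not part of the change set:

#include <stdio.h>
#include "json_types.h"

int main(void)
{
    /* Build an array holding one object: [{"x": 1.0}] */
    JSONValue * num = JSONValue_new(JSON_NUMBER);
    num->number = 1.0;

    JSONValue * obj = JSONObject_new();
    JSONObject_append(obj, "x", num);  /* replaces the value if "x" already exists */

    JSONValue * arr = JSONArray_new();
    JSONArray_append(arr, obj);

    printf("%zu entry, id %zu\n", arr->array.size, arr->array.entries[0]->id);
    return 0;
}

Like the fixture code above, the sketch allocates and never frees; JSONObject_append deliberately scans for an existing name first, so repeated appends under one key update in place rather than duplicating entries.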
@@ -5,6 +5,7 @@ class Propane
 # Comment line

 module a.b;
+class Foobar;
 ptype XYZ * ;

 token while;
@@ -29,6 +30,7 @@ B -> <<
 >>
 EOF
       grammar = Grammar.new(input)
+      expect(grammar.classname).to eq "Foobar"
       expect(grammar.modulename).to eq "a.b"
       expect(grammar.ptype).to eq "XYZ *"
       expect(grammar.ptypes).to eq("default" => "XYZ *")
@@ -36,44 +38,44 @@ EOF

       o = grammar.tokens.find {|token| token.name == "while"}
       expect(o).to_not be_nil
-      expect(o.line_number).to eq 6
+      expect(o.line_number).to eq 7

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to_not be_nil
       expect(o.pattern).to eq "while"
-      expect(o.line_number).to eq 6
+      expect(o.line_number).to eq 7
       expect(o.code).to be_nil

       o = grammar.tokens.find {|token| token.name == "id"}
       expect(o).to_not be_nil
-      expect(o.line_number).to eq 9
+      expect(o.line_number).to eq 10

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to_not be_nil
       expect(o.pattern).to eq "[a-zA-Z_][a-zA-Z_0-9]*"
-      expect(o.line_number).to eq 9
+      expect(o.line_number).to eq 10
       expect(o.code).to be_nil

       o = grammar.tokens.find {|token| token.name == "token_with_code"}
       expect(o).to_not be_nil
-      expect(o.line_number).to eq 11
+      expect(o.line_number).to eq 12

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to_not be_nil
       expect(o.pattern).to eq "token_with_code"
-      expect(o.line_number).to eq 11
+      expect(o.line_number).to eq 12
       expect(o.code).to eq "Code for the token\n"

       o = grammar.tokens.find {|token| token.name == "token_with_no_pattern"}
       expect(o).to_not be_nil
-      expect(o.line_number).to eq 15
+      expect(o.line_number).to eq 16

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to be_nil

       o = grammar.patterns.find {|pattern| pattern.pattern == "\\s+"}
       expect(o).to_not be_nil
-      expect(o.line_number).to eq 17
+      expect(o.line_number).to eq 18
       expect(o.token).to be_nil
       expect(o.code).to be_nil

@@ -82,19 +84,19 @@ EOF
       o = grammar.rules[0]
       expect(o.name).to eq "A"
       expect(o.components).to eq %w[B]
-      expect(o.line_number).to eq 19
+      expect(o.line_number).to eq 20
       expect(o.code).to eq " a = 42;\n"

       o = grammar.rules[1]
       expect(o.name).to eq "B"
       expect(o.components).to eq %w[C while id]
-      expect(o.line_number).to eq 22
+      expect(o.line_number).to eq 23
       expect(o.code).to be_nil

       o = grammar.rules[2]
       expect(o.name).to eq "B"
       expect(o.components).to eq []
-      expect(o.line_number).to eq 23
+      expect(o.line_number).to eq 24
       expect(o.code).to eq " b = 0;\n"
     end

@@ -151,30 +153,30 @@ EOF

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to_not be_nil
-      expect(o.modes).to be_empty
+      expect(o.mode).to be_nil

       o = grammar.tokens.find {|token| token.name == "b"}
       expect(o).to_not be_nil

       o = grammar.patterns.find {|pattern| pattern.token == o}
       expect(o).to_not be_nil
-      expect(o.modes).to eq Set["m1"]
+      expect(o.mode).to eq "m1"

       o = grammar.patterns.find {|pattern| pattern.pattern == "foo"}
       expect(o).to_not be_nil
-      expect(o.modes).to be_empty
+      expect(o.mode).to be_nil

       o = grammar.patterns.find {|pattern| pattern.pattern == "bar"}
       expect(o).to_not be_nil
-      expect(o.modes).to eq Set["m2"]
+      expect(o.mode).to eq "m2"

       o = grammar.patterns.find {|pattern| pattern.pattern == "q"}
       expect(o).to_not be_nil
-      expect(o.modes).to be_empty
+      expect(o.mode).to be_nil

       o = grammar.patterns.find {|pattern| pattern.pattern == "r"}
       expect(o).to_not be_nil
-      expect(o.modes).to eq Set["m3"]
+      expect(o.mode).to eq "m3"
     end

     it "allows assigning ptypes to tokens and rules" do
@@ -126,74 +126,6 @@ EOF
     ]
     expect(run(<<EOF, ";")).to eq expected
 token semicolon /;/;
-EOF
-  end
-
-  it "matches a negated character class" do
-    expected = [
-      ["pattern", "/abc/"],
-    ]
-    expect(run(<<EOF, "/abc/")).to eq expected
-token pattern /\\/[^\\s]*\\//;
-EOF
-  end
-
-  it "matches special character classes " do
-    expected = [
-      ["a", "abc123_FOO"],
-    ]
-    expect(run(<<EOF, "abc123_FOO")).to eq expected
-token a /\\w+/;
-EOF
-    expected = [
-      ["b", "FROG*%$#"],
-    ]
-    expect(run(<<EOF, "FROG*%$#")).to eq expected
-token b /FROG\\D{1,4}/;
-EOF
-    expected = [
-      ["c", "$883366"],
-    ]
-    expect(run(<<EOF, "$883366")).to eq expected
-token c /$\\d+/;
-EOF
-    expected = [
-      ["d", "^&$@"],
-    ]
-    expect(run(<<EOF, "^&$@")).to eq expected
-token d /^\\W+/;
-EOF
-    expected = [
-      ["a", "abc123_FOO"],
-      [nil, " "],
-      ["b", "FROG*%$#"],
-      [nil, " "],
-      ["c", "$883366"],
-      [nil, " "],
-      ["d", "^&$@"],
-    ]
-    expect(run(<<EOF, "abc123_FOO FROG*%$# $883366 ^&$@")).to eq expected
-token a /\\w+/;
-token b /FROG\\D{1,4}/;
-token c /$\\d+/;
-token d /^\\W+/;
-drop /\\s+/;
-EOF
-  end
-
-  it "matches a negated character class with a nested inner negated character class" do
-    expected = [
-      ["t", "$&*"],
-    ]
-    expect(run(<<EOF, "$&*")).to eq expected
-token t /[^%\\W]+/;
-EOF
-  end
-
-  it "\\s matches a newline" do
-    expected = [["s", "\n"]]
-    expect(run(<<EOF, "\n")).to eq expected
-token s /\\s/;
 EOF
   end
 end
@@ -2,14 +2,14 @@ class Propane
   RSpec.describe Regex do

     it "parses an empty expression" do
-      regex = Regex.new("", 1)
+      regex = Regex.new("")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0].size).to eq 0
     end

     it "parses a single character unit expression" do
-      regex = Regex.new("a", 1)
+      regex = Regex.new("a")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -19,7 +19,7 @@ class Propane
     end

     it "parses a group with a single character unit expression" do
-      regex = Regex.new("(a)", 1)
+      regex = Regex.new("(a)")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -33,7 +33,7 @@ class Propane
     end

     it "parses a *" do
-      regex = Regex.new("a*", 1)
+      regex = Regex.new("a*")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -47,7 +47,7 @@ class Propane
     end

     it "parses a +" do
-      regex = Regex.new("a+", 1)
+      regex = Regex.new("a+")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -61,7 +61,7 @@ class Propane
     end

     it "parses a ?" do
-      regex = Regex.new("a?", 1)
+      regex = Regex.new("a?")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -75,7 +75,7 @@ class Propane
     end

     it "parses a multiplicity count" do
-      regex = Regex.new("a{5}", 1)
+      regex = Regex.new("a{5}")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -89,7 +89,7 @@ class Propane
     end

     it "parses a minimum-only multiplicity count" do
-      regex = Regex.new("a{5,}", 1)
+      regex = Regex.new("a{5,}")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -103,7 +103,7 @@ class Propane
     end

     it "parses a minimum and maximum multiplicity count" do
-      regex = Regex.new("a{5,8}", 1)
+      regex = Regex.new("a{5,8}")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -118,7 +118,7 @@ class Propane
     end

     it "parses an escaped *" do
-      regex = Regex.new("a\\*", 1)
+      regex = Regex.new("a\\*")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -131,7 +131,7 @@ class Propane
     end

     it "parses an escaped +" do
-      regex = Regex.new("a\\+", 1)
+      regex = Regex.new("a\\+")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -144,7 +144,7 @@ class Propane
     end

     it "parses an escaped \\" do
-      regex = Regex.new("\\\\d", 1)
+      regex = Regex.new("\\\\d")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -157,7 +157,7 @@ class Propane
     end

     it "parses a character class" do
-      regex = Regex.new("[a-z_]", 1)
+      regex = Regex.new("[a-z_]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -175,7 +175,7 @@ class Propane
     end

     it "parses a negated character class" do
-      regex = Regex.new("[^xyz]", 1)
+      regex = Regex.new("[^xyz]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -189,25 +189,8 @@ class Propane
       expect(ccu[0].first).to eq "x".ord
     end

-    it "parses a negated character class with inner character classes" do
-      regex = Regex.new("[^x\\sz]", 1)
-      expect(regex.unit).to be_a Regex::AlternatesUnit
-      expect(regex.unit.alternates.size).to eq 1
-      expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
-      seq_unit = regex.unit.alternates[0]
-      expect(seq_unit.size).to eq 1
-      expect(seq_unit[0]).to be_a Regex::CharacterClassUnit
-      ccu = seq_unit[0]
-      expect(ccu.negate).to be_truthy
-      expect(ccu.size).to eq 8
-      expect(ccu[0]).to be_a Regex::CharacterRangeUnit
-      expect(ccu[0].first).to eq "x".ord
-      expect(ccu[1].first).to eq " ".ord
-      expect(ccu[7].first).to eq "z".ord
-    end
-
     it "parses - as a plain character at beginning of a character class" do
-      regex = Regex.new("[-9]", 1)
+      regex = Regex.new("[-9]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -221,7 +204,7 @@ class Propane
     end

     it "parses - as a plain character at end of a character class" do
-      regex = Regex.new("[0-]", 1)
+      regex = Regex.new("[0-]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -237,7 +220,7 @@ class Propane
     end

     it "parses - as a plain character at beginning of a negated character class" do
-      regex = Regex.new("[^-9]", 1)
+      regex = Regex.new("[^-9]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -252,7 +235,7 @@ class Propane
     end

     it "parses . as a plain character in a character class" do
-      regex = Regex.new("[.]", 1)
+      regex = Regex.new("[.]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -267,7 +250,7 @@ class Propane
     end

     it "parses - as a plain character when escaped in middle of character class" do
-      regex = Regex.new("[0\\-9]", 1)
+      regex = Regex.new("[0\\-9]")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -286,7 +269,7 @@ class Propane
     end

     it "parses alternates" do
-      regex = Regex.new("ab|c", 1)
+      regex = Regex.new("ab|c")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 2
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -296,7 +279,7 @@ class Propane
     end

     it "parses a ." do
-      regex = Regex.new("a.b", 1)
+      regex = Regex.new("a.b")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 1
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
@@ -307,7 +290,7 @@ class Propane
     end

     it "parses something complex" do
-      regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+", 1)
+      regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+")
       expect(regex.unit).to be_a Regex::AlternatesUnit
       expect(regex.unit.alternates.size).to eq 4
       expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
1005 spec/propane_spec.rb
File diff suppressed because it is too large
@@ -1,18 +1,5 @@
-unless ENV["dist_specs"]
 require "bundler/setup"
-require "simplecov"
+require "propane"

-SimpleCov.start do
-  add_filter "/spec/"
-  add_filter "/.bundle/"
-  if ENV["partial_specs"]
-    command_name "RSpec-partial"
-  else
-    command_name "RSpec"
-  end
-  project_name "Propane"
-  merge_timeout 3600
-end
-
 RSpec.configure do |config|
   # Enable flags like --only-failures and --next-failure
@@ -22,6 +9,3 @@ unless ENV["dist_specs"]
     c.syntax = :expect
   end
 end
-end
-
-require "propane"
@@ -1,55 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "a, ((b)), b";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    Start * start = p_result(&context);
    assert(start->pItems1 != NULL);
    assert(start->pItems != NULL);
    Items * items = start->pItems;
    assert(items->pItem != NULL);
    assert(items->pItem->pToken1 != NULL);
    assert_eq(TOKEN_a, items->pItem->pToken1->token);
    assert_eq(11, items->pItem->pToken1->pvalue);
    assert(items->pItemsMore != NULL);
    ItemsMore * itemsmore = items->pItemsMore;
    assert(itemsmore->pItem != NULL);
    assert(itemsmore->pItem->pItem != NULL);
    assert(itemsmore->pItem->pItem->pItem != NULL);
    assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
    assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
    assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
    assert(itemsmore->pItemsMore != NULL);
    itemsmore = itemsmore->pItemsMore;
    assert(itemsmore->pItem != NULL);
    assert(itemsmore->pItem->pToken1 != NULL);
    assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
    assert_eq(22, itemsmore->pItem->pToken1->pvalue);
    assert(itemsmore->pItemsMore == NULL);

    input = "";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    start = p_result(&context);
    assert(start->pItems == NULL);

    input = "2 1";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert_eq(P_SUCCESS, p_parse(&context));
    start = p_result(&context);
    assert(start->pItems != NULL);
    assert(start->pItems->pItem != NULL);
    assert(start->pItems->pItem->pDual != NULL);
    assert(start->pItems->pItem->pDual->pTwo1 != NULL);
    assert(start->pItems->pItem->pDual->pOne2 != NULL);
    assert(start->pItems->pItem->pDual->pTwo2 == NULL);
    assert(start->pItems->pItem->pDual->pOne1 == NULL);

    return 0;
}
@@ -1,57 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "a, ((b)), b";
    p_context_t context;
    p_context_init(&context, input);
    assert_eq(P_SUCCESS, p_parse(&context));
    Start * start = p_result(&context);
    assert(start.pItems1 !is null);
    assert(start.pItems !is null);
    Items * items = start.pItems;
    assert(items.pItem !is null);
    assert(items.pItem.pToken1 !is null);
    assert_eq(TOKEN_a, items.pItem.pToken1.token);
    assert_eq(11, items.pItem.pToken1.pvalue);
    assert(items.pItemsMore !is null);
    ItemsMore * itemsmore = items.pItemsMore;
    assert(itemsmore.pItem !is null);
    assert(itemsmore.pItem.pItem !is null);
    assert(itemsmore.pItem.pItem.pItem !is null);
    assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
    assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
    assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
    assert(itemsmore.pItemsMore !is null);
    itemsmore = itemsmore.pItemsMore;
    assert(itemsmore.pItem !is null);
    assert(itemsmore.pItem.pToken1 !is null);
    assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
    assert_eq(22, itemsmore.pItem.pToken1.pvalue);
    assert(itemsmore.pItemsMore is null);

    input = "";
    p_context_init(&context, input);
    assert_eq(P_SUCCESS, p_parse(&context));
    start = p_result(&context);
    assert(start.pItems is null);

    input = "2 1";
    p_context_init(&context, input);
    assert_eq(P_SUCCESS, p_parse(&context));
    start = p_result(&context);
    assert(start.pItems !is null);
    assert(start.pItems.pItem !is null);
    assert(start.pItems.pItem.pDual !is null);
    assert(start.pItems.pItem.pDual.pTwo1 !is null);
    assert(start.pItems.pItem.pDual.pOne2 !is null);
    assert(start.pItems.pItem.pDual.pTwo2 is null);
    assert(start.pItems.pItem.pDual.pOne1 is null);
}
@@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "\na\nb\nc";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);

    assert_eq(TOKEN_a, start->first->pToken->token);
    assert_eq(TOKEN_b, start->second->pToken->token);
    assert_eq(TOKEN_c, start->third->pToken->token);

    return 0;
}
@@ -1,21 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "\na\nb\nc";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);

    assert_eq(TOKEN_a, start.first.pToken.token);
    assert_eq(TOKEN_b, start.second.pToken.token);
    assert_eq(TOKEN_c, start.third.pToken.token);
}
@@ -1,102 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
    char const * input = "\na\n bb ccc";
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);

    assert_eq(2, start->pT1->pToken->position.row);
    assert_eq(1, start->pT1->pToken->position.col);
    assert_eq(2, start->pT1->pToken->end_position.row);
    assert_eq(1, start->pT1->pToken->end_position.col);
    assert(p_position_valid(start->pT1->pA->position));
    assert_eq(3, start->pT1->pA->position.row);
    assert_eq(3, start->pT1->pA->position.col);
    assert_eq(3, start->pT1->pA->end_position.row);
    assert_eq(8, start->pT1->pA->end_position.col);
    assert_eq(2, start->pT1->position.row);
    assert_eq(1, start->pT1->position.col);
    assert_eq(3, start->pT1->end_position.row);
    assert_eq(8, start->pT1->end_position.col);

    assert_eq(2, start->position.row);
    assert_eq(1, start->position.col);
    assert_eq(3, start->end_position.row);
    assert_eq(8, start->end_position.col);

    input = "a\nbb";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start->pT1->pToken->position.row);
    assert_eq(1, start->pT1->pToken->position.col);
    assert_eq(1, start->pT1->pToken->end_position.row);
    assert_eq(1, start->pT1->pToken->end_position.col);
    assert(p_position_valid(start->pT1->pA->position));
    assert_eq(2, start->pT1->pA->position.row);
    assert_eq(1, start->pT1->pA->position.col);
    assert_eq(2, start->pT1->pA->end_position.row);
    assert_eq(2, start->pT1->pA->end_position.col);
    assert_eq(1, start->pT1->position.row);
    assert_eq(1, start->pT1->position.col);
    assert_eq(2, start->pT1->end_position.row);
    assert_eq(2, start->pT1->end_position.col);

    assert_eq(1, start->position.row);
    assert_eq(1, start->position.col);
    assert_eq(2, start->end_position.row);
    assert_eq(2, start->end_position.col);

    input = "a\nc\nc";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start->pT1->pToken->position.row);
    assert_eq(1, start->pT1->pToken->position.col);
    assert_eq(1, start->pT1->pToken->end_position.row);
    assert_eq(1, start->pT1->pToken->end_position.col);
    assert(p_position_valid(start->pT1->pA->position));
    assert_eq(2, start->pT1->pA->position.row);
    assert_eq(1, start->pT1->pA->position.col);
    assert_eq(3, start->pT1->pA->end_position.row);
    assert_eq(1, start->pT1->pA->end_position.col);
    assert_eq(1, start->pT1->position.row);
    assert_eq(1, start->pT1->position.col);
    assert_eq(3, start->pT1->end_position.row);
    assert_eq(1, start->pT1->end_position.col);

    assert_eq(1, start->position.row);
    assert_eq(1, start->position.col);
    assert_eq(3, start->end_position.row);
    assert_eq(1, start->end_position.col);

    input = "a";
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start->pT1->pToken->position.row);
    assert_eq(1, start->pT1->pToken->position.col);
    assert_eq(1, start->pT1->pToken->end_position.row);
    assert_eq(1, start->pT1->pToken->end_position.col);
    assert(!p_position_valid(start->pT1->pA->position));
    assert_eq(1, start->pT1->position.row);
    assert_eq(1, start->pT1->position.col);
    assert_eq(1, start->pT1->end_position.row);
    assert_eq(1, start->pT1->end_position.col);

    assert_eq(1, start->position.row);
    assert_eq(1, start->position.col);
    assert_eq(1, start->end_position.row);
    assert_eq(1, start->end_position.col);

    return 0;
}
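The removed position tests above, and the parser tests that follow, all drive the generated C API through the same sequence of calls. A condensed sketch of that pattern, assuming a parser generated as testparser.h with a Start result type and 1-based position fields, as exercised above (illustrative only, not part of the change set):

#include <string.h>
#include "testparser.h"  /* generated parser; names taken from the tests above */

/* Parse `input` and return the 1-based start row of the parse result,
 * or -1 on a parse failure. */
static int start_row(char const * input)
{
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) != P_SUCCESS)
    {
        return -1;
    }
    Start * start = p_result(&context);
    return (int)start->position.row;
}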
@@ -1,104 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "\na\n bb ccc";
    p_context_t context;
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    Start * start = p_result(&context);

    assert_eq(2, start.pT1.pToken.position.row);
    assert_eq(1, start.pT1.pToken.position.col);
    assert_eq(2, start.pT1.pToken.end_position.row);
    assert_eq(1, start.pT1.pToken.end_position.col);
    assert(start.pT1.pA.position.valid);
    assert_eq(3, start.pT1.pA.position.row);
    assert_eq(3, start.pT1.pA.position.col);
    assert_eq(3, start.pT1.pA.end_position.row);
    assert_eq(8, start.pT1.pA.end_position.col);
    assert_eq(2, start.pT1.position.row);
    assert_eq(1, start.pT1.position.col);
    assert_eq(3, start.pT1.end_position.row);
    assert_eq(8, start.pT1.end_position.col);

    assert_eq(2, start.position.row);
    assert_eq(1, start.position.col);
    assert_eq(3, start.end_position.row);
    assert_eq(8, start.end_position.col);

    input = "a\nbb";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start.pT1.pToken.position.row);
    assert_eq(1, start.pT1.pToken.position.col);
    assert_eq(1, start.pT1.pToken.end_position.row);
    assert_eq(1, start.pT1.pToken.end_position.col);
    assert(start.pT1.pA.position.valid);
    assert_eq(2, start.pT1.pA.position.row);
    assert_eq(1, start.pT1.pA.position.col);
    assert_eq(2, start.pT1.pA.end_position.row);
    assert_eq(2, start.pT1.pA.end_position.col);
    assert_eq(1, start.pT1.position.row);
    assert_eq(1, start.pT1.position.col);
    assert_eq(2, start.pT1.end_position.row);
    assert_eq(2, start.pT1.end_position.col);

    assert_eq(1, start.position.row);
    assert_eq(1, start.position.col);
    assert_eq(2, start.end_position.row);
    assert_eq(2, start.end_position.col);

    input = "a\nc\nc";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start.pT1.pToken.position.row);
    assert_eq(1, start.pT1.pToken.position.col);
    assert_eq(1, start.pT1.pToken.end_position.row);
    assert_eq(1, start.pT1.pToken.end_position.col);
    assert(start.pT1.pA.position.valid);
    assert_eq(2, start.pT1.pA.position.row);
    assert_eq(1, start.pT1.pA.position.col);
    assert_eq(3, start.pT1.pA.end_position.row);
    assert_eq(1, start.pT1.pA.end_position.col);
    assert_eq(1, start.pT1.position.row);
    assert_eq(1, start.pT1.position.col);
    assert_eq(3, start.pT1.end_position.row);
    assert_eq(1, start.pT1.end_position.col);

    assert_eq(1, start.position.row);
    assert_eq(1, start.position.col);
    assert_eq(3, start.end_position.row);
    assert_eq(1, start.end_position.col);

    input = "a";
    p_context_init(&context, input);
    assert(p_parse(&context) == P_SUCCESS);
    start = p_result(&context);

    assert_eq(1, start.pT1.pToken.position.row);
    assert_eq(1, start.pT1.pToken.position.col);
    assert_eq(1, start.pT1.pToken.end_position.row);
    assert_eq(1, start.pT1.pToken.end_position.col);
    assert(!start.pT1.pA.position.valid);
    assert_eq(1, start.pT1.position.row);
    assert_eq(1, start.pT1.position.col);
    assert_eq(1, start.pT1.end_position.row);
    assert_eq(1, start.pT1.end_position.col);

    assert_eq(1, start.position.row);
    assert_eq(1, start.position.col);
    assert_eq(1, start.end_position.row);
    assert_eq(1, start.end_position.col);
}
@@ -1,415 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include "testutils.h"

int main(int argc, char * argv[])
{
    const char * input =
        "# 0\n"
        "def byte_val() -> byte\n"
        "{\n"
        " return 0x42;\n"
        "}\n"
        "\n"
        "# 1\n"
        "def short_val() -> short\n"
        "{\n"
        " return 0x4242;\n"
        "}\n"
        "\n"
        "# 2\n"
        "def int_val() -> int\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 3\n"
        "def long_val() -> long\n"
        "{\n"
        " return 0x4242_4242_4242_4242;\n"
        "}\n"
        "\n"
        "# 4\n"
        "def ssize_t_val() -> ssize_t\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 5\n"
        "def byte_to_short() -> short\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 6\n"
        "def byte_to_int() -> int\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 7\n"
        "def byte_to_long() -> long\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 8\n"
        "def byte_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return byte_val();\n"
        "}\n"
        "\n"
        "# 9\n"
        "def short_to_byte() -> byte\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 10\n"
        "def short_to_int() -> int\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 11\n"
        "def short_to_long() -> long\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 12\n"
        "def short_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return short_val();\n"
        "}\n"
        "\n"
        "# 13\n"
        "def int_to_byte() -> byte\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 14\n"
        "def int_to_short() -> short\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 15\n"
        "def int_to_long() -> long\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 16\n"
        "def int_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return int_val();\n"
        "}\n"
        "\n"
        "# 17\n"
        "def long_to_byte() -> byte\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 18\n"
        "def long_to_short() -> short\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 19\n"
        "def long_to_int() -> int\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 20\n"
        "def long_to_ssize_t() -> ssize_t\n"
        "{\n"
        " return long_val();\n"
        "}\n"
        "\n"
        "# 21\n"
        "def ssize_t_to_byte() -> byte\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 22\n"
        "def ssize_t_to_short() -> short\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 23\n"
        "def ssize_t_to_int() -> int\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 24\n"
        "def ssize_t_to_long() -> long\n"
        "{\n"
        " return ssize_t_val();\n"
        "}\n"
        "\n"
        "# 25\n"
        "def ubyte_val() -> ubyte\n"
        "{\n"
        " return 0x42;\n"
        "}\n"
        "\n"
        "# 26\n"
        "def ushort_val() -> ushort\n"
        "{\n"
        " return 0x4242;\n"
        "}\n"
        "\n"
        "# 27\n"
        "def uint_val() -> uint\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 28\n"
        "def ulong_val() -> ulong\n"
        "{\n"
        " return 0x4242_4242_4242_4242;\n"
        "}\n"
        "\n"
        "# 29\n"
        "def size_t_val() -> size_t\n"
        "{\n"
        " return 0x42424242;\n"
        "}\n"
        "\n"
        "# 30\n"
        "def ubyte_to_ushort() -> ushort\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 31\n"
        "def ubyte_to_uint() -> uint\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 32\n"
        "def ubyte_to_ulong() -> ulong\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 33\n"
        "def ubyte_to_size_t() -> size_t\n"
        "{\n"
        " return ubyte_val();\n"
        "}\n"
        "\n"
        "# 34\n"
        "def ushort_to_ubyte() -> ubyte\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 35\n"
        "def ushort_to_uint() -> uint\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 36\n"
        "def ushort_to_ulong() -> ulong\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 37\n"
        "def ushort_to_size_t() -> size_t\n"
        "{\n"
        " return ushort_val();\n"
        "}\n"
        "\n"
        "# 38\n"
        "def uint_to_ubyte() -> ubyte\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 39\n"
        "def uint_to_ushort() -> ushort\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 40\n"
        "def uint_to_ulong() -> ulong\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 41\n"
        "def uint_to_size_t() -> size_t\n"
        "{\n"
        " return uint_val();\n"
        "}\n"
        "\n"
        "# 42\n"
        "def ulong_to_ubyte() -> ubyte\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 43\n"
        "def ulong_to_ushort() -> ushort\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 44\n"
        "def ulong_to_uint() -> uint\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 45\n"
        "def ulong_to_size_t() -> size_t\n"
        "{\n"
        " return ulong_val();\n"
        "}\n"
        "\n"
        "# 46\n"
        "def size_t_to_ubyte() -> ubyte\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 47\n"
        "def size_t_to_ushort() -> ushort\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 48\n"
        "def size_t_to_int() -> int\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 49\n"
        "def size_t_to_ulong() -> ulong\n"
        "{\n"
        " return size_t_val();\n"
        "}\n"
        "\n"
        "# 50\n"
        "def main() -> int\n"
        "{\n"
        " return int_val();\n"
        "}\n";
    struct
    {
        const char * name;
        p_token_t token;
    } expected[] = {
        {"byte_val", TOKEN_byte},
        {"short_val", TOKEN_short},
        {"int_val", TOKEN_int},
        {"long_val", TOKEN_long},
        {"ssize_t_val", TOKEN_ssize_t},
        {"byte_to_short", TOKEN_short},
        {"byte_to_int", TOKEN_int},
        {"byte_to_long", TOKEN_long},
        {"byte_to_ssize_t", TOKEN_ssize_t},
        {"short_to_byte", TOKEN_byte},
        {"short_to_int", TOKEN_int},
        {"short_to_long", TOKEN_long},
        {"short_to_ssize_t", TOKEN_ssize_t},
        {"int_to_byte", TOKEN_byte},
        {"int_to_short", TOKEN_short},
        {"int_to_long", TOKEN_long},
        {"int_to_ssize_t", TOKEN_ssize_t},
        {"long_to_byte", TOKEN_byte},
        {"long_to_short", TOKEN_short},
        {"long_to_int", TOKEN_int},
        {"long_to_ssize_t", TOKEN_ssize_t},
        {"ssize_t_to_byte", TOKEN_byte},
        {"ssize_t_to_short", TOKEN_short},
        {"ssize_t_to_int", TOKEN_int},
        {"ssize_t_to_long", TOKEN_long},
        {"ubyte_val", TOKEN_ubyte},
        {"ushort_val", TOKEN_ushort},
        {"uint_val", TOKEN_uint},
        {"ulong_val", TOKEN_ulong},
        {"size_t_val", TOKEN_size_t},
        {"ubyte_to_ushort", TOKEN_ushort},
        {"ubyte_to_uint", TOKEN_uint},
        {"ubyte_to_ulong", TOKEN_ulong},
        {"ubyte_to_size_t", TOKEN_size_t},
        {"ushort_to_ubyte", TOKEN_ubyte},
        {"ushort_to_uint", TOKEN_uint},
        {"ushort_to_ulong", TOKEN_ulong},
        {"ushort_to_size_t", TOKEN_size_t},
        {"uint_to_ubyte", TOKEN_ubyte},
        {"uint_to_ushort", TOKEN_ushort},
        {"uint_to_ulong", TOKEN_ulong},
        {"uint_to_size_t", TOKEN_size_t},
        {"ulong_to_ubyte", TOKEN_ubyte},
        {"ulong_to_ushort", TOKEN_ushort},
        {"ulong_to_uint", TOKEN_uint},
        {"ulong_to_size_t", TOKEN_size_t},
        {"size_t_to_ubyte", TOKEN_ubyte},
        {"size_t_to_ushort", TOKEN_ushort},
        {"size_t_to_int", TOKEN_int},
        {"size_t_to_ulong", TOKEN_ulong},
        {"main", TOKEN_int},
    };
    p_context_t context;
    p_context_init(&context, (const uint8_t *)input, strlen(input));
    size_t result = p_parse(&context);
    assert_eq(P_SUCCESS, result);
    PModule * pmod = p_result(&context);
    PModuleItems * pmis = pmod->pModuleItems;
    PFunctionDefinition ** pfds;
    size_t n_pfds = 0u;
    while (pmis != NULL)
    {
        PModuleItem * pmi = pmis->pModuleItem;
        if (pmi->pFunctionDefinition != NULL)
        {
            n_pfds++;
        }
        pmis = pmis->pModuleItems;
    }
    pfds = malloc(n_pfds * sizeof(PModuleItems *));
    pmis = pmod->pModuleItems;
    size_t pfd_i = n_pfds;
    while (pmis != NULL)
    {
        PModuleItem * pmi = pmis->pModuleItem;
        PFunctionDefinition * pfd = pmi->pFunctionDefinition;
        if (pfd != NULL)
        {
            pfd_i--;
            assert(pfd_i < n_pfds);
            pfds[pfd_i] = pfd;
        }
        pmis = pmis->pModuleItems;
    }
    assert_eq(51, n_pfds);
    for (size_t i = 0; i < n_pfds; i++)
    {
        if (strncmp(expected[i].name, (const char *)pfds[i]->name->pvalue.s, strlen(expected[i].name)) != 0 ||
            (expected[i].token != pfds[i]->returntype->pType->pTypeBase->pToken1->token))
        {
            fprintf(stderr, "Index %lu: expected %s/%u, got %u\n", i, expected[i].name, expected[i].token, pfds[i]->returntype->pType->pTypeBase->pToken1->token);
        }
    }

    return 0;
}
@@ -1,408 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
    return 0;
}

unittest
{
    string input = "
# 0
def byte_val() -> byte
{
  return 0x42;
}

# 1
def short_val() -> short
{
  return 0x4242;
}

# 2
def int_val() -> int
{
  return 0x42424242;
}

# 3
def long_val() -> long
{
  return 0x4242_4242_4242_4242;
}

# 4
def ssize_t_val() -> ssize_t
{
  return 0x42424242;
}

# 5
def byte_to_short() -> short
{
  return byte_val();
}

# 6
def byte_to_int() -> int
{
  return byte_val();
}

# 7
def byte_to_long() -> long
{
  return byte_val();
}

# 8
def byte_to_ssize_t() -> ssize_t
{
  return byte_val();
}

# 9
def short_to_byte() -> byte
{
  return short_val();
}

# 10
def short_to_int() -> int
{
  return short_val();
}

# 11
def short_to_long() -> long
{
  return short_val();
}

# 12
def short_to_ssize_t() -> ssize_t
{
  return short_val();
}

# 13
def int_to_byte() -> byte
{
  return int_val();
}

# 14
def int_to_short() -> short
{
  return int_val();
}

# 15
def int_to_long() -> long
{
  return int_val();
}

# 16
def int_to_ssize_t() -> ssize_t
{
  return int_val();
}

# 17
def long_to_byte() -> byte
{
  return long_val();
}

# 18
def long_to_short() -> short
{
  return long_val();
}

# 19
def long_to_int() -> int
{
  return long_val();
}

# 20
def long_to_ssize_t() -> ssize_t
{
  return long_val();
}

# 21
def ssize_t_to_byte() -> byte
{
  return ssize_t_val();
}

# 22
def ssize_t_to_short() -> short
{
  return ssize_t_val();
}

# 23
def ssize_t_to_int() -> int
{
  return ssize_t_val();
}

# 24
def ssize_t_to_long() -> long
{
  return ssize_t_val();
}

# 25
def ubyte_val() -> ubyte
{
  return 0x42;
}

# 26
def ushort_val() -> ushort
{
  return 0x4242;
}

# 27
def uint_val() -> uint
{
  return 0x42424242;
}

# 28
def ulong_val() -> ulong
{
  return 0x4242_4242_4242_4242;
}

# 29
def size_t_val() -> size_t
{
  return 0x42424242;
}

# 30
def ubyte_to_ushort() -> ushort
{
  return ubyte_val();
}

# 31
def ubyte_to_uint() -> uint
{
  return ubyte_val();
}

# 32
def ubyte_to_ulong() -> ulong
{
  return ubyte_val();
}

# 33
def ubyte_to_size_t() -> size_t
{
  return ubyte_val();
}

# 34
def ushort_to_ubyte() -> ubyte
{
  return ushort_val();
}

# 35
def ushort_to_uint() -> uint
{
  return ushort_val();
}

# 36
def ushort_to_ulong() -> ulong
{
  return ushort_val();
}

# 37
def ushort_to_size_t() -> size_t
{
  return ushort_val();
}

# 38
def uint_to_ubyte() -> ubyte
{
  return uint_val();
}

# 39
def uint_to_ushort() -> ushort
{
  return uint_val();
}

# 40
def uint_to_ulong() -> ulong
{
  return uint_val();
}

# 41
def uint_to_size_t() -> size_t
{
  return uint_val();
}

# 42
def ulong_to_ubyte() -> ubyte
{
  return ulong_val();
}

# 43
def ulong_to_ushort() -> ushort
{
  return ulong_val();
}

# 44
def ulong_to_uint() -> uint
{
  return ulong_val();
}

# 45
def ulong_to_size_t() -> size_t
{
  return ulong_val();
}

# 46
def size_t_to_ubyte() -> ubyte
{
  return size_t_val();
}

# 47
def size_t_to_ushort() -> ushort
{
  return size_t_val();
}

# 48
def size_t_to_int() -> int
{
  return size_t_val();
}

# 49
def size_t_to_ulong() -> ulong
{
  return size_t_val();
}

# 50
def main() -> int
{
  return int_val();
}
";
    struct Expected
    {
        string name;
        p_token_t token;
    }
    Expected[] expected = [
        Expected("byte_val", TOKEN_byte),
        Expected("short_val", TOKEN_short),
        Expected("int_val", TOKEN_int),
        Expected("long_val", TOKEN_long),
        Expected("ssize_t_val", TOKEN_ssize_t),
        Expected("byte_to_short", TOKEN_short),
        Expected("byte_to_int", TOKEN_int),
        Expected("byte_to_long", TOKEN_long),
        Expected("byte_to_ssize_t", TOKEN_ssize_t),
        Expected("short_to_byte", TOKEN_byte),
        Expected("short_to_int", TOKEN_int),
        Expected("short_to_long", TOKEN_long),
        Expected("short_to_ssize_t", TOKEN_ssize_t),
        Expected("int_to_byte", TOKEN_byte),
        Expected("int_to_short", TOKEN_short),
        Expected("int_to_long", TOKEN_long),
        Expected("int_to_ssize_t", TOKEN_ssize_t),
        Expected("long_to_byte", TOKEN_byte),
        Expected("long_to_short", TOKEN_short),
        Expected("long_to_int", TOKEN_int),
        Expected("long_to_ssize_t", TOKEN_ssize_t),
        Expected("ssize_t_to_byte", TOKEN_byte),
        Expected("ssize_t_to_short", TOKEN_short),
        Expected("ssize_t_to_int", TOKEN_int),
        Expected("ssize_t_to_long", TOKEN_long),
        Expected("ubyte_val", TOKEN_ubyte),
        Expected("ushort_val", TOKEN_ushort),
        Expected("uint_val", TOKEN_uint),
        Expected("ulong_val", TOKEN_ulong),
        Expected("size_t_val", TOKEN_size_t),
        Expected("ubyte_to_ushort", TOKEN_ushort),
        Expected("ubyte_to_uint", TOKEN_uint),
        Expected("ubyte_to_ulong", TOKEN_ulong),
        Expected("ubyte_to_size_t", TOKEN_size_t),
        Expected("ushort_to_ubyte", TOKEN_ubyte),
        Expected("ushort_to_uint", TOKEN_uint),
        Expected("ushort_to_ulong", TOKEN_ulong),
        Expected("ushort_to_size_t", TOKEN_size_t),
        Expected("uint_to_ubyte", TOKEN_ubyte),
        Expected("uint_to_ushort", TOKEN_ushort),
        Expected("uint_to_ulong", TOKEN_ulong),
        Expected("uint_to_size_t", TOKEN_size_t),
        Expected("ulong_to_ubyte", TOKEN_ubyte),
        Expected("ulong_to_ushort", TOKEN_ushort),
        Expected("ulong_to_uint", TOKEN_uint),
        Expected("ulong_to_size_t", TOKEN_size_t),
        Expected("size_t_to_ubyte", TOKEN_ubyte),
        Expected("size_t_to_ushort", TOKEN_ushort),
        Expected("size_t_to_int", TOKEN_int),
        Expected("size_t_to_ulong", TOKEN_ulong),
        Expected("main", TOKEN_int),
    ];
    p_context_t context;
    p_context_init(&context, input);
    size_t result = p_parse(&context);
    assert_eq(P_SUCCESS, result);
    PModule * pmod = p_result(&context);
|
|
||||||
PModuleItems * pmis = pmod.pModuleItems;
|
|
||||||
PFunctionDefinition *[] pfds;
|
|
||||||
while (pmis !is null)
|
|
||||||
{
|
|
||||||
PModuleItem * pmi = pmis.pModuleItem;
|
|
||||||
if (pmi is null)
|
|
||||||
{
|
|
||||||
stderr.writeln("pmi is null!!!?");
|
|
||||||
assert(0);
|
|
||||||
}
|
|
||||||
PFunctionDefinition * pfd = pmi.pFunctionDefinition;
|
|
||||||
if (pfd !is null)
|
|
||||||
{
|
|
||||||
pfds = [pfd] ~ pfds;
|
|
||||||
}
|
|
||||||
pmis = pmis.pModuleItems;
|
|
||||||
}
|
|
||||||
assert_eq(51, pfds.length);
|
|
||||||
for (size_t i = 0; i < pfds.length; i++)
|
|
||||||
{
|
|
||||||
if ((expected[i].name != pfds[i].name.pvalue.s) ||
|
|
||||||
(expected[i].token != pfds[i].returntype.pType.pTypeBase.pToken1.token))
|
|
||||||
{
|
|
||||||
stderr.writeln("Index ", i, ": expected ", expected[i].name, "/", expected[i].token, ", got ", pfds[i].name.pvalue.s, "/", pfds[i].returntype.pType.pTypeBase.pToken1.token);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,55 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
PStartS * start = p_result(&context);
assert(start->pItems1 != NULL);
assert(start->pItems != NULL);
PItemsS * items = start->pItems;
assert(items->pItem != NULL);
assert(items->pItem->pToken1 != NULL);
assert_eq(TOKEN_a, items->pItem->pToken1->token);
assert_eq(11, items->pItem->pToken1->pvalue);
assert(items->pItemsMore != NULL);
PItemsMoreS * itemsmore = items->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem != NULL);
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore != NULL);
itemsmore = itemsmore->pItemsMore;
assert(itemsmore->pItem != NULL);
assert(itemsmore->pItem->pToken1 != NULL);
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
assert(itemsmore->pItemsMore == NULL);

input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems == NULL);

input = "2 1";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start->pItems != NULL);
assert(start->pItems->pItem != NULL);
assert(start->pItems->pItem->pDual != NULL);
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
assert(start->pItems->pItem->pDual->pOne2 != NULL);
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
assert(start->pItems->pItem->pDual->pOne1 == NULL);

return 0;
}
@@ -1,57 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "a, ((b)), b";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
PStartS * start = p_result(&context);
assert(start.pItems1 !is null);
assert(start.pItems !is null);
PItemsS * items = start.pItems;
assert(items.pItem !is null);
assert(items.pItem.pToken1 !is null);
assert_eq(TOKEN_a, items.pItem.pToken1.token);
assert_eq(11, items.pItem.pToken1.pvalue);
assert(items.pItemsMore !is null);
PItemsMoreS * itemsmore = items.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem !is null);
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore !is null);
itemsmore = itemsmore.pItemsMore;
assert(itemsmore.pItem !is null);
assert(itemsmore.pItem.pToken1 !is null);
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
assert(itemsmore.pItemsMore is null);

input = "";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems is null);

input = "2 1";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
start = p_result(&context);
assert(start.pItems !is null);
assert(start.pItems.pItem !is null);
assert(start.pItems.pItem.pDual !is null);
assert(start.pItems.pItem.pDual.pTwo1 !is null);
assert(start.pItems.pItem.pDual.pOne2 !is null);
assert(start.pItems.pItem.pDual.pTwo2 is null);
assert(start.pItems.pItem.pDual.pOne1 is null);
}
@@ -1,84 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "abbccc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(1, start->pT1->pToken->position.row);
assert_eq(1, start->pT1->pToken->position.col);
assert_eq(1, start->pT1->pToken->end_position.row);
assert_eq(1, start->pT1->pToken->end_position.col);
assert_eq(1, start->pT1->position.row);
assert_eq(1, start->pT1->position.col);
assert_eq(1, start->pT1->end_position.row);
assert_eq(1, start->pT1->end_position.col);

assert_eq(1, start->pT2->pToken->position.row);
assert_eq(2, start->pT2->pToken->position.col);
assert_eq(1, start->pT2->pToken->end_position.row);
assert_eq(3, start->pT2->pToken->end_position.col);
assert_eq(1, start->pT2->position.row);
assert_eq(2, start->pT2->position.col);
assert_eq(1, start->pT2->end_position.row);
assert_eq(3, start->pT2->end_position.col);

assert_eq(1, start->pT3->pToken->position.row);
assert_eq(4, start->pT3->pToken->position.col);
assert_eq(1, start->pT3->pToken->end_position.row);
assert_eq(6, start->pT3->pToken->end_position.col);
assert_eq(1, start->pT3->position.row);
assert_eq(4, start->pT3->position.col);
assert_eq(1, start->pT3->end_position.row);
assert_eq(6, start->pT3->end_position.col);

assert_eq(1, start->position.row);
assert_eq(1, start->position.col);
assert_eq(1, start->end_position.row);
assert_eq(6, start->end_position.col);

input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(3, start->pT1->pToken->position.row);
assert_eq(3, start->pT1->pToken->position.col);
assert_eq(3, start->pT1->pToken->end_position.row);
assert_eq(4, start->pT1->pToken->end_position.col);
assert_eq(3, start->pT1->position.row);
assert_eq(3, start->pT1->position.col);
assert_eq(3, start->pT1->end_position.row);
assert_eq(4, start->pT1->end_position.col);

assert_eq(4, start->pT2->pToken->position.row);
assert_eq(1, start->pT2->pToken->position.col);
assert_eq(5, start->pT2->pToken->end_position.row);
assert_eq(2, start->pT2->pToken->end_position.col);
assert_eq(4, start->pT2->position.row);
assert_eq(1, start->pT2->position.col);
assert_eq(5, start->pT2->end_position.row);
assert_eq(2, start->pT2->end_position.col);

assert_eq(7, start->pT3->pToken->position.row);
assert_eq(6, start->pT3->pToken->position.col);
assert_eq(7, start->pT3->pToken->end_position.row);
assert_eq(6, start->pT3->pToken->end_position.col);
assert_eq(7, start->pT3->position.row);
assert_eq(6, start->pT3->position.col);
assert_eq(7, start->pT3->end_position.row);
assert_eq(6, start->pT3->end_position.col);

assert_eq(3, start->position.row);
assert_eq(3, start->position.col);
assert_eq(7, start->end_position.row);
assert_eq(6, start->end_position.col);

return 0;
}
@@ -1,86 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "abbccc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(1, start.pT1.pToken.position.row);
assert_eq(1, start.pT1.pToken.position.col);
assert_eq(1, start.pT1.pToken.end_position.row);
assert_eq(1, start.pT1.pToken.end_position.col);
assert_eq(1, start.pT1.position.row);
assert_eq(1, start.pT1.position.col);
assert_eq(1, start.pT1.end_position.row);
assert_eq(1, start.pT1.end_position.col);

assert_eq(1, start.pT2.pToken.position.row);
assert_eq(2, start.pT2.pToken.position.col);
assert_eq(1, start.pT2.pToken.end_position.row);
assert_eq(3, start.pT2.pToken.end_position.col);
assert_eq(1, start.pT2.position.row);
assert_eq(2, start.pT2.position.col);
assert_eq(1, start.pT2.end_position.row);
assert_eq(3, start.pT2.end_position.col);

assert_eq(1, start.pT3.pToken.position.row);
assert_eq(4, start.pT3.pToken.position.col);
assert_eq(1, start.pT3.pToken.end_position.row);
assert_eq(6, start.pT3.pToken.end_position.col);
assert_eq(1, start.pT3.position.row);
assert_eq(4, start.pT3.position.col);
assert_eq(1, start.pT3.end_position.row);
assert_eq(6, start.pT3.end_position.col);

assert_eq(1, start.position.row);
assert_eq(1, start.position.col);
assert_eq(1, start.end_position.row);
assert_eq(6, start.end_position.col);

input = "\n\n bb\nc\ncc\n\n a";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);

assert_eq(3, start.pT1.pToken.position.row);
assert_eq(3, start.pT1.pToken.position.col);
assert_eq(3, start.pT1.pToken.end_position.row);
assert_eq(4, start.pT1.pToken.end_position.col);
assert_eq(3, start.pT1.position.row);
assert_eq(3, start.pT1.position.col);
assert_eq(3, start.pT1.end_position.row);
assert_eq(4, start.pT1.end_position.col);

assert_eq(4, start.pT2.pToken.position.row);
assert_eq(1, start.pT2.pToken.position.col);
assert_eq(5, start.pT2.pToken.end_position.row);
assert_eq(2, start.pT2.pToken.end_position.col);
assert_eq(4, start.pT2.position.row);
assert_eq(1, start.pT2.position.col);
assert_eq(5, start.pT2.end_position.row);
assert_eq(2, start.pT2.end_position.col);

assert_eq(7, start.pT3.pToken.position.row);
assert_eq(6, start.pT3.pToken.position.col);
assert_eq(7, start.pT3.pToken.end_position.row);
assert_eq(6, start.pT3.pToken.end_position.col);
assert_eq(7, start.pT3.position.row);
assert_eq(6, start.pT3.position.col);
assert_eq(7, start.pT3.end_position.row);
assert_eq(6, start.pT3.end_position.col);

assert_eq(3, start.position.row);
assert_eq(3, start.position.col);
assert_eq(7, start.end_position.row);
assert_eq(6, start.end_position.col);
}
@@ -12,31 +12,28 @@ int main()
 input = "a\n123\na a";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context).row == 3);
-assert(p_position(&context).col == 4);
-assert(p_token(&context) == TOKEN_a);
+assert(p_position(&context).row == 2);
+assert(p_position(&context).col == 3);
+assert(context.token == TOKEN_a);
 
 input = "12";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context).row == 1);
-assert(p_position(&context).col == 1);
-assert(p_token(&context) == TOKEN_num);
+assert(p_position(&context).row == 0);
+assert(p_position(&context).col == 0);
+assert(context.token == TOKEN_num);
 
 input = "a 12\n\nab";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_INPUT);
-assert(p_position(&context).row == 3);
-assert(p_position(&context).col == 2);
+assert(p_position(&context).row == 2);
+assert(p_position(&context).col == 1);
 
 input = "a 12\n\na\n\n77\na \xAA";
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_DECODE_ERROR);
-assert(p_position(&context).row == 6);
-assert(p_position(&context).col == 5);
+assert(p_position(&context).row == 5);
+assert(p_position(&context).col == 4);
 
-assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
-assert(strcmp(p_token_names[TOKEN_num], "num") == 0);
-
 return 0;
 }
@@ -16,25 +16,22 @@ unittest
 input = "a\n123\na a";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context) == p_position_t(3, 4));
-assert(p_token(&context) == TOKEN_a);
+assert(p_position(&context) == p_position_t(2, 3));
+assert(context.token == TOKEN_a);
 
 input = "12";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context) == p_position_t(1, 1));
-assert(p_token(&context) == TOKEN_num);
+assert(p_position(&context) == p_position_t(0, 0));
+assert(context.token == TOKEN_num);
 
 input = "a 12\n\nab";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_INPUT);
-assert(p_position(&context) == p_position_t(3, 2));
+assert(p_position(&context) == p_position_t(2, 1));
 
 input = "a 12\n\na\n\n77\na \xAA";
 p_context_init(&context, input);
 assert(p_parse(&context) == P_DECODE_ERROR);
-assert(p_position(&context) == p_position_t(6, 5));
-
-assert(p_token_names[TOKEN_a] == "a");
-assert(p_token_names[TOKEN_num] == "num");
+assert(p_position(&context) == p_position_t(5, 4));
 }
@@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}
@@ -1,15 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@@ -41,68 +41,50 @@ int main()
 p_context_t context;
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
-assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 1u);
+assert(token_info.position.row == 0u);
+assert(token_info.position.col == 0u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
-assert(token_info.position.col == 3u);
-assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 3u);
+assert(token_info.position.row == 0u);
+assert(token_info.position.col == 2u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
-assert(token_info.position.col == 5u);
-assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 5u);
+assert(token_info.position.row == 0u);
+assert(token_info.position.col == 4u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
-assert(token_info.position.col == 7u);
-assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 7u);
+assert(token_info.position.row == 0u);
+assert(token_info.position.col == 6u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_times);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 2u);
-assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 3u);
+assert(token_info.position.row == 1u);
+assert(token_info.position.col == 0u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 2u);
-assert(token_info.position.col == 5u);
-assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 5u);
+assert(token_info.position.row == 1u);
+assert(token_info.position.col == 4u);
 assert(token_info.length == 1u);
 assert(token_info.token == TOKEN_plus);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 2u);
-assert(token_info.position.col == 7u);
-assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 9u);
+assert(token_info.position.row == 1u);
+assert(token_info.position.col == 6u);
 assert(token_info.length == 3u);
 assert(token_info.token == TOKEN_int);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 2u);
-assert(token_info.position.col == 10u);
-assert(token_info.end_position.row == 2u);
-assert(token_info.end_position.col == 10u);
+assert(token_info.position.row == 1u);
+assert(token_info.position.col == 9u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);
 
 p_context_init(&context, (uint8_t const *)"", 0u);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info.position.row == 1u);
-assert(token_info.position.col == 1u);
-assert(token_info.end_position.row == 1u);
-assert(token_info.end_position.col == 1u);
+assert(token_info.position.row == 0u);
+assert(token_info.position.col == 0u);
 assert(token_info.length == 0u);
 assert(token_info.token == TOKEN___EOF);
 
@@ -47,23 +47,23 @@ unittest
 p_context_t context;
 p_context_init(&context, input);
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(0, 0), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 3), p_position_t(1, 3), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(0, 2), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 5), p_position_t(1, 5), 1, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(0, 4), 1, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 7), p_position_t(1, 7), 1, TOKEN_times));
+assert(token_info == p_token_info_t(p_position_t(0, 6), 1, TOKEN_times));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(2, 1), p_position_t(2, 3), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 0), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(2, 5), p_position_t(2, 5), 1, TOKEN_plus));
+assert(token_info == p_token_info_t(p_position_t(1, 4), 1, TOKEN_plus));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(2, 7), p_position_t(2, 9), 3, TOKEN_int));
+assert(token_info == p_token_info_t(p_position_t(1, 6), 3, TOKEN_int));
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(2, 10), p_position_t(2, 10), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(1, 9), 0, TOKEN___EOF));
 
 p_context_init(&context, "");
 assert(p_lex(&context, &token_info) == P_SUCCESS);
-assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 0, TOKEN___EOF));
+assert(token_info == p_token_info_t(p_position_t(0, 0), 0, TOKEN___EOF));
 }
@@ -1,20 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
char const * input = "abc.def";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass1\n");

input = "abc . abc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass2\n");

return 0;
}
@@ -1,21 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = `abc.def`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
writeln("pass1");

input = `abc . abc`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
writeln("pass2");
}
@@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@@ -1,15 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@@ -1,45 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);
assert(start->r == NULL);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->a != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->r != NULL);
assert(start->pR == start->pR3);
assert(start->pR == start->r);
assert_eq(TOKEN_c, start->pR->pToken1->token);

input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert(start->r != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

return 0;
}
@@ -1,46 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);
assert(start.r is null);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
assert(start.pR3 != null);
assert(start.pR != null);
assert(start.r != null);
assert(start.pR == start.pR3);
assert(start.pR == start.r);
assert_eq(TOKEN_c, start.pR.pToken1.token);

input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);
}
@@ -1,22 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@@ -1,23 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@@ -1,42 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->pR == start->pR3);
assert_eq(TOKEN_c, start->pR->pToken1->token);

input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert(start->pR != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

return 0;
}
@@ -1,43 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
assert(start.pR3 != null);
assert(start.pR != null);
assert(start.pR == start.pR3);
assert_eq(TOKEN_c, start.pR.pToken1.token);

input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);
}
@@ -8,8 +8,8 @@ int main()
 p_context_t context;
 p_context_init(&context, (uint8_t const *)input, strlen(input));
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context).row == 1);
-assert(p_position(&context).col == 2);
+assert(p_position(&context).row == 0);
+assert(p_position(&context).col == 1);
 assert(context.token == TOKEN___EOF);
 
 input = "a b";
@@ -12,7 +12,7 @@ unittest
 p_context_t context;
 p_context_init(&context, input);
 assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
-assert(p_position(&context) == p_position_t(1, 2));
+assert(p_position(&context) == p_position_t(0, 1));
 assert(context.token == TOKEN___EOF);
 
 input = "a b";
@@ -1,56 +0,0 @@
#include "testparser.h"
#include "json_types.h"
#include <string.h>
#include <assert.h>

int main()
{
char const * input = "";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "{}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_OBJECT);

input = "[]";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_ARRAY);

input = "-45.6";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == -45.6);

input = "2E-2";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == 0.02);

input = "{\"hi\":true}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
JSONValue * o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(1, o->object.size);
assert(strcmp(o->object.entries[0].name, "hi") == 0);
assert(o->object.entries[0].value->id == JSON_TRUE);

input = "{\"ff\": false, \"nn\": null}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(2, o->object.size);
assert(strcmp(o->object.entries[0].name, "ff") == 0);
assert(o->object.entries[0].value->id == JSON_FALSE);
assert(strcmp(o->object.entries[1].name, "nn") == 0);
assert(o->object.entries[1].value->id == JSON_NULL);

return 0;
}
@@ -1,9 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
return 0;
}
@@ -1,8 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}
@@ -1,17 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "hi";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
assert(top->pToken != NULL);
assert_eq(TOKEN_hi, top->pToken->token);

return 0;
}
@@ -1,19 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "hi";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
Top * top = p_result(&context);
assert(top.pToken !is null);
assert_eq(TOKEN_hi, top.pToken.token);
}
@@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
char const * input = "aacc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 4200);

return 0;
}
@@ -1,20 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "aacc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abc";
p_context_init(&context, input);
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 4200);
}
@@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "b";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 8675309);

return 0;
}
@@ -1,20 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "b";
p_context_init(&context, input);
assert(p_parse(&context) == P_USER_TERMINATED);
assert(p_user_terminate_code(&context) == 8675309);
}
@@ -1,9 +1,6 @@
 #include <stdio.h>
 #include <assert.h>
 #include <stdbool.h>
-#include <stdlib.h>
-#include <string.h>
-#include "testutils.h"
 
 void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
 {
@@ -13,26 +10,3 @@ void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line)
 assert(false);
 }
 }
-
-void str_init(str_t * str, char const * cs)
-{
-size_t length = strlen(cs);
-str->cs = malloc(length + 1u);
-strcpy(str->cs, cs);
-}
-
-void str_append(str_t * str, char const * cs)
-{
-size_t length = strlen(str->cs);
-size_t length2 = strlen(cs);
-char * new_cs = malloc(length + length2 + 1u);
-memcpy(new_cs, str->cs, length);
-strcpy(&new_cs[length], cs);
-free(str->cs);
-str->cs = new_cs;
-}
-
-void str_free(str_t * str)
-{
-free(str->cs);
-}
@@ -5,15 +5,3 @@ void assert_eq_size_t_i(size_t expected, size_t actual, char const * file, size_t line);
 #define assert_eq(expected, actual) \
 assert_eq_size_t_i(expected, actual, __FILE__, __LINE__)
 
-typedef struct
-{
-char * cs;
-} str_t;
-
-void str_init(str_t * str, char const * cs);
-void str_append(str_t * str, char const * cs);
-void str_free(str_t * str);
-static inline char * str_cstr(str_t * str)
-{
-return str->cs;
-}