Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
| | d466189982 | |

38  .github/workflows/run-tests.yml (vendored)
@@ -1,38 +0,0 @@
name: Run Propane Tests

on:
  push:
    branches:
      - master
  pull_request:

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        ruby-version: ['3.4']

    steps:
      - name: Install dependencies (Linux)
        if: runner.os == 'Linux'
        run: sudo apt-get update && sudo apt-get install -y gcc gdc ldc

      - name: Install dependencies (macOS)
        if: runner.os == 'macOS'
        run: brew install gcc ldc

      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}

      - name: Install dependencies
        run: bundle install

      - name: Run tests
        run: rake all
119  CHANGELOG.md
@@ -1,122 +1,3 @@
## v2.3.0

### New Features

- Add \D, \S, \w, \W special character classes

### Improvements

- Include line numbers for pattern errors
- Improve performance in a few places
- Parallelize parser table generation on Linux hosts
- Add github workflow to run unit tests

### Fixes

- Fix a couple clang warnings for C backend
- Fix C backend not fully initializing pvalues when multiple ptypes are used with different sizes.
- Fix some user guide examples

## v2.2.1

### Fixes

- Fix GC issue for D backend when AST is enabled (#36)

## v2.2.0

### Improvements

- Allow multiple lexer modes to be specified for a lexer pattern (#35)
- Document p_decode_code_point() API function (#34)

## v2.1.1

### Fixes

- Field aliases for AST node fields could alias incorrect field when multiple rule alternatives present for one rule set (#33)

## v2.1.0

### Improvements

- Report rule name and line number for conflicting AST node field positions errors (#32)

## v2.0.0

### Improvements

- Log conflicting rules on reduce/reduce conflict (#31)
- Use 1-based row and column values for position values (#30)

### Fixes

- Fix named optional rules (#29)

### Upgrading

- Adjust all uses of p_position_t row and col values to expect 1-based instead
  of 0-based values.

## v1.5.1

### Improvements

- Improve performance (#28)

## v1.5.0

### New Features

- Track start and end text positions for tokens and rules in AST node structures (#27)
- Add warnings for shift/reduce conflicts to log file (#25)
- Add -w command line switch to treat warnings as errors and output to stderr (#26)
- Add rule field aliases (#24)

### Improvements

- Show line numbers of rules on conflict (#23)

## v1.4.0

### New Features

- Allow user to specify AST node name prefix or suffix
- Allow specifying the start rule name
- Allow rule terms to be marked as optional

### Improvements

- Give a better error message when a referenced ptype has not been declared

## v1.3.0

### New Features

- Add AST generation (#22)

## v1.2.0

### New Features

- Allow one line user code blocks (#21)
- Add backslash escape codes (#19)
- Add API to access unexpected token found (#18)
- Add token_names API (#17)
- Add D example to user guide for p_context_init() (#16)
- Allow user termination from lexer code blocks (#15)

### Fixes

- Fix generator hang when state transition cycle is present (#20)

## v1.1.0

### New Features

- Add user parser terminations (#13)
- Document generated parser API in user guide (#14)

## v1.0.0

- Initial release
2  Gemfile
@@ -1,9 +1,7 @@
source "https://rubygems.org"

gem "base64"
gem "rake"
gem "rspec"
gem "rdoc"
gem "redcarpet"
gem "syntax"
gem "simplecov"
48  Gemfile.lock
@@ -1,51 +1,37 @@
GEM
  remote: https://rubygems.org/
  specs:
    base64 (0.3.0)
    date (3.4.1)
    diff-lcs (1.6.2)
    docile (1.4.1)
    erb (5.0.2)
    psych (5.2.6)
      date
    diff-lcs (1.5.0)
    psych (5.0.1)
      stringio
    rake (13.3.0)
    rdoc (6.14.2)
      erb
    rake (13.0.6)
    rdoc (6.5.0)
      psych (>= 4.0.0)
    redcarpet (3.6.1)
    rspec (3.13.1)
      rspec-core (~> 3.13.0)
      rspec-expectations (~> 3.13.0)
      rspec-mocks (~> 3.13.0)
    rspec-core (3.13.5)
      rspec-support (~> 3.13.0)
    rspec-expectations (3.13.5)
    redcarpet (3.5.1)
    rspec (3.11.0)
      rspec-core (~> 3.11.0)
      rspec-expectations (~> 3.11.0)
      rspec-mocks (~> 3.11.0)
    rspec-core (3.11.0)
      rspec-support (~> 3.11.0)
    rspec-expectations (3.11.0)
      diff-lcs (>= 1.2.0, < 2.0)
      rspec-support (~> 3.13.0)
    rspec-mocks (3.13.5)
      rspec-support (~> 3.11.0)
    rspec-mocks (3.11.1)
      diff-lcs (>= 1.2.0, < 2.0)
      rspec-support (~> 3.13.0)
    rspec-support (3.13.4)
    simplecov (0.22.0)
      docile (~> 1.1)
      simplecov-html (~> 0.11)
      simplecov_json_formatter (~> 0.1)
    simplecov-html (0.13.2)
    simplecov_json_formatter (0.1.4)
    stringio (3.1.7)
      rspec-support (~> 3.11.0)
    rspec-support (3.11.0)
    stringio (3.0.4)
    syntax (1.2.2)

PLATFORMS
  ruby

DEPENDENCIES
  base64
  rake
  rdoc
  redcarpet
  rspec
  simplecov
  syntax

BUNDLED WITH
LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2010-2024 Josh Holtrop
Copyright (c) 2010-2023 Josh Holtrop

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
91  README.md
@@ -1,104 +1,21 @@
# The Propane Parser Generator

Propane is a LALR Parser Generator (LPG) which:
Propane is an LR Parser Generator (LPG) which:

* accepts LR(0), SLR, and LALR grammars
* generates a built-in lexer to tokenize input
* supports UTF-8 lexer inputs
* generates a table-driven shift/reduce parser to parse input in linear time
* targets C or D language outputs
* optionally supports automatic full AST generation
* generates a table-driven parser to parse input in linear time
* is MIT-licensed
* is distributable as a standalone Ruby script

## Installation

Propane is designed to be distributed as a stand-alone single file script that
can be copied into and versioned in a project's source tree.
The only requirement to run Propane is that the system has a Ruby interpreter
installed.
The latest release can be downloaded from [https://github.com/holtrop/propane/releases](https://github.com/holtrop/propane/releases).

Simply copy the `propane` executable script into the desired location within
the project to be built (typically the root of the repository) and mark it
executable.
TODO

## Usage

### Command Line Interface

Propane is typically invoked from the command-line as `./propane`.

    Usage: ./propane [options] <input-file> <output-file>
    Options:
      -h, --help  Show this usage and exit.
      --log LOG   Write log file. This will show all parser states and their
                  associated shifts and reduces. It can be helpful when
                  debugging a grammar.
      --version   Show program version and exit.
      -w          Treat warnings as errors. This option will treat shift/reduce
                  conflicts as fatal errors and will print them to stderr in
                  addition to the log file.

The user must specify the path to a Propane input grammar file and a path to an
output file.
The generated source code will be written to the output file.
If a log file path is specified, Propane will write a log file containing
detailed information about the parser states and transitions.

### Propane Grammar File

A Propane grammar file provides Propane with the patterns, tokens, grammar
rules, and user code blocks from which to build the generated lexer and parser.

Example grammar file:

```
<<
import std.math;
>>

# Parser values are unsigned integers.
ptype ulong;

# A few basic arithmetic operators.
token plus /\+/;
token times /\*/;
token power /\*\*/;
token integer /\d+/ <<
  ulong v;
  foreach (c; match)
  {
    v *= 10;
    v += (c - '0');
  }
  $$ = v;
>>
token lparen /\(/;
token rparen /\)/;
# Drop whitespace.
drop /\s+/;

Start -> E1 << $$ = $1; >>
E1 -> E2 << $$ = $1; >>
E1 -> E1 plus E2 << $$ = $1 + $3; >>
E2 -> E3 << $$ = $1; >>
E2 -> E2 times E3 << $$ = $1 * $3; >>
E3 -> E4 << $$ = $1; >>
E3 -> E3 power E4 <<
  $$ = pow($1, $3);
>>
E4 -> integer << $$ = $1; >>
E4 -> lparen E1 rparen << $$ = $2; >>
```

Grammar files can contain comment lines beginning with `#` which are ignored.
White space in the grammar file is also ignored.

It is convention to use the extension `.propane` for the Propane grammar file,
however any file name is accepted by Propane.

See [https://holtrop.github.io/propane/index.html](https://holtrop.github.io/propane/index.html) for the full User Guide.
TODO: Write usage instructions here

## Development
14  Rakefile
@@ -1,8 +1,5 @@
require "rake/clean"
require "rspec/core/rake_task"

CLEAN.include %w[spec/run gen .yardoc yard coverage dist]

task :build_dist do
  sh "ruby rb/build_dist.rb"
end
@@ -13,20 +10,9 @@ RSpec::Core::RakeTask.new(:spec, :example_pattern) do |task, args|
  end
end

# dspec task is useful to test the distributable release script, but is not
# useful for coverage information.
desc "Dist Specs"
task :dspec, [:example_string] => :build_dist do |task, args|
  ENV["dist_specs"] = "1"
  Rake::Task["spec"].execute(args)
  ENV.delete("dist_specs")
end

task :default => :spec

desc "Build user guide"
task :user_guide do
  system("ruby", "-Ilib", "rb/gen_user_guide.rb")
end

task :all => [:spec, :dspec, :user_guide]
1155  assets/parser.c.erb
File diff suppressed because it is too large

1864  assets/parser.d.erb
File diff suppressed because it is too large
@@ -1,198 +0,0 @@
/**
 * @file
 *
 * This file is generated by Propane.
 */

#pragma once

#include <stdint.h>
#include <stddef.h>

/**************************************************************************
 * Public types
 *************************************************************************/

/* Result codes. */
#define <%= @grammar.prefix.upcase %>SUCCESS 0u
#define <%= @grammar.prefix.upcase %>DECODE_ERROR 1u
#define <%= @grammar.prefix.upcase %>UNEXPECTED_INPUT 2u
#define <%= @grammar.prefix.upcase %>UNEXPECTED_TOKEN 3u
#define <%= @grammar.prefix.upcase %>DROP 4u
#define <%= @grammar.prefix.upcase %>EOF 5u
#define <%= @grammar.prefix.upcase %>USER_TERMINATED 6u

/** Token type. */
typedef <%= get_type_for(@grammar.terminate_token_id) %> <%= @grammar.prefix %>token_t;

/** Token IDs. */
<% @grammar.tokens.each_with_index do |token, index| %>
#define TOKEN_<%= token.code_name %> <%= index %>u
<% unless token.id == index %>
<% raise "Token ID (#{token.id}) does not match index (#{index}) for token #{token.name}!" %>
<% end %>
<% end %>
#define INVALID_TOKEN_ID <%= @grammar.invalid_token_id %>u
#define TERMINATE_TOKEN_ID <%= @grammar.terminate_token_id %>u

/** Code point type. */
typedef uint32_t <%= @grammar.prefix %>code_point_t;

/**
 * A structure to keep track of input position.
 *
 * This is useful for reporting errors, etc...
 */
typedef struct
{
    /** Input text row (0-based). */
    uint32_t row;

    /** Input text column (0-based). */
    uint32_t col;
} <%= @grammar.prefix %>position_t;

/** Return whether the position is valid. */
#define <%= @grammar.prefix %>position_valid(p) ((p).row != 0u)

/** User header code blocks. */
<%= @grammar.code_blocks.fetch("header", "") %>

<% if @grammar.ast %>
/** Parser values type. */
typedef <%= @grammar.ptype %> <%= @grammar.prefix %>value_t;
<% else %>
/** Parser values type(s). */
typedef union
{
<% @grammar.ptypes.each do |name, typestring| %>
    <%= typestring %> v_<%= name %>;
<% end %>
} <%= @grammar.prefix %>value_t;
<% end %>

<% if @grammar.ast %>
/** AST node types. @{ */
typedef struct <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>
{
    /* ASTNode fields must be present in the same order here. */
    <%= @grammar.prefix %>position_t position;
    <%= @grammar.prefix %>position_t end_position;
    <%= @grammar.prefix %>token_t token;
    <%= @grammar.prefix %>value_t pvalue;
} <%= @grammar.ast_prefix %>Token<%= @grammar.ast_suffix %>;

<% @parser.rule_sets.each do |name, rule_set| %>
<% next if name.start_with?("$") %>
<% next if rule_set.optional? %>
struct <%= name %>;
<% end %>

<% @parser.rule_sets.each do |name, rule_set| %>
<% next if name.start_with?("$") %>
<% next if rule_set.optional? %>
typedef struct <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>
{
    <%= @grammar.prefix %>position_t position;
    <%= @grammar.prefix %>position_t end_position;
<% rule_set.ast_fields.each do |fields| %>
    union
    {
<% fields.each do |field_name, type| %>
        struct <%= type %> * <%= field_name %>;
<% end %>
    };
<% end %>
} <%= @grammar.ast_prefix %><%= name %><%= @grammar.ast_suffix %>;

<% end %>
/** @} */
<% end %>

/** Lexed token information. */
typedef struct
{
    /** Text position of first code point in token. */
    <%= @grammar.prefix %>position_t position;

    /** Text position of last code point in token. */
    <%= @grammar.prefix %>position_t end_position;

    /** Number of input bytes used by the token. */
    size_t length;

    /** Token that was lexed. */
    <%= @grammar.prefix %>token_t token;

    /** Parser value associated with the token. */
    <%= @grammar.prefix %>value_t pvalue;
} <%= @grammar.prefix %>token_info_t;

/**
 * Lexer and parser context.
 *
 * The user must allocate an instance of this structure and pass it to any
 * public API function.
 */
typedef struct
{
    /* Lexer context data. */

    /** Input text. */
    uint8_t const * input;

    /** Input text length. */
    size_t input_length;

    /** Input text index (byte offset). */
    size_t input_index;

    /** Input text position (row/column). */
    <%= @grammar.prefix %>position_t text_position;

    /** Current lexer mode. */
    size_t mode;

    /* Parser context data. */

    /** Parse result value. */
<% if @grammar.ast %>
    <%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * parse_result;
<% else %>
    <%= @grammar.prefix %>value_t parse_result;
<% end %>

    /** Unexpected token received. */
    <%= @grammar.prefix %>token_t token;

    /** User terminate code. */
    size_t user_terminate_code;
} <%= @grammar.prefix %>context_t;

/**************************************************************************
 * Public data
 *************************************************************************/

/** Token names. */
extern const char * <%= @grammar.prefix %>token_names[];

void <%= @grammar.prefix %>context_init(<%= @grammar.prefix %>context_t * context, uint8_t const * input, size_t input_length);

size_t <%= @grammar.prefix %>decode_code_point(uint8_t const * input, size_t input_length,
    <%= @grammar.prefix %>code_point_t * out_code_point, uint8_t * out_code_point_length);

size_t <%= @grammar.prefix %>lex(<%= @grammar.prefix %>context_t * context, <%= @grammar.prefix %>token_info_t * out_token_info);

size_t <%= @grammar.prefix %>parse(<%= @grammar.prefix %>context_t * context);

<% if @grammar.ast %>
<%= @grammar.ast_prefix %><%= @grammar.start_rule %><%= @grammar.ast_suffix %> * <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% else %>
<%= start_rule_type[1] %> <%= @grammar.prefix %>result(<%= @grammar.prefix %>context_t * context);
<% end %>

<%= @grammar.prefix %>position_t <%= @grammar.prefix %>position(<%= @grammar.prefix %>context_t * context);

size_t <%= @grammar.prefix %>user_terminate_code(<%= @grammar.prefix %>context_t * context);

<%= @grammar.prefix %>token_t <%= @grammar.prefix %>token(<%= @grammar.prefix %>context_t * context);
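For orientation, the following is a minimal usage sketch of the generated C API declared in the header template above. It is not part of this diff: the grammar name `calc.propane`, the output file name `parser.c`/`parser.h`, and the `unsigned long` parser value type are assumptions, and it presumes a non-AST grammar using the default `p_` prefix.

```c
/*
 * Hypothetical driver for a parser generated by Propane's C backend,
 * using only the public declarations shown in the header template above.
 * Assumed: default "p_" prefix, AST mode disabled, start rule ptype is
 * "unsigned long"; file names are illustrative, e.g. generated by
 *   ./propane calc.propane parser.c --log parser.log
 */
#include <stdio.h>
#include <string.h>
#include "parser.h"  /* hypothetical generated header */

int main(void)
{
    const char * input = "1 + 2 * (3 + 4)";

    /* The caller allocates the context and initializes it with the input. */
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));

    size_t result = p_parse(&context);
    if (result == P_SUCCESS)
    {
        /* p_result() returns the start rule's parser value (the ptype). */
        printf("parse result: %lu\n", p_result(&context));
    }
    else if (result == P_UNEXPECTED_TOKEN)
    {
        /* p_position() and p_token() identify where parsing failed. */
        p_position_t pos = p_position(&context);
        printf("unexpected token %s at row %u, col %u\n",
               p_token_names[p_token(&context)], pos.row, pos.col);
    }
    else
    {
        printf("parse error code %zu\n", result);
    }
    return 0;
}
```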
1083  doc/user_guide.md
File diff suppressed because it is too large

@@ -1 +0,0 @@
au BufNewFile,BufRead *.propane set filetype=propane
@@ -1,33 +0,0 @@
" Vim syntax file for Propane
" Language: propane
" Maintainer: Josh Holtrop
" URL: https://github.com/holtrop/propane

if exists("b:current_syntax")
  finish
endif

if !exists("b:propane_subtype")
  let b:propane_subtype = "d"
endif

exe "syn include @propaneTarget syntax/".b:propane_subtype.".vim"

syn region propaneTarget matchgroup=propaneDelimiter start="<<" end=">>$" contains=@propaneTarget keepend

syn match propaneComment "#.*"
syn match propaneOperator "->"
syn match propaneFieldAlias ":[a-zA-Z0-9_]\+" contains=propaneFieldOperator
syn match propaneFieldOperator ":" contained
syn match propaneOperator "?"
syn keyword propaneKeyword ast ast_prefix ast_suffix drop module prefix ptype start token tokenid

syn region propaneRegex start="/" end="/" skip="\v\\\\|\\/"

hi def link propaneComment Comment
hi def link propaneKeyword Keyword
hi def link propaneRegex String
hi def link propaneOperator Operator
hi def link propaneFieldOperator Operator
hi def link propaneDelimiter Delimiter
hi def link propaneFieldAlias Identifier
@ -1,7 +1,6 @@
|
||||
require "erb"
|
||||
require "set"
|
||||
require "stringio"
|
||||
require_relative "propane/assets"
|
||||
require_relative "propane/cli"
|
||||
require_relative "propane/code_point_range"
|
||||
require_relative "propane/fa"
|
||||
@ -31,10 +30,10 @@ class Propane
|
||||
|
||||
class << self
|
||||
|
||||
def run(input_file, output_file, log_file, options)
|
||||
def run(input_file, output_file, log_file)
|
||||
begin
|
||||
grammar = Grammar.new(File.read(input_file))
|
||||
generator = Generator.new(grammar, output_file, log_file, options)
|
||||
generator = Generator.new(grammar, output_file, log_file)
|
||||
generator.generate
|
||||
rescue Error => e
|
||||
$stderr.puts e.message
|
||||
|
||||
@ -1,10 +0,0 @@
|
||||
class Propane
|
||||
module Assets
|
||||
class << self
|
||||
def get(name)
|
||||
path = File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/#{name}")
|
||||
File.binread(path)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -4,21 +4,15 @@ class Propane
|
||||
USAGE = <<EOF
|
||||
Usage: #{$0} [options] <input-file> <output-file>
|
||||
Options:
|
||||
-h, --help Show this usage and exit.
|
||||
--log LOG Write log file. This will show all parser states and their
|
||||
associated shifts and reduces. It can be helpful when
|
||||
debugging a grammar.
|
||||
--version Show program version and exit.
|
||||
-w Treat warnings as errors. This option will treat shift/reduce
|
||||
conflicts as fatal errors and will print them to stderr in
|
||||
addition to the log file.
|
||||
--log LOG Write log file
|
||||
--version Show program version and exit
|
||||
-h, --help Show this usage and exit
|
||||
EOF
|
||||
|
||||
class << self
|
||||
|
||||
def run(args)
|
||||
params = []
|
||||
options = {}
|
||||
log_file = nil
|
||||
i = 0
|
||||
while i < args.size
|
||||
@ -30,13 +24,11 @@ EOF
|
||||
log_file = args[i]
|
||||
end
|
||||
when "--version"
|
||||
puts "propane version #{VERSION}"
|
||||
puts "propane v#{VERSION}"
|
||||
return 0
|
||||
when "-h", "--help"
|
||||
puts USAGE
|
||||
return 0
|
||||
when "-w"
|
||||
options[:warnings_as_errors] = true
|
||||
when /^-/
|
||||
$stderr.puts "Error: unknown option #{arg}"
|
||||
return 1
|
||||
@ -53,7 +45,7 @@ EOF
|
||||
$stderr.puts "Error: cannot read #{params[0]}"
|
||||
return 2
|
||||
end
|
||||
Propane.run(*params, log_file, options)
|
||||
Propane.run(*params, log_file)
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -2,7 +2,7 @@ class Propane
|
||||
|
||||
class Generator
|
||||
|
||||
def initialize(grammar, output_file, log_file, options)
|
||||
def initialize(grammar, output_file, log_file)
|
||||
@grammar = grammar
|
||||
@output_file = output_file
|
||||
if log_file
|
||||
@ -10,29 +10,15 @@ class Propane
|
||||
else
|
||||
@log = StringIO.new
|
||||
end
|
||||
@language =
|
||||
if output_file =~ /\.([a-z]+)$/
|
||||
$1
|
||||
else
|
||||
"d"
|
||||
end
|
||||
@options = options
|
||||
@classname = @grammar.classname || File.basename(output_file).sub(%r{[^a-zA-Z0-9].*}, "").capitalize
|
||||
process_grammar!
|
||||
end
|
||||
|
||||
def generate
|
||||
extensions = [@language]
|
||||
if @language == "c"
|
||||
extensions += %w[h]
|
||||
end
|
||||
extensions.each do |extension|
|
||||
template = Assets.get("parser.#{extension}.erb")
|
||||
erb = ERB.new(template, trim_mode: "<>")
|
||||
output_file = @output_file.sub(%r{\.[a-z]+$}, ".#{extension}")
|
||||
result = erb.result(binding.clone)
|
||||
File.open(output_file, "wb") do |fh|
|
||||
fh.write(result)
|
||||
end
|
||||
erb = ERB.new(File.read(File.join(File.dirname(File.expand_path(__FILE__)), "../../assets/parser.d.erb")), trim_mode: "<>")
|
||||
result = erb.result(binding.clone)
|
||||
File.open(@output_file, "wb") do |fh|
|
||||
fh.write(result)
|
||||
end
|
||||
@log.close
|
||||
end
|
||||
@ -43,8 +29,8 @@ class Propane
|
||||
# Assign default pattern mode to patterns without a mode assigned.
|
||||
found_default = false
|
||||
@grammar.patterns.each do |pattern|
|
||||
if pattern.modes.empty?
|
||||
pattern.modes << "default"
|
||||
if pattern.mode.nil?
|
||||
pattern.mode = "default"
|
||||
found_default = true
|
||||
end
|
||||
pattern.ptypename ||= "default"
|
||||
@ -52,7 +38,6 @@ class Propane
|
||||
unless found_default
|
||||
raise Error.new("No patterns found for default mode")
|
||||
end
|
||||
check_ptypes!
|
||||
# Add EOF token.
|
||||
@grammar.tokens << Token.new("$EOF", nil, nil)
|
||||
tokens_by_name = {}
|
||||
@ -68,14 +53,11 @@ class Propane
|
||||
tokens_by_name[token.name] = token
|
||||
end
|
||||
# Check for user start rule.
|
||||
unless @grammar.rules.find {|rule| rule.name == @grammar.start_rule}
|
||||
raise Error.new("Start rule `#{@grammar.start_rule}` not found")
|
||||
unless @grammar.rules.find {|rule| rule.name == "Start"}
|
||||
raise Error.new("Start rule not found")
|
||||
end
|
||||
# Add "real" start rule.
|
||||
@grammar.rules.unshift(Rule.new("$Start", [@grammar.start_rule, "$EOF"], nil, nil, nil))
|
||||
# Generate and add rules for optional components.
|
||||
generate_optional_component_rules!(tokens_by_name)
|
||||
# Build rule sets.
|
||||
@grammar.rules.unshift(Rule.new("$Start", ["Start", "$EOF"], nil, nil, nil))
|
||||
rule_sets = {}
|
||||
rule_set_id = @grammar.tokens.size
|
||||
@grammar.rules.each_with_index do |rule, rule_id|
|
||||
@ -124,55 +106,10 @@ class Propane
|
||||
end
|
||||
end
|
||||
determine_possibly_empty_rulesets!(rule_sets)
|
||||
rule_sets.each do |name, rule_set|
|
||||
rule_set.finalize(@grammar)
|
||||
end
|
||||
# Generate the lexer.
|
||||
@lexer = Lexer.new(@grammar)
|
||||
# Generate the parser.
|
||||
@parser = Parser.new(@grammar, rule_sets, @log, @options)
|
||||
end
|
||||
|
||||
# Check that any referenced ptypes have been defined.
|
||||
def check_ptypes!
|
||||
(@grammar.patterns + @grammar.tokens + @grammar.rules).each do |potor|
|
||||
if potor.ptypename
|
||||
unless @grammar.ptypes.include?(potor.ptypename)
|
||||
raise Error.new("Error: Line #{potor.line_number}: ptype #{potor.ptypename} not declared. Declare with `ptype` statement.")
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Generate and add rules for any optional components.
|
||||
def generate_optional_component_rules!(tokens_by_name)
|
||||
optional_rules_added = Set.new
|
||||
@grammar.rules.each do |rule|
|
||||
rule.components.each do |component|
|
||||
if component =~ /^(.*)\?$/
|
||||
c = $1
|
||||
unless optional_rules_added.include?(component)
|
||||
# Create two rules for the optional component: one empty and
|
||||
# one just matching the component.
|
||||
# We need to find the ptypename for the optional component in
|
||||
# order to copy it to the generated rules.
|
||||
if tokens_by_name[c]
|
||||
# The optional component is a token.
|
||||
ptypename = tokens_by_name[c].ptypename
|
||||
else
|
||||
# The optional component must be a rule, so find any instance
|
||||
# of that rule that specifies a ptypename.
|
||||
ptypename = @grammar.rules.reduce(nil) do |result, rule|
|
||||
rule.name == c && rule.ptypename ? rule.ptypename : result
|
||||
end
|
||||
end
|
||||
@grammar.rules << Rule.new(component, [], nil, ptypename, rule.line_number)
|
||||
@grammar.rules << Rule.new(component, [c], "$$ = $1;\n", ptypename, rule.line_number)
|
||||
optional_rules_added << component
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@parser = Parser.new(@grammar, rule_sets, @log)
|
||||
end
|
||||
|
||||
# Determine which grammar rules could expand to empty sequences.
|
||||
@ -246,66 +183,19 @@ class Propane
|
||||
# Expanded user code block.
|
||||
def expand_code(code, parser, rule, pattern)
|
||||
code = code.gsub(/\$token\(([$\w]+)\)/) do |match|
|
||||
"TOKEN_#{Token.code_name($1)}"
|
||||
end
|
||||
code = code.gsub(/\$terminate\((.*)\);/) do |match|
|
||||
user_terminate_code = $1
|
||||
retval = rule ? "P_USER_TERMINATED" : "TERMINATE_TOKEN_ID"
|
||||
case @language
|
||||
when "c"
|
||||
"context->user_terminate_code = (#{user_terminate_code}); return #{retval};"
|
||||
when "d"
|
||||
"context.user_terminate_code = (#{user_terminate_code}); return #{retval};"
|
||||
end
|
||||
"Token(TOKEN_#{Token.code_name($1)})"
|
||||
end
|
||||
if parser
|
||||
code = code.gsub(/\$\$/) do |match|
|
||||
case @language
|
||||
when "c"
|
||||
"_pvalue->v_#{rule.ptypename}"
|
||||
when "d"
|
||||
"_pvalue.v_#{rule.ptypename}"
|
||||
end
|
||||
"_pvalue.v_#{rule.ptypename}"
|
||||
end
|
||||
code = code.gsub(/\$(\d+)/) do |match|
|
||||
index = $1.to_i
|
||||
case @language
|
||||
when "c"
|
||||
"state_values_stack_index(statevalues, -1 - (int)n_states + #{index})->pvalue.v_#{rule.components[index - 1].ptypename}"
|
||||
when "d"
|
||||
"statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
|
||||
end
|
||||
end
|
||||
code = code.gsub(/\$\{(\w+)\}/) do |match|
|
||||
aliasname = $1
|
||||
if index = rule.aliases[aliasname]
|
||||
case @language
|
||||
when "c"
|
||||
"state_values_stack_index(statevalues, -(int)n_states + #{index})->pvalue.v_#{rule.components[index].ptypename}"
|
||||
when "d"
|
||||
"statevalues[$-n_states+#{index}].pvalue.v_#{rule.components[index].ptypename}"
|
||||
end
|
||||
else
|
||||
raise Error.new("Field alias '#{aliasname}' not found")
|
||||
end
|
||||
"statevalues[$-1-n_states+#{index}].pvalue.v_#{rule.components[index - 1].ptypename}"
|
||||
end
|
||||
else
|
||||
code = code.gsub(/\$\$/) do |match|
|
||||
if @grammar.ast
|
||||
case @language
|
||||
when "c"
|
||||
"out_token_info->pvalue"
|
||||
when "d"
|
||||
"out_token_info.pvalue"
|
||||
end
|
||||
else
|
||||
case @language
|
||||
when "c"
|
||||
"out_token_info->pvalue.v_#{pattern.ptypename}"
|
||||
when "d"
|
||||
"out_token_info.pvalue.v_#{pattern.ptypename}"
|
||||
end
|
||||
end
|
||||
"result.pvalue.v_#{pattern.ptypename}"
|
||||
end
|
||||
code = code.gsub(/\$mode\(([a-zA-Z_][a-zA-Z_0-9]*)\)/) do |match|
|
||||
mode_name = $1
|
||||
@ -313,12 +203,7 @@ class Propane
|
||||
unless mode_id
|
||||
raise Error.new("Lexer mode '#{mode_name}' not found")
|
||||
end
|
||||
case @language
|
||||
when "c"
|
||||
"context->mode = #{mode_id}u"
|
||||
when "d"
|
||||
"context.mode = #{mode_id}u"
|
||||
end
|
||||
"m_mode = #{mode_id}u"
|
||||
end
|
||||
end
|
||||
code
|
||||
@ -330,43 +215,11 @@ class Propane
|
||||
# Start rule parser value type name and type string.
|
||||
def start_rule_type
|
||||
start_rule = @grammar.rules.find do |rule|
|
||||
rule.name == @grammar.start_rule
|
||||
rule.name == "Start"
|
||||
end
|
||||
[start_rule.ptypename, @grammar.ptypes[start_rule.ptypename]]
|
||||
end
|
||||
|
||||
# Get an unsigned integer type that can hold the given maximum value.
|
||||
#
|
||||
# @param max [Integer]
|
||||
# Maximum value to store.
|
||||
#
|
||||
# @return [String]
|
||||
# Type.
|
||||
def get_type_for(max)
|
||||
if max <= 0xFF
|
||||
case @language
|
||||
when "c"
|
||||
"uint8_t"
|
||||
when "d"
|
||||
"ubyte"
|
||||
end
|
||||
elsif max <= 0xFFFF
|
||||
case @language
|
||||
when "c"
|
||||
"uint16_t"
|
||||
when "d"
|
||||
"ushort"
|
||||
end
|
||||
else
|
||||
case @language
|
||||
when "c"
|
||||
"uint32_t"
|
||||
else
|
||||
"uint"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -5,33 +5,24 @@ class Propane
|
||||
# Reserve identifiers beginning with a double-underscore for internal use.
|
||||
IDENTIFIER_REGEX = /(?:[a-zA-Z]|_[a-zA-Z0-9])[a-zA-Z_0-9]*/
|
||||
|
||||
attr_reader :ast
|
||||
attr_reader :ast_prefix
|
||||
attr_reader :ast_suffix
|
||||
attr_reader :classname
|
||||
attr_reader :modulename
|
||||
attr_reader :patterns
|
||||
attr_reader :rules
|
||||
attr_reader :start_rule
|
||||
attr_reader :tokens
|
||||
attr_reader :code_blocks
|
||||
attr_reader :ptypes
|
||||
attr_reader :prefix
|
||||
|
||||
def initialize(input)
|
||||
@patterns = []
|
||||
@start_rule = "Start"
|
||||
@tokens = []
|
||||
@rules = []
|
||||
@code_blocks = {}
|
||||
@code_blocks = []
|
||||
@line_number = 1
|
||||
@next_line_number = @line_number
|
||||
@modeline = nil
|
||||
@mode = nil
|
||||
@input = input.gsub("\r\n", "\n")
|
||||
@ptypes = {"default" => "void *"}
|
||||
@prefix = "p_"
|
||||
@ast = false
|
||||
@ast_prefix = ""
|
||||
@ast_suffix = ""
|
||||
parse_grammar!
|
||||
end
|
||||
|
||||
@ -39,14 +30,6 @@ class Propane
|
||||
@ptypes["default"]
|
||||
end
|
||||
|
||||
def invalid_token_id
|
||||
@tokens.size
|
||||
end
|
||||
|
||||
def terminate_token_id
|
||||
@tokens.size + 1
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def parse_grammar!
|
||||
@ -58,20 +41,16 @@ class Propane
|
||||
def parse_statement!
|
||||
if parse_white_space!
|
||||
elsif parse_comment_line!
|
||||
elsif @modeline.nil? && parse_mode_label!
|
||||
elsif parse_ast_statement!
|
||||
elsif parse_ast_prefix_statement!
|
||||
elsif parse_ast_suffix_statement!
|
||||
elsif @mode.nil? && parse_mode_label!
|
||||
elsif parse_module_statement!
|
||||
elsif parse_class_statement!
|
||||
elsif parse_ptype_statement!
|
||||
elsif parse_pattern_statement!
|
||||
elsif parse_start_statement!
|
||||
elsif parse_token_statement!
|
||||
elsif parse_tokenid_statement!
|
||||
elsif parse_drop_statement!
|
||||
elsif parse_rule_statement!
|
||||
elsif parse_code_block_statement!
|
||||
elsif parse_prefix_statement!
|
||||
else
|
||||
if @input.size > 25
|
||||
@input = @input.slice(0..20) + "..."
|
||||
@ -81,8 +60,8 @@ class Propane
|
||||
end
|
||||
|
||||
def parse_mode_label!
|
||||
if md = consume!(/(#{IDENTIFIER_REGEX}(?:\s*,\s*#{IDENTIFIER_REGEX})*)\s*:/)
|
||||
@modeline = md[1]
|
||||
if md = consume!(/(#{IDENTIFIER_REGEX})\s*:/)
|
||||
@mode = md[1]
|
||||
end
|
||||
end
|
||||
|
||||
@ -94,30 +73,22 @@ class Propane
|
||||
consume!(/#.*\n/)
|
||||
end
|
||||
|
||||
def parse_ast_statement!
|
||||
if consume!(/ast\s*;/)
|
||||
@ast = true
|
||||
end
|
||||
end
|
||||
|
||||
def parse_ast_prefix_statement!
|
||||
if md = consume!(/ast_prefix\s+(\w+)\s*;/)
|
||||
@ast_prefix = md[1]
|
||||
end
|
||||
end
|
||||
|
||||
def parse_ast_suffix_statement!
|
||||
if md = consume!(/ast_suffix\s+(\w+)\s*;/)
|
||||
@ast_suffix = md[1]
|
||||
end
|
||||
end
|
||||
|
||||
def parse_module_statement!
|
||||
if consume!(/module\s+/)
|
||||
md = consume!(/([\w.]+)\s*/, "expected module name")
|
||||
@modulename = md[1]
|
||||
consume!(/;/, "expected `;'")
|
||||
@modeline = nil
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
def parse_class_statement!
|
||||
if consume!(/class\s+/)
|
||||
md = consume!(/([\w.]+)\s*/, "expected class name")
|
||||
@classname = md[1]
|
||||
consume!(/;/, "expected `;'")
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -126,9 +97,6 @@ class Propane
|
||||
if consume!(/ptype\s+/)
|
||||
name = "default"
|
||||
if md = consume!(/(#{IDENTIFIER_REGEX})\s*=\s*/)
|
||||
if @ast
|
||||
raise Error.new("Multiple ptypes are unsupported in AST mode")
|
||||
end
|
||||
name = md[1]
|
||||
end
|
||||
md = consume!(/([^;]+);/, "expected parser result type expression")
|
||||
@ -141,21 +109,18 @@ class Propane
|
||||
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
|
||||
name = md[1]
|
||||
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
|
||||
if @ast
|
||||
raise Error.new("Multiple ptypes are unsupported in AST mode")
|
||||
end
|
||||
ptypename = md[1]
|
||||
end
|
||||
pattern = parse_pattern! || name
|
||||
consume!(/\s+/)
|
||||
unless code = parse_code_block!
|
||||
consume!(/;/, "expected `;' or code block")
|
||||
consume!(/;/, "expected pattern or `;' or code block")
|
||||
end
|
||||
token = Token.new(name, ptypename, @line_number)
|
||||
@tokens << token
|
||||
pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
|
||||
pattern = Pattern.new(pattern: pattern, token: token, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
|
||||
@patterns << pattern
|
||||
@modeline = nil
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -165,15 +130,12 @@ class Propane
|
||||
md = consume!(/(#{IDENTIFIER_REGEX})\s*/, "expected token name")
|
||||
name = md[1]
|
||||
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
|
||||
if @ast
|
||||
raise Error.new("Multiple ptypes are unsupported in AST mode")
|
||||
end
|
||||
ptypename = md[1]
|
||||
end
|
||||
consume!(/;/, "expected `;'");
|
||||
token = Token.new(name, ptypename, @line_number)
|
||||
@tokens << token
|
||||
@modeline = nil
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -186,8 +148,8 @@ class Propane
|
||||
end
|
||||
consume!(/\s+/)
|
||||
consume!(/;/, "expected `;'")
|
||||
@patterns << Pattern.new(pattern: pattern, line_number: @line_number, modes: get_modes_from_modeline)
|
||||
@modeline = nil
|
||||
@patterns << Pattern.new(pattern: pattern, line_number: @line_number, drop: true, mode: @mode)
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -195,20 +157,13 @@ class Propane
|
||||
def parse_rule_statement!
|
||||
if md = consume!(/(#{IDENTIFIER_REGEX})\s*(?:\((#{IDENTIFIER_REGEX})\))?\s*->\s*/)
|
||||
rule_name, ptypename = *md[1, 2]
|
||||
if @ast && ptypename
|
||||
raise Error.new("Multiple ptypes are unsupported in AST mode")
|
||||
end
|
||||
md = consume!(/((?:#{IDENTIFIER_REGEX}\??(?::#{IDENTIFIER_REGEX})?\s*)*)\s*/, "expected rule component list")
|
||||
md = consume!(/((?:#{IDENTIFIER_REGEX}\s*)*)\s*/, "expected rule component list")
|
||||
components = md[1].strip.split(/\s+/)
|
||||
if @ast
|
||||
consume!(/;/, "expected `;'")
|
||||
else
|
||||
unless code = parse_code_block!
|
||||
consume!(/;/, "expected `;' or code block")
|
||||
end
|
||||
unless code = parse_code_block!
|
||||
consume!(/;/, "expected pattern or `;' or code block")
|
||||
end
|
||||
@rules << Rule.new(rule_name, components, code, ptypename, @line_number)
|
||||
@modeline = nil
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -217,44 +172,21 @@ class Propane
|
||||
if pattern = parse_pattern!
|
||||
consume!(/\s+/)
|
||||
if md = consume!(/\((#{IDENTIFIER_REGEX})\)\s*/)
|
||||
if @ast
|
||||
raise Error.new("Multiple ptypes are unsupported in AST mode")
|
||||
end
|
||||
ptypename = md[1]
|
||||
end
|
||||
unless code = parse_code_block!
|
||||
raise Error.new("Line #{@line_number}: expected code block to follow pattern")
|
||||
end
|
||||
@patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, modes: get_modes_from_modeline, ptypename: ptypename)
|
||||
@modeline = nil
|
||||
@patterns << Pattern.new(pattern: pattern, line_number: @line_number, code: code, mode: @mode, ptypename: ptypename)
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
def parse_start_statement!
|
||||
if md = consume!(/start\s+(\w+)\s*;/)
|
||||
@start_rule = md[1]
|
||||
end
|
||||
end
|
||||
|
||||
def parse_code_block_statement!
|
||||
if md = consume!(/<<([a-z]*)(.*?)>>\n/m)
|
||||
name, code = md[1..2]
|
||||
code.sub!(/\A\n/, "")
|
||||
code += "\n" unless code.end_with?("\n")
|
||||
if @code_blocks[name]
|
||||
@code_blocks[name] += code
|
||||
else
|
||||
@code_blocks[name] = code
|
||||
end
|
||||
@modeline = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
def parse_prefix_statement!
|
||||
if md = consume!(/prefix\s+(#{IDENTIFIER_REGEX})\s*;/)
|
||||
@prefix = md[1]
|
||||
if code = parse_code_block!
|
||||
@code_blocks << code
|
||||
@mode = nil
|
||||
true
|
||||
end
|
||||
end
|
||||
@ -279,11 +211,8 @@ class Propane
|
||||
end
|
||||
|
||||
def parse_code_block!
|
||||
if md = consume!(/<<(.*?)>>\n/m)
|
||||
code = md[1]
|
||||
code.sub!(/\A\n/, "")
|
||||
code += "\n" unless code.end_with?("\n")
|
||||
code
|
||||
if md = consume!(/<<\n(.*?)^>>\n/m)
|
||||
md[1]
|
||||
end
|
||||
end
|
||||
|
||||
@ -315,14 +244,6 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
def get_modes_from_modeline
|
||||
if @modeline
|
||||
Set[*@modeline.split(",").map(&:strip)]
|
||||
else
|
||||
Set.new
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -1,13 +1,55 @@
|
||||
class Propane
|
||||
class Lexer
|
||||
|
||||
attr_reader :state_table
|
||||
attr_reader :transition_table
|
||||
attr_reader :mode_table
|
||||
|
||||
def initialize(grammar)
|
||||
@grammar = grammar
|
||||
build_tables!
|
||||
end
|
||||
|
||||
def build_tables
|
||||
@modes = @grammar.patterns.group_by do |pattern|
|
||||
pattern.mode
|
||||
end.transform_values do |patterns|
|
||||
{dfa: DFA.new(patterns)}
|
||||
end
|
||||
@modes.each_with_index do |(mode_name, mode_info), index|
|
||||
mode_info[:id] = index
|
||||
end
|
||||
transition_table = []
|
||||
state_table = []
|
||||
mode_table = []
|
||||
@modes.each do |mode_name, mode_info|
|
||||
state_table_offset = state_table.size
|
||||
mode_table << {
|
||||
state_table_offset: state_table_offset,
|
||||
}
|
||||
states = mode_info[:dfa].enumerate
|
||||
states.each do |state, id|
|
||||
drop = state.accepts && state.accepts.drop?
|
||||
token =
|
||||
if state.accepts && state.accepts.token
|
||||
state.accepts.token.id
|
||||
end
|
||||
code_id =
|
||||
if state.accepts && state.accepts.code_id
|
||||
state.accepts.code_id
|
||||
end
|
||||
state_table << {
|
||||
transition_table_index: transition_table.size,
|
||||
n_transitions: state.transitions.size,
|
||||
drop: drop,
|
||||
token: token,
|
||||
code_id: code_id,
|
||||
}
|
||||
state.transitions.each do |transition|
|
||||
transition_table << {
|
||||
first: transition.code_point_range.first,
|
||||
last: transition.code_point_range.last,
|
||||
destination: states[transition.destination] + state_table_offset,
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
[transition_table, state_table, mode_table]
|
||||
end
|
||||
|
||||
# Get ID for a mode.
|
||||
@ -23,52 +65,5 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def build_tables!
|
||||
modenames = @grammar.patterns.reduce(Set.new) do |result, pattern|
|
||||
result + pattern.modes
|
||||
end
|
||||
@modes = modenames.reduce({}) do |result, modename|
|
||||
result[modename] = @grammar.patterns.select do |pattern|
|
||||
pattern.modes.include?(modename)
|
||||
end
|
||||
result
|
||||
end.transform_values do |patterns|
|
||||
{dfa: DFA.new(patterns)}
|
||||
end
|
||||
@modes.each_with_index do |(mode_name, mode_info), index|
|
||||
mode_info[:id] = index
|
||||
end
|
||||
@state_table = []
|
||||
@transition_table = []
|
||||
@mode_table = []
|
||||
@modes.each do |mode_name, mode_info|
|
||||
state_table_offset = @state_table.size
|
||||
@mode_table << {
|
||||
state_table_offset: state_table_offset,
|
||||
}
|
||||
states = mode_info[:dfa].enumerate
|
||||
states.each do |state, id|
|
||||
token = state.accepts && state.accepts.token && state.accepts.token.id
|
||||
code_id = state.accepts && state.accepts.code_id && state.accepts.code_id
|
||||
@state_table << {
|
||||
transition_table_index: @transition_table.size,
|
||||
n_transitions: state.transitions.size,
|
||||
accepts: !!state.accepts,
|
||||
token: token,
|
||||
code_id: code_id,
|
||||
}
|
||||
state.transitions.each do |transition|
|
||||
@transition_table << {
|
||||
first: transition.code_point_range.first,
|
||||
last: transition.code_point_range.last,
|
||||
destination: states[transition.destination] + state_table_offset,
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
|
||||
@ -2,20 +2,12 @@ class Propane
|
||||
|
||||
class Parser
|
||||
|
||||
attr_reader :state_table
|
||||
attr_reader :shift_table
|
||||
attr_reader :reduce_table
|
||||
attr_reader :rule_sets
|
||||
|
||||
def initialize(grammar, rule_sets, log, options)
|
||||
def initialize(grammar, rule_sets, log)
|
||||
@grammar = grammar
|
||||
@rule_sets = rule_sets
|
||||
@log = log
|
||||
@item_sets = []
|
||||
@item_sets_set = {}
|
||||
@warnings = Set.new
|
||||
@errors = Set.new
|
||||
@options = options
|
||||
start_item = Item.new(grammar.rules.first, 0)
|
||||
eval_item_sets = Set[ItemSet.new([start_item])]
|
||||
|
||||
@ -26,10 +18,10 @@ class Propane
|
||||
item_set.id = @item_sets.size
|
||||
@item_sets << item_set
|
||||
@item_sets_set[item_set] = item_set
|
||||
item_set.next_symbols.each do |next_symbol|
|
||||
unless next_symbol.name == "$EOF"
|
||||
next_item_set = item_set.build_next_item_set(next_symbol)
|
||||
eval_item_sets << next_item_set
|
||||
item_set.following_symbols.each do |following_symbol|
|
||||
unless following_symbol.name == "$EOF"
|
||||
following_set = item_set.build_following_item_set(following_symbol)
|
||||
eval_item_sets << following_set
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -40,71 +32,59 @@ class Propane
|
||||
end
|
||||
|
||||
build_reduce_actions!
|
||||
build_tables!
|
||||
write_log!
|
||||
errormessage = ""
|
||||
if @errors.size > 0
|
||||
errormessage += @errors.join("\n")
|
||||
end
|
||||
if @warnings.size > 0 && @options[:warnings_as_errors]
|
||||
if errormessage != ""
|
||||
errormessage += "\n"
|
||||
end
|
||||
errormessage += "Fatal errors (-w):\n" + @warnings.join("\n")
|
||||
end
|
||||
if errormessage != ""
|
||||
raise Error.new(errormessage)
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def build_tables!
|
||||
@state_table = []
|
||||
@shift_table = []
|
||||
@reduce_table = []
|
||||
def build_tables
|
||||
shift_table = []
|
||||
state_table = []
|
||||
reduce_table = []
|
||||
@item_sets.each do |item_set|
|
||||
unless item_set.reduce_rules.empty?
|
||||
item_set.shift_entries.each do |shift_entry|
|
||||
token = shift_entry[:symbol]
|
||||
if item_set.reduce_actions
|
||||
if rule = item_set.reduce_actions[token]
|
||||
@warnings << "Shift/Reduce conflict (state #{item_set.id}) between token #{token.name} and rule #{rule.name} (defined on line #{rule.line_number})"
|
||||
end
|
||||
shift_entries = item_set.following_symbols.map do |following_symbol|
|
||||
state_id =
|
||||
if following_symbol.name == "$EOF"
|
||||
0
|
||||
else
|
||||
item_set.following_item_set[following_symbol].id
|
||||
end
|
||||
end
|
||||
{
|
||||
token_id: following_symbol.id,
|
||||
state_id: state_id,
|
||||
}
|
||||
end
|
||||
reduce_entries =
|
||||
if rule = item_set.reduce_rule
|
||||
[{token_id: @grammar.invalid_token_id, rule_id: rule.id, rule: rule,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
|
||||
propagate_optional_target: rule.optional? && rule.components.size == 1}]
|
||||
elsif reduce_actions = item_set.reduce_actions
|
||||
reduce_actions.map do |token, rule|
|
||||
{token: token, token_id: token.id, rule_id: rule.id, rule: rule,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size,
|
||||
propagate_optional_target: rule.optional? && rule.components.size == 1}
|
||||
case ra = item_set.reduce_actions
|
||||
when Rule
|
||||
[{token_id: @grammar.tokens.size, rule_id: ra.id,
|
||||
rule_set_id: ra.rule_set.id, n_states: ra.components.size}]
|
||||
when Hash
|
||||
ra.map do |token, rule|
|
||||
{token_id: token.id, rule_id: rule.id,
|
||||
rule_set_id: rule.rule_set.id, n_states: rule.components.size}
|
||||
end
|
||||
else
|
||||
[]
|
||||
end
|
||||
@state_table << {
|
||||
shift_index: @shift_table.size,
|
||||
n_shifts: item_set.shift_entries.size,
|
||||
reduce_index: @reduce_table.size,
|
||||
state_table << {
|
||||
shift_index: shift_table.size,
|
||||
n_shifts: shift_entries.size,
|
||||
reduce_index: reduce_table.size,
|
||||
n_reduces: reduce_entries.size,
|
||||
}
|
||||
@shift_table += item_set.shift_entries
|
||||
@reduce_table += reduce_entries
|
||||
shift_table += shift_entries
|
||||
reduce_table += reduce_entries
|
||||
end
|
||||
[state_table, shift_table, reduce_table]
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def process_item_set(item_set)
|
||||
item_set.next_symbols.each do |next_symbol|
|
||||
unless next_symbol.name == "$EOF"
|
||||
next_item_set = @item_sets_set[item_set.build_next_item_set(next_symbol)]
|
||||
item_set.next_item_set[next_symbol] = next_item_set
|
||||
next_item_set.in_sets << item_set
|
||||
item_set.following_symbols.each do |following_symbol|
|
||||
unless following_symbol.name == "$EOF"
|
||||
following_set = @item_sets_set[item_set.build_following_item_set(following_symbol)]
|
||||
item_set.following_item_set[following_symbol] = following_set
|
||||
following_set.in_sets << item_set
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -114,109 +94,7 @@ class Propane
|
||||
# @return [void]
|
||||
def build_reduce_actions!
|
||||
@item_sets.each do |item_set|
|
||||
build_shift_entries(item_set)
|
||||
build_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
item_sets_to_process = @item_sets.select do |item_set|
|
||||
# We need lookahead reduce actions if:
|
||||
# 1) There is more than one possible rule to reduce. In this case the
|
||||
# lookahead token can help choose which rule to reduce.
|
||||
# 2) There is at least one shift action and one reduce action for
|
||||
# this item set. In this case the lookahead reduce actions are
|
||||
# needed to test for a Shift/Reduce conflict.
|
||||
item_set.reduce_rules.size > 1 ||
|
||||
(item_set.reduce_rules.size > 0 && item_set.shift_entries.size > 0)
|
||||
end
|
||||
if RbConfig::CONFIG["host_os"] =~ /linux/
|
||||
item_sets_by_id = {}
|
||||
item_sets_to_process.each do |item_set|
|
||||
item_sets_by_id[item_set.object_id] = item_set
|
||||
end
|
||||
tokens_by_id = {}
|
||||
@grammar.tokens.each do |token|
|
||||
tokens_by_id[token.object_id] = token
|
||||
end
|
||||
rules_by_id = {}
|
||||
@grammar.rules.each do |rule|
|
||||
rules_by_id[rule.object_id] = rule
|
||||
end
|
||||
n_threads = Util.determine_n_threads
|
||||
semaphore = Mutex.new
|
||||
queue = Queue.new
|
||||
threads = {}
|
||||
n_threads.times do
|
||||
piper, pipew = IO.pipe
|
||||
thread = Thread.new do
|
||||
loop do
|
||||
item_set = nil
|
||||
semaphore.synchronize do
|
||||
item_set = item_sets_to_process.slice!(0)
|
||||
end
|
||||
break if item_set.nil?
|
||||
fork do
|
||||
piper.close
|
||||
build_lookahead_reduce_actions_for_item_set(item_set, pipew)
|
||||
end
|
||||
end
|
||||
queue.push(Thread.current)
|
||||
end
|
||||
threads[thread] = [piper, pipew]
|
||||
end
|
||||
until threads.empty?
|
||||
thread = queue.pop
|
||||
piper, pipew = threads[thread]
|
||||
pipew.close
|
||||
thread_txt = piper.read
|
||||
thread_txt.each_line do |line|
|
||||
if line.start_with?("RA,")
|
||||
parts = line.split(",")
|
||||
item_set_id, token_id, rule_id = parts[1..3].map(&:to_i)
|
||||
item_set = item_sets_by_id[item_set_id]
|
||||
unless item_set
|
||||
raise "Internal error: could not find item set from thread"
|
||||
end
|
||||
token = tokens_by_id[token_id]
|
||||
unless item_set
|
||||
raise "Internal error: could not find token from thread"
|
||||
end
|
||||
rule = rules_by_id[rule_id]
|
||||
unless item_set
|
||||
raise "Internal error: could not find rule from thread"
|
||||
end
|
||||
item_set.reduce_actions ||= {}
|
||||
item_set.reduce_actions[token] = rule
|
||||
elsif line.start_with?("Error: ")
|
||||
@errors << line.chomp
|
||||
else
|
||||
raise "Internal error: unhandled thread line #{line}"
|
||||
end
|
||||
end
|
||||
thread.join
|
||||
threads.delete(thread)
|
||||
end
|
||||
else
|
||||
# Fall back to single threaded algorithm.
|
||||
item_sets_to_process.each do |item_set|
|
||||
item_set.reduce_actions = build_lookahead_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Build the shift entries for a single item set.
|
||||
#
|
||||
# @return [void]
|
||||
def build_shift_entries(item_set)
|
||||
item_set.shift_entries = item_set.next_symbols.map do |next_symbol|
|
||||
state_id =
|
||||
if next_symbol.name == "$EOF"
|
||||
0
|
||||
else
|
||||
item_set.next_item_set[next_symbol].id
|
||||
end
|
||||
{
|
||||
symbol: next_symbol,
|
||||
state_id: state_id,
|
||||
}
|
||||
item_set.reduce_actions = build_reduce_actions_for_item_set(item_set)
|
||||
end
|
||||
end
|
||||
|
||||
@ -225,45 +103,40 @@ class Propane
|
||||
# @param item_set [ItemSet]
|
||||
# ItemSet (parser state)
|
||||
#
|
||||
# @return [void]
|
||||
# @return [nil, Rule, Hash]
|
||||
# If no reduce actions are possible for the given item set, nil.
|
||||
# If only one reduce action is possible for the given item set, the Rule
|
||||
# to reduce.
|
||||
# Otherwise, a mapping of lookahead Tokens to the Rules to reduce.
|
||||
def build_reduce_actions_for_item_set(item_set)
|
||||
# To build the reduce actions, we start by looking at any
|
||||
# "complete" items, i.e., items where the parse position is at the
|
||||
# end of a rule. These are the only rules that are candidates for
|
||||
# reduction in the current ItemSet.
|
||||
item_set.reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
|
||||
reduce_rules = Set.new(item_set.items.select(&:complete?).map(&:rule))
|
||||
|
||||
if item_set.reduce_rules.size == 1
|
||||
item_set.reduce_rule = item_set.reduce_rules.first
|
||||
end
|
||||
end
|
||||
# If there are no rules to reduce for this ItemSet, we're done here.
|
||||
return nil if reduce_rules.size == 0
|
||||
|
||||
# If there is exactly one rule to reduce for this ItemSet, then do not
|
||||
# figure out the lookaheads; just reduce it.
|
||||
return reduce_rules.first if reduce_rules.size == 1
|
||||
|
||||
# Otherwise, we have more than one possible rule to reduce.
|
||||
|
||||
# Build the reduce actions for a single item set (parser state).
|
||||
#
|
||||
# @param item_set [ItemSet]
|
||||
# ItemSet (parser state)
|
||||
# @param fh [File]
|
||||
# Output file handle for multiprocessing mode.
|
||||
#
|
||||
# @return [Hash]
|
||||
# Mapping of lookahead Tokens to the Rules to reduce.
|
||||
def build_lookahead_reduce_actions_for_item_set(item_set, fh = nil)
|
||||
# We will be looking for all possible tokens that can follow instances of
|
||||
# these rules. Rather than looking through the entire grammar for the
|
||||
# possible following tokens, we will only look in the item sets leading
|
||||
# up to this one. This restriction gives us a more precise lookahead set,
|
||||
# and allows us to parse LALR grammars.
|
||||
item_sets = Set[item_set] + item_set.leading_item_sets
|
||||
item_set.reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
|
||||
item_sets = item_set.leading_item_sets
|
||||
reduce_rules.reduce({}) do |reduce_actions, reduce_rule|
|
||||
lookahead_tokens_for_rule = build_lookahead_tokens_to_reduce(reduce_rule, item_sets)
|
||||
lookahead_tokens_for_rule.each do |lookahead_token|
|
||||
if existing_reduce_rule = reduce_actions[lookahead_token]
|
||||
error = "Error: reduce/reduce conflict (state #{item_set.id}) between rule #{existing_reduce_rule.name}##{existing_reduce_rule.id} (defined on line #{existing_reduce_rule.line_number}) and rule #{reduce_rule.name}##{reduce_rule.id} (defined on line #{reduce_rule.line_number}) for lookahead token #{lookahead_token}"
|
||||
@errors << error
|
||||
fh.puts(error) if fh
|
||||
raise Error.new("Error: reduce/reduce conflict between rule #{existing_reduce_rule.id} (#{existing_reduce_rule.name}) and rule #{reduce_rule.id} (#{reduce_rule.name})")
|
||||
end
|
||||
reduce_actions[lookahead_token] = reduce_rule
|
||||
fh.puts "RA,#{item_set.object_id},#{lookahead_token.object_id},#{reduce_rule.object_id}" if fh
|
||||
end
|
||||
reduce_actions
|
||||
end
|
||||
@ -303,14 +176,13 @@ class Propane
|
||||
# tokens to form the lookahead token set.
|
||||
item_sets.each do |item_set|
|
||||
item_set.items.each do |item|
|
||||
if item.next_symbol == rule_set
|
||||
if item.following_symbol == rule_set
|
||||
(1..).each do |offset|
|
||||
case symbol = item.next_symbol(offset)
|
||||
case symbol = item.following_symbol(offset)
|
||||
when nil
|
||||
rule_set = item.rule.rule_set
|
||||
unless checked_rule_sets.include?(rule_set)
|
||||
rule_sets_to_check_after << rule_set
|
||||
checked_rule_sets << rule_set
|
||||
end
|
||||
break
|
||||
when Token
|
||||
@ -363,26 +235,20 @@ class Propane
|
||||
@log.puts
|
||||
@log.puts " Incoming states: #{incoming_ids.join(", ")}"
|
||||
@log.puts " Outgoing states:"
|
||||
item_set.next_item_set.each do |next_symbol, next_item_set|
|
||||
@log.puts " #{next_symbol.name} => #{next_item_set.id}"
|
||||
item_set.following_item_set.each do |following_symbol, following_item_set|
|
||||
@log.puts " #{following_symbol.name} => #{following_item_set.id}"
|
||||
end
|
||||
@log.puts
|
||||
@log.puts " Reduce actions:"
|
||||
if item_set.reduce_rule
|
||||
@log.puts " * => rule #{item_set.reduce_rule.id}, rule set #{@rule_sets[item_set.reduce_rule.name].id} (#{item_set.reduce_rule.name})"
|
||||
elsif item_set.reduce_actions
|
||||
case item_set.reduce_actions
|
||||
when Rule
|
||||
@log.puts " * => #{item_set.reduce_actions.id} (#{item_set.reduce_actions.name})"
|
||||
when Hash
|
||||
item_set.reduce_actions.each do |token, rule|
|
||||
@log.puts " lookahead #{token.name} => #{rule.name} (#{rule.id}), rule set ##{rule.rule_set.id}"
|
||||
end
|
||||
end
|
||||
end
|
||||
if @warnings.size > 0
|
||||
@log.puts
|
||||
@log.puts "Warnings:"
|
||||
@warnings.each do |warning|
|
||||
@log.puts " #{warning}"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -22,7 +22,6 @@ class Propane
|
||||
def initialize(rule, position)
|
||||
@rule = rule
|
||||
@position = position
|
||||
@_hash = [@rule, @position].hash
|
||||
end
|
||||
|
||||
# Hash function.
|
||||
@ -30,7 +29,7 @@ class Propane
|
||||
# @return [Integer]
|
||||
# Hash code.
|
||||
def hash
|
||||
@_hash
|
||||
[@rule, @position].hash
|
||||
end
|
||||
|
||||
# Compare Item objects.
|
||||
@ -57,7 +56,7 @@ class Propane
|
||||
|
||||
# Return the set of Items obtained by "closing" the current item.
|
||||
#
|
||||
# If the next symbol for the current item is another Rule name, then
|
||||
# If the following symbol for the current item is another Rule name, then
|
||||
# this method will return all Items for that Rule with a position of 0.
|
||||
# Otherwise, an empty Array is returned.
|
||||
#
|
||||
@ -82,17 +81,17 @@ class Propane
|
||||
@position == @rule.components.size
|
||||
end
|
||||
|
||||
# Get the next symbol for the Item.
|
||||
# Get the following symbol for the Item.
|
||||
#
|
||||
# That is, the symbol which is after the parse position marker in the
|
||||
# That is, the symbol which follows the parse position marker in the
|
||||
# current Item.
|
||||
#
|
||||
# @param offset [Integer]
|
||||
# Offset from current parse position to examine.
|
||||
#
|
||||
# @return [Token, RuleSet, nil]
|
||||
# Next symbol for the Item.
|
||||
def next_symbol(offset = 0)
|
||||
# Following symbol for the Item.
|
||||
def following_symbol(offset = 0)
|
||||
@rule.components[@position + offset]
|
||||
end
|
||||
|
||||
@ -109,25 +108,25 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
# Get whether this Item's next symbol is the given symbol.
|
||||
# Get whether this Item is followed by the provided symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Symbol to query.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this Item's next symbol is the given symbol.
|
||||
def next_symbol?(symbol)
|
||||
next_symbol == symbol
|
||||
# Whether this Item is followed by the provided symbol.
|
||||
def followed_by?(symbol)
|
||||
following_symbol == symbol
|
||||
end
|
||||
|
||||
# Get the next item for this Item.
|
||||
# Get the following item for this Item.
|
||||
#
|
||||
# That is, the Item formed by moving the parse position marker one place
|
||||
# forward from its position in this Item.
|
||||
#
|
||||
# @return [Item]
|
||||
# The next item for this Item.
|
||||
def next_item
|
||||
# The following item for this Item.
|
||||
def following_item
|
||||
Item.new(@rule, @position + 1)
|
||||
end
|
||||
|
||||
|
||||
@ -2,7 +2,7 @@ class Propane
|
||||
class Parser
|
||||
|
||||
# Represent a parser "item set", which is a set of possible items that the
|
||||
# parser could currently be parsing. This is equivalent to a parser state.
|
||||
# parser could currently be parsing.
|
||||
class ItemSet
|
||||
|
||||
# @return [Set<Item>]
|
||||
@ -14,58 +14,45 @@ class Propane
|
||||
attr_accessor :id
|
||||
|
||||
# @return [Hash]
|
||||
# Maps a next symbol to its ItemSet.
|
||||
attr_reader :next_item_set
|
||||
# Maps a following symbol to its ItemSet.
|
||||
attr_reader :following_item_set
|
||||
|
||||
# @return [Set<ItemSet>]
|
||||
# ItemSets leading to this item set.
|
||||
attr_reader :in_sets
|
||||
|
||||
# @return [nil, Rule]
|
||||
# Rule to reduce if there is only one possibility.
|
||||
attr_accessor :reduce_rule
|
||||
|
||||
# @return [Set<Rule>]
|
||||
# Set of rules that could be reduced in this parser state.
|
||||
attr_accessor :reduce_rules
|
||||
|
||||
# @return [nil, Hash]
|
||||
# Reduce actions, mapping lookahead tokens to rules, if there is
|
||||
# more than one rule that could be reduced.
|
||||
# @return [nil, Rule, Hash]
|
||||
# Reduce actions, mapping lookahead tokens to rules.
|
||||
attr_accessor :reduce_actions
|
||||
|
||||
# @return [Array<Hash>]
|
||||
# Shift table entries.
|
||||
attr_accessor :shift_entries
|
||||
|
||||
# Build an ItemSet.
|
||||
#
|
||||
# @param items [Array<Item>]
|
||||
# Items in this ItemSet.
|
||||
def initialize(items)
|
||||
@items = Set.new(items)
|
||||
@next_item_set = {}
|
||||
@following_item_set = {}
|
||||
@in_sets = Set.new
|
||||
close!
|
||||
end
|
||||
|
||||
# Get the set of next symbols for all Items in this ItemSet.
|
||||
# Get the set of following symbols for all Items in this ItemSet.
|
||||
#
|
||||
# @return [Set<Token, RuleSet>]
|
||||
# Set of next symbols for all Items in this ItemSet.
|
||||
def next_symbols
|
||||
@_next_symbols ||= Set.new(@items.map(&:next_symbol).compact)
|
||||
# Set of following symbols for all Items in this ItemSet.
|
||||
def following_symbols
|
||||
Set.new(@items.map(&:following_symbol).compact)
|
||||
end
|
||||
|
||||
# Build a next ItemSet for the given next symbol.
|
||||
# Build a following ItemSet for the given following symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Next symbol to build the next ItemSet for.
|
||||
# Following symbol to build the following ItemSet for.
|
||||
#
|
||||
# @return [ItemSet]
|
||||
# Next ItemSet for the given next symbol.
|
||||
def build_next_item_set(symbol)
|
||||
ItemSet.new(items_with_next(symbol).map(&:next_item))
|
||||
# Following ItemSet for the given following symbol.
|
||||
def build_following_item_set(symbol)
|
||||
ItemSet.new(items_followed_by(symbol).map(&:following_item))
|
||||
end
|
||||
|
||||
# Hash function.
|
||||
@ -100,27 +87,14 @@ class Propane
|
||||
|
||||
# Set of ItemSets that lead to this ItemSet.
|
||||
#
|
||||
# This set includes this ItemSet.
|
||||
#
|
||||
# @return [Set<ItemSet>]
|
||||
# Set of all ItemSets that lead up to this ItemSet.
|
||||
def leading_item_sets
|
||||
@_leading_item_sets ||=
|
||||
begin
|
||||
result = Set.new
|
||||
eval_sets = Set[self]
|
||||
evaled = Set.new
|
||||
while eval_sets.size > 0
|
||||
eval_set = eval_sets.first
|
||||
eval_sets.delete(eval_set)
|
||||
evaled << eval_set
|
||||
eval_set.in_sets.each do |in_set|
|
||||
result << in_set
|
||||
unless evaled.include?(in_set)
|
||||
eval_sets << in_set
|
||||
end
|
||||
end
|
||||
end
|
||||
result
|
||||
end
|
||||
@in_sets.reduce(Set[self]) do |result, item_set|
|
||||
result + item_set.leading_item_sets
|
||||
end
|
||||
end
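Aside: a self-contained sketch (using a hypothetical stub class, not the real ItemSet) of the transitive closure over `in_sets` that `leading_item_sets` computes, for a chain of states A -> B -> C. Like the recursive form shown above, the sketch assumes the in_sets graph is acyclic.

```ruby
require "set"

# Hypothetical stand-in for ItemSet: only the in_sets relation and the
# recursive closure over it are modeled.
ItemSetStub = Struct.new(:name, :in_sets) do
  def leading_item_sets
    in_sets.reduce(Set[self]) do |result, item_set|
      result + item_set.leading_item_sets
    end
  end
end

a = ItemSetStub.new("A", Set.new)
b = ItemSetStub.new("B", Set[a])
c = ItemSetStub.new("C", Set[b])
c.leading_item_sets.map(&:name)  # => ["C", "B", "A"]
```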
|
||||
|
||||
# Represent the ItemSet as a String.
|
||||
@ -153,16 +127,16 @@ class Propane
|
||||
end
|
||||
end
|
||||
|
||||
# Get the Items with the given next symbol.
|
||||
# Get the Items followed by the given following symbol.
|
||||
#
|
||||
# @param symbol [Token, RuleSet]
|
||||
# Next symbol.
|
||||
# Following symbol.
|
||||
#
|
||||
# @return [Array<Item>]
|
||||
# Items with the given next symbol.
|
||||
def items_with_next(symbol)
|
||||
# Items followed by the given following symbol.
|
||||
def items_followed_by(symbol)
|
||||
@items.select do |item|
|
||||
item.next_symbol?(symbol)
|
||||
item.followed_by?(symbol)
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
@ -26,9 +26,9 @@ class Propane
|
||||
# Regex NFA for matching the pattern.
|
||||
attr_reader :nfa
|
||||
|
||||
# @return [Set]
|
||||
# Lexer modes for this pattern.
|
||||
attr_accessor :modes
|
||||
# @return [String, nil]
|
||||
# Lexer mode for this pattern.
|
||||
attr_accessor :mode
|
||||
|
||||
# @return [String, nil]
|
||||
# Parser value type name.
|
||||
@ -40,26 +40,37 @@ class Propane
|
||||
# Optional parameters.
|
||||
# @option options [String, nil] :code
|
||||
# Code block to execute when the pattern is matched.
|
||||
# @option options [Boolean] :drop
|
||||
# Whether this is a drop pattern.
|
||||
# @option options [String, nil] :pattern
|
||||
# Pattern.
|
||||
# @option options [Token, nil] :token
|
||||
# Token to be returned by this pattern.
|
||||
# @option options [Integer, nil] :line_number
|
||||
# Line number where the token was defined in the input grammar.
|
||||
# @option options [String, nil] :modes
|
||||
# Lexer modes for this pattern.
|
||||
# @option options [String, nil] :mode
|
||||
# Lexer mode for this pattern.
|
||||
def initialize(options)
|
||||
@code = options[:code]
|
||||
@drop = options[:drop]
|
||||
@pattern = options[:pattern]
|
||||
@token = options[:token]
|
||||
@line_number = options[:line_number]
|
||||
@modes = options[:modes]
|
||||
@mode = options[:mode]
|
||||
@ptypename = options[:ptypename]
|
||||
regex = Regex.new(@pattern, @line_number)
|
||||
regex = Regex.new(@pattern)
|
||||
regex.nfa.end_state.accepts = self
|
||||
@nfa = regex.nfa
|
||||
end
|
||||
|
||||
# Whether the pattern is a drop pattern.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether the pattern is a drop pattern.
|
||||
def drop?
|
||||
@drop
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -4,13 +4,12 @@ class Propane
|
||||
attr_reader :unit
|
||||
attr_reader :nfa
|
||||
|
||||
def initialize(pattern, line_number)
|
||||
def initialize(pattern)
|
||||
@pattern = pattern.dup
|
||||
@line_number = line_number
|
||||
@unit = parse_alternates
|
||||
@nfa = @unit.to_nfa
|
||||
if @pattern != ""
|
||||
raise Error.new(%[Line #{@line_number}: unexpected "#{@pattern}" in pattern])
|
||||
raise Error.new(%[Unexpected "#{@pattern}" in pattern])
|
||||
end
|
||||
end
|
||||
|
||||
@ -42,7 +41,7 @@ class Propane
|
||||
mu = MultiplicityUnit.new(last_unit, min_count, max_count)
|
||||
au.replace_last!(mu)
|
||||
else
|
||||
raise Error.new("Line #{@line_number}: #{c} follows nothing")
|
||||
raise Error.new("#{c} follows nothing")
|
||||
end
|
||||
when "|"
|
||||
au.new_alternate!
|
||||
@ -60,7 +59,7 @@ class Propane
|
||||
def parse_group
|
||||
au = parse_alternates
|
||||
if @pattern[0] != ")"
|
||||
raise Error.new("Line #{@line_number}: unterminated group in pattern")
|
||||
raise Error.new("Unterminated group in pattern")
|
||||
end
|
||||
@pattern.slice!(0)
|
||||
au
|
||||
@ -71,7 +70,7 @@ class Propane
|
||||
index = 0
|
||||
loop do
|
||||
if @pattern == ""
|
||||
raise Error.new("Line #{@line_number}: unterminated character class")
|
||||
raise Error.new("Unterminated character class")
|
||||
end
|
||||
c = @pattern.slice!(0)
|
||||
if c == "]"
|
||||
@ -85,13 +84,13 @@ class Propane
|
||||
elsif c == "-" && @pattern[0] != "]"
|
||||
begin_cu = ccu.last_unit
|
||||
unless begin_cu.is_a?(CharacterRangeUnit) && begin_cu.code_point_range.size == 1
|
||||
raise Error.new("Line #{@line_number}: character range must be between single characters")
|
||||
raise Error.new("Character range must be between single characters")
|
||||
end
|
||||
if @pattern[0] == "\\"
|
||||
@pattern.slice!(0)
|
||||
end_cu = parse_backslash
|
||||
unless end_cu.is_a?(CharacterRangeUnit) && end_cu.code_point_range.size == 1
|
||||
raise Error.new("Line #{@line_number}: character range must be between single characters")
|
||||
raise Error.new("Character range must be between single characters")
|
||||
end
|
||||
max_code_point = end_cu.code_point
|
||||
else
|
||||
@ -117,7 +116,7 @@ class Propane
|
||||
elsif max_count.to_s != ""
|
||||
max_count = max_count.to_i
|
||||
if max_count < min_count
|
||||
raise Error.new("Line #{@line_number}: maximum repetition count cannot be less than minimum repetition count")
|
||||
raise Error.new("Maximum repetition count cannot be less than minimum repetition count")
|
||||
end
|
||||
else
|
||||
max_count = nil
|
||||
@ -125,33 +124,18 @@ class Propane
|
||||
@pattern = pattern
|
||||
[min_count, max_count]
|
||||
else
|
||||
raise Error.new("Line #{@line_number}: unexpected match count following {")
|
||||
raise Error.new("Unexpected match count at #{@pattern}")
|
||||
end
|
||||
end
|
||||
|
||||
def parse_backslash
|
||||
if @pattern == ""
|
||||
raise Error.new("Line #{@line_number}: error: unfollowed \\")
|
||||
raise Error.new("Error: unfollowed \\")
|
||||
else
|
||||
c = @pattern.slice!(0)
|
||||
case c
|
||||
when "a"
|
||||
CharacterRangeUnit.new("\a")
|
||||
when "b"
|
||||
CharacterRangeUnit.new("\b")
|
||||
when "d"
|
||||
CharacterRangeUnit.new("0", "9")
|
||||
when "D"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new("0", "9")
|
||||
ccu.negate = true
|
||||
ccu
|
||||
when "f"
|
||||
CharacterRangeUnit.new("\f")
|
||||
when "n"
|
||||
CharacterRangeUnit.new("\n")
|
||||
when "r"
|
||||
CharacterRangeUnit.new("\r")
|
||||
when "s"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new(" ")
|
||||
@ -161,35 +145,6 @@ class Propane
|
||||
ccu << CharacterRangeUnit.new("\f")
|
||||
ccu << CharacterRangeUnit.new("\v")
|
||||
ccu
|
||||
when "S"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new(" ")
|
||||
ccu << CharacterRangeUnit.new("\t")
|
||||
ccu << CharacterRangeUnit.new("\r")
|
||||
ccu << CharacterRangeUnit.new("\n")
|
||||
ccu << CharacterRangeUnit.new("\f")
|
||||
ccu << CharacterRangeUnit.new("\v")
|
||||
ccu.negate = true
|
||||
ccu
|
||||
when "t"
|
||||
CharacterRangeUnit.new("\t")
|
||||
when "v"
|
||||
CharacterRangeUnit.new("\v")
|
||||
when "w"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new("_")
|
||||
ccu << CharacterRangeUnit.new("0", "9")
|
||||
ccu << CharacterRangeUnit.new("a", "z")
|
||||
ccu << CharacterRangeUnit.new("A", "Z")
|
||||
ccu
|
||||
when "W"
|
||||
ccu = CharacterClassUnit.new
|
||||
ccu << CharacterRangeUnit.new("_")
|
||||
ccu << CharacterRangeUnit.new("0", "9")
|
||||
ccu << CharacterRangeUnit.new("a", "z")
|
||||
ccu << CharacterRangeUnit.new("A", "Z")
|
||||
ccu.negate = true
|
||||
ccu
|
||||
else
|
||||
CharacterRangeUnit.new(c)
|
||||
end
|
||||
|
||||
@ -92,19 +92,16 @@ class Propane
|
||||
@units = []
|
||||
@negate = false
|
||||
end
|
||||
def method_missing(*args, &block)
|
||||
@units.__send__(*args, &block)
|
||||
def initialize
|
||||
@units = []
|
||||
end
|
||||
def method_missing(*args)
|
||||
@units.__send__(*args)
|
||||
end
|
||||
def <<(thing)
|
||||
if thing.is_a?(CharacterClassUnit)
|
||||
if thing.negate
|
||||
CodePointRange.invert_ranges(thing.map(&:code_point_range)).each do |cpr|
|
||||
CharacterRangeUnit.new(cpr.first, cpr.last)
|
||||
end
|
||||
else
|
||||
thing.each do |ccu_unit|
|
||||
@units << ccu_unit
|
||||
end
|
||||
thing.each do |ccu_unit|
|
||||
@units << ccu_unit
|
||||
end
|
||||
else
|
||||
@units << thing
|
||||
|
||||
@ -6,10 +6,6 @@ class Propane
|
||||
# Rule components.
|
||||
attr_reader :components
|
||||
|
||||
# @return [Hash]
|
||||
# Field aliases.
|
||||
attr_reader :aliases
|
||||
|
||||
# @return [String]
|
||||
# User code associated with the rule.
|
||||
attr_reader :code
|
||||
@ -34,11 +30,6 @@ class Propane
|
||||
# The RuleSet that this Rule is a part of.
|
||||
attr_accessor :rule_set
|
||||
|
||||
# @return [Array<Integer>]
|
||||
# Map this rule's components to their positions in the parent RuleSet's
|
||||
# node field pointer array. This is used for AST construction.
|
||||
attr_accessor :rule_set_node_field_index_map
|
||||
|
||||
# Construct a Rule.
|
||||
#
|
||||
# @param name [String]
|
||||
@ -53,20 +44,7 @@ class Propane
|
||||
# Line number where the rule was defined in the input grammar.
|
||||
def initialize(name, components, code, ptypename, line_number)
|
||||
@name = name
|
||||
@aliases = {}
|
||||
@components = components.each_with_index.map do |component, i|
|
||||
if component =~ /(\S+):(\S+)/
|
||||
c, aliasname = $1, $2
|
||||
if @aliases[aliasname]
|
||||
raise Error.new("Error: duplicate field alias `#{aliasname}` for rule #{name} defined on line #{line_number}")
|
||||
end
|
||||
@aliases[aliasname] = i
|
||||
c
|
||||
else
|
||||
component
|
||||
end
|
||||
end
|
||||
@rule_set_node_field_index_map = components.map {0}
|
||||
@components = components
|
||||
@code = code
|
||||
@ptypename = ptypename
|
||||
@line_number = line_number
|
||||
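Aside: a standalone sketch of the `component:alias` splitting performed in the constructor above; the component names here are hypothetical.

```ruby
aliases = {}
raw_components = ["ident:name", "Type:type", "semicolon"]
components = raw_components.each_with_index.map do |component, i|
  if component =~ /(\S+):(\S+)/
    aliases[$2] = i  # record the alias against this component's position
    $1               # keep only the component name
  else
    component
  end
end
components  # => ["ident", "Type", "semicolon"]
aliases     # => {"name"=>0, "type"=>1}
```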
@ -82,14 +60,6 @@ class Propane
|
||||
@components.empty?
|
||||
end
|
||||
|
||||
# Return whether this is an optional Rule.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this is an optional Rule.
|
||||
def optional?
|
||||
@name.end_with?("?")
|
||||
end
|
||||
|
||||
# Represent the Rule as a String.
|
||||
#
|
||||
# @return [String]
|
||||
@ -98,17 +68,6 @@ class Propane
|
||||
"#{@name} -> #{@components.map(&:name).join(" ")}"
|
||||
end
|
||||
|
||||
# Check whether the rule set node field index map is just a 1:1 mapping.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Boolean indicating whether the rule set node field index map is just a
|
||||
# 1:1 mapping.
|
||||
def flat_rule_set_node_field_index_map?
|
||||
@rule_set_node_field_index_map.each_with_index.all? do |v, i|
|
||||
v == i
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -1,15 +1,10 @@
|
||||
class Propane
|
||||
|
||||
# A RuleSet collects all grammar rules of the same name.
|
||||
class RuleSet
|
||||
|
||||
# @return [Array<Hash>]
|
||||
# AST fields.
|
||||
attr_reader :ast_fields
|
||||
|
||||
# @return [Integer]
|
||||
# ID of the RuleSet.
|
||||
attr_reader :id
|
||||
attr_accessor :id
|
||||
|
||||
# @return [String]
|
||||
# Name of the RuleSet.
|
||||
@ -56,24 +51,6 @@ class Propane
|
||||
@could_be_empty
|
||||
end
|
||||
|
||||
# Return whether this is an optional RuleSet.
|
||||
#
|
||||
# @return [Boolean]
|
||||
# Whether this is an optional RuleSet.
|
||||
def optional?
|
||||
@name.end_with?("?")
|
||||
end
|
||||
|
||||
# For optional rule sets, return the underlying component that is optional.
|
||||
def option_target
|
||||
@rules.each do |rule|
|
||||
if rule.components.size > 0
|
||||
return rule.components[0]
|
||||
end
|
||||
end
|
||||
raise "Optional rule target not found"
|
||||
end
|
||||
|
||||
# Build the start token set for the RuleSet.
|
||||
#
|
||||
# @return [Set<Token>]
|
||||
@ -98,72 +75,6 @@ class Propane
|
||||
@_start_token_set
|
||||
end
|
||||
|
||||
# Finalize a RuleSet after adding all Rules to it.
|
||||
def finalize(grammar)
|
||||
if grammar.ast
|
||||
build_ast_fields(grammar)
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# Build the set of AST fields for this RuleSet.
|
||||
#
|
||||
# This is an Array of Hashes. Each entry in the Array corresponds to a
|
||||
# field location in the AST node. The entry is a Hash. It could have one or
|
||||
# two keys. It will always have the field name with a positional suffix as
|
||||
# a key. It may also have the field name without the positional suffix if
|
||||
# that field only exists in one position across all Rules in the RuleSet.
|
||||
#
|
||||
# @return [void]
|
||||
def build_ast_fields(grammar)
|
||||
field_ast_node_indexes = {}
|
||||
field_indexes_across_all_rules = {}
|
||||
# Stores the index into @ast_fields by field alias name.
|
||||
field_aliases = {}
|
||||
@ast_fields = []
|
||||
@rules.each do |rule|
|
||||
rule.components.each_with_index do |component, i|
|
||||
if component.is_a?(RuleSet) && component.optional?
|
||||
component = component.option_target
|
||||
end
|
||||
if component.is_a?(Token)
|
||||
node_name = "Token"
|
||||
else
|
||||
node_name = component.name
|
||||
end
|
||||
struct_name = "#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
|
||||
field_name = "p#{node_name}#{i + 1}"
|
||||
unless field_ast_node_indexes[field_name]
|
||||
field_ast_node_indexes[field_name] = @ast_fields.size
|
||||
@ast_fields << {field_name => struct_name}
|
||||
end
|
||||
rule.aliases.each do |alias_name, index|
|
||||
if index == i
|
||||
alias_ast_fields_index = field_ast_node_indexes[field_name]
|
||||
if field_aliases[alias_name] && field_aliases[alias_name] != alias_ast_fields_index
|
||||
raise Error.new("Error: conflicting AST node field positions for alias `#{alias_name}` in rule #{rule.name} defined on line #{rule.line_number}")
|
||||
end
|
||||
field_aliases[alias_name] = alias_ast_fields_index
|
||||
@ast_fields[alias_ast_fields_index][alias_name] = @ast_fields[alias_ast_fields_index].first[1]
|
||||
end
|
||||
end
|
||||
field_indexes_across_all_rules[node_name] ||= Set.new
|
||||
field_indexes_across_all_rules[node_name] << field_ast_node_indexes[field_name]
|
||||
rule.rule_set_node_field_index_map[i] = field_ast_node_indexes[field_name]
|
||||
end
|
||||
end
|
||||
field_indexes_across_all_rules.each do |node_name, indexes_across_all_rules|
|
||||
if indexes_across_all_rules.size == 1
|
||||
# If this field was only seen in one position across all rules,
|
||||
# then add an alias to the positional field name that does not
|
||||
# include the position.
|
||||
@ast_fields[indexes_across_all_rules.first]["p#{node_name}"] =
|
||||
"#{grammar.ast_prefix}#{node_name}#{grammar.ast_suffix}"
|
||||
end
|
||||
end
|
||||
end
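Aside: a hypothetical illustration of the `@ast_fields` structure documented above, for a rule set containing only `FunctionReturnType -> arrow Type` and assuming the `P` AST node prefix used in the example grammars.

```ruby
# One Array entry per AST node field slot. Each entry maps the positional
# field name to its node struct type; a position-free alias is added when
# that field appears at only one position across all rules in the rule set.
ast_fields = [
  {"pToken1" => "PToken", "pToken" => "PToken"},  # the `arrow` token (position 1)
  {"pType2"  => "PType",  "pType"  => "PType"},   # the `Type` rule set (position 2)
]
ast_fields[1].keys  # => ["pType2", "pType"]
```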
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -10,32 +10,6 @@ class Propane
|
||||
"#{s}\n* #{message} *\n#{s}\n"
|
||||
end
|
||||
|
||||
# Determine the number of threads to use.
|
||||
#
|
||||
# @return [Integer]
|
||||
# The number of threads to use.
|
||||
def determine_n_threads
|
||||
# Try to figure out how many threads are available on the host hardware.
|
||||
begin
|
||||
case RbConfig::CONFIG["host_os"]
|
||||
when /linux/
|
||||
return File.read("/proc/cpuinfo").scan(/^processor\s*:/).size
|
||||
when /mswin|mingw|msys/
|
||||
if `wmic cpu get NumberOfLogicalProcessors -value` =~ /NumberOfLogicalProcessors=(\d+)/
|
||||
return $1.to_i
|
||||
end
|
||||
when /darwin/
|
||||
if `sysctl -n hw.ncpu` =~ /(\d+)/
|
||||
return $1.to_i
|
||||
end
|
||||
end
|
||||
rescue
|
||||
end
|
||||
|
||||
# If we can't figure it out, default to 4.
|
||||
4
|
||||
end
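Aside, not part of the diff: for comparison with the per-OS probing above, Ruby's standard library can report the logical processor count directly.

```ruby
require "etc"

Etc.nprocessors  # => e.g. 8 on a host with 8 logical processors
```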
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
@ -1,3 +1,3 @@
class Propane
VERSION = "2.3.0"
VERSION = "0.1.0"
end

2
propane.sh
Executable file
@ -0,0 +1,2 @@
#!/bin/sh
exec bundle exec ruby -Ilib bin/propane "$@"
@ -1,6 +1,5 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
require "erb"
|
||||
require "fileutils"
|
||||
require "digest/md5"
|
||||
|
||||
@ -14,24 +13,6 @@ START_FILE = "bin/#{PROG_NAME}"
|
||||
LIB_DIR = "lib"
|
||||
DIST = "dist"
|
||||
|
||||
ASSETS_TEMPLATE = <<EOF
|
||||
class Propane
|
||||
module Assets
|
||||
class << self
|
||||
def get(name)
|
||||
case name
|
||||
<% Dir.glob("assets/*").each do |asset_file| %>
|
||||
when <%= File.basename(asset_file).inspect %>
|
||||
<%= File.binread(asset_file).inspect %>
|
||||
<% end %>
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
EOF
|
||||
|
||||
assets_module = ERB.new(ASSETS_TEMPLATE, trim_mode: "<>").result
|
||||
files_processed = {}
|
||||
combined_file = []
|
||||
|
||||
@ -44,11 +25,7 @@ combine_files = lambda do |file|
|
||||
if File.exist?(path)
|
||||
unless files_processed[path]
|
||||
files_processed[path] = true
|
||||
if require_name == "propane/assets"
|
||||
combined_file << assets_module
|
||||
else
|
||||
combine_files[path]
|
||||
end
|
||||
combine_files[path]
|
||||
end
|
||||
else
|
||||
raise "require path #{path.inspect} not found"
|
||||
|
||||
@ -1,151 +0,0 @@
|
||||
ast;
|
||||
ast_prefix P;
|
||||
|
||||
<<header
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
|
||||
typedef union
|
||||
{
|
||||
uint64_t i64;
|
||||
const uint8_t * s;
|
||||
double dou;
|
||||
} TokenVal;
|
||||
>>
|
||||
|
||||
ptype TokenVal;
|
||||
|
||||
# Keywords.
|
||||
token byte;
|
||||
token def;
|
||||
token int;
|
||||
token long;
|
||||
token module;
|
||||
token return;
|
||||
token short;
|
||||
token size_t;
|
||||
token ssize_t;
|
||||
token ubyte;
|
||||
token uint;
|
||||
token ulong;
|
||||
token ushort;
|
||||
|
||||
# Symbols.
|
||||
token arrow /->/;
|
||||
token comma /,/;
|
||||
token lbrace /\{/;
|
||||
token lparen /\(/;
|
||||
token rbrace /\}/;
|
||||
token rparen /\)/;
|
||||
token semicolon /;/;
|
||||
|
||||
# Integer literals.
|
||||
token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
|
||||
$$.i64 = 64u;
|
||||
>>
|
||||
|
||||
# Identifier.
|
||||
token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
|
||||
$$.s = match;
|
||||
$mode(default);
|
||||
return $token(ident);
|
||||
>>
|
||||
|
||||
# Comments.
|
||||
drop /#.*/;
|
||||
|
||||
# Whitespace.
|
||||
drop /[ \r\n]*/;
|
||||
|
||||
start Module;
|
||||
|
||||
# Assignment operators - right associative
|
||||
Expression -> Expression_Or:exp0;
|
||||
|
||||
# Logical OR operator - left associative
|
||||
Expression_Or -> Expression_And:exp0;
|
||||
|
||||
# Logical AND operator - left associative
|
||||
Expression_And -> Expression_Comp:exp0;
|
||||
|
||||
# Equality operators - left associative
|
||||
Expression_Comp -> Expression_Relational:exp0;
|
||||
|
||||
# Relational operators - left associative
|
||||
Expression_Relational -> Expression_REMatch:exp0;
|
||||
|
||||
# Regular expression - left associative
|
||||
Expression_REMatch -> Expression_BinOr:exp0;
|
||||
|
||||
# Binary OR operator - left associative
|
||||
Expression_BinOr -> Expression_Xor:exp0;
|
||||
|
||||
# Binary XOR operator - left associative
|
||||
Expression_Xor -> Expression_BinAnd:exp0;
|
||||
|
||||
# Binary AND operator - left associative
|
||||
Expression_BinAnd -> Expression_BitShift:exp0;
|
||||
|
||||
# Bit shift operators - left associative
|
||||
Expression_BitShift -> Expression_Plus:exp0;
|
||||
|
||||
# Add/subtract operators - left associative
|
||||
Expression_Plus -> Expression_Mul:exp0;
|
||||
|
||||
# Multiplication/divide/modulus operators - left associative
|
||||
Expression_Mul -> Expression_Range:exp0;
|
||||
|
||||
# Range construction operators - left associative
|
||||
Expression_Range -> Expression_UnaryPrefix:exp0;
|
||||
|
||||
# Unary prefix operators
|
||||
Expression_UnaryPrefix -> Expression_Dot:exp0;
|
||||
|
||||
# Postfix operators
|
||||
Expression_Dot -> Expression_Ident:exp0;
|
||||
Expression_Dot -> Expression_Dot:exp1 lparen rparen;
|
||||
|
||||
# Literals, identifiers, and parenthesized expressions
|
||||
Expression_Ident -> Literal;
|
||||
Expression_Ident -> ident;
|
||||
|
||||
FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
|
||||
|
||||
FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
|
||||
FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
|
||||
|
||||
FunctionReturnType -> arrow Type;
|
||||
|
||||
Literal -> LiteralInteger;
|
||||
LiteralInteger -> hex_int_l;
|
||||
|
||||
Module -> ModuleStatement? ModuleItems;
|
||||
|
||||
ModuleItem -> FunctionDefinition;
|
||||
|
||||
ModuleItems -> ;
|
||||
ModuleItems -> ModuleItems ModuleItem;
|
||||
|
||||
ModulePath -> ident;
|
||||
|
||||
ModuleStatement -> module ModulePath semicolon;
|
||||
|
||||
ReturnStatement -> return Expression?:exp0 semicolon;
|
||||
|
||||
Statements -> ;
|
||||
Statements -> Statements Statement;
|
||||
Statement -> Expression semicolon;
|
||||
Statement -> ReturnStatement;
|
||||
|
||||
Type -> TypeBase;
|
||||
|
||||
TypeBase -> byte;
|
||||
TypeBase -> ubyte;
|
||||
TypeBase -> short;
|
||||
TypeBase -> ushort;
|
||||
TypeBase -> int;
|
||||
TypeBase -> uint;
|
||||
TypeBase -> long;
|
||||
TypeBase -> ulong;
|
||||
TypeBase -> size_t;
|
||||
TypeBase -> ssize_t;
|
||||
@ -1,177 +0,0 @@
|
||||
ast;
|
||||
ast_prefix P;
|
||||
|
||||
<<
|
||||
import std.bigint;
|
||||
|
||||
private string stringvalue;
|
||||
|
||||
union TokenVal
|
||||
{
|
||||
BigInt bi;
|
||||
string s;
|
||||
double dou;
|
||||
}
|
||||
>>
|
||||
|
||||
ptype TokenVal;
|
||||
|
||||
# Keywords.
|
||||
token byte;
|
||||
token def;
|
||||
token int;
|
||||
token long;
|
||||
token module;
|
||||
token return;
|
||||
token short;
|
||||
token size_t;
|
||||
token ssize_t;
|
||||
token ubyte;
|
||||
token uint;
|
||||
token ulong;
|
||||
token ushort;
|
||||
|
||||
# Symbols.
|
||||
token arrow /->/;
|
||||
token comma /,/;
|
||||
token lbrace /\{/;
|
||||
token lparen /\(/;
|
||||
token rbrace /\}/;
|
||||
token rparen /\)/;
|
||||
token semicolon /;/;
|
||||
|
||||
# Integer literals.
|
||||
token hex_int_l /0[xX][0-9a-fA-F][0-9a-fA-F_]*/ <<
|
||||
$$.bi = BigInt(match[0..3]);
|
||||
foreach (c; match[3..$])
|
||||
{
|
||||
if (('0' <= c) && (c <= '9'))
|
||||
{
|
||||
$$.bi *= 0x10;
|
||||
$$.bi += (c - '0');
|
||||
}
|
||||
if (('a' <= c) && (c <= 'f'))
|
||||
{
|
||||
$$.bi *= 0x10;
|
||||
$$.bi += (c - 'a' + 10);
|
||||
}
|
||||
if (('A' <= c) && (c <= 'F'))
|
||||
{
|
||||
$$.bi *= 0x10;
|
||||
$$.bi += (c - 'A' + 10);
|
||||
}
|
||||
}
|
||||
>>
|
||||
|
||||
# Identifier.
|
||||
token ident /\$?[a-zA-Z_][a-zA-Z_0-9]*\??/ <<
|
||||
if (match[0] == '$')
|
||||
{
|
||||
$$.s = match[1..$];
|
||||
}
|
||||
else
|
||||
{
|
||||
$$.s = match;
|
||||
}
|
||||
$mode(default);
|
||||
return $token(ident);
|
||||
>>
|
||||
|
||||
# Comments.
|
||||
drop /#.*/;
|
||||
|
||||
# Whitespace.
|
||||
drop /[ \r\n]*/;
|
||||
|
||||
start Module;
|
||||
|
||||
# Assignment operators - right associative
|
||||
Expression -> Expression_Or:exp0;
|
||||
|
||||
# Logical OR operator - left associative
|
||||
Expression_Or -> Expression_And:exp0;
|
||||
|
||||
# Logical AND operator - left associative
|
||||
Expression_And -> Expression_Comp:exp0;
|
||||
|
||||
# Equality operators - left associative
|
||||
Expression_Comp -> Expression_Relational:exp0;
|
||||
|
||||
# Relational operators - left associative
|
||||
Expression_Relational -> Expression_REMatch:exp0;
|
||||
|
||||
# Regular expression - left associative
|
||||
Expression_REMatch -> Expression_BinOr:exp0;
|
||||
|
||||
# Binary OR operator - left associative
|
||||
Expression_BinOr -> Expression_Xor:exp0;
|
||||
|
||||
# Binary XOR operator - left associative
|
||||
Expression_Xor -> Expression_BinAnd:exp0;
|
||||
|
||||
# Binary AND operator - left associative
|
||||
Expression_BinAnd -> Expression_BitShift:exp0;
|
||||
|
||||
# Bit shift operators - left associative
|
||||
Expression_BitShift -> Expression_Plus:exp0;
|
||||
|
||||
# Add/subtract operators - left associative
|
||||
Expression_Plus -> Expression_Mul:exp0;
|
||||
|
||||
# Multiplication/divide/modulus operators - left associative
|
||||
Expression_Mul -> Expression_Range:exp0;
|
||||
|
||||
# Range construction operators - left associative
|
||||
Expression_Range -> Expression_UnaryPrefix:exp0;
|
||||
|
||||
# Unary prefix operators
|
||||
Expression_UnaryPrefix -> Expression_Dot:exp0;
|
||||
|
||||
# Postfix operators
|
||||
Expression_Dot -> Expression_Ident:exp0;
|
||||
Expression_Dot -> Expression_Dot:exp1 lparen rparen;
|
||||
|
||||
# Literals, identifiers, and parenthesized expressions
|
||||
Expression_Ident -> Literal;
|
||||
Expression_Ident -> ident;
|
||||
|
||||
FunctionDefinition -> def ident:name lparen FunctionParameterList?:parameters rparen FunctionReturnType?:returntype lbrace Statements rbrace;
|
||||
|
||||
FunctionParameterList -> ident:name Type:type FunctionParameterListMore?:more;
|
||||
FunctionParameterListMore -> comma ident:name Type:type FunctionParameterListMore?:more;
|
||||
|
||||
FunctionReturnType -> arrow Type;
|
||||
|
||||
Literal -> LiteralInteger;
|
||||
LiteralInteger -> hex_int_l;
|
||||
|
||||
Module -> ModuleStatement? ModuleItems;
|
||||
|
||||
ModuleItem -> FunctionDefinition;
|
||||
|
||||
ModuleItems -> ;
|
||||
ModuleItems -> ModuleItems ModuleItem;
|
||||
|
||||
ModulePath -> ident;
|
||||
|
||||
ModuleStatement -> module ModulePath semicolon;
|
||||
|
||||
ReturnStatement -> return Expression?:exp0 semicolon;
|
||||
|
||||
Statements -> ;
|
||||
Statements -> Statements Statement;
|
||||
Statement -> Expression semicolon;
|
||||
Statement -> ReturnStatement;
|
||||
|
||||
Type -> TypeBase;
|
||||
|
||||
TypeBase -> byte;
|
||||
TypeBase -> ubyte;
|
||||
TypeBase -> short;
|
||||
TypeBase -> ushort;
|
||||
TypeBase -> int;
|
||||
TypeBase -> uint;
|
||||
TypeBase -> long;
|
||||
TypeBase -> ulong;
|
||||
TypeBase -> size_t;
|
||||
TypeBase -> ssize_t;
|
||||
@ -1,183 +0,0 @@
|
||||
<<header
|
||||
#include "json_types.h"
|
||||
#include "testutils.h"
|
||||
>>
|
||||
<<
|
||||
#include "math.h"
|
||||
#include <stdbool.h>
|
||||
static str_t string_value;
|
||||
>>
|
||||
|
||||
ptype JSONValue *;
|
||||
|
||||
drop /\s+/;
|
||||
token lbrace /\{/;
|
||||
token rbrace /\}/;
|
||||
token lbracket /\[/;
|
||||
token rbracket /\]/;
|
||||
token comma /,/;
|
||||
token colon /:/;
|
||||
token number /-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][-+]?[0-9]+)?/ <<
|
||||
double n = 0.0;
|
||||
bool negative = false;
|
||||
size_t i = 0u;
|
||||
if (match[i] == '-')
|
||||
{
|
||||
negative = true;
|
||||
i++;
|
||||
}
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
n *= 10.0;
|
||||
n += (match[i] - '0');
|
||||
i++;
|
||||
}
|
||||
if (match[i] == '.')
|
||||
{
|
||||
i++;
|
||||
double mult = 0.1;
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
n += mult * (match[i] - '0');
|
||||
mult /= 10.0;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
if (match[i] == 'e' || match[i] == 'E')
|
||||
{
|
||||
bool exp_negative = false;
|
||||
i++;
|
||||
if (match[i] == '-')
|
||||
{
|
||||
exp_negative = true;
|
||||
i++;
|
||||
}
|
||||
else if (match[i] == '+')
|
||||
{
|
||||
i++;
|
||||
}
|
||||
long exp = 0.0;
|
||||
while ('0' <= match[i] && match[i] <= '9')
|
||||
{
|
||||
exp *= 10;
|
||||
exp += (match[i] - '0');
|
||||
i++;
|
||||
}
|
||||
if (exp_negative)
|
||||
{
|
||||
exp = -exp;
|
||||
}
|
||||
n = pow(n, exp);
|
||||
}
|
||||
if (negative)
|
||||
{
|
||||
n = -n;
|
||||
}
|
||||
$$ = JSONValue_new(JSON_NUMBER);
|
||||
$$->number = n;
|
||||
>>
|
||||
token true <<
|
||||
$$ = JSONValue_new(JSON_TRUE);
|
||||
>>
|
||||
token false <<
|
||||
$$ = JSONValue_new(JSON_FALSE);
|
||||
>>
|
||||
token null <<
|
||||
$$ = JSONValue_new(JSON_NULL);
|
||||
>>
|
||||
/"/ <<
|
||||
$mode(string);
|
||||
str_init(&string_value, "");
|
||||
>>
|
||||
string: token string /"/ <<
|
||||
$$ = JSONValue_new(JSON_STRING);
|
||||
$$->string = string_value;
|
||||
$mode(default);
|
||||
>>
|
||||
string: /\\"/ <<
|
||||
str_append(&string_value, "\"");
|
||||
>>
|
||||
string: /\\\\/ <<
|
||||
str_append(&string_value, "\\");
|
||||
>>
|
||||
string: /\\\// <<
|
||||
str_append(&string_value, "/");
|
||||
>>
|
||||
string: /\\b/ <<
|
||||
str_append(&string_value, "\b");
|
||||
>>
|
||||
string: /\\f/ <<
|
||||
str_append(&string_value, "\f");
|
||||
>>
|
||||
string: /\\n/ <<
|
||||
str_append(&string_value, "\n");
|
||||
>>
|
||||
string: /\\r/ <<
|
||||
str_append(&string_value, "\r");
|
||||
>>
|
||||
string: /\\t/ <<
|
||||
str_append(&string_value, "\t");
|
||||
>>
|
||||
string: /\\u[0-9a-fA-F]{4}/ <<
|
||||
/* Not actually going to encode the code point for this example... */
|
||||
char s[] = {'{', match[2], match[3], match[4], match[5], '}', 0};
|
||||
str_append(&string_value, s);
|
||||
>>
|
||||
string: /[^\\]/ <<
|
||||
char s[] = {match[0], 0};
|
||||
str_append(&string_value, s);
|
||||
>>
|
||||
Start -> Value <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> string <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> number <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> Object <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> Array <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> true <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> false <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Value -> null <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Object -> lbrace rbrace <<
|
||||
$$ = JSONObject_new();
|
||||
>>
|
||||
Object -> lbrace KeyValues rbrace <<
|
||||
$$ = $2;
|
||||
>>
|
||||
KeyValues -> KeyValue <<
|
||||
$$ = $1;
|
||||
>>
|
||||
KeyValues -> KeyValues comma KeyValue <<
|
||||
JSONObject_append($1, $3->object.entries[0].name, $3->object.entries[0].value);
|
||||
$$ = $1;
|
||||
>>
|
||||
KeyValue -> string colon Value <<
|
||||
$$ = JSONObject_new();
|
||||
JSONObject_append($$, str_cstr(&$1->string), $3);
|
||||
>>
|
||||
Array -> lbracket rbracket <<
|
||||
$$ = JSONArray_new();
|
||||
>>
|
||||
Array -> lbracket Values rbracket <<
|
||||
$$ = $2;
|
||||
>>
|
||||
Values -> Value <<
|
||||
$$ = $1;
|
||||
>>
|
||||
Values -> Values comma Value <<
|
||||
JSONArray_append($1, $3);
|
||||
$$ = $1;
|
||||
>>
|
||||
@ -1,64 +0,0 @@
|
||||
#include "json_types.h"
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "testutils.h"
|
||||
|
||||
JSONValue * JSONValue_new(size_t id)
|
||||
{
|
||||
JSONValue * jv = calloc(1, sizeof(JSONValue));
|
||||
jv->id = id;
|
||||
return jv;
|
||||
}
|
||||
|
||||
JSONValue * JSONObject_new(void)
|
||||
{
|
||||
JSONValue * jv = JSONValue_new(JSON_OBJECT);
|
||||
jv->object.size = 0u;
|
||||
return jv;
|
||||
}
|
||||
|
||||
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value)
|
||||
{
|
||||
size_t const size = object->object.size;
|
||||
for (size_t i = 0u; i < size; i++)
|
||||
{
|
||||
if (strcmp(name, object->object.entries[i].name) == 0)
|
||||
{
|
||||
object->object.entries[i].value = value;
|
||||
return;
|
||||
}
|
||||
}
|
||||
size_t const new_size = size + 1;
|
||||
void * new_entries = malloc(sizeof(object->object.entries[0]) * new_size);
|
||||
if (size > 0)
|
||||
{
|
||||
memcpy(new_entries, object->object.entries, size * sizeof(object->object.entries[0]));
|
||||
free(object->object.entries);
|
||||
}
|
||||
object->object.entries = new_entries;
|
||||
object->object.entries[size].name = name;
|
||||
object->object.entries[size].value = value;
|
||||
object->object.size = new_size;
|
||||
}
|
||||
|
||||
JSONValue * JSONArray_new(void)
|
||||
{
|
||||
JSONValue * jv = JSONValue_new(JSON_ARRAY);
|
||||
jv->array.size = 0u;
|
||||
return jv;
|
||||
}
|
||||
|
||||
void JSONArray_append(JSONValue * array, JSONValue * value)
|
||||
{
|
||||
size_t const size = array->array.size;
|
||||
size_t const new_size = size + 1;
|
||||
JSONValue ** new_entries = malloc(sizeof(JSONValue *) * new_size);
|
||||
if (array->array.size > 0)
|
||||
{
|
||||
memcpy(new_entries, array->array.entries, sizeof(JSONValue *) * size);
|
||||
free(array->array.entries);
|
||||
}
|
||||
array->array.entries = new_entries;
|
||||
array->array.entries[size] = value;
|
||||
array->array.size = new_size;
|
||||
}
|
||||
@ -1,46 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <stddef.h>
|
||||
#include "testutils.h"
|
||||
|
||||
#define JSON_OBJECT 0u
|
||||
#define JSON_ARRAY 1u
|
||||
#define JSON_NUMBER 2u
|
||||
#define JSON_STRING 3u
|
||||
#define JSON_TRUE 4u
|
||||
#define JSON_FALSE 5u
|
||||
#define JSON_NULL 6u
|
||||
|
||||
typedef struct JSONValue_s
|
||||
{
|
||||
size_t id;
|
||||
union
|
||||
{
|
||||
struct
|
||||
{
|
||||
size_t size;
|
||||
struct
|
||||
{
|
||||
char const * name;
|
||||
struct JSONValue_s * value;
|
||||
} * entries;
|
||||
} object;
|
||||
struct
|
||||
{
|
||||
size_t size;
|
||||
struct JSONValue_s ** entries;
|
||||
} array;
|
||||
double number;
|
||||
str_t string;
|
||||
};
|
||||
} JSONValue;
|
||||
|
||||
JSONValue * JSONValue_new(size_t id);
|
||||
|
||||
JSONValue * JSONObject_new(void);
|
||||
|
||||
void JSONObject_append(JSONValue * object, char const * name, JSONValue * value);
|
||||
|
||||
JSONValue * JSONArray_new(void);
|
||||
|
||||
void JSONArray_append(JSONValue * array, JSONValue * value);
|
||||
@ -5,6 +5,7 @@ class Propane
|
||||
# Comment line
|
||||
|
||||
module a.b;
|
||||
class Foobar;
|
||||
ptype XYZ * ;
|
||||
|
||||
token while;
|
||||
@ -29,51 +30,51 @@ B -> <<
|
||||
>>
|
||||
EOF
|
||||
grammar = Grammar.new(input)
|
||||
expect(grammar.classname).to eq "Foobar"
|
||||
expect(grammar.modulename).to eq "a.b"
|
||||
expect(grammar.ptype).to eq "XYZ *"
|
||||
expect(grammar.ptypes).to eq("default" => "XYZ *")
|
||||
expect(grammar.prefix).to eq "p_"
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "while"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 6
|
||||
expect(o.line_number).to eq 7
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "while"
|
||||
expect(o.line_number).to eq 6
|
||||
expect(o.line_number).to eq 7
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "id"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 9
|
||||
expect(o.line_number).to eq 10
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "[a-zA-Z_][a-zA-Z_0-9]*"
|
||||
expect(o.line_number).to eq 9
|
||||
expect(o.line_number).to eq 10
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "token_with_code"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 11
|
||||
expect(o.line_number).to eq 12
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.pattern).to eq "token_with_code"
|
||||
expect(o.line_number).to eq 11
|
||||
expect(o.line_number).to eq 12
|
||||
expect(o.code).to eq "Code for the token\n"
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "token_with_no_pattern"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 15
|
||||
expect(o.line_number).to eq 16
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to be_nil
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "\\s+"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.line_number).to eq 17
|
||||
expect(o.line_number).to eq 18
|
||||
expect(o.token).to be_nil
|
||||
expect(o.code).to be_nil
|
||||
|
||||
@ -82,19 +83,19 @@ EOF
|
||||
o = grammar.rules[0]
|
||||
expect(o.name).to eq "A"
|
||||
expect(o.components).to eq %w[B]
|
||||
expect(o.line_number).to eq 19
|
||||
expect(o.line_number).to eq 20
|
||||
expect(o.code).to eq " a = 42;\n"
|
||||
|
||||
o = grammar.rules[1]
|
||||
expect(o.name).to eq "B"
|
||||
expect(o.components).to eq %w[C while id]
|
||||
expect(o.line_number).to eq 22
|
||||
expect(o.line_number).to eq 23
|
||||
expect(o.code).to be_nil
|
||||
|
||||
o = grammar.rules[2]
|
||||
expect(o.name).to eq "B"
|
||||
expect(o.components).to eq []
|
||||
expect(o.line_number).to eq 23
|
||||
expect(o.line_number).to eq 24
|
||||
expect(o.code).to eq " b = 0;\n"
|
||||
end
|
||||
|
||||
@ -110,11 +111,8 @@ token code2 <<
|
||||
>>
|
||||
|
||||
tokenid token_with_no_pattern;
|
||||
|
||||
prefix myparser_;
|
||||
EOF
|
||||
grammar = Grammar.new(input)
|
||||
expect(grammar.prefix).to eq "myparser_"
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "code1"}
|
||||
expect(o).to_not be_nil
|
||||
@ -151,30 +149,30 @@ EOF
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to be_empty
|
||||
expect(o.mode).to be_nil
|
||||
|
||||
o = grammar.tokens.find {|token| token.name == "b"}
|
||||
expect(o).to_not be_nil
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.token == o}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to eq Set["m1"]
|
||||
expect(o.mode).to eq "m1"
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "foo"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to be_empty
|
||||
expect(o.mode).to be_nil
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "bar"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to eq Set["m2"]
|
||||
expect(o.mode).to eq "m2"
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "q"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to be_empty
|
||||
expect(o.mode).to be_nil
|
||||
|
||||
o = grammar.patterns.find {|pattern| pattern.pattern == "r"}
|
||||
expect(o).to_not be_nil
|
||||
expect(o.modes).to eq Set["m3"]
|
||||
expect(o.mode).to eq "m3"
|
||||
end
|
||||
|
||||
it "allows assigning ptypes to tokens and rules" do
|
||||
|
||||
@ -126,74 +126,6 @@ EOF
|
||||
]
|
||||
expect(run(<<EOF, ";")).to eq expected
|
||||
token semicolon /;/;
|
||||
EOF
|
||||
end
|
||||
|
||||
it "matches a negated character class" do
|
||||
expected = [
|
||||
["pattern", "/abc/"],
|
||||
]
|
||||
expect(run(<<EOF, "/abc/")).to eq expected
|
||||
token pattern /\\/[^\\s]*\\//;
|
||||
EOF
|
||||
end
|
||||
|
||||
it "matches special character classes " do
|
||||
expected = [
|
||||
["a", "abc123_FOO"],
|
||||
]
|
||||
expect(run(<<EOF, "abc123_FOO")).to eq expected
|
||||
token a /\\w+/;
|
||||
EOF
|
||||
expected = [
|
||||
["b", "FROG*%$#"],
|
||||
]
|
||||
expect(run(<<EOF, "FROG*%$#")).to eq expected
|
||||
token b /FROG\\D{1,4}/;
|
||||
EOF
|
||||
expected = [
|
||||
["c", "$883366"],
|
||||
]
|
||||
expect(run(<<EOF, "$883366")).to eq expected
|
||||
token c /$\\d+/;
|
||||
EOF
|
||||
expected = [
|
||||
["d", "^&$@"],
|
||||
]
|
||||
expect(run(<<EOF, "^&$@")).to eq expected
|
||||
token d /^\\W+/;
|
||||
EOF
|
||||
expected = [
|
||||
["a", "abc123_FOO"],
|
||||
[nil, " "],
|
||||
["b", "FROG*%$#"],
|
||||
[nil, " "],
|
||||
["c", "$883366"],
|
||||
[nil, " "],
|
||||
["d", "^&$@"],
|
||||
]
|
||||
expect(run(<<EOF, "abc123_FOO FROG*%$# $883366 ^&$@")).to eq expected
|
||||
token a /\\w+/;
|
||||
token b /FROG\\D{1,4}/;
|
||||
token c /$\\d+/;
|
||||
token d /^\\W+/;
|
||||
drop /\\s+/;
|
||||
EOF
|
||||
end
|
||||
|
||||
it "matches a negated character class with a nested inner negated character class" do
|
||||
expected = [
|
||||
["t", "$&*"],
|
||||
]
|
||||
expect(run(<<EOF, "$&*")).to eq expected
|
||||
token t /[^%\\W]+/;
|
||||
EOF
|
||||
end
|
||||
|
||||
it "\\s matches a newline" do
|
||||
expected = [["s", "\n"]]
|
||||
expect(run(<<EOF, "\n")).to eq expected
|
||||
token s /\\s/;
|
||||
EOF
|
||||
end
|
||||
end
|
||||
|
||||
@ -2,14 +2,14 @@ class Propane
|
||||
RSpec.describe Regex do
|
||||
|
||||
it "parses an empty expression" do
|
||||
regex = Regex.new("", 1)
|
||||
regex = Regex.new("")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0].size).to eq 0
|
||||
end
|
||||
|
||||
it "parses a single character unit expression" do
|
||||
regex = Regex.new("a", 1)
|
||||
regex = Regex.new("a")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -19,7 +19,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a group with a single character unit expression" do
|
||||
regex = Regex.new("(a)", 1)
|
||||
regex = Regex.new("(a)")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -33,7 +33,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a *" do
|
||||
regex = Regex.new("a*", 1)
|
||||
regex = Regex.new("a*")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -47,7 +47,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a +" do
|
||||
regex = Regex.new("a+", 1)
|
||||
regex = Regex.new("a+")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -61,7 +61,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a ?" do
|
||||
regex = Regex.new("a?", 1)
|
||||
regex = Regex.new("a?")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -75,7 +75,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a multiplicity count" do
|
||||
regex = Regex.new("a{5}", 1)
|
||||
regex = Regex.new("a{5}")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -89,7 +89,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a minimum-only multiplicity count" do
|
||||
regex = Regex.new("a{5,}", 1)
|
||||
regex = Regex.new("a{5,}")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -103,7 +103,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a minimum and maximum multiplicity count" do
|
||||
regex = Regex.new("a{5,8}", 1)
|
||||
regex = Regex.new("a{5,8}")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -118,7 +118,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses an escaped *" do
|
||||
regex = Regex.new("a\\*", 1)
|
||||
regex = Regex.new("a\\*")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -131,7 +131,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses an escaped +" do
|
||||
regex = Regex.new("a\\+", 1)
|
||||
regex = Regex.new("a\\+")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -144,7 +144,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses an escaped \\" do
|
||||
regex = Regex.new("\\\\d", 1)
|
||||
regex = Regex.new("\\\\d")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -157,7 +157,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a character class" do
|
||||
regex = Regex.new("[a-z_]", 1)
|
||||
regex = Regex.new("[a-z_]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -175,7 +175,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a negated character class" do
|
||||
regex = Regex.new("[^xyz]", 1)
|
||||
regex = Regex.new("[^xyz]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -189,25 +189,8 @@ class Propane
|
||||
expect(ccu[0].first).to eq "x".ord
|
||||
end
|
||||
|
||||
it "parses a negated character class with inner character classes" do
|
||||
regex = Regex.new("[^x\\sz]", 1)
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
seq_unit = regex.unit.alternates[0]
|
||||
expect(seq_unit.size).to eq 1
|
||||
expect(seq_unit[0]).to be_a Regex::CharacterClassUnit
|
||||
ccu = seq_unit[0]
|
||||
expect(ccu.negate).to be_truthy
|
||||
expect(ccu.size).to eq 8
|
||||
expect(ccu[0]).to be_a Regex::CharacterRangeUnit
|
||||
expect(ccu[0].first).to eq "x".ord
|
||||
expect(ccu[1].first).to eq " ".ord
|
||||
expect(ccu[7].first).to eq "z".ord
|
||||
end
|
||||
|
||||
it "parses - as a plain character at beginning of a character class" do
|
||||
regex = Regex.new("[-9]", 1)
|
||||
regex = Regex.new("[-9]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -221,7 +204,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses - as a plain character at end of a character class" do
|
||||
regex = Regex.new("[0-]", 1)
|
||||
regex = Regex.new("[0-]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -237,7 +220,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses - as a plain character at beginning of a negated character class" do
|
||||
regex = Regex.new("[^-9]", 1)
|
||||
regex = Regex.new("[^-9]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -252,7 +235,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses . as a plain character in a character class" do
|
||||
regex = Regex.new("[.]", 1)
|
||||
regex = Regex.new("[.]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -267,7 +250,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses - as a plain character when escaped in middle of character class" do
|
||||
regex = Regex.new("[0\\-9]", 1)
|
||||
regex = Regex.new("[0\\-9]")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -286,7 +269,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses alternates" do
|
||||
regex = Regex.new("ab|c", 1)
|
||||
regex = Regex.new("ab|c")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 2
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -296,7 +279,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses a ." do
|
||||
regex = Regex.new("a.b", 1)
|
||||
regex = Regex.new("a.b")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 1
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
@ -307,7 +290,7 @@ class Propane
|
||||
end
|
||||
|
||||
it "parses something complex" do
|
||||
regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+", 1)
|
||||
regex = Regex.new("(a|)*|[^^]|\\|v|[x-y]+")
|
||||
expect(regex.unit).to be_a Regex::AlternatesUnit
|
||||
expect(regex.unit.alternates.size).to eq 4
|
||||
expect(regex.unit.alternates[0]).to be_a Regex::SequenceUnit
|
||||
|
||||
1399
spec/propane_spec.rb
File diff suppressed because it is too large
@ -1,27 +1,11 @@
|
||||
unless ENV["dist_specs"]
|
||||
require "bundler/setup"
|
||||
require "simplecov"
|
||||
require "bundler/setup"
|
||||
require "propane"
|
||||
|
||||
SimpleCov.start do
|
||||
add_filter "/spec/"
|
||||
add_filter "/.bundle/"
|
||||
if ENV["partial_specs"]
|
||||
command_name "RSpec-partial"
|
||||
else
|
||||
command_name "RSpec"
|
||||
end
|
||||
project_name "Propane"
|
||||
merge_timeout 3600
|
||||
end
|
||||
RSpec.configure do |config|
|
||||
# Enable flags like --only-failures and --next-failure
|
||||
config.example_status_persistence_file_path = ".rspec_status"
|
||||
|
||||
RSpec.configure do |config|
|
||||
# Enable flags like --only-failures and --next-failure
|
||||
config.example_status_persistence_file_path = ".rspec_status"
|
||||
|
||||
config.expect_with :rspec do |c|
|
||||
c.syntax = :expect
|
||||
end
|
||||
config.expect_with :rspec do |c|
|
||||
c.syntax = :expect
|
||||
end
|
||||
end
|
||||
|
||||
require "propane"
|
||||
|
||||
@ -1,55 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
Start * start = p_result(&context);
|
||||
assert(start->pItems1 != NULL);
|
||||
assert(start->pItems != NULL);
|
||||
Items * items = start->pItems;
|
||||
assert(items->pItem != NULL);
|
||||
assert(items->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_a, items->pItem->pToken1->token);
|
||||
assert_eq(11, items->pItem->pToken1->pvalue);
|
||||
assert(items->pItemsMore != NULL);
|
||||
ItemsMore * itemsmore = items->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore != NULL);
|
||||
itemsmore = itemsmore->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore == NULL);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems == NULL);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems != NULL);
|
||||
assert(start->pItems->pItem != NULL);
|
||||
assert(start->pItems->pItem->pDual != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne2 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne1 == NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,57 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
Start * start = p_result(&context);
|
||||
assert(start.pItems1 !is null);
|
||||
assert(start.pItems !is null);
|
||||
Items * items = start.pItems;
|
||||
assert(items.pItem !is null);
|
||||
assert(items.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_a, items.pItem.pToken1.token);
|
||||
assert_eq(11, items.pItem.pToken1.pvalue);
|
||||
assert(items.pItemsMore !is null);
|
||||
ItemsMore * itemsmore = items.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore !is null);
|
||||
itemsmore = itemsmore.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore is null);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems is null);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems !is null);
|
||||
assert(start.pItems.pItem !is null);
|
||||
assert(start.pItems.pItem.pDual !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo1 !is null);
|
||||
assert(start.pItems.pItem.pDual.pOne2 !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo2 is null);
|
||||
assert(start.pItems.pItem.pDual.pOne1 is null);
|
||||
}
|
||||
@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(TOKEN_a, start->first->pToken->token);
assert_eq(TOKEN_b, start->second->pToken->token);
assert_eq(TOKEN_c, start->third->pToken->token);

return 0;
}
@ -1,21 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "\na\nb\nc";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);

assert_eq(TOKEN_a, start.first.pToken.token);
assert_eq(TOKEN_b, start.second.pToken.token);
assert_eq(TOKEN_c, start.third.pToken.token);
}
@ -1,102 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "\na\n bb ccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(2, start->pT1->pToken->position.row);
|
||||
assert_eq(1, start->pT1->pToken->position.col);
|
||||
assert_eq(2, start->pT1->pToken->end_position.row);
|
||||
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(3, start->pT1->pA->position.row);
|
||||
assert_eq(3, start->pT1->pA->position.col);
|
||||
assert_eq(3, start->pT1->pA->end_position.row);
|
||||
assert_eq(8, start->pT1->pA->end_position.col);
|
||||
assert_eq(2, start->pT1->position.row);
|
||||
assert_eq(1, start->pT1->position.col);
|
||||
assert_eq(3, start->pT1->end_position.row);
|
||||
assert_eq(8, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(2, start->position.row);
|
||||
assert_eq(1, start->position.col);
|
||||
assert_eq(3, start->end_position.row);
|
||||
assert_eq(8, start->end_position.col);
|
||||
|
||||
input = "a\nbb";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start->pT1->pToken->position.row);
|
||||
assert_eq(1, start->pT1->pToken->position.col);
|
||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
||||
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(2, start->pT1->pA->position.row);
|
||||
assert_eq(1, start->pT1->pA->position.col);
|
||||
assert_eq(2, start->pT1->pA->end_position.row);
|
||||
assert_eq(2, start->pT1->pA->end_position.col);
|
||||
assert_eq(1, start->pT1->position.row);
|
||||
assert_eq(1, start->pT1->position.col);
|
||||
assert_eq(2, start->pT1->end_position.row);
|
||||
assert_eq(2, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(1, start->position.row);
|
||||
assert_eq(1, start->position.col);
|
||||
assert_eq(2, start->end_position.row);
|
||||
assert_eq(2, start->end_position.col);
|
||||
|
||||
input = "a\nc\nc";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start->pT1->pToken->position.row);
|
||||
assert_eq(1, start->pT1->pToken->position.col);
|
||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
||||
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||
assert(p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(2, start->pT1->pA->position.row);
|
||||
assert_eq(1, start->pT1->pA->position.col);
|
||||
assert_eq(3, start->pT1->pA->end_position.row);
|
||||
assert_eq(1, start->pT1->pA->end_position.col);
|
||||
assert_eq(1, start->pT1->position.row);
|
||||
assert_eq(1, start->pT1->position.col);
|
||||
assert_eq(3, start->pT1->end_position.row);
|
||||
assert_eq(1, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(1, start->position.row);
|
||||
assert_eq(1, start->position.col);
|
||||
assert_eq(3, start->end_position.row);
|
||||
assert_eq(1, start->end_position.col);
|
||||
|
||||
input = "a";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start->pT1->pToken->position.row);
|
||||
assert_eq(1, start->pT1->pToken->position.col);
|
||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
||||
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||
assert(!p_position_valid(start->pT1->pA->position));
|
||||
assert_eq(1, start->pT1->position.row);
|
||||
assert_eq(1, start->pT1->position.col);
|
||||
assert_eq(1, start->pT1->end_position.row);
|
||||
assert_eq(1, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(1, start->position.row);
|
||||
assert_eq(1, start->position.col);
|
||||
assert_eq(1, start->end_position.row);
|
||||
assert_eq(1, start->end_position.col);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,104 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "\na\n bb ccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(2, start.pT1.pToken.position.row);
|
||||
assert_eq(1, start.pT1.pToken.position.col);
|
||||
assert_eq(2, start.pT1.pToken.end_position.row);
|
||||
assert_eq(1, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(3, start.pT1.pA.position.row);
|
||||
assert_eq(3, start.pT1.pA.position.col);
|
||||
assert_eq(3, start.pT1.pA.end_position.row);
|
||||
assert_eq(8, start.pT1.pA.end_position.col);
|
||||
assert_eq(2, start.pT1.position.row);
|
||||
assert_eq(1, start.pT1.position.col);
|
||||
assert_eq(3, start.pT1.end_position.row);
|
||||
assert_eq(8, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(2, start.position.row);
|
||||
assert_eq(1, start.position.col);
|
||||
assert_eq(3, start.end_position.row);
|
||||
assert_eq(8, start.end_position.col);
|
||||
|
||||
input = "a\nbb";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start.pT1.pToken.position.row);
|
||||
assert_eq(1, start.pT1.pToken.position.col);
|
||||
assert_eq(1, start.pT1.pToken.end_position.row);
|
||||
assert_eq(1, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(2, start.pT1.pA.position.row);
|
||||
assert_eq(1, start.pT1.pA.position.col);
|
||||
assert_eq(2, start.pT1.pA.end_position.row);
|
||||
assert_eq(2, start.pT1.pA.end_position.col);
|
||||
assert_eq(1, start.pT1.position.row);
|
||||
assert_eq(1, start.pT1.position.col);
|
||||
assert_eq(2, start.pT1.end_position.row);
|
||||
assert_eq(2, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(1, start.position.row);
|
||||
assert_eq(1, start.position.col);
|
||||
assert_eq(2, start.end_position.row);
|
||||
assert_eq(2, start.end_position.col);
|
||||
|
||||
input = "a\nc\nc";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start.pT1.pToken.position.row);
|
||||
assert_eq(1, start.pT1.pToken.position.col);
|
||||
assert_eq(1, start.pT1.pToken.end_position.row);
|
||||
assert_eq(1, start.pT1.pToken.end_position.col);
|
||||
assert(start.pT1.pA.position.valid);
|
||||
assert_eq(2, start.pT1.pA.position.row);
|
||||
assert_eq(1, start.pT1.pA.position.col);
|
||||
assert_eq(3, start.pT1.pA.end_position.row);
|
||||
assert_eq(1, start.pT1.pA.end_position.col);
|
||||
assert_eq(1, start.pT1.position.row);
|
||||
assert_eq(1, start.pT1.position.col);
|
||||
assert_eq(3, start.pT1.end_position.row);
|
||||
assert_eq(1, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(1, start.position.row);
|
||||
assert_eq(1, start.position.col);
|
||||
assert_eq(3, start.end_position.row);
|
||||
assert_eq(1, start.end_position.col);
|
||||
|
||||
input = "a";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(1, start.pT1.pToken.position.row);
|
||||
assert_eq(1, start.pT1.pToken.position.col);
|
||||
assert_eq(1, start.pT1.pToken.end_position.row);
|
||||
assert_eq(1, start.pT1.pToken.end_position.col);
|
||||
assert(!start.pT1.pA.position.valid);
|
||||
assert_eq(1, start.pT1.position.row);
|
||||
assert_eq(1, start.pT1.position.col);
|
||||
assert_eq(1, start.pT1.end_position.row);
|
||||
assert_eq(1, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(1, start.position.row);
|
||||
assert_eq(1, start.position.col);
|
||||
assert_eq(1, start.end_position.row);
|
||||
assert_eq(1, start.end_position.col);
|
||||
}
|
||||
@ -1,415 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main(int argc, char * argv[])
|
||||
{
|
||||
const char * input =
|
||||
"# 0\n"
|
||||
"def byte_val() -> byte\n"
|
||||
"{\n"
|
||||
" return 0x42;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 1\n"
|
||||
"def short_val() -> short\n"
|
||||
"{\n"
|
||||
" return 0x4242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 2\n"
|
||||
"def int_val() -> int\n"
|
||||
"{\n"
|
||||
" return 0x42424242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 3\n"
|
||||
"def long_val() -> long\n"
|
||||
"{\n"
|
||||
" return 0x4242_4242_4242_4242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 4\n"
|
||||
"def ssize_t_val() -> ssize_t\n"
|
||||
"{\n"
|
||||
" return 0x42424242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 5\n"
|
||||
"def byte_to_short() -> short\n"
|
||||
"{\n"
|
||||
" return byte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 6\n"
|
||||
"def byte_to_int() -> int\n"
|
||||
"{\n"
|
||||
" return byte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 7\n"
|
||||
"def byte_to_long() -> long\n"
|
||||
"{\n"
|
||||
" return byte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 8\n"
|
||||
"def byte_to_ssize_t() -> ssize_t\n"
|
||||
"{\n"
|
||||
" return byte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 9\n"
|
||||
"def short_to_byte() -> byte\n"
|
||||
"{\n"
|
||||
" return short_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 10\n"
|
||||
"def short_to_int() -> int\n"
|
||||
"{\n"
|
||||
" return short_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 11\n"
|
||||
"def short_to_long() -> long\n"
|
||||
"{\n"
|
||||
" return short_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 12\n"
|
||||
"def short_to_ssize_t() -> ssize_t\n"
|
||||
"{\n"
|
||||
" return short_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 13\n"
|
||||
"def int_to_byte() -> byte\n"
|
||||
"{\n"
|
||||
" return int_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 14\n"
|
||||
"def int_to_short() -> short\n"
|
||||
"{\n"
|
||||
" return int_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 15\n"
|
||||
"def int_to_long() -> long\n"
|
||||
"{\n"
|
||||
" return int_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 16\n"
|
||||
"def int_to_ssize_t() -> ssize_t\n"
|
||||
"{\n"
|
||||
" return int_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 17\n"
|
||||
"def long_to_byte() -> byte\n"
|
||||
"{\n"
|
||||
" return long_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 18\n"
|
||||
"def long_to_short() -> short\n"
|
||||
"{\n"
|
||||
" return long_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 19\n"
|
||||
"def long_to_int() -> int\n"
|
||||
"{\n"
|
||||
" return long_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 20\n"
|
||||
"def long_to_ssize_t() -> ssize_t\n"
|
||||
"{\n"
|
||||
" return long_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 21\n"
|
||||
"def ssize_t_to_byte() -> byte\n"
|
||||
"{\n"
|
||||
" return ssize_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 22\n"
|
||||
"def ssize_t_to_short() -> short\n"
|
||||
"{\n"
|
||||
" return ssize_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 23\n"
|
||||
"def ssize_t_to_int() -> int\n"
|
||||
"{\n"
|
||||
" return ssize_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 24\n"
|
||||
"def ssize_t_to_long() -> long\n"
|
||||
"{\n"
|
||||
" return ssize_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 25\n"
|
||||
"def ubyte_val() -> ubyte\n"
|
||||
"{\n"
|
||||
" return 0x42;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 26\n"
|
||||
"def ushort_val() -> ushort\n"
|
||||
"{\n"
|
||||
" return 0x4242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 27\n"
|
||||
"def uint_val() -> uint\n"
|
||||
"{\n"
|
||||
" return 0x42424242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 28\n"
|
||||
"def ulong_val() -> ulong\n"
|
||||
"{\n"
|
||||
" return 0x4242_4242_4242_4242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 29\n"
|
||||
"def size_t_val() -> size_t\n"
|
||||
"{\n"
|
||||
" return 0x42424242;\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 30\n"
|
||||
"def ubyte_to_ushort() -> ushort\n"
|
||||
"{\n"
|
||||
" return ubyte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 31\n"
|
||||
"def ubyte_to_uint() -> uint\n"
|
||||
"{\n"
|
||||
" return ubyte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 32\n"
|
||||
"def ubyte_to_ulong() -> ulong\n"
|
||||
"{\n"
|
||||
" return ubyte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 33\n"
|
||||
"def ubyte_to_size_t() -> size_t\n"
|
||||
"{\n"
|
||||
" return ubyte_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 34\n"
|
||||
"def ushort_to_ubyte() -> ubyte\n"
|
||||
"{\n"
|
||||
" return ushort_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 35\n"
|
||||
"def ushort_to_uint() -> uint\n"
|
||||
"{\n"
|
||||
" return ushort_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 36\n"
|
||||
"def ushort_to_ulong() -> ulong\n"
|
||||
"{\n"
|
||||
" return ushort_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 37\n"
|
||||
"def ushort_to_size_t() -> size_t\n"
|
||||
"{\n"
|
||||
" return ushort_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 38\n"
|
||||
"def uint_to_ubyte() -> ubyte\n"
|
||||
"{\n"
|
||||
" return uint_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 39\n"
|
||||
"def uint_to_ushort() -> ushort\n"
|
||||
"{\n"
|
||||
" return uint_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 40\n"
|
||||
"def uint_to_ulong() -> ulong\n"
|
||||
"{\n"
|
||||
" return uint_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 41\n"
|
||||
"def uint_to_size_t() -> size_t\n"
|
||||
"{\n"
|
||||
" return uint_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 42\n"
|
||||
"def ulong_to_ubyte() -> ubyte\n"
|
||||
"{\n"
|
||||
" return ulong_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 43\n"
|
||||
"def ulong_to_ushort() -> ushort\n"
|
||||
"{\n"
|
||||
" return ulong_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 44\n"
|
||||
"def ulong_to_uint() -> uint\n"
|
||||
"{\n"
|
||||
" return ulong_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 45\n"
|
||||
"def ulong_to_size_t() -> size_t\n"
|
||||
"{\n"
|
||||
" return ulong_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 46\n"
|
||||
"def size_t_to_ubyte() -> ubyte\n"
|
||||
"{\n"
|
||||
" return size_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 47\n"
|
||||
"def size_t_to_ushort() -> ushort\n"
|
||||
"{\n"
|
||||
" return size_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 48\n"
|
||||
"def size_t_to_int() -> int\n"
|
||||
"{\n"
|
||||
" return size_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 49\n"
|
||||
"def size_t_to_ulong() -> ulong\n"
|
||||
"{\n"
|
||||
" return size_t_val();\n"
|
||||
"}\n"
|
||||
"\n"
|
||||
"# 50\n"
|
||||
"def main() -> int\n"
|
||||
"{\n"
|
||||
" return int_val();\n"
|
||||
"}\n";
|
||||
struct
|
||||
{
|
||||
const char * name;
|
||||
p_token_t token;
|
||||
} expected[] = {
|
||||
{"byte_val", TOKEN_byte},
|
||||
{"short_val", TOKEN_short},
|
||||
{"int_val", TOKEN_int},
|
||||
{"long_val", TOKEN_long},
|
||||
{"ssize_t_val", TOKEN_ssize_t},
|
||||
{"byte_to_short", TOKEN_short},
|
||||
{"byte_to_int", TOKEN_int},
|
||||
{"byte_to_long", TOKEN_long},
|
||||
{"byte_to_ssize_t", TOKEN_ssize_t},
|
||||
{"short_to_byte", TOKEN_byte},
|
||||
{"short_to_int", TOKEN_int},
|
||||
{"short_to_long", TOKEN_long},
|
||||
{"short_to_ssize_t", TOKEN_ssize_t},
|
||||
{"int_to_byte", TOKEN_byte},
|
||||
{"int_to_short", TOKEN_short},
|
||||
{"int_to_long", TOKEN_long},
|
||||
{"int_to_ssize_t", TOKEN_ssize_t},
|
||||
{"long_to_byte", TOKEN_byte},
|
||||
{"long_to_short", TOKEN_short},
|
||||
{"long_to_int", TOKEN_int},
|
||||
{"long_to_ssize_t", TOKEN_ssize_t},
|
||||
{"ssize_t_to_byte", TOKEN_byte},
|
||||
{"ssize_t_to_short", TOKEN_short},
|
||||
{"ssize_t_to_int", TOKEN_int},
|
||||
{"ssize_t_to_long", TOKEN_long},
|
||||
{"ubyte_val", TOKEN_ubyte},
|
||||
{"ushort_val", TOKEN_ushort},
|
||||
{"uint_val", TOKEN_uint},
|
||||
{"ulong_val", TOKEN_ulong},
|
||||
{"size_t_val", TOKEN_size_t},
|
||||
{"ubyte_to_ushort", TOKEN_ushort},
|
||||
{"ubyte_to_uint", TOKEN_uint},
|
||||
{"ubyte_to_ulong", TOKEN_ulong},
|
||||
{"ubyte_to_size_t", TOKEN_size_t},
|
||||
{"ushort_to_ubyte", TOKEN_ubyte},
|
||||
{"ushort_to_uint", TOKEN_uint},
|
||||
{"ushort_to_ulong", TOKEN_ulong},
|
||||
{"ushort_to_size_t", TOKEN_size_t},
|
||||
{"uint_to_ubyte", TOKEN_ubyte},
|
||||
{"uint_to_ushort", TOKEN_ushort},
|
||||
{"uint_to_ulong", TOKEN_ulong},
|
||||
{"uint_to_size_t", TOKEN_size_t},
|
||||
{"ulong_to_ubyte", TOKEN_ubyte},
|
||||
{"ulong_to_ushort", TOKEN_ushort},
|
||||
{"ulong_to_uint", TOKEN_uint},
|
||||
{"ulong_to_size_t", TOKEN_size_t},
|
||||
{"size_t_to_ubyte", TOKEN_ubyte},
|
||||
{"size_t_to_ushort", TOKEN_ushort},
|
||||
{"size_t_to_int", TOKEN_int},
|
||||
{"size_t_to_ulong", TOKEN_ulong},
|
||||
{"main", TOKEN_int},
|
||||
};
|
||||
p_context_t context;
|
||||
p_context_init(&context, (const uint8_t *)input, strlen(input));
|
||||
size_t result = p_parse(&context);
|
||||
assert_eq(P_SUCCESS, result);
|
||||
PModule * pmod = p_result(&context);
|
||||
PModuleItems * pmis = pmod->pModuleItems;
|
||||
PFunctionDefinition ** pfds;
|
||||
size_t n_pfds = 0u;
|
||||
while (pmis != NULL)
|
||||
{
|
||||
PModuleItem * pmi = pmis->pModuleItem;
|
||||
if (pmi->pFunctionDefinition != NULL)
|
||||
{
|
||||
n_pfds++;
|
||||
}
|
||||
pmis = pmis->pModuleItems;
|
||||
}
|
||||
pfds = malloc(n_pfds * sizeof(PModuleItems *));
|
||||
pmis = pmod->pModuleItems;
|
||||
size_t pfd_i = n_pfds;
|
||||
while (pmis != NULL)
|
||||
{
|
||||
PModuleItem * pmi = pmis->pModuleItem;
|
||||
PFunctionDefinition * pfd = pmi->pFunctionDefinition;
|
||||
if (pfd != NULL)
|
||||
{
|
||||
pfd_i--;
|
||||
assert(pfd_i < n_pfds);
|
||||
pfds[pfd_i] = pfd;
|
||||
}
|
||||
pmis = pmis->pModuleItems;
|
||||
}
|
||||
assert_eq(51, n_pfds);
|
||||
for (size_t i = 0; i < n_pfds; i++)
|
||||
{
|
||||
if (strncmp(expected[i].name, (const char *)pfds[i]->name->pvalue.s, strlen(expected[i].name)) != 0 ||
|
||||
(expected[i].token != pfds[i]->returntype->pType->pTypeBase->pToken1->token))
|
||||
{
|
||||
fprintf(stderr, "Index %lu: expected %s/%u, got %u\n", i, expected[i].name, expected[i].token, pfds[i]->returntype->pType->pTypeBase->pToken1->token);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,408 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "
|
||||
# 0
|
||||
def byte_val() -> byte
|
||||
{
|
||||
return 0x42;
|
||||
}
|
||||
|
||||
# 1
|
||||
def short_val() -> short
|
||||
{
|
||||
return 0x4242;
|
||||
}
|
||||
|
||||
# 2
|
||||
def int_val() -> int
|
||||
{
|
||||
return 0x42424242;
|
||||
}
|
||||
|
||||
# 3
|
||||
def long_val() -> long
|
||||
{
|
||||
return 0x4242_4242_4242_4242;
|
||||
}
|
||||
|
||||
# 4
|
||||
def ssize_t_val() -> ssize_t
|
||||
{
|
||||
return 0x42424242;
|
||||
}
|
||||
|
||||
# 5
|
||||
def byte_to_short() -> short
|
||||
{
|
||||
return byte_val();
|
||||
}
|
||||
|
||||
# 6
|
||||
def byte_to_int() -> int
|
||||
{
|
||||
return byte_val();
|
||||
}
|
||||
|
||||
# 7
|
||||
def byte_to_long() -> long
|
||||
{
|
||||
return byte_val();
|
||||
}
|
||||
|
||||
# 8
|
||||
def byte_to_ssize_t() -> ssize_t
|
||||
{
|
||||
return byte_val();
|
||||
}
|
||||
|
||||
# 9
|
||||
def short_to_byte() -> byte
|
||||
{
|
||||
return short_val();
|
||||
}
|
||||
|
||||
# 10
|
||||
def short_to_int() -> int
|
||||
{
|
||||
return short_val();
|
||||
}
|
||||
|
||||
# 11
|
||||
def short_to_long() -> long
|
||||
{
|
||||
return short_val();
|
||||
}
|
||||
|
||||
# 12
|
||||
def short_to_ssize_t() -> ssize_t
|
||||
{
|
||||
return short_val();
|
||||
}
|
||||
|
||||
# 13
|
||||
def int_to_byte() -> byte
|
||||
{
|
||||
return int_val();
|
||||
}
|
||||
|
||||
# 14
|
||||
def int_to_short() -> short
|
||||
{
|
||||
return int_val();
|
||||
}
|
||||
|
||||
# 15
|
||||
def int_to_long() -> long
|
||||
{
|
||||
return int_val();
|
||||
}
|
||||
|
||||
# 16
|
||||
def int_to_ssize_t() -> ssize_t
|
||||
{
|
||||
return int_val();
|
||||
}
|
||||
|
||||
# 17
|
||||
def long_to_byte() -> byte
|
||||
{
|
||||
return long_val();
|
||||
}
|
||||
|
||||
# 18
|
||||
def long_to_short() -> short
|
||||
{
|
||||
return long_val();
|
||||
}
|
||||
|
||||
# 19
|
||||
def long_to_int() -> int
|
||||
{
|
||||
return long_val();
|
||||
}
|
||||
|
||||
# 20
|
||||
def long_to_ssize_t() -> ssize_t
|
||||
{
|
||||
return long_val();
|
||||
}
|
||||
|
||||
# 21
|
||||
def ssize_t_to_byte() -> byte
|
||||
{
|
||||
return ssize_t_val();
|
||||
}
|
||||
|
||||
# 22
|
||||
def ssize_t_to_short() -> short
|
||||
{
|
||||
return ssize_t_val();
|
||||
}
|
||||
|
||||
# 23
|
||||
def ssize_t_to_int() -> int
|
||||
{
|
||||
return ssize_t_val();
|
||||
}
|
||||
|
||||
# 24
|
||||
def ssize_t_to_long() -> long
|
||||
{
|
||||
return ssize_t_val();
|
||||
}
|
||||
|
||||
# 25
|
||||
def ubyte_val() -> ubyte
|
||||
{
|
||||
return 0x42;
|
||||
}
|
||||
|
||||
# 26
|
||||
def ushort_val() -> ushort
|
||||
{
|
||||
return 0x4242;
|
||||
}
|
||||
|
||||
# 27
|
||||
def uint_val() -> uint
|
||||
{
|
||||
return 0x42424242;
|
||||
}
|
||||
|
||||
# 28
|
||||
def ulong_val() -> ulong
|
||||
{
|
||||
return 0x4242_4242_4242_4242;
|
||||
}
|
||||
|
||||
# 29
|
||||
def size_t_val() -> size_t
|
||||
{
|
||||
return 0x42424242;
|
||||
}
|
||||
|
||||
# 30
|
||||
def ubyte_to_ushort() -> ushort
|
||||
{
|
||||
return ubyte_val();
|
||||
}
|
||||
|
||||
# 31
|
||||
def ubyte_to_uint() -> uint
|
||||
{
|
||||
return ubyte_val();
|
||||
}
|
||||
|
||||
# 32
|
||||
def ubyte_to_ulong() -> ulong
|
||||
{
|
||||
return ubyte_val();
|
||||
}
|
||||
|
||||
# 33
|
||||
def ubyte_to_size_t() -> size_t
|
||||
{
|
||||
return ubyte_val();
|
||||
}
|
||||
|
||||
# 34
|
||||
def ushort_to_ubyte() -> ubyte
|
||||
{
|
||||
return ushort_val();
|
||||
}
|
||||
|
||||
# 35
|
||||
def ushort_to_uint() -> uint
|
||||
{
|
||||
return ushort_val();
|
||||
}
|
||||
|
||||
# 36
|
||||
def ushort_to_ulong() -> ulong
|
||||
{
|
||||
return ushort_val();
|
||||
}
|
||||
|
||||
# 37
|
||||
def ushort_to_size_t() -> size_t
|
||||
{
|
||||
return ushort_val();
|
||||
}
|
||||
|
||||
# 38
|
||||
def uint_to_ubyte() -> ubyte
|
||||
{
|
||||
return uint_val();
|
||||
}
|
||||
|
||||
# 39
|
||||
def uint_to_ushort() -> ushort
|
||||
{
|
||||
return uint_val();
|
||||
}
|
||||
|
||||
# 40
|
||||
def uint_to_ulong() -> ulong
|
||||
{
|
||||
return uint_val();
|
||||
}
|
||||
|
||||
# 41
|
||||
def uint_to_size_t() -> size_t
|
||||
{
|
||||
return uint_val();
|
||||
}
|
||||
|
||||
# 42
|
||||
def ulong_to_ubyte() -> ubyte
|
||||
{
|
||||
return ulong_val();
|
||||
}
|
||||
|
||||
# 43
|
||||
def ulong_to_ushort() -> ushort
|
||||
{
|
||||
return ulong_val();
|
||||
}
|
||||
|
||||
# 44
|
||||
def ulong_to_uint() -> uint
|
||||
{
|
||||
return ulong_val();
|
||||
}
|
||||
|
||||
# 45
|
||||
def ulong_to_size_t() -> size_t
|
||||
{
|
||||
return ulong_val();
|
||||
}
|
||||
|
||||
# 46
|
||||
def size_t_to_ubyte() -> ubyte
|
||||
{
|
||||
return size_t_val();
|
||||
}
|
||||
|
||||
# 47
|
||||
def size_t_to_ushort() -> ushort
|
||||
{
|
||||
return size_t_val();
|
||||
}
|
||||
|
||||
# 48
|
||||
def size_t_to_int() -> int
|
||||
{
|
||||
return size_t_val();
|
||||
}
|
||||
|
||||
# 49
|
||||
def size_t_to_ulong() -> ulong
|
||||
{
|
||||
return size_t_val();
|
||||
}
|
||||
|
||||
# 50
|
||||
def main() -> int
|
||||
{
|
||||
return int_val();
|
||||
}
|
||||
";
|
||||
struct Expected
|
||||
{
|
||||
string name;
|
||||
p_token_t token;
|
||||
}
|
||||
Expected[] expected = [
|
||||
Expected("byte_val", TOKEN_byte),
|
||||
Expected("short_val", TOKEN_short),
|
||||
Expected("int_val", TOKEN_int),
|
||||
Expected("long_val", TOKEN_long),
|
||||
Expected("ssize_t_val", TOKEN_ssize_t),
|
||||
Expected("byte_to_short", TOKEN_short),
|
||||
Expected("byte_to_int", TOKEN_int),
|
||||
Expected("byte_to_long", TOKEN_long),
|
||||
Expected("byte_to_ssize_t", TOKEN_ssize_t),
|
||||
Expected("short_to_byte", TOKEN_byte),
|
||||
Expected("short_to_int", TOKEN_int),
|
||||
Expected("short_to_long", TOKEN_long),
|
||||
Expected("short_to_ssize_t", TOKEN_ssize_t),
|
||||
Expected("int_to_byte", TOKEN_byte),
|
||||
Expected("int_to_short", TOKEN_short),
|
||||
Expected("int_to_long", TOKEN_long),
|
||||
Expected("int_to_ssize_t", TOKEN_ssize_t),
|
||||
Expected("long_to_byte", TOKEN_byte),
|
||||
Expected("long_to_short", TOKEN_short),
|
||||
Expected("long_to_int", TOKEN_int),
|
||||
Expected("long_to_ssize_t", TOKEN_ssize_t),
|
||||
Expected("ssize_t_to_byte", TOKEN_byte),
|
||||
Expected("ssize_t_to_short", TOKEN_short),
|
||||
Expected("ssize_t_to_int", TOKEN_int),
|
||||
Expected("ssize_t_to_long", TOKEN_long),
|
||||
Expected("ubyte_val", TOKEN_ubyte),
|
||||
Expected("ushort_val", TOKEN_ushort),
|
||||
Expected("uint_val", TOKEN_uint),
|
||||
Expected("ulong_val", TOKEN_ulong),
|
||||
Expected("size_t_val", TOKEN_size_t),
|
||||
Expected("ubyte_to_ushort", TOKEN_ushort),
|
||||
Expected("ubyte_to_uint", TOKEN_uint),
|
||||
Expected("ubyte_to_ulong", TOKEN_ulong),
|
||||
Expected("ubyte_to_size_t", TOKEN_size_t),
|
||||
Expected("ushort_to_ubyte", TOKEN_ubyte),
|
||||
Expected("ushort_to_uint", TOKEN_uint),
|
||||
Expected("ushort_to_ulong", TOKEN_ulong),
|
||||
Expected("ushort_to_size_t", TOKEN_size_t),
|
||||
Expected("uint_to_ubyte", TOKEN_ubyte),
|
||||
Expected("uint_to_ushort", TOKEN_ushort),
|
||||
Expected("uint_to_ulong", TOKEN_ulong),
|
||||
Expected("uint_to_size_t", TOKEN_size_t),
|
||||
Expected("ulong_to_ubyte", TOKEN_ubyte),
|
||||
Expected("ulong_to_ushort", TOKEN_ushort),
|
||||
Expected("ulong_to_uint", TOKEN_uint),
|
||||
Expected("ulong_to_size_t", TOKEN_size_t),
|
||||
Expected("size_t_to_ubyte", TOKEN_ubyte),
|
||||
Expected("size_t_to_ushort", TOKEN_ushort),
|
||||
Expected("size_t_to_int", TOKEN_int),
|
||||
Expected("size_t_to_ulong", TOKEN_ulong),
|
||||
Expected("main", TOKEN_int),
|
||||
];
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
size_t result = p_parse(&context);
|
||||
assert_eq(P_SUCCESS, result);
|
||||
PModule * pmod = p_result(&context);
|
||||
PModuleItems * pmis = pmod.pModuleItems;
|
||||
PFunctionDefinition *[] pfds;
|
||||
while (pmis !is null)
|
||||
{
|
||||
PModuleItem * pmi = pmis.pModuleItem;
|
||||
if (pmi is null)
|
||||
{
|
||||
stderr.writeln("pmi is null!!!?");
|
||||
assert(0);
|
||||
}
|
||||
PFunctionDefinition * pfd = pmi.pFunctionDefinition;
|
||||
if (pfd !is null)
|
||||
{
|
||||
pfds = [pfd] ~ pfds;
|
||||
}
|
||||
pmis = pmis.pModuleItems;
|
||||
}
|
||||
assert_eq(51, pfds.length);
|
||||
for (size_t i = 0; i < pfds.length; i++)
|
||||
{
|
||||
if ((expected[i].name != pfds[i].name.pvalue.s) ||
|
||||
(expected[i].token != pfds[i].returntype.pType.pTypeBase.pToken1.token))
|
||||
{
|
||||
stderr.writeln("Index ", i, ": expected ", expected[i].name, "/", expected[i].token, ", got ", pfds[i].name.pvalue.s, "/", pfds[i].returntype.pType.pTypeBase.pToken1.token);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,55 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
PStartS * start = p_result(&context);
|
||||
assert(start->pItems1 != NULL);
|
||||
assert(start->pItems != NULL);
|
||||
PItemsS * items = start->pItems;
|
||||
assert(items->pItem != NULL);
|
||||
assert(items->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_a, items->pItem->pToken1->token);
|
||||
assert_eq(11, items->pItem->pToken1->pvalue);
|
||||
assert(items->pItemsMore != NULL);
|
||||
PItemsMoreS * itemsmore = items->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem != NULL);
|
||||
assert(itemsmore->pItem->pItem->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pItem->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pItem->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore != NULL);
|
||||
itemsmore = itemsmore->pItemsMore;
|
||||
assert(itemsmore->pItem != NULL);
|
||||
assert(itemsmore->pItem->pToken1 != NULL);
|
||||
assert_eq(TOKEN_b, itemsmore->pItem->pToken1->token);
|
||||
assert_eq(22, itemsmore->pItem->pToken1->pvalue);
|
||||
assert(itemsmore->pItemsMore == NULL);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems == NULL);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start->pItems != NULL);
|
||||
assert(start->pItems->pItem != NULL);
|
||||
assert(start->pItems->pItem->pDual != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo1 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne2 != NULL);
|
||||
assert(start->pItems->pItem->pDual->pTwo2 == NULL);
|
||||
assert(start->pItems->pItem->pDual->pOne1 == NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,57 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "a, ((b)), b";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
PStartS * start = p_result(&context);
|
||||
assert(start.pItems1 !is null);
|
||||
assert(start.pItems !is null);
|
||||
PItemsS * items = start.pItems;
|
||||
assert(items.pItem !is null);
|
||||
assert(items.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_a, items.pItem.pToken1.token);
|
||||
assert_eq(11, items.pItem.pToken1.pvalue);
|
||||
assert(items.pItemsMore !is null);
|
||||
PItemsMoreS * itemsmore = items.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem !is null);
|
||||
assert(itemsmore.pItem.pItem.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pItem.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pItem.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore !is null);
|
||||
itemsmore = itemsmore.pItemsMore;
|
||||
assert(itemsmore.pItem !is null);
|
||||
assert(itemsmore.pItem.pToken1 !is null);
|
||||
assert_eq(TOKEN_b, itemsmore.pItem.pToken1.token);
|
||||
assert_eq(22, itemsmore.pItem.pToken1.pvalue);
|
||||
assert(itemsmore.pItemsMore is null);
|
||||
|
||||
input = "";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems is null);
|
||||
|
||||
input = "2 1";
|
||||
p_context_init(&context, input);
|
||||
assert_eq(P_SUCCESS, p_parse(&context));
|
||||
start = p_result(&context);
|
||||
assert(start.pItems !is null);
|
||||
assert(start.pItems.pItem !is null);
|
||||
assert(start.pItems.pItem.pDual !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo1 !is null);
|
||||
assert(start.pItems.pItem.pDual.pOne2 !is null);
|
||||
assert(start.pItems.pItem.pDual.pTwo2 is null);
|
||||
assert(start.pItems.pItem.pDual.pOne1 is null);
|
||||
}
|
||||
@ -1,84 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include "testutils.h"
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "abbccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(1, start->pT1->pToken->position.row);
|
||||
assert_eq(1, start->pT1->pToken->position.col);
|
||||
assert_eq(1, start->pT1->pToken->end_position.row);
|
||||
assert_eq(1, start->pT1->pToken->end_position.col);
|
||||
assert_eq(1, start->pT1->position.row);
|
||||
assert_eq(1, start->pT1->position.col);
|
||||
assert_eq(1, start->pT1->end_position.row);
|
||||
assert_eq(1, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(1, start->pT2->pToken->position.row);
|
||||
assert_eq(2, start->pT2->pToken->position.col);
|
||||
assert_eq(1, start->pT2->pToken->end_position.row);
|
||||
assert_eq(3, start->pT2->pToken->end_position.col);
|
||||
assert_eq(1, start->pT2->position.row);
|
||||
assert_eq(2, start->pT2->position.col);
|
||||
assert_eq(1, start->pT2->end_position.row);
|
||||
assert_eq(3, start->pT2->end_position.col);
|
||||
|
||||
assert_eq(1, start->pT3->pToken->position.row);
|
||||
assert_eq(4, start->pT3->pToken->position.col);
|
||||
assert_eq(1, start->pT3->pToken->end_position.row);
|
||||
assert_eq(6, start->pT3->pToken->end_position.col);
|
||||
assert_eq(1, start->pT3->position.row);
|
||||
assert_eq(4, start->pT3->position.col);
|
||||
assert_eq(1, start->pT3->end_position.row);
|
||||
assert_eq(6, start->pT3->end_position.col);
|
||||
|
||||
assert_eq(1, start->position.row);
|
||||
assert_eq(1, start->position.col);
|
||||
assert_eq(1, start->end_position.row);
|
||||
assert_eq(6, start->end_position.col);
|
||||
|
||||
input = "\n\n bb\nc\ncc\n\n a";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(3, start->pT1->pToken->position.row);
|
||||
assert_eq(3, start->pT1->pToken->position.col);
|
||||
assert_eq(3, start->pT1->pToken->end_position.row);
|
||||
assert_eq(4, start->pT1->pToken->end_position.col);
|
||||
assert_eq(3, start->pT1->position.row);
|
||||
assert_eq(3, start->pT1->position.col);
|
||||
assert_eq(3, start->pT1->end_position.row);
|
||||
assert_eq(4, start->pT1->end_position.col);
|
||||
|
||||
assert_eq(4, start->pT2->pToken->position.row);
|
||||
assert_eq(1, start->pT2->pToken->position.col);
|
||||
assert_eq(5, start->pT2->pToken->end_position.row);
|
||||
assert_eq(2, start->pT2->pToken->end_position.col);
|
||||
assert_eq(4, start->pT2->position.row);
|
||||
assert_eq(1, start->pT2->position.col);
|
||||
assert_eq(5, start->pT2->end_position.row);
|
||||
assert_eq(2, start->pT2->end_position.col);
|
||||
|
||||
assert_eq(7, start->pT3->pToken->position.row);
|
||||
assert_eq(6, start->pT3->pToken->position.col);
|
||||
assert_eq(7, start->pT3->pToken->end_position.row);
|
||||
assert_eq(6, start->pT3->pToken->end_position.col);
|
||||
assert_eq(7, start->pT3->position.row);
|
||||
assert_eq(6, start->pT3->position.col);
|
||||
assert_eq(7, start->pT3->end_position.row);
|
||||
assert_eq(6, start->pT3->end_position.col);
|
||||
|
||||
assert_eq(3, start->position.row);
|
||||
assert_eq(3, start->position.col);
|
||||
assert_eq(7, start->end_position.row);
|
||||
assert_eq(6, start->end_position.col);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,86 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
import testutils;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "abbccc";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
Start * start = p_result(&context);
|
||||
|
||||
assert_eq(1, start.pT1.pToken.position.row);
|
||||
assert_eq(1, start.pT1.pToken.position.col);
|
||||
assert_eq(1, start.pT1.pToken.end_position.row);
|
||||
assert_eq(1, start.pT1.pToken.end_position.col);
|
||||
assert_eq(1, start.pT1.position.row);
|
||||
assert_eq(1, start.pT1.position.col);
|
||||
assert_eq(1, start.pT1.end_position.row);
|
||||
assert_eq(1, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(1, start.pT2.pToken.position.row);
|
||||
assert_eq(2, start.pT2.pToken.position.col);
|
||||
assert_eq(1, start.pT2.pToken.end_position.row);
|
||||
assert_eq(3, start.pT2.pToken.end_position.col);
|
||||
assert_eq(1, start.pT2.position.row);
|
||||
assert_eq(2, start.pT2.position.col);
|
||||
assert_eq(1, start.pT2.end_position.row);
|
||||
assert_eq(3, start.pT2.end_position.col);
|
||||
|
||||
assert_eq(1, start.pT3.pToken.position.row);
|
||||
assert_eq(4, start.pT3.pToken.position.col);
|
||||
assert_eq(1, start.pT3.pToken.end_position.row);
|
||||
assert_eq(6, start.pT3.pToken.end_position.col);
|
||||
assert_eq(1, start.pT3.position.row);
|
||||
assert_eq(4, start.pT3.position.col);
|
||||
assert_eq(1, start.pT3.end_position.row);
|
||||
assert_eq(6, start.pT3.end_position.col);
|
||||
|
||||
assert_eq(1, start.position.row);
|
||||
assert_eq(1, start.position.col);
|
||||
assert_eq(1, start.end_position.row);
|
||||
assert_eq(6, start.end_position.col);
|
||||
|
||||
input = "\n\n bb\nc\ncc\n\n a";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
start = p_result(&context);
|
||||
|
||||
assert_eq(3, start.pT1.pToken.position.row);
|
||||
assert_eq(3, start.pT1.pToken.position.col);
|
||||
assert_eq(3, start.pT1.pToken.end_position.row);
|
||||
assert_eq(4, start.pT1.pToken.end_position.col);
|
||||
assert_eq(3, start.pT1.position.row);
|
||||
assert_eq(3, start.pT1.position.col);
|
||||
assert_eq(3, start.pT1.end_position.row);
|
||||
assert_eq(4, start.pT1.end_position.col);
|
||||
|
||||
assert_eq(4, start.pT2.pToken.position.row);
|
||||
assert_eq(1, start.pT2.pToken.position.col);
|
||||
assert_eq(5, start.pT2.pToken.end_position.row);
|
||||
assert_eq(2, start.pT2.pToken.end_position.col);
|
||||
assert_eq(4, start.pT2.position.row);
|
||||
assert_eq(1, start.pT2.position.col);
|
||||
assert_eq(5, start.pT2.end_position.row);
|
||||
assert_eq(2, start.pT2.end_position.col);
|
||||
|
||||
assert_eq(7, start.pT3.pToken.position.row);
|
||||
assert_eq(6, start.pT3.pToken.position.col);
|
||||
assert_eq(7, start.pT3.pToken.end_position.row);
|
||||
assert_eq(6, start.pT3.pToken.end_position.col);
|
||||
assert_eq(7, start.pT3.position.row);
|
||||
assert_eq(6, start.pT3.position.col);
|
||||
assert_eq(7, start.pT3.end_position.row);
|
||||
assert_eq(6, start.pT3.end_position.col);
|
||||
|
||||
assert_eq(3, start.position.row);
|
||||
assert_eq(3, start.position.col);
|
||||
assert_eq(7, start.end_position.row);
|
||||
assert_eq(6, start.end_position.col);
|
||||
}
|
||||
@ -1,29 +0,0 @@
#include "testparser.h"
#include "testutils.h"
#include <string.h>

int main()
{
char const * input = "1 + 2 * 3 + 4";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(11, p_result(&context));

input = "1 * 2 ** 4 * 3";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(48, p_result(&context));

input = "(1 + 2) * 3 + 4";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(13, p_result(&context));

input = "(2 * 2) ** 3 + 4 + 5";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(73, p_result(&context));

return 0;
}
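For readers skimming this change set, here is a minimal standalone sketch (not part of the diff) of how the generated calculator parser exercised by the removed spec above can be driven. It uses only the p_context_init, p_parse, and p_result calls that appear in that spec, and it assumes the grammar's parse result is numeric, as the assertions above imply.

/* Hedged sketch, not from the repository: evaluate one expression with the
 * generated parser. Assumes a numeric parse result, as implied above. */
#include "testparser.h"
#include <stdio.h>
#include <string.h>

static int eval_expression(char const * input)
{
    p_context_t context;
    p_context_init(&context, (uint8_t const *)input, strlen(input));
    if (p_parse(&context) != P_SUCCESS)
    {
        fprintf(stderr, "parse failed for '%s'\n", input);
        return 1;
    }
    printf("%s = %zu\n", input, (size_t)p_result(&context));
    return 0;
}

int main(void)
{
    return eval_expression("(1 + 2) * 3 + 4"); /* prints 13 per the spec above */
}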
@ -1,32 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "1 + 2 * 3 + 4";
p_context_t context;
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(11, p_result(&context));

input = "1 * 2 ** 4 * 3";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(48, p_result(&context));

input = "(1 + 2) * 3 + 4";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(13, p_result(&context));

input = "(2 * 2) ** 3 + 4 + 5";
p_context_init(&context, input);
assert_eq(P_SUCCESS, p_parse(&context));
assert_eq(73, p_result(&context));
}
52
spec/test_d_lexer.d
Normal file
@ -0,0 +1,52 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
alias Result = Testparser.Decoder.Result;
Result result;

result = Testparser.Decoder.decode_code_point("5");
assert(result == Result.success('5', 1u));

result = Testparser.Decoder.decode_code_point("");
assert(result == Result.eof());

result = Testparser.Decoder.decode_code_point("\xC2\xA9");
assert(result == Result.success(0xA9u, 2u));

result = Testparser.Decoder.decode_code_point("\xf0\x9f\xa7\xa1");
assert(result == Result.success(0x1F9E1, 4u));

result = Testparser.Decoder.decode_code_point("\xf0\x9f\x27");
assert(result == Result.decode_error());

result = Testparser.Decoder.decode_code_point("\xf0\x9f\xa7\xFF");
assert(result == Result.decode_error());

result = Testparser.Decoder.decode_code_point("\xfe");
assert(result == Result.decode_error());
}

unittest
{
alias Result = Testparser.Lexer.Result;
string input = "5 + 4 * \n677 + 567";
Testparser.Lexer lexer = new Testparser.Lexer(input);
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 0, 0, 1, Testparser.TOKEN_int));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 0, 2, 1, Testparser.TOKEN_plus));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 0, 4, 1, Testparser.TOKEN_int));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 0, 6, 1, Testparser.TOKEN_times));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 1, 0, 3, Testparser.TOKEN_int));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 1, 4, 1, Testparser.TOKEN_plus));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 1, 6, 3, Testparser.TOKEN_int));
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 1, 9, 0, Testparser.TOKEN___EOF));

lexer = new Testparser.Lexer("");
assert(lexer.lex_token() == Result(Result.Type.TOKEN, 0, 0, 0, Testparser.TOKEN___EOF));
}
18
spec/test_d_parser_identical_rules_lookahead.d
Normal file
@ -0,0 +1,18 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "aba";
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);

input = "abb";
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
}
22
spec/test_d_parser_rule_from_multiple_states.d
Normal file
@ -0,0 +1,22 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "a";
auto parser = new Testparser.Parser(input);
assert(parser.parse() == false);

input = "a b";
parser = new Testparser.Parser(input);
assert(parser.parse() == true);

input = "bb";
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
}
@ -1,42 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
|
||||
int main()
|
||||
{
|
||||
char const * input = "a 42";
|
||||
p_context_t context;
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
|
||||
input = "a\n123\na a";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
|
||||
assert(p_position(&context).row == 3);
|
||||
assert(p_position(&context).col == 4);
|
||||
assert(p_token(&context) == TOKEN_a);
|
||||
|
||||
input = "12";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
|
||||
assert(p_position(&context).row == 1);
|
||||
assert(p_position(&context).col == 1);
|
||||
assert(p_token(&context) == TOKEN_num);
|
||||
|
||||
input = "a 12\n\nab";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
|
||||
assert(p_position(&context).row == 3);
|
||||
assert(p_position(&context).col == 2);
|
||||
|
||||
input = "a 12\n\na\n\n77\na \xAA";
|
||||
p_context_init(&context, (uint8_t const *)input, strlen(input));
|
||||
assert(p_parse(&context) == P_DECODE_ERROR);
|
||||
assert(p_position(&context).row == 6);
|
||||
assert(p_position(&context).col == 5);
|
||||
|
||||
assert(strcmp(p_token_names[TOKEN_a], "a") == 0);
|
||||
assert(strcmp(p_token_names[TOKEN_num], "num") == 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1,40 +0,0 @@
|
||||
import testparser;
|
||||
import std.stdio;
|
||||
|
||||
int main()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unittest
|
||||
{
|
||||
string input = "a 42";
|
||||
p_context_t context;
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_SUCCESS);
|
||||
|
||||
input = "a\n123\na a";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
|
||||
assert(p_position(&context) == p_position_t(3, 4));
|
||||
assert(p_token(&context) == TOKEN_a);
|
||||
|
||||
input = "12";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
|
||||
assert(p_position(&context) == p_position_t(1, 1));
|
||||
assert(p_token(&context) == TOKEN_num);
|
||||
|
||||
input = "a 12\n\nab";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_UNEXPECTED_INPUT);
|
||||
assert(p_position(&context) == p_position_t(3, 2));
|
||||
|
||||
input = "a 12\n\na\n\n77\na \xAA";
|
||||
p_context_init(&context, input);
|
||||
assert(p_parse(&context) == P_DECODE_ERROR);
|
||||
assert(p_position(&context) == p_position_t(6, 5));
|
||||
|
||||
assert(p_token_names[TOKEN_a] == "a");
|
||||
assert(p_token_names[TOKEN_num] == "num");
|
||||
}
|
||||
@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
return 0;
}
@ -1,15 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "foo1\nbar2";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@ -1,110 +0,0 @@
|
||||
#include "testparser.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
|
||||
int main()
|
||||
{
|
||||
size_t result;
|
||||
p_code_point_t code_point;
|
||||
uint8_t code_point_length;
|
||||
|
||||
result = p_decode_code_point((uint8_t const *)"5", 1u, &code_point, &code_point_length);
|
||||
assert(result == P_SUCCESS);
|
||||
assert(code_point == '5');
|
||||
assert(code_point_length == 1u);
|
||||
|
||||
result = p_decode_code_point((uint8_t const *)"", 0u, &code_point, &code_point_length);
|
||||
assert(result == P_EOF);
|
||||
|
||||
result = p_decode_code_point((uint8_t const *)"\xC2\xA9", 2u, &code_point, &code_point_length);
|
||||
assert(result == P_SUCCESS);
|
||||
assert(code_point == 0xA9u);
|
||||
assert(code_point_length == 2u);

result = p_decode_code_point((uint8_t const *)"\xf0\x9f\xa7\xa1", 4u, &code_point, &code_point_length);
assert(result == P_SUCCESS);
assert(code_point == 0x1F9E1u);
assert(code_point_length == 4u);

result = p_decode_code_point((uint8_t const *)"\xf0\x9f\x27", 3u, &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);

result = p_decode_code_point((uint8_t const *)"\xf0\x9f\xa7\xFF", 4u, &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);

result = p_decode_code_point((uint8_t const *)"\xfe", 1u, &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);


p_token_info_t token_info;
char const * input = "5 + 4 * \n677 + 567";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 1u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 3u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 3u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 5u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 5u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 7u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 7u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_times);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 3u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 5u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 5u);
assert(token_info.length == 1u);
assert(token_info.token == TOKEN_plus);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 7u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 9u);
assert(token_info.length == 3u);
assert(token_info.token == TOKEN_int);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 2u);
assert(token_info.position.col == 10u);
assert(token_info.end_position.row == 2u);
assert(token_info.end_position.col == 10u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);

p_context_init(&context, (uint8_t const *)"", 0u);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info.position.row == 1u);
assert(token_info.position.col == 1u);
assert(token_info.end_position.row == 1u);
assert(token_info.end_position.col == 1u);
assert(token_info.length == 0u);
assert(token_info.token == TOKEN___EOF);

return 0;
}
@ -1,69 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
size_t result;
p_code_point_t code_point;
ubyte code_point_length;

result = p_decode_code_point("5", &code_point, &code_point_length);
assert(result == P_SUCCESS);
assert(code_point == '5');
assert(code_point_length == 1u);

result = p_decode_code_point("", &code_point, &code_point_length);
assert(result == P_EOF);

result = p_decode_code_point("\xC2\xA9", &code_point, &code_point_length);
assert(result == P_SUCCESS);
assert(code_point == 0xA9u);
assert(code_point_length == 2u);

result = p_decode_code_point("\xf0\x9f\xa7\xa1", &code_point, &code_point_length);
assert(result == P_SUCCESS);
assert(code_point == 0x1F9E1u);
assert(code_point_length == 4u);

result = p_decode_code_point("\xf0\x9f\x27", &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);

result = p_decode_code_point("\xf0\x9f\xa7\xFF", &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);

result = p_decode_code_point("\xfe", &code_point, &code_point_length);
assert(result == P_DECODE_ERROR);
}

unittest
{
p_token_info_t token_info;
string input = "5 + 4 * \n677 + 567";
p_context_t context;
p_context_init(&context, input);
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 3), p_position_t(1, 3), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 5), p_position_t(1, 5), 1, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 7), p_position_t(1, 7), 1, TOKEN_times));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 1), p_position_t(2, 3), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 5), p_position_t(2, 5), 1, TOKEN_plus));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 7), p_position_t(2, 9), 3, TOKEN_int));
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(2, 10), p_position_t(2, 10), 0, TOKEN___EOF));

p_context_init(&context, "");
assert(p_lex(&context, &token_info) == P_SUCCESS);
assert(token_info == p_token_info_t(p_position_t(1, 1), p_position_t(1, 1), 0, TOKEN___EOF));
}
@ -1,15 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
char const * input = "identifier_123";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass1\n");

return 0;
}
@ -9,8 +9,7 @@ int main()
unittest
{
string input = `identifier_123`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
writeln("pass1");
}
@ -1,20 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
char const * input = "abc \"a string\" def";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass1\n");

input = "abc \"abc def\" def";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass2\n");

return 0;
}
@ -9,13 +9,12 @@ int main()
unittest
{
string input = `abc "a string" def`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
writeln("pass1");

input = `abc "abc def" def`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
writeln("pass2");
}
@ -1,20 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>

int main()
{
char const * input = "abc.def";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass1\n");

input = "abc . abc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass2\n");

return 0;
}
@ -1,21 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = `abc.def`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
writeln("pass1");

input = `abc . abc`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
writeln("pass2");
}
@ -1,19 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "x";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);

input = "fabulous";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 8u);

return 0;
}
@ -9,13 +9,12 @@ int main()
unittest
{
string input = `x`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
assert(parser.result == 1u);

input = `fabulous`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 8u);
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
assert(parser.result == 8u);
}
@ -1,18 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "x";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_INPUT);

input = "123";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 123u);

return 0;
}
@ -1,20 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = `x`;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_INPUT);

input = `123`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 123u);
}
@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -1,15 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "\a\b\t\n\v\f\rt";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@ -1,19 +0,0 @@
#include "testparsermyp1.h"
#include "testparsermyp2.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input1 = "a\n1";
myp1_context_t context1;
myp1_context_init(&context1, (uint8_t const *)input1, strlen(input1));
assert(myp1_parse(&context1) == MYP1_SUCCESS);

char const * input2 = "bcb";
myp2_context_t context2;
myp2_context_init(&context2, (uint8_t const *)input2, strlen(input2));
assert(myp2_parse(&context2) == MYP2_SUCCESS);

return 0;
}
@ -1,21 +0,0 @@
import testparsermyp1;
import testparsermyp2;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input1 = "a\n1";
myp1_context_t context1;
myp1_context_init(&context1, input1);
assert(myp1_parse(&context1) == MYP1_SUCCESS);

string input2 = "bcb";
myp2_context_t context2;
myp2_context_init(&context2, input2);
assert(myp2_parse(&context2) == MYP2_SUCCESS);
}
@ -1,45 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);
assert(start->r == NULL);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->a != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->r != NULL);
assert(start->pR == start->pR3);
assert(start->pR == start->r);
assert_eq(TOKEN_c, start->pR->pToken1->token);

input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->a == NULL);
assert(start->pToken2 != NULL);
assert(start->r != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

return 0;
}
@ -1,46 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);
assert(start.r is null);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
assert(start.pR3 != null);
assert(start.pR != null);
assert(start.r != null);
assert(start.pR == start.pR3);
assert(start.pR == start.r);
assert_eq(TOKEN_c, start.pR.pToken1.token);

input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);
}
@ -1,22 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -1,23 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@ -1,42 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
char const * input = "b";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert_eq(TOKEN_b, start->pToken2->token);
assert(start->pR3 == NULL);
assert(start->pR == NULL);

input = "abcd";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 != NULL);
assert_eq(TOKEN_a, start->pToken1->token);
assert(start->pToken2 != NULL);
assert(start->pR3 != NULL);
assert(start->pR != NULL);
assert(start->pR == start->pR3);
assert_eq(TOKEN_c, start->pR->pToken1->token);

input = "bdc";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start->pToken1 == NULL);
assert(start->pToken2 != NULL);
assert(start->pR != NULL);
assert_eq(TOKEN_d, start->pR->pToken1->token);

return 0;
}
@ -1,43 +0,0 @@
import testparser;
import std.stdio;
import testutils;

int main()
{
return 0;
}

unittest
{
string input = "b";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
Start * start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert_eq(TOKEN_b, start.pToken2.token);
assert(start.pR3 is null);
assert(start.pR is null);

input = "abcd";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 != null);
assert_eq(TOKEN_a, start.pToken1.token);
assert(start.pToken2 != null);
assert(start.pR3 != null);
assert(start.pR != null);
assert(start.pR == start.pR3);
assert_eq(TOKEN_c, start.pR.pToken1.token);

input = "bdc";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
start = p_result(&context);
assert(start.pToken1 is null);
assert(start.pToken2 !is null);
assert(start.pR !is null);
assert_eq(TOKEN_d, start.pR.pToken1.token);
}
@ -1,17 +0,0 @@
#include "testparser.h"
#include <string.h>
#include <assert.h>

int main()
{
char const * input = "aba";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "abb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -1,19 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "aba";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "abb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@ -1,24 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context).row == 1);
assert(p_position(&context).col == 2);
assert(context.token == TOKEN___EOF);

input = "a b";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "bb";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -1,25 +0,0 @@
import testparser;
import std.stdio;

int main()
{
return 0;
}

unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_UNEXPECTED_TOKEN);
assert(p_position(&context) == p_position_t(1, 2));
assert(context.token == TOKEN___EOF);

input = "a b";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);

input = "bb";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
}
@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "ab";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -9,7 +9,6 @@ int main()
unittest
{
string input = "ab";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
}
@ -1,56 +0,0 @@
#include "testparser.h"
#include "json_types.h"
#include <string.h>
#include <assert.h>

int main()
{
char const * input = "";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

input = "{}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_OBJECT);

input = "[]";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_ARRAY);

input = "-45.6";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == -45.6);

input = "2E-2";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context)->id == JSON_NUMBER);
assert(p_result(&context)->number == 0.02);

input = "{\"hi\":true}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
JSONValue * o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(1, o->object.size);
assert(strcmp(o->object.entries[0].name, "hi") == 0);
assert(o->object.entries[0].value->id == JSON_TRUE);

input = "{\"ff\": false, \"nn\": null}";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
o = p_result(&context);
assert(o->id == JSON_OBJECT);
assert_eq(2, o->object.size);
assert(strcmp(o->object.entries[0].name, "ff") == 0);
assert(o->object.entries[0].value->id == JSON_FALSE);
assert(strcmp(o->object.entries[1].name, "nn") == 0);
assert(o->object.entries[1].value->id == JSON_NULL);

return 0;
}
@ -10,45 +10,44 @@ int main()
unittest
{
string input = ``;
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse());

input = `{}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONObject)parser.result);

input = `[]`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONArray)p_result(&context));
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONArray)parser.result);

input = `-45.6`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(&context));
assert((cast(JSONNumber)p_result(&context)).value == -45.6);
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONNumber)parser.result);
assert((cast(JSONNumber)parser.result).value == -45.6);

input = `2E-2`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONNumber)p_result(&context));
assert((cast(JSONNumber)p_result(&context)).value == 0.02);
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONNumber)parser.result);
assert((cast(JSONNumber)parser.result).value == 0.02);

input = `{"hi":true}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
JSONObject o = cast(JSONObject)p_result(&context);
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONObject)parser.result);
JSONObject o = cast(JSONObject)parser.result;
assert(o.value["hi"]);
assert(cast(JSONTrue)o.value["hi"]);

input = `{"ff": false, "nn": null}`;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(cast(JSONObject)p_result(&context));
o = cast(JSONObject)p_result(&context);
parser = new Testparser.Parser(input);
assert(parser.parse());
assert(cast(JSONObject)parser.result);
o = cast(JSONObject)parser.result;
assert(o.value["ff"]);
assert(cast(JSONFalse)o.value["ff"]);
assert(o.value["nn"]);
@ -1,24 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "a";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);

input = "";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 0u);

input = "aaaaaaaaaaaaaaaa";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 16u);

return 0;
}
@ -9,18 +9,17 @@ int main()
unittest
{
string input = "a";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 1u);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
assert(parser.result == 1u);

input = "";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 0u);
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
assert(parser.result == 0u);

input = "aaaaaaaaaaaaaaaa";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
assert(p_result(&context) == 16u);
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
assert(parser.result == 16u);
}
@ -1,20 +0,0 @@
#include "testparser.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "abcdef";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass1\n");

input = "defabcdef";
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);
printf("pass2\n");

return 0;
}
@ -9,13 +9,12 @@ int main()
unittest
{
string input = "abcdef";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
writeln("pass1");

input = "defabcdef";
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
parser = new Testparser.Parser(input);
assert(parser.parse() == true);
writeln("pass2");
}
@ -1,13 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>

int main()
{
char const * input = "defghidef";
p_context_t context;
p_context_init(&context, (uint8_t const *)input, strlen(input));
assert(p_parse(&context) == P_SUCCESS);

return 0;
}
@ -9,7 +9,6 @@ int main()
unittest
{
string input = "defghidef";
p_context_t context;
p_context_init(&context, input);
assert(p_parse(&context) == P_SUCCESS);
auto parser = new Testparser.Parser(input);
assert(parser.parse() == true);
}
@ -1,9 +0,0 @@
#include "testparser.h"
#include <assert.h>
#include <string.h>
#include "testutils.h"

int main()
{
return 0;
}
Some files were not shown because too many files have changed in this diff