@@ -357,6 +357,8 @@ class Grammar:
                 exp.children = list(interleave(exp.children, expr))
                 if r == start:
                     exp.children = [expr] + exp.children
+            for exp in tree.find_data('expr'):
+                exp.children[0] = T('expansion', list(interleave(exp.children[:1], expr)))
 
         x = [T('expansion', [Token('RULE', x)]) for x in ignore_names]
         _ignore_tree = T('expr', [T('expansions', x), Token('OP', '?')])
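
Note on the hunk above: the existing loop interleaves the ignore expression only into top-level `expansion` nodes, so ignored tokens could not match inside repetition and option subtrees (`expr` nodes such as `X*` or `X+`); the two added lines weave it into those as well. Below is a minimal sketch of the interleaving idea; lark's real `interleave` is defined elsewhere in this file and may apply extra conditions, so its exact behaviour is an assumption here.

    # Simplified sketch, not lark's exact helper: yield each child
    # followed by the ignore expression, so ignored tokens may match
    # between any two symbols in scanless mode.
    def interleave(children, ignore_expr):
        for child in children:
            yield child
            yield ignore_expr

    print(list(interleave(['A', 'B'], '__ignore')))
    # -> ['A', '__ignore', 'B', '__ignore']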
@@ -107,6 +107,7 @@ class Column:
 class Parser:
     def __init__(self, parser_conf):
         self.analysis = GrammarAnalyzer(parser_conf.rules, parser_conf.start)
+        self.start = parser_conf.start
@@ -154,7 +155,7 @@ class Parser:
                 next_set.add([item.advance(stream[i])])
 
         if not next_set and token is not END_TOKEN:
-            expect = {i.expect for i in cur_set.to_scan}
+            expect = {i.expect[-1] for i in cur_set.to_scan}
             raise UnexpectedToken(token, expect, stream, i)
 
         return cur_set, next_set
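
Context for the one-line fix above: this is the path taken when no Earley item can scan the current character. The change suggests that `item.expect` here is a sequence whose last element names the symbol actually expected next, so the error now reports that symbol rather than the whole sequence (an inference from the diff, not shown explicitly). A hypothetical sketch of reaching this error through the public API:

    # Hypothetical usage sketch: grammar and input are invented to hit
    # the UnexpectedToken path patched above; only Lark's public
    # parse() API is assumed.
    from lark import Lark

    parser = Lark('start: "a" "b"', parser='earley', lexer=None)
    try:
        parser.parse('ax')       # 'x' matches nothing the parser expects
    except Exception as err:     # UnexpectedToken in this revision
        print(type(err).__name__, err)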
@@ -5,7 +5,7 @@ import logging
 from .test_trees import TestTrees
 # from .test_selectors import TestSelectors
-from .test_parser import TestLalrStandard, TestEarleyStandard, TestLalrContextual, TestParsers, TestEarleyScanless
+from .test_parser import TestLalrStandard, TestEarleyStandard, TestLalrContextual, TestParsers, TestEarleyScanless, TestEarley
 # from .test_grammars import TestPythonG, TestConfigG
 
 logging.basicConfig(level=logging.INFO)
@@ -48,7 +48,7 @@ class TestEarley(unittest.TestCase):
             B: ("ab"|/[^b]/)*
             """, lexer=None)
-        assertEqual( g.parse('abc'), 'abc')
+        self.assertEqual( g.parse('abc').children[0], 'abc')
 
     def test_earley_scanless(self):
         g = Lark("""start: A "b" c
@@ -58,6 +58,20 @@ class TestEarley(unittest.TestCase):
         x = g.parse('aaaababc')
 
+    def test_earley_scanless2(self):
+        grammar = """
+        start: statement+
+
+        statement: "r"
+                 | "c" /[a-z]/+
+
+        %ignore " "
+        """
+
+        program = """c b r"""
+
+        l = Lark(grammar, parser='earley', lexer=None)
+        l.parse(program)
 
 def _make_parser_test(LEXER, PARSER):
     def _Lark(grammar, **kwargs):
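
The new test_earley_scanless2 is the regression test for the %ignore fix in the first hunk: before that change, the spaces in "c b r" had nothing to match in scanless Earley mode and parsing failed. A standalone sketch of the same scenario, assuming this revision's API:

    # Standalone version of the new regression test: with the fix,
    # %ignore " " lets whitespace appear between statements under
    # parser='earley', lexer=None (scanless mode).
    from lark import Lark

    grammar = r'''
    start: statement+

    statement: "r"
             | "c" /[a-z]/+

    %ignore " "
    '''

    l = Lark(grammar, parser='earley', lexer=None)
    tree = l.parse('c b r')
    print(tree.pretty())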