diff --git a/lark/parser_frontends.py b/lark/parser_frontends.py
index 7043dbc..0b36a75 100644
--- a/lark/parser_frontends.py
+++ b/lark/parser_frontends.py
@@ -27,7 +27,7 @@ class LALR(WithLexer):
         self.parser = lalr_parser.Parser(parser_conf)
 
     def parse(self, text):
-        tokens = list(self.lex(text))
+        tokens = self.lex(text)
         return self.parser.parse(tokens)
 
 class LALR_ContextualLexer:
@@ -160,7 +160,7 @@ class Earley(WithLexer):
         return [Terminal_Token(sym) if is_terminal(sym) else sym for sym in expansion]
 
     def parse(self, text):
-        tokens = list(self.lex(text))
+        tokens = self.lex(text)
         return self.parser.parse(tokens)
 
 def get_frontend(parser, lexer):
diff --git a/lark/parsers/earley.py b/lark/parsers/earley.py
index 49f9ed7..b903167 100644
--- a/lark/parsers/earley.py
+++ b/lark/parsers/earley.py
@@ -168,7 +168,7 @@ class Parser:
             to_scan = cur_set.to_scan.get_news()
             for item in to_scan:
                 if item.expect.match(token):
-                    next_set.add([item.advance(stream[i])])
+                    next_set.add([item.advance(token)])
 
             if not next_set and token is not END_TOKEN:
                 expect = {i.expect for i in cur_set.to_scan}
@@ -181,10 +181,12 @@ class Parser:
         column0.add(predict(start, column0))
 
         cur_set = column0
-        for i, char in enumerate(stream):
-            _, cur_set = process_column(i, char, cur_set)
+        i = 0
+        for token in stream:
+            _, cur_set = process_column(i, token, cur_set)
+            i += 1
 
-        last_set, _ = process_column(len(stream), END_TOKEN, cur_set)
+        last_set, _ = process_column(i, END_TOKEN, cur_set)
 
         # Parse ended. Now build a parse tree
         solutions = [n.tree for n in last_set.to_reduce