@@ -6,11 +6,12 @@
 #
 import json

-from .json_parser import json_grammar
 from lark import Lark
 from lark.reconstruct import Reconstructor
+from .json_parser import json_grammar

 def test():
     test_json = '''
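The hunk above only moves the local `json_parser` import below the `lark` imports. For orientation, a minimal sketch of the round trip this test exercises, assuming `json_grammar` is the Lark grammar string exported by the example's `json_parser` module (API details per this era of the library, not guaranteed for current releases):

    import json
    from lark import Lark
    from lark.reconstruct import Reconstructor

    json_parser = Lark(json_grammar)                         # build parser from grammar text
    tree = json_parser.parse('{"a": [1, 2]}')                # text -> parse tree
    new_json = Reconstructor(json_parser).reconstruct(tree)  # parse tree -> text
    assert json.loads(new_json) == {"a": [1, 2]}             # same JSON after the round trip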
@@ -24,7 +24,7 @@ class UnexpectedToken(ParseError):
                    "Expected: %s\n"
                    "Context: %s" % (token, self.line, self.column, expected, context))

-        super(ParseError, self).__init__(message)
+        super(UnexpectedToken, self).__init__(message)
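Under Python 2-style `super`, the first argument must be the class the call appears in; naming the base class instead starts the MRO lookup one level too high and skips the base's own `__init__`. A minimal sketch of that failure mode (classes simplified, not the library's actual fields):

    class ParseError(Exception):
        def __init__(self, message):
            self.message = message
            super(ParseError, self).__init__(message)

    class UnexpectedToken(ParseError):
        def __init__(self, message):
            # With super(ParseError, self) here, lookup would resolve past
            # ParseError straight to Exception, and self.message would never
            # be set. Naming the current class keeps the chain intact.
            super(UnexpectedToken, self).__init__(message)

    assert UnexpectedToken('bad token').message == 'bad token'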
@@ -8,7 +8,7 @@ class Indenter:
         self.indent_level = [0]

     def handle_NL(self, token):
-        if (self.paren_level > 0):
+        if self.paren_level > 0:
             return

         yield token
@@ -7,7 +7,7 @@ from collections import defaultdict
 from .utils import STRING_TYPE
 from .load_grammar import load_grammar
 from .tree import Tree
-from .common import GrammarError, LexerConf, ParserConf
+from .common import LexerConf, ParserConf
 from .lexer import Lexer
 from .parse_tree_builder import ParseTreeBuilder
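This hunk and several below (`STRING_TYPE` in the lexer and in `earley`, `Lexer` in `load_grammar`, `Rule` in `parser_frontends`, `Lark` in `reconstruct`) drop names that are imported but never used in their modules: dead imports of the kind a linter such as pyflakes reports.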
@@ -2,7 +2,7 @@
 import re

-from .utils import Str, classify, STRING_TYPE
+from .utils import Str, classify
 from .common import is_terminal, PatternStr, PatternRE, TokenDef

 class LexError(Exception):
@@ -13,7 +13,7 @@ class UnexpectedInput(LexError):
         context = seq[lex_pos:lex_pos+5]
         message = "No token defined for: '%s' in %r at line %d" % (seq[lex_pos], context, line)

-        super(LexError, self).__init__(message)
+        super(UnexpectedInput, self).__init__(message)

         self.line = line
         self.column = column
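The same class-name slip as in `UnexpectedToken` above: `super()`'s first argument should be `UnexpectedInput`, the class the call appears in, not its base `LexError`.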
@@ -3,7 +3,7 @@ from itertools import chain
 import re
 from ast import literal_eval

-from .lexer import Lexer, Token, UnexpectedInput
+from .lexer import Token, UnexpectedInput
 from .parse_tree_builder import ParseTreeBuilder
 from .parser_frontends import LALR
@@ -526,7 +526,7 @@ class GrammarLoader:
                 token_tree = dict(g.token_defs)[dotted_path[-1]]
                 token_defs.append([name.value, token_tree])
             else:
-                assert False, command
+                assert False, stmt

         # Verify correctness 1
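If `command` was no longer a bound name at this point, as the rename to `stmt` suggests, the old line could never report its message: evaluating the assert's second operand would itself raise `NameError`. A small sketch of the difference (values illustrative):

    stmt = 'unknown statement'

    try:
        assert False, command   # old: raises NameError, hiding the real message
    except NameError:
        pass

    try:
        assert False, stmt      # fixed: raises AssertionError('unknown statement')
    except AssertionError as e:
        assert str(e) == 'unknown statement'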
@@ -5,7 +5,6 @@ from .lexer import Lexer, ContextualLexer, Token
 from .common import is_terminal, GrammarError, ParserConf
 from .parsers import lalr_parser, earley, nearley
-from .parsers.grammar_analysis import Rule
 from .tree import Transformer

 class WithLexer:
@@ -126,7 +125,7 @@ class Nearley_NoLex:
             if is_terminal(sym):
                 regexp = self.token_by_name[sym].pattern.to_regexp()
                 width = sre_parse.parse(regexp).getwidth()
-                if not width == (1,1):
+                if width != (1,1):
                     raise GrammarError('Dynamic lexing requires all tokens to have a width of 1 (%s is %s)' % (regexp, width))
                 yield sym, re.compile(regexp)
             else:
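`width != (1,1)` is the idiomatic spelling of `not width == (1,1)`; the check itself is unchanged. `sre_parse.parse(...).getwidth()` returns the (min, max) number of characters a regexp can match, which is how the scanless front ends enforce single-character tokens. A quick illustration (`sre_parse` is an undocumented stdlib module, deprecated in recent Pythons):

    import sre_parse

    assert sre_parse.parse('a').getwidth() == (1, 1)      # exactly one char: OK
    assert sre_parse.parse('[a-z]').getwidth() == (1, 1)  # still one char: OK
    assert sre_parse.parse('ab').getwidth() == (2, 2)     # fixed width, but too wide
    assert sre_parse.parse('a?').getwidth() == (0, 1)     # variable width: rejected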
@@ -168,7 +167,7 @@ class Earley_NoLex:
             if is_terminal(sym):
                 regexp = self.token_by_name[sym].pattern.to_regexp()
                 width = sre_parse.parse(regexp).getwidth()
-                if not width == (1,1):
+                if width != (1,1):
                     raise GrammarError('Scanless parsing (lexer=None) requires all tokens to have a width of 1 (terminal %s: %s is %s)' % (sym, regexp, width))
                 yield (re.compile(regexp).match, regexp)
             else:
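The same `!=` cleanup as in `Nearley_NoLex` above; only the error message differs.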
@@ -1,6 +1,6 @@
 "My name is Earley"

-from ..utils import classify, STRING_TYPE
+from ..utils import classify
 from ..common import ParseError, UnexpectedToken

 try:
@@ -5,7 +5,6 @@ from .tree import Tree
 from .common import is_terminal, ParserConf, PatternStr
 from .lexer import Token
 from .parsers import earley
-from .lark import Lark
@@ -68,7 +67,7 @@ class Reconstructor:
             return to_write

         d = defaultdict(list)
-        for name, (expansions, options) in parser.rules.items():
+        for name, (expansions, _o) in parser.rules.items():
             for expansion, alias in expansions:
                 if alias:
                     d[alias].append(expansion)
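Renaming the unused `options` to `_o` follows the convention that a leading underscore marks a deliberately ignored value in unpacking, so linters stop flagging it.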
@@ -104,4 +103,3 @@ class Reconstructor:

     def reconstruct(self, tree):
         return ''.join(self._reconstruct(tree))
-
@@ -60,7 +60,7 @@ def inline_args(f):
         return _f_func
     elif isinstance(f, (type, types.BuiltinFunctionType)):
         @functools.wraps(f)
-        def _f_builtin(self, args):
+        def _f_builtin(_self, args):
             return f(*args)
         return _f_builtin
     else:
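`_f_builtin` never uses the instance it is bound to, so renaming the first parameter from `self` to `_self` documents that and silences unused-argument warnings while keeping the call signature compatible.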