@@ -1305,8 +1305,7 @@ class ParseTreeBuilder:
 
 class LALR_Parser(object):
     def __init__(self, parser_conf, debug=False):
-        assert all(r.options is None or r.options.priority is None
-                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
+        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
         analysis = LALR_Analyzer(parser_conf, debug=debug)
         analysis.compute_lookahead()
         callbacks = parser_conf.callbacks
@@ -1508,7 +1507,7 @@ class WithLexer(Serialize):
         inst.postlex = postlex
         inst.parser = LALR_Parser.deserialize(inst.parser, memo, callbacks)
         return inst
 
     def _serialize(self, data, memo):
         data['parser'] = data['parser'].serialize(memo)
@@ -1740,14 +1739,14 @@ class Lark(Serialize):
         # This replaces the old 'resolve__antiscore_sum' option.
         if self.options.priority == 'invert':
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = -rule.options.priority
         # Else, if the user asked to disable priorities, strip them from the
         # rules. This allows the Earley parsers to skip an extra forest walk
         # for improved performance, if you don't need them (or didn't specify any).
         elif self.options.priority == None:
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = None
 
         self.lexer_conf = LexerConf(self.terminals, self.ignore_tokens, self.options.postlex, self.options.lexer_callbacks)
@@ -81,7 +81,7 @@ class Rule(Serialize):
         self.expansion = expansion
         self.alias = alias
         self.order = order
-        self.options = options
+        self.options = options or RuleOptions()
         self._hash = hash((self.origin, tuple(self.expansion)))
 
     def _deserialize(self):
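
Note: this `options or RuleOptions()` line is the null-object pattern that justifies every guard
removal in this diff. Once `Rule.options` is guaranteed to be a real `RuleOptions` instance,
callers may read `options.priority`, `options.keep_all_tokens`, etc. without first testing for
None. A minimal self-contained sketch of the idea, with assumed attribute names and defaults
mirroring what the diff uses:

    class RuleOptions:
        # Assumed defaults for illustration; the real class carries more fields.
        def __init__(self, keep_all_tokens=False, expand1=False, priority=None):
            self.keep_all_tokens = keep_all_tokens
            self.expand1 = expand1
            self.priority = priority
            self.empty_indices = ()

    class Rule:
        def __init__(self, origin, expansion, options=None):
            self.origin = origin
            self.expansion = expansion
            # Null-object pattern: never store None, so callers skip the guard.
            self.options = options or RuleOptions()

    r = Rule('start', [])
    assert r.options.priority is None   # safe without an `r.options and ...` check

Using `or` instead of an explicit `if options is None` check is safe here because `RuleOptions`
instances are always truthy.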
@@ -187,14 +187,14 @@ class Lark(Serialize):
         # This replaces the old 'resolve__antiscore_sum' option.
         if self.options.priority == 'invert':
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = -rule.options.priority
         # Else, if the user asked to disable priorities, strip them from the
         # rules. This allows the Earley parsers to skip an extra forest walk
         # for improved performance, if you don't need them (or didn't specify any).
         elif self.options.priority == None:
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = None
 
         # TODO Deprecate lexer_callbacks?
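
For context on the 'invert' branch: the ambiguity resolver prefers the highest accumulated
priority, so negating every declared priority makes the lowest declaration win, which is what the
old 'resolve__antiscore_sum' option did. A toy illustration (rule names and values invented):

    # Hypothetical declared priorities, e.g. from rules `low.1: ...` / `high.3: ...`
    declared = {'low': 1, 'high': 3}

    # priority='invert' flips the sign of each rule's priority...
    inverted = {name: -p for name, p in declared.items()}

    # ...so a resolver that keeps the highest value now effectively
    # picks the lowest *declared* priority.
    assert max(inverted, key=inverted.get) == 'low'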
@@ -503,7 +503,7 @@ class Grammar:
         ebnf_to_bnf = EBNF_to_BNF()
         rules = []
         for name, rule_tree, options in rule_defs:
-            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
+            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options.keep_all_tokens else None
             tree = transformer.transform(rule_tree)
             res = ebnf_to_bnf.transform(tree)
             rules.append((name, res, options))
@@ -527,7 +527,7 @@ class Grammar:
             empty_indices = [x==_EMPTY for x in expansion]
             if any(empty_indices):
-                exp_options = copy(options) if options else RuleOptions()
+                exp_options = copy(options)
                 exp_options.empty_indices = empty_indices
                 expansion = [x for x in expansion if x!=_EMPTY]
             else:
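
A note on why `copy(options)` stays even though the None-guard goes: `empty_indices` is
per-expansion, while the expansions of one rule definition share a single options object, so
mutating it in place would leak one expansion's indices into the others. Sketch of the hazard,
reusing the `RuleOptions` stub above:

    from copy import copy

    shared = RuleOptions()            # one options object shared by several expansions

    exp_options = copy(shared)        # shallow copy, as in the diff
    exp_options.empty_indices = [True, False]

    assert shared.empty_indices == () # the shared object stays untouched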
@@ -225,12 +225,12 @@ class ParseTreeBuilder:
     def _init_builders(self, rules):
         for rule in rules:
             options = rule.options
-            keep_all_tokens = self.always_keep_all_tokens or (options.keep_all_tokens if options else False)
-            expand_single_child = options.expand1 if options else False
+            keep_all_tokens = self.always_keep_all_tokens or options.keep_all_tokens
+            expand_single_child = options.expand1
 
             wrapper_chain = list(filter(None, [
                 (expand_single_child and not rule.alias) and ExpandSingleChild,
-                maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders and options else None),
+                maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None),
                 self.propagate_positions and PropagatePositions,
                 self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens),
             ]))
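
The `list(filter(None, [...]))` construction above is worth a note: each list element is either a
wrapper callable or a falsy value (`False`/`None` from the `and` chains), and `filter(None, ...)`
keeps only the truthy entries. A reduced sketch of the same idiom with placeholder wrappers (not
lark's real ones):

    def expand_single_child(children):
        # Placeholder wrapper: unwrap single-child nodes.
        return children[0] if len(children) == 1 else children

    def build_wrapper_chain(expand1, propagate_positions):
        # `cond and callable` yields the callable when cond is true,
        # otherwise False, which filter(None, ...) drops.
        return list(filter(None, [
            expand1 and expand_single_child,
            propagate_positions and (lambda children: children),  # stand-in wrapper
        ]))

    assert build_wrapper_chain(True, False) == [expand_single_child]
    assert build_wrapper_chain(False, False) == []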
@@ -96,7 +96,7 @@ class Parser(object):
         assert all(isinstance(x, Symbol) for x in lark_rule.expansion)
         return Rule(
             lark_rule.origin, lark_rule.expansion,
-            weight=lark_rule.options.priority if lark_rule.options and lark_rule.options.priority else 0,
+            weight=lark_rule.options.priority if lark_rule.options.priority else 0,
             alias=lark_rule)
 
     def parse(self, tokenized, start):  # pylint: disable=invalid-name
@@ -45,7 +45,7 @@ class Parser:
         # the priorities will be stripped from all rules before they reach us, allowing us to
         # skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
         # on any rules.
-        if self.forest_sum_visitor is None and rule.options and rule.options.priority is not None:
+        if self.forest_sum_visitor is None and rule.options.priority is not None:
             self.forest_sum_visitor = ForestSumVisitor
 
         self.term_matcher = term_matcher
@@ -250,7 +250,7 @@ class ForestSumVisitor(ForestVisitor):
         return iter(node.children)
 
     def visit_packed_node_out(self, node):
-        priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options and node.rule.options.priority else 0
+        priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options.priority else 0
         priority += getattr(node.right, 'priority', 0)
         priority += getattr(node.left, 'priority', 0)
         node.priority = priority
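
Context for the line being simplified: `visit_packed_node_out` computes each packed node's
priority bottom-up, adding the node's own rule priority (only when its parent isn't an
intermediate node) to whatever its children have already accumulated. A toy version of that
accumulation (the node class is a stand-in, not the real SPPF node):

    class PackedNode:
        # Minimal stand-in: just enough state for the accumulation.
        def __init__(self, own_priority=0, left=None, right=None):
            self.own_priority = own_priority
            self.left, self.right = left, right
            self.priority = 0

    def visit_packed_node_out(node):
        # Mirrors the diff: own priority plus the children's totals.
        priority = node.own_priority
        priority += getattr(node.right, 'priority', 0)
        priority += getattr(node.left, 'priority', 0)
        node.priority = priority

    leaf = PackedNode(own_priority=2)
    visit_packed_node_out(leaf)
    root = PackedNode(own_priority=1, left=leaf)
    visit_packed_node_out(root)
    assert root.priority == 3   # 1 (own) + 2 (propagated from the left child)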
@@ -12,8 +12,7 @@ from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
 ###{standalone
 class LALR_Parser(object):
     def __init__(self, parser_conf, debug=False):
-        assert all(r.options is None or r.options.priority is None
-                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
+        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
         analysis = LALR_Analyzer(parser_conf, debug=debug)
         analysis.compute_lalr()
         callbacks = parser_conf.callbacks
@@ -108,7 +108,7 @@ class Reconstructor:
                                           self._match, resolve_ambiguity=True)
 
     def _build_recons_rules(self, rules):
-        expand1s = {r.origin for r in rules if r.options and r.options.expand1}
+        expand1s = {r.origin for r in rules if r.options.expand1}
 
         aliases = defaultdict(list)
         for r in rules:
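
Closing note on `expand1s`: rules marked `expand1` have their single-child trees collapsed during
parsing, and the Reconstructor collects their origins so it can account for the collapsed level
when rebuilding text. With `r.options` always present, that collection is a plain comprehension.
Illustrated with the stubs defined earlier:

    rules = [
        Rule('expr', ['term'], RuleOptions(expand1=True)),
        Rule('term', ['NUMBER']),   # options defaults to RuleOptions()
    ]
    expand1s = {r.origin for r in rules if r.options.expand1}
    assert expand1s == {'expr'}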