diff --git a/examples/standalone/json_parser.py b/examples/standalone/json_parser.py
index d424f1b..73acf9c 100644
--- a/examples/standalone/json_parser.py
+++ b/examples/standalone/json_parser.py
@@ -1305,8 +1305,7 @@ class ParseTreeBuilder:
 
 class LALR_Parser(object):
     def __init__(self, parser_conf, debug=False):
-        assert all(r.options is None or r.options.priority is None
-                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
+        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
         analysis = LALR_Analyzer(parser_conf, debug=debug)
         analysis.compute_lookahead()
         callbacks = parser_conf.callbacks
@@ -1508,7 +1507,7 @@ class WithLexer(Serialize):
         inst.postlex = postlex
         inst.parser = LALR_Parser.deserialize(inst.parser, memo, callbacks)
         return inst
-    
+
     def _serialize(self, data, memo):
         data['parser'] = data['parser'].serialize(memo)
 
@@ -1740,14 +1739,14 @@ class Lark(Serialize):
         # This replaces the old 'resolve__antiscore_sum' option.
         if self.options.priority == 'invert':
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = -rule.options.priority
         # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
         elif self.options.priority == None:
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = None
 
         self.lexer_conf = LexerConf(self.terminals, self.ignore_tokens, self.options.postlex, self.options.lexer_callbacks)
diff --git a/lark/grammar.py b/lark/grammar.py
index 91435b2..cf8cf64 100644
--- a/lark/grammar.py
+++ b/lark/grammar.py
@@ -81,7 +81,7 @@ class Rule(Serialize):
         self.expansion = expansion
         self.alias = alias
         self.order = order
-        self.options = options
+        self.options = options or RuleOptions()
         self._hash = hash((self.origin, tuple(self.expansion)))
 
     def _deserialize(self):
diff --git a/lark/lark.py b/lark/lark.py
index 6e51914..36cb4b6 100644
--- a/lark/lark.py
+++ b/lark/lark.py
@@ -187,14 +187,14 @@ class Lark(Serialize):
         # This replaces the old 'resolve__antiscore_sum' option.
         if self.options.priority == 'invert':
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = -rule.options.priority
         # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
         elif self.options.priority == None:
             for rule in self.rules:
-                if rule.options and rule.options.priority is not None:
+                if rule.options.priority is not None:
                     rule.options.priority = None
 
         # TODO Deprecate lexer_callbacks?
diff --git a/lark/load_grammar.py b/lark/load_grammar.py
index bb8fc2f..2cd834c 100644
--- a/lark/load_grammar.py
+++ b/lark/load_grammar.py
@@ -503,7 +503,7 @@ class Grammar:
         ebnf_to_bnf = EBNF_to_BNF()
         rules = []
         for name, rule_tree, options in rule_defs:
-            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
+            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options.keep_all_tokens else None
             tree = transformer.transform(rule_tree)
             res = ebnf_to_bnf.transform(tree)
             rules.append((name, res, options))
@@ -527,7 +527,7 @@ class Grammar:
 
                 empty_indices = [x==_EMPTY for x in expansion]
                 if any(empty_indices):
-                    exp_options = copy(options) if options else RuleOptions()
+                    exp_options = copy(options)
                     exp_options.empty_indices = empty_indices
                     expansion = [x for x in expansion if x!=_EMPTY]
                 else:
diff --git a/lark/parse_tree_builder.py b/lark/parse_tree_builder.py
index 6d298f4..4ee0071 100644
--- a/lark/parse_tree_builder.py
+++ b/lark/parse_tree_builder.py
@@ -225,12 +225,12 @@ class ParseTreeBuilder:
     def _init_builders(self, rules):
         for rule in rules:
             options = rule.options
-            keep_all_tokens = self.always_keep_all_tokens or (options.keep_all_tokens if options else False)
-            expand_single_child = options.expand1 if options else False
+            keep_all_tokens = self.always_keep_all_tokens or options.keep_all_tokens
+            expand_single_child = options.expand1
 
             wrapper_chain = list(filter(None, [
                 (expand_single_child and not rule.alias) and ExpandSingleChild,
-                maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders and options else None),
+                maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None),
                 self.propagate_positions and PropagatePositions,
                 self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens),
             ]))
diff --git a/lark/parsers/cyk.py b/lark/parsers/cyk.py
index 7b25609..ff0924f 100644
--- a/lark/parsers/cyk.py
+++ b/lark/parsers/cyk.py
@@ -96,7 +96,7 @@ class Parser(object):
         assert all(isinstance(x, Symbol) for x in lark_rule.expansion)
         return Rule(
             lark_rule.origin, lark_rule.expansion,
-            weight=lark_rule.options.priority if lark_rule.options and lark_rule.options.priority else 0,
+            weight=lark_rule.options.priority if lark_rule.options.priority else 0,
             alias=lark_rule)
 
     def parse(self, tokenized, start):  # pylint: disable=invalid-name
diff --git a/lark/parsers/earley.py b/lark/parsers/earley.py
index e18d26c..a4ffead 100644
--- a/lark/parsers/earley.py
+++ b/lark/parsers/earley.py
@@ -45,7 +45,7 @@ class Parser:
             # the priorities will be stripped from all rules before they reach us, allowing us to
             # skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
             # on any rules.
-            if self.forest_sum_visitor is None and rule.options and rule.options.priority is not None:
+            if self.forest_sum_visitor is None and rule.options.priority is not None:
                 self.forest_sum_visitor = ForestSumVisitor
 
         self.term_matcher = term_matcher
diff --git a/lark/parsers/earley_forest.py b/lark/parsers/earley_forest.py
index e6179e6..c8b4f25 100644
--- a/lark/parsers/earley_forest.py
+++ b/lark/parsers/earley_forest.py
@@ -250,7 +250,7 @@ class ForestSumVisitor(ForestVisitor):
         return iter(node.children)
 
     def visit_packed_node_out(self, node):
-        priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options and node.rule.options.priority else 0
+        priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options.priority else 0
         priority += getattr(node.right, 'priority', 0)
         priority += getattr(node.left, 'priority', 0)
         node.priority = priority
diff --git a/lark/parsers/lalr_parser.py b/lark/parsers/lalr_parser.py
index 82c8bba..4265ca5 100644
--- a/lark/parsers/lalr_parser.py
+++ b/lark/parsers/lalr_parser.py
@@ -12,8 +12,7 @@ from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
 ###{standalone
 
 class LALR_Parser(object):
     def __init__(self, parser_conf, debug=False):
-        assert all(r.options is None or r.options.priority is None
-                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
+        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
         analysis = LALR_Analyzer(parser_conf, debug=debug)
         analysis.compute_lalr()
         callbacks = parser_conf.callbacks
diff --git a/lark/reconstruct.py b/lark/reconstruct.py
index b7a6659..bd7b6a0 100644
--- a/lark/reconstruct.py
+++ b/lark/reconstruct.py
@@ -108,7 +108,7 @@ class Reconstructor:
                                       self._match, resolve_ambiguity=True)
 
     def _build_recons_rules(self, rules):
-        expand1s = {r.origin for r in rules if r.options and r.options.expand1}
+        expand1s = {r.origin for r in rules if r.options.expand1}
 
         aliases = defaultdict(list)
         for r in rules:
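
The common thread in these hunks: after the lark/grammar.py change, Rule.options is always a RuleOptions instance (falling back to a default-constructed one), so every "rule.options and ..." / "if options else ..." guard elsewhere can be dropped. Below is a minimal, self-contained sketch of that pattern, using simplified stand-in classes rather than the real lark constructors (which take more arguments).

# Hypothetical stand-ins for lark's Rule / RuleOptions, trimmed to the fields
# touched by this diff (priority, expand1, keep_all_tokens, empty_indices).
class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, priority=None, empty_indices=()):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.priority = priority
        self.empty_indices = empty_indices

class Rule:
    def __init__(self, origin, expansion, options=None):
        self.origin = origin
        self.expansion = expansion
        self.options = options or RuleOptions()  # never None after this change

rules = [Rule('start', ['a']), Rule('expr', ['b'], RuleOptions(priority=2))]

# Before: every consumer had to guard against options being None.
old_style = any(r.options and r.options.priority is not None for r in rules)

# After: the guard disappears, as in the LALR assert and the Earley/CYK priority lookups.
new_style = any(r.options.priority is not None for r in rules)

assert old_style == new_style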