Browse Source

More cleanup

tags/gm/2021-09-23T00Z/github.com--lark-parser-lark/0.6.6
Erez Sh 6 years ago
parent
commit
4de71293e5
2 changed files with 1 additions and 23 deletions
  1. +0
    -3
      lark/parsers/earley.py
  2. +1
    -20
      lark/parsers/xearley.py

+ 0
- 3
lark/parsers/earley.py View File

@@ -153,7 +153,6 @@ class Parser:
match = self.term_matcher

# Cache for nodes & tokens created in a particular parse step.
token_cache = {}
columns = []
transitives = []

@@ -285,8 +284,6 @@ class Parser:
for token in stream:
self.predict_and_complete(i, to_scan, columns, transitives)

# Clear the node_cache and token_cache, which are only relevant for each
# step in the Earley pass.
to_scan = scan(i, token, to_scan)
i += 1



+ 1
- 20
lark/parsers/xearley.py View File

@@ -39,27 +39,12 @@ class Parser(BaseParser):
match = self.term_matcher

# Cache for nodes & tokens created in a particular parse step.
token_cache = {}
columns = []
transitives = []

text_line = 1
text_column = 1

def is_quasi_complete(item):
    """Return True if *item* is complete, or could complete by consuming
    only nullable symbols (i.e. without reading any further input).

    The start rule is never considered quasi-complete through itself,
    to avoid collapsing the top-level derivation.
    """
    if item.is_complete:
        return True
    candidate = item.advance()
    while not candidate.is_complete:
        expect = candidate.expect
        # A non-nullable expected symbol means real input is still required.
        if expect not in self.NULLABLE:
            return False
        # Don't let the start rule short-circuit into itself.
        if expect == start_symbol and candidate.rule.origin == start_symbol:
            return False
        candidate = candidate.advance()
    return True


def scan(i, to_scan):
"""The core Earley Scanner.

@@ -111,8 +96,7 @@ class Parser(BaseParser):
next_to_scan = set()
next_set = set()
columns.append(next_set)
next_transitives = dict()
transitives.append(next_transitives)
transitives.append({})

## 4) Process Tokens from delayed_matches.
# This is the core of the Earley scanner. Create an SPPF node for each Token,
@@ -171,9 +155,6 @@ class Parser(BaseParser):
for token in stream:
self.predict_and_complete(i, to_scan, columns, transitives)

# Clear the node_cache and token_cache, which are only relevant for each
# step in the Earley pass.
token_cache.clear()
to_scan = scan(i, to_scan)

if token == '\n':


Loading…
Cancel
Save