
Removed old versions of Earley parsers that are no longer used

tags/gm/2021-09-23T00Z/github.com--lark-parser-lark/0.5.1
Erez Shinan 6 years ago
parent
commit
eb007b297c
3 changed files with 0 additions and 336 deletions
  1. +0 -1    lark/parsers/grammar_analysis.py
  2. +0 -155  lark/parsers/nearley.py
  3. +0 -180  lark/parsers/old_earley.py
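
These removals leave lark's current Earley implementation as the only one; it is not imported from these modules but reached through the main Lark interface. A minimal usage sketch, assuming the 0.5.x API (the grammar is invented for illustration):

    from lark import Lark

    grammar = '''
    start: "a" start | "b"
    '''

    # 'earley' is the default parser algorithm in this version of lark
    parser = Lark(grammar, parser='earley')
    print(parser.parse("aab").pretty())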

+0 -1  lark/parsers/grammar_analysis.py

@@ -59,7 +59,6 @@ def calculate_sets(rules):
 
     Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets"""
     symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules}
-    symbols.add('$root')   # what about other unused rules?
 
     # foreach grammar rule X ::= Y(1) ... Y(k)
     #   if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then
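
The comment above begins the NULLABLE fixpoint that calculate_sets goes on to compute. As an aside, a minimal standalone sketch of just that step, using an invented (origin, expansion) pair format rather than lark's Rule objects:

    def calculate_nullable(rules):
        # X is nullable if some rule X ::= Y(1) ... Y(k) has k=0,
        # or every Y(i) is already known to be nullable.
        nullable = set()
        changed = True
        while changed:
            changed = False
            for origin, expansion in rules:
                if origin not in nullable and all(sym in nullable for sym in expansion):
                    nullable.add(origin)
                    changed = True
        return nullable

    print(calculate_nullable([('a', []), ('b', ['a', 'a']), ('c', ['b', 'x'])]))
    # -> {'a', 'b'}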


+0 -155  lark/parsers/nearley.py

@@ -1,155 +0,0 @@
"My name is Earley"

from ..utils import classify
from ..common import ParseError, UnexpectedToken

try:
xrange
except NameError:
xrange = range

class MatchFailed(object):
pass

class AbortParseMatch(Exception):
pass


class Rule(object):
def __init__(self, name, symbols, postprocess):
self.name = name
self.symbols = symbols
self.postprocess = postprocess

class State(object):
def __init__(self, rule, expect, reference, data=None):
self.rule = rule
self.expect = expect
self.reference = reference
self.data = data or []

self.is_complete = (self.expect == len(self.rule.symbols))
if not self.is_complete:
self.expect_symbol = self.rule.symbols[self.expect]
self.is_terminal = isinstance(self.expect_symbol, tuple)
else:
self.is_terminal = False

def next_state(self, data):
return State(self.rule, self.expect+1, self.reference, self.data + [data])

def consume_terminal(self, inp):
if not self.is_complete and self.is_terminal:
# PORT: originally tests regexp

if self.expect_symbol[1] is not None:
match = self.expect_symbol[1].match(inp)
if match:
return self.next_state(inp)

elif self.expect_symbol[0] == inp.type:
return self.next_state(inp)

def consume_nonterminal(self, inp):
if not self.is_complete and not self.is_terminal:

if self.expect_symbol == inp:
return self.next_state(inp)

def process(self, location, ind, table, rules, added_rules):

if self.is_complete:
# Completed a rule
if self.rule.postprocess:
try:
self.data = self.rule.postprocess(self.data)
except AbortParseMatch:
self.data = MatchFailed

if self.data is not MatchFailed:
for s in table[self.reference]:
x = s.consume_nonterminal(self.rule.name)
if x:
x.data[-1] = self.data
x.epsilon_closure(location, ind, table)

else:
exp = self.rule.symbols[self.expect]
if isinstance(exp, tuple):
return

for r in rules[exp]:
assert r.name == exp
if r not in added_rules:
if r.symbols:
added_rules.add(r)
State(r, 0, location).epsilon_closure(location, ind, table)
else:
# Empty rule
new_copy = self.consume_nonterminal(r.name)
new_copy.data[-1] = r.postprocess([]) if r.postprocess else []

new_copy.epsilon_closure(location, ind, table)

def epsilon_closure(self, location, ind, table):
col = table[location]
col.append(self)

if not self.is_complete:
for i in xrange(ind):
state = col[i]
if state.is_complete and state.reference == location:
x = self.consume_nonterminal(state.rule.name)
if x:
x.data[-1] = state.data
x.epsilon_closure(location, ind, table)


class Parser(object):
def __init__(self, rules, start=None):
self.rules = [Rule(r['name'], r['symbols'], r.get('postprocess', None)) for r in rules]
self.rules_by_name = classify(self.rules, lambda r: r.name)
self.start = start or self.rules[0].name

def advance_to(self, table, added_rules):
n = len(table)-1
for w, s in enumerate(table[n]):
s.process(n, w, table, self.rules_by_name, added_rules)

def parse(self, stream):
initial_rules = set(self.rules_by_name[self.start])
table = [[State(r, 0, 0) for r in initial_rules]]
self.advance_to(table, initial_rules)

i = 0

while i < len(stream):
col = []

token = stream[i]
for s in table[-1]:
x = s.consume_terminal(token)
if x:
col.append(x)

if not col:
expected = {s.expect_symbol for s in table[-1] if s.is_terminal}
raise UnexpectedToken(stream[i], expected, stream, i)

table.append(col)
self.advance_to(table, set())

i += 1

res = list(self.finish(table))
if not res:
raise ParseError('Incomplete parse')
return res

def finish(self, table):
for t in table[-1]:
if (t.rule.name == self.start
and t.expect == len(t.rule.symbols)
and t.reference == 0
and t.data is not MatchFailed):
yield t.data
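
For reference, State above tracks (rule, dot position, origin column); terminal symbols are (token type, regexp-or-None) tuples, while nonterminals are plain rule names. A hypothetical usage sketch of this removed Parser, runnable only inside the module's own context (Token is invented here; consume_terminal only needs a .type attribute):

    from collections import namedtuple

    Token = namedtuple('Token', 'type')

    rules = [
        {'name': 'ab',
         'symbols': [('A', None), ('B', None)],   # match two tokens by type
         'postprocess': lambda data: [t.type for t in data]},
    ]

    p = Parser(rules, start='ab')
    print(p.parse([Token('A'), Token('B')]))   # -> [['A', 'B']]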

+0 -180  lark/parsers/old_earley.py

@@ -1,180 +0,0 @@
"This module implements an Earley Parser"

# The algorithm keeps track of each state set, using a corresponding Column instance.
# Column keeps track of new items using NewsList instances.
#
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com

from ..common import ParseError, UnexpectedToken, is_terminal
from .grammar_analysis import GrammarAnalyzer

class EndToken:
type = '$end'

END_TOKEN = EndToken()

class Item(object):
def __init__(self, rule, ptr, start, data):
self.rule = rule
self.ptr = ptr
self.start = start
self.data = data

@property
def expect(self):
return self.rule.expansion[self.ptr]

@property
def is_complete(self):
return self.ptr == len(self.rule.expansion)

def advance(self, data):
return Item(self.rule, self.ptr+1, self.start, self.data + [data])

def __eq__(self, other):
return self.start is other.start and self.ptr == other.ptr and self.rule == other.rule
def __hash__(self):
return hash((self.rule, self.ptr, id(self.start)))

def __repr__(self):
before = map(str, self.rule.expansion[:self.ptr])
after = map(str, self.rule.expansion[self.ptr:])
return '<(%d) %s : %s * %s>' % (id(self.start), self.rule.origin, ' '.join(before), ' '.join(after))


class NewsList(list):
"Keeps track of newly added items (append-only)"

def __init__(self, initial=None):
list.__init__(self, initial or [])
self.last_iter = 0

def get_news(self):
i = self.last_iter
self.last_iter = len(self)
return self[i:]


class Column:
"An entry in the table, aka Earley Chart"
def __init__(self):
self.to_reduce = NewsList()
self.to_predict = NewsList()
self.to_scan = NewsList()
self.item_count = 0

self.added = set()

def add(self, items):
"""Sort items into scan/predict/reduce newslists

Makes sure only unique items are added.
"""

added = self.added
for item in items:

if item.is_complete:

# (We must allow repetition of empty rules)
# if item.rule.expansion:

                # This is an important test to avoid infinite-loops,
                # For example for the rule:
                #    a: a | "b"
                # If we can detect these cases statically, we can remove
                # this test and gain a tiny performance boost
                #
                # if item in added:
                #     continue
                # added.add(item)

                self.to_reduce.append(item)
            else:
                if is_terminal(item.expect):
                    self.to_scan.append(item)
                else:
                    if item in added:
                        continue
                    added.add(item)
                    self.to_predict.append(item)

            self.item_count += 1  # Only count if actually added

    def __nonzero__(self):
        return bool(self.item_count)

class Parser:
    def __init__(self, parser_conf):

        self.analysis = GrammarAnalyzer(parser_conf.rules, parser_conf.start)
        self.start = parser_conf.start

        self.postprocess = {}
        self.predictions = {}
        for rule in self.analysis.rules:
            if rule.origin != '$root':  # XXX kinda ugly
                a = rule.alias
                self.postprocess[rule] = a if callable(a) else getattr(parser_conf.callback, a)
                self.predictions[rule.origin] = [x.rule for x in self.analysis.expand_rule(rule.origin)]

    def parse(self, stream, start=None):
        # Define parser functions
        start = start or self.start

        def predict(nonterm, i):
            assert not is_terminal(nonterm), nonterm
            return [Item(rule, 0, i, []) for rule in self.predictions[nonterm]]

        def complete(item):
            name = item.rule.origin
            item.data = self.postprocess[item.rule](item.data)
            return [i.advance(item.data) for i in item.start.to_predict if i.expect == name]

        def process_column(i, token, cur_set):
            next_set = Column()

            while True:
                to_predict = {x.expect for x in cur_set.to_predict.get_news()
                              if x.ptr}  # if not part of an already predicted batch
                to_reduce = cur_set.to_reduce.get_news()
                if not (to_predict or to_reduce):
                    break

                for nonterm in to_predict:
                    cur_set.add( predict(nonterm, cur_set) )
                for item in to_reduce:
                    cur_set.add( complete(item) )

            if token is not END_TOKEN:
                for item in cur_set.to_scan.get_news():
                    match = item.expect[0](token) if callable(item.expect[0]) else item.expect[0] == token.type
                    if match:
                        next_set.add([item.advance(stream[i])])

            if not next_set and token is not END_TOKEN:
                expect = {i.expect[-1] for i in cur_set.to_scan}
                raise UnexpectedToken(token, expect, stream, i)

            return cur_set, next_set

        # Main loop starts
        column0 = Column()
        column0.add(predict(start, column0))

        cur_set = column0
        for i, char in enumerate(stream):
            _, cur_set = process_column(i, char, cur_set)

        last_set, _ = process_column(len(stream), END_TOKEN, cur_set)

        # Parse ended. Now build a parse tree
        solutions = [n.data for n in last_set.to_reduce
                     if n.rule.origin == start and n.start is column0]

        if not solutions:
            raise ParseError('Incomplete parse: Could not find a solution to input')

        return solutions
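
A closing note on the driver above: process_column loops until the news from both the predict and reduce lists is exhausted, so completions can keep adding items without anything being processed twice. A minimal sketch of NewsList's contract in isolation:

    nl = NewsList([1, 2])
    nl.append(3)
    print(nl.get_news())   # -> [1, 2, 3]  (everything is news on the first call)
    nl.append(4)
    print(nl.get_news())   # -> [4]  (only items appended since the previous call)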
