  1. """This module implements an scanerless Earley parser.
  2. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
  3. https://www.sciencedirect.com/science/article/pii/S1571066108001497
  4. That is probably the best reference for understanding the algorithm here.
  5. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
  6. is better documented here:
  7. http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
  8. """
from collections import deque

from ..tree import Tree
from ..visitors import Transformer_InPlace, v_args
from ..exceptions import UnexpectedEOF, UnexpectedToken
from ..utils import logger
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestSumVisitor, SymbolNode, ForestToParseTree


class Parser:
    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=False, tree_class=Tree):
        analysis = GrammarAnalyzer(parser_conf)
        self.parser_conf = parser_conf
        self.resolve_ambiguity = resolve_ambiguity
        self.debug = debug
        self.tree_class = tree_class

        self.FIRST = analysis.FIRST
        self.NULLABLE = analysis.NULLABLE
        self.callbacks = parser_conf.callbacks
        self.predictions = {}

        ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than
        #  the slow 'isupper' in is_terminal.
        self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term }
        self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term }

        self.forest_sum_visitor = None
        for rule in parser_conf.rules:
            if rule.origin not in self.predictions:
                self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)]

            ## Detect if any rules have priorities set. If the user specified priority = "none" then
            #  the priorities will be stripped from all rules before they reach us, allowing us to
            #  skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
            #  on any rules.
            if self.forest_sum_visitor is None and rule.options.priority is not None:
                self.forest_sum_visitor = ForestSumVisitor

        self.term_matcher = term_matcher

    def predict_and_complete(self, i, to_scan, columns, transitives):
        """The core Earley Predictor and Completer.

        At each stage of the input, we handle any completed items (things
        that matched on the last cycle) and use those to predict what should
        come next in the input stream. The completions and any predicted
        non-terminals are recursively processed until we reach a set of items
        that can be added to the scan list for the next scanner cycle."""
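        # Illustrative walk-through (a sketch, using a hypothetical toy grammar
        #   start -> ab ;  ab -> "a" "b"):
        # completing the item `ab ::= "a" "b" *` at column i looks back at
        # columns[item.start], advances every originator of the form
        # `start ::= * ab` found there, and re-queues the advanced items so
        # they can in turn be completed or handed to the scanner.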
        # Held Completions (H in E. Scott's paper).
        node_cache = {}
        held_completions = {}

        column = columns[i]
        # R (items) = Ei (column.items)
        items = deque(column)
        while items:
            item = items.pop()    # remove an element, A say, from R

            ### The Earley completer
            if item.is_complete:   ### (item.s == string)
                if item.node is None:
                    label = (item.s, item.start, i)
                    item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    item.node.add_family(item.s, item.rule, item.start, None, None)

                # create_leo_transitives(item.rule.origin, item.start)

                ###R Joop Leo right recursion Completer
                if item.rule.origin in transitives[item.start]:
                    transitive = transitives[item.start][item.s]
                    if transitive.previous in transitives[transitive.column]:
                        root_transitive = transitives[transitive.column][transitive.previous]
                    else:
                        root_transitive = transitive

                    new_item = Item(transitive.rule, transitive.ptr, transitive.start)
                    label = (root_transitive.s, root_transitive.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_path(root_transitive, item.node)
                    if new_item.expect in self.TERMINALS:
                        # Add (B :: aC.B, h, y) to Q
                        to_scan.add(new_item)
                    elif new_item not in column:
                        # Add (B :: aC.B, h, y) to Ei and R
                        column.add(new_item)
                        items.append(new_item)

                ###R Regular Earley completer
                else:
                    # Empty has 0 length. If we complete an empty symbol in a particular
                    # parse step, we need to be able to use that same empty symbol to complete
                    # any predictions that result, that themselves require empty. Avoids
                    # infinite recursion on empty symbols.
                    # held_completions is 'H' in E. Scott's paper.
                    is_empty_item = item.start == i
                    if is_empty_item:
                        held_completions[item.rule.origin] = item.node

                    originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s]
                    for originator in originators:
                        new_item = originator.advance()
                        label = (new_item.s, originator.start, i)
                        new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                        new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node)
                        if new_item.expect in self.TERMINALS:
                            # Add (B :: aC.B, h, y) to Q
                            to_scan.add(new_item)
                        elif new_item not in column:
                            # Add (B :: aC.B, h, y) to Ei and R
                            column.add(new_item)
                            items.append(new_item)

            ### The Earley predictor
            elif item.expect in self.NON_TERMINALS: ### (item.s == lr0)
                new_items = []
                for rule in self.predictions[item.expect]:
                    new_item = Item(rule, 0, i)
                    new_items.append(new_item)

                # Process any held completions (H).
                if item.expect in held_completions:
                    new_item = item.advance()
                    label = (new_item.s, item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect])
                    new_items.append(new_item)

                for new_item in new_items:
                    if new_item.expect in self.TERMINALS:
                        to_scan.add(new_item)
                    elif new_item not in column:
                        column.add(new_item)
                        items.append(new_item)

    def _parse(self, stream, columns, to_scan, start_symbol=None):
        def is_quasi_complete(item):
            if item.is_complete:
                return True

            quasi = item.advance()
            while not quasi.is_complete:
                if quasi.expect not in self.NULLABLE:
                    return False
                if quasi.rule.origin == start_symbol and quasi.expect == start_symbol:
                    return False
                quasi = quasi.advance()
            return True

        def create_leo_transitives(origin, start):
            visited = set()
            to_create = []
            trule = None
            previous = None

            ### Recursively walk backwards through the Earley sets until we find the
            #   first transitive candidate. If this is done continuously, we shouldn't
            #   have to walk more than 1 hop.
            while True:
                if origin in transitives[start]:
                    previous = trule = transitives[start][origin]
                    break

                is_empty_rule = not self.FIRST[origin]
                if is_empty_rule:
                    break

                candidates = [ candidate for candidate in columns[start] if candidate.expect is not None and origin == candidate.expect ]
                if len(candidates) != 1:
                    break
                originator = next(iter(candidates))

                if originator is None or originator in visited:
                    break

                visited.add(originator)
                if not is_quasi_complete(originator):
                    break

                trule = originator.advance()
                if originator.start != start:
                    visited.clear()

                to_create.append((origin, start, originator))
                origin = originator.rule.origin
                start = originator.start

            # If a suitable Transitive candidate is not found, bail.
            if trule is None:
                return

            #### Now walk forwards and create Transitive Items in each set we walked through; and link
            #    each transitive item to the next set forwards.
            while to_create:
                origin, start, originator = to_create.pop()
                titem = None
                if previous is not None:
                    titem = previous.next_titem = TransitiveItem(origin, trule, originator, previous.column)
                else:
                    titem = TransitiveItem(origin, trule, originator, start)
                previous = transitives[start][origin] = titem
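
        # Sketch of why these transitive items matter, using a hypothetical
        # right-recursive rule  xs -> "x" xs | "x" :  without Leo items,
        # completing the innermost `xs` would cascade one completion per
        # nesting level on every step. The transitive item recorded above
        # lets the completer jump straight to the topmost originator
        # (via new_item.node.add_path) in a single hop.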

        def scan(i, token, to_scan):
            """The core Earley Scanner.

            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""
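            # For example (a sketch): if to_scan holds an item expecting a
            # hypothetical terminal NAME and `match` accepts the current
            # token as NAME, the advanced item is placed in the next Earley
            # set, or kept in the scan buffer if the next expected symbol
            # is again a terminal.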
            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            transitives.append({})
            node_cache = {}

            for item in set(to_scan):
                if match(item.expect, token):
                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)

                    if new_item.expect in self.TERMINALS:
                        # add (B ::= Aai+1.B, h, y) to Q'
                        next_to_scan.add(new_item)
                    else:
                        # add (B ::= Aai+1.B, h, y) to Ei+1
                        next_set.add(new_item)

            if not next_set and not next_to_scan:
                expect = {i.expect.name for i in to_scan}
                raise UnexpectedToken(token, expect, considered_rules=set(to_scan))

            return next_to_scan

        # Define parser functions
        match = self.term_matcher

        # Cache for nodes & tokens created in a particular parse step.
        transitives = [{}]

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            to_scan = scan(i, token, to_scan)
            i += 1

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse.
        assert i == len(columns) - 1
        return to_scan

    def parse(self, stream, start):
        assert start, start
        start_symbol = NonTerminal(start)

        columns = [set()]
        to_scan = set()     # The scan buffer. 'Q' in E. Scott's paper.

        ## Predict for the start_symbol.
        # Add predicted items to the first Earley set (for the predictor) if they
        # result in a non-terminal, or the scanner if they result in a terminal.
        for rule in self.predictions[start_symbol]:
            item = Item(rule, 0, 0)
            if item.expect in self.TERMINALS:
                to_scan.add(item)
            else:
                columns[0].add(item)

        to_scan = self._parse(stream, columns, to_scan, start_symbol)

        # If the parse was successful, the start symbol should have been completed
        # in the last step of the Earley cycle, and will be in this column. Find
        # the item for the start_symbol, which is the root of the SPPF tree.
        solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]

        # Check for solutions before the debug walk, so a failed parse raises
        # cleanly instead of indexing into an empty list below.
        if not solutions:
            expected_tokens = [t.expect for t in to_scan]
            raise UnexpectedEOF(expected_tokens)
        elif len(solutions) > 1:
            assert False, 'Earley should not generate multiple start symbol items!'

        if self.debug:
            from .earley_forest import ForestToPyDotVisitor
            try:
                debug_walker = ForestToPyDotVisitor()
            except ImportError:
                logger.warning("Cannot find dependency 'pydot', will not generate sppf debug image")
            else:
                debug_walker.visit(solutions[0], "sppf.png")

        if self.tree_class is not None:
            # Perform our SPPF -> AST conversion
            transformer = ForestToParseTree(self.tree_class, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity)
            return transformer.transform(solutions[0])

        # Return the root of the SPPF.
        return solutions[0]


class ApplyCallbacks(Transformer_InPlace):
    def __init__(self, postprocess):
        self.postprocess = postprocess

    @v_args(meta=True)
    def drv(self, children, meta):
        return self.postprocess[meta.rule](children)
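

if __name__ == '__main__':
    # Minimal usage sketch: this Parser is normally constructed indirectly, by
    # selecting parser='earley' on the public Lark API. The toy grammar below
    # is illustrative only, and this block assumes the `lark` package is
    # importable (e.g. run as `python -m lark.parsers.earley`).
    from lark import Lark

    demo = Lark(r"""
        start: "hello" NAME
        NAME: /\w+/
        %ignore " "
    """, parser='earley')
    print(demo.parse("hello world").pretty())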