This repo contains code to mirror other repos. It also contains the code that is getting mirrored.

  1. """This module implements an experimental Earley parser with a dynamic lexer
  2. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
  3. https://www.sciencedirect.com/science/article/pii/S1571066108001497
  4. That is probably the best reference for understanding the algorithm here.
  5. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
  6. is better documented here:
  7. http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
  8. Instead of running a lexer beforehand, or using a costy char-by-char method, this parser
  9. uses regular expressions by necessity, achieving high-performance while maintaining all of
  10. Earley's power in parsing any CFG.
  11. """
  12. # Author: Erez Shinan (2017)
  13. # Email : erezshin@gmail.com
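
# Usage sketch (illustrative, not part of this module): this parser is normally built by
# Lark itself rather than instantiated directly. Requesting the Earley parser with a
# dynamic lexer through the public API looks roughly like this (option names assumed to
# match the standard Lark front-end):
#
#     from lark import Lark
#
#     grammar = r'''
#         start: WORD "," WORD "!"
#         WORD: /\w+/
#         %ignore " "
#     '''
#     parser = Lark(grammar, parser='earley', lexer='dynamic')
#     tree = parser.parse("Hello, World!")
#
# Passing lexer='dynamic_complete' roughly corresponds to the complete_lex flag below,
# which also considers every shorter prefix of each regexp match.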

from collections import defaultdict, deque

from ..exceptions import ParseError, UnexpectedCharacters
from ..lexer import Token
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal, Terminal
from .earley import ApplyCallbacks
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestToTreeVisitor, ForestToAmbiguousTreeVisitor, ForestSumVisitor, ForestToPyDotVisitor, SymbolNode


class Parser:
    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore=(), complete_lex=False):
        analysis = GrammarAnalyzer(parser_conf)
        self.parser_conf = parser_conf
        self.resolve_ambiguity = resolve_ambiguity
        self.ignore = [Terminal(t) for t in ignore]
        self.complete_lex = complete_lex

        self.FIRST = analysis.FIRST
        self.NULLABLE = analysis.NULLABLE
        self.callbacks = {}
        self.predictions = {}

        ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than
        #  the slow 'isupper' in is_terminal, or even calling sym.is_term directly.
        self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term }
        self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term }

        self.forest_sum_visitor = None
        for rule in parser_conf.rules:
            self.callbacks[rule] = getattr(parser_conf.callback, rule.alias or rule.origin, None)
            self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)]

            ## Detect if any rules have priorities set. If the user specified priority = "none" then
            #  the priorities will be stripped from all rules before they reach us, allowing us to
            #  skip the extra tree walk.
            if self.forest_sum_visitor is None and rule.options and rule.options.priority is not None:
                self.forest_sum_visitor = ForestSumVisitor()

        if resolve_ambiguity:
            self.forest_tree_visitor = ForestToTreeVisitor(self.callbacks, self.forest_sum_visitor)
        else:
            self.forest_tree_visitor = ForestToAmbiguousTreeVisitor(self.callbacks, self.forest_sum_visitor)

        self.term_matcher = term_matcher

    def parse(self, stream, start_symbol=None):
        start_symbol = NonTerminal(start_symbol or self.parser_conf.start)
        delayed_matches = defaultdict(list)
        match = self.term_matcher

        # Held Completions (H in E. Scott's paper).
        held_completions = {}

        # Cache for nodes & tokens created in a particular parse step.
        node_cache = {}
        token_cache = {}
        columns = []
        transitives = []

        text_line = 1
        text_column = 1

        def is_quasi_complete(item):
            if item.is_complete:
                return True

            quasi = item.advance()
            while not quasi.is_complete:
                if quasi.expect not in self.NULLABLE:
                    return False
                if quasi.rule.origin == start_symbol and quasi.expect == start_symbol:
                    return False
                quasi = quasi.advance()
            return True
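        # In other words: an item is "quasi-complete" when every symbol still pending after
        # the dot is NULLABLE, so the item could complete here without consuming further
        # input. Leo transitives below are only created through quasi-complete originators.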

        def create_leo_transitives(origin, start):
            visited = set()
            to_create = []
            trule = None
            previous = None

            ### Recursively walk backwards through the Earley sets until we find the
            #   first transitive candidate. If this is done continuously, we shouldn't
            #   have to walk more than 1 hop.
            while True:
                if origin in transitives[start]:
                    previous = trule = transitives[start][origin]
                    break

                is_empty_rule = not self.FIRST[origin]
                if is_empty_rule:
                    break

                candidates = [ candidate for candidate in columns[start] if candidate.expect is not None and origin == candidate.expect ]
                if len(candidates) != 1:
                    break
                originator = next(iter(candidates))

                if originator is None or originator in visited:
                    break

                visited.add(originator)
                if not is_quasi_complete(originator):
                    break

                trule = originator.advance()
                if originator.start != start:
                    visited.clear()

                to_create.append((origin, start, originator))
                origin = originator.rule.origin
                start = originator.start

            # If a suitable Transitive candidate is not found, bail.
            if trule is None:
                return

            #### Now walk forwards and create Transitive Items in each set we walked through; and link
            #    each transitive item to the next set forwards.
            while to_create:
                origin, start, originator = to_create.pop()
                titem = None
                if previous is not None:
                    titem = previous.next_titem = TransitiveItem(origin, trule, originator, previous.column)
                else:
                    titem = TransitiveItem(origin, trule, originator, start)
                previous = transitives[start][origin] = titem
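
        # Illustrative note: Leo's transitive items pay off on right-recursive rules, e.g.
        #
        #     list: item list | item
        #
        # where a long chain of pending completions would otherwise be re-walked on every
        # step; the TransitiveItem chain lets the completer jump straight to the topmost
        # waiting item. (The call site in predict_and_complete is currently commented out.)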

        def predict_and_complete(i, to_scan):
            """The core Earley Predictor and Completer.

            At each stage of the input, we handle any completed items (things
            that matched on the last cycle) and use those to predict what should
            come next in the input stream. The completions and any predicted
            non-terminals are recursively processed until we reach a set of
            terminals, which can be added to the scan list for the next scanner
            cycle."""
            held_completions.clear()
            column = columns[i]
            # R (items) = Ei (column.items)
            items = deque(column)
            while items:
                item = items.pop()    # remove an element, A say, from R

                ### The Earley completer
                if item.is_complete:   ### (item.s == string)
                    if item.node is None:
                        label = (item.s, item.start, i)
                        item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                        item.node.add_family(item.s, item.rule, item.start, None, None)

                    # create_leo_transitives(item.rule.origin, item.start)

                    ###R Joop Leo right recursion Completer
                    if item.rule.origin in transitives[item.start]:
                        transitive = transitives[item.start][item.s]
                        if transitive.previous in transitives[transitive.column]:
                            root_transitive = transitives[transitive.column][transitive.previous]
                        else:
                            root_transitive = transitive

                        new_item = Item(transitive.rule, transitive.ptr, transitive.start)
                        label = (root_transitive.s, root_transitive.start, i)
                        new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                        new_item.node.add_path(root_transitive, item.node)
                        if new_item.expect in self.TERMINALS:
                            # Add (B :: aC.B, h, y) to Q
                            to_scan.add(new_item)
                        elif new_item not in column:
                            # Add (B :: aC.B, h, y) to Ei and R
                            column.add(new_item)
                            items.append(new_item)
                    ###R Regular Earley completer
                    else:
                        # Empty has 0 length. If we complete an empty symbol in a particular
                        # parse step, we need to be able to use that same empty symbol to complete
                        # any predictions that result, that themselves require empty. Avoids
                        # infinite recursion on empty symbols.
                        # held_completions is 'H' in E.Scott's paper.
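                        # For example, with rules  a: b c  and  b:  (empty), the empty 'b'
                        # completes in the same column it started in. Holding its node here lets
                        # any (a -> . b c) item that is only predicted later in this same pass
                        # still be advanced past 'b' (see the held_completions check in the
                        # predictor below).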
                        is_empty_item = item.start == i
                        if is_empty_item:
                            held_completions[item.rule.origin] = item.node

                        originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s]
                        for originator in originators:
                            new_item = originator.advance()
                            label = (new_item.s, originator.start, i)
                            new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                            new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node)
                            if new_item.expect in self.TERMINALS:
                                # Add (B :: aC.B, h, y) to Q
                                to_scan.add(new_item)
                            elif new_item not in column:
                                # Add (B :: aC.B, h, y) to Ei and R
                                column.add(new_item)
                                items.append(new_item)

                ### The Earley predictor
                elif item.expect in self.NON_TERMINALS: ### (item.s == lr0)
                    new_items = []
                    for rule in self.predictions[item.expect]:
                        new_item = Item(rule, 0, i)
                        new_items.append(new_item)

                    # Process any held completions (H).
                    if item.expect in held_completions:
                        new_item = item.advance()
                        label = (new_item.s, item.start, i)
                        new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                        new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect])
                        new_items.append(new_item)

                    for new_item in new_items:
                        if new_item.expect in self.TERMINALS:
                            to_scan.add(new_item)
                        elif new_item not in column:
                            column.add(new_item)
                            items.append(new_item)
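
        # Worked example (illustrative): suppose the grammar has  sum: sum "+" product
        # and a 'product' item has just completed at column i. The completer looks up every
        # (sum -> sum "+" . product) item waiting in the column where that product started,
        # advances it to (sum -> sum "+" product .), and records the derivation on its SPPF
        # node. The predictor then expands any newly expected non-terminals in column i,
        # while items expecting a terminal are handed to to_scan for the next scan() call.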

        def scan(i, to_scan):
            """The core Earley Scanner.

            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""

            # 1) Loop the expectations and ask the lexer to match.
            # Since a regexp match looks forward on the input stream, but we only want to
            # process a token at the point in the stream where it completes, we push all
            # matches into a buffer (delayed_matches), keyed by the position where they
            # end, to be picked up on the parse step that reaches that position.
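            # For instance (illustrative): if a NUMBER regexp matches "123" starting at
            # position i, its Token is appended to delayed_matches[i+3] and is only
            # consumed once the main loop has advanced to the position where the match ends.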
            for item in set(to_scan):
                m = match(item.expect, stream, i)
                if m:
                    t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                    delayed_matches[m.end()].append( (item, i, t) )

                    if self.complete_lex:
                        s = m.group(0)
                        for j in range(1, len(s)):
                            m = match(item.expect, s[:-j])
                            if m:
                                t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                                delayed_matches[i+m.end()].append( (item, i, t) )

                    # Remove any items that successfully matched in this pass from the to_scan buffer.
                    # This ensures we don't carry over tokens that already matched, if we're ignoring below.
                    to_scan.remove(item)

            # 3) Process any ignores. This is typically used for e.g. whitespace.
            # We carry over any unmatched items from the to_scan buffer to be matched again after
            # the ignore. This should allow us to use ignored symbols in non-terminals to implement
            # e.g. mandatory spacing.
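            # (Illustrative: with ignore=('WS',) a run of spaces matched at position i lets
            # every still-unmatched item retry just past the whitespace, rather than failing
            # at the space character.)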
            for x in self.ignore:
                m = match(x, stream, i)
                if m:
                    # Carry over any items still in the scan buffer, to past the end of the ignored items.
                    delayed_matches[m.end()].extend([(item, i, None) for item in to_scan ])

                    # If we're ignoring up to the end of the file, carry over the start symbol if it already completed.
                    delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol])

            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            next_transitives = dict()
            transitives.append(next_transitives)

            ## 4) Process Tokens from delayed_matches.
            # This is the core of the Earley scanner. Create an SPPF node for each Token,
            # and create the symbol node in the SPPF tree. Advance the item that completed,
            # and add the resulting new item to either the Earley set (for processing by the
            # completer/predictor) or the to_scan buffer for the next parse step.
            for item, start, token in delayed_matches[i+1]:
                if token is not None:
                    token.end_line = text_line
                    token.end_column = text_column + 1

                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)
                else:
                    new_item = item

                if new_item.expect in self.TERMINALS:
                    # add (B ::= Aai+1.B, h, y) to Q'
                    next_to_scan.add(new_item)
                else:
                    # add (B ::= Aa+1.B, h, y) to Ei+1
                    next_set.add(new_item)

            del delayed_matches[i+1]    # No longer needed, so unburden memory

            if not next_set and not delayed_matches and not next_to_scan:
                raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect for item in to_scan}, set(to_scan))

            return next_to_scan

        # Main loop starts
        columns.append(set())
        transitives.append(dict())

        ## The scan buffer. 'Q' in E.Scott's paper.
        to_scan = set()

        ## Predict for the start_symbol.
        # Add predicted items to the first Earley set (for the predictor) if they
        # result in a non-terminal, or the scanner if they result in a terminal.
        for rule in self.predictions[start_symbol]:
            item = Item(rule, 0, 0)
            if item.expect in self.TERMINALS:
                to_scan.add(item)
            else:
                columns[0].add(item)

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
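        # Note: 'stream' is consumed one character at a time here; each character advances
        # exactly one Earley column, and multi-character terminals only surface once
        # delayed_matches reaches the position where their regexp match ends.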
        i = 0
        for token in stream:
            predict_and_complete(i, to_scan)

            # Clear the node_cache and token_cache, which are only relevant for each
            # step in the Earley pass.
            node_cache.clear()
            token_cache.clear()
            node_cache.clear()

            to_scan = scan(i, to_scan)

            if token == '\n':
                text_line += 1
                text_column = 1
            else:
                text_column += 1
            i += 1

        predict_and_complete(i, to_scan)

        ## Column is now the final column in the parse. If the parse was successful, the start
        #  symbol should have been completed in the last step of the Earley cycle, and will be in
        #  this column. Find the item for the start_symbol, which is the root of the SPPF tree.
        solutions = [n.node for n in columns[i] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]

        if not solutions:
            expected_tokens = [t.expect for t in to_scan]
            raise ParseError('Unexpected end of input! Expecting a terminal of: %s' % expected_tokens)
        elif len(solutions) > 1:
            raise Exception('Earley should not generate more than one start symbol - bug')

        # Perform our SPPF -> AST conversion using the right ForestVisitor.
        return self.forest_tree_visitor.go(solutions[0])