  1. """This module implements an experimental Earley parser with a dynamic lexer
  2. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
  3. https://www.sciencedirect.com/science/article/pii/S1571066108001497
  4. That is probably the best reference for understanding the algorithm here.
  5. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
  6. is better documented here:
  7. http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
  8. Instead of running a lexer beforehand, or using a costy char-by-char method, this parser
  9. uses regular expressions by necessity, achieving high-performance while maintaining all of
  10. Earley's power in parsing any CFG.
  11. """

# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com

from collections import defaultdict, deque

from ..exceptions import ParseError, UnexpectedCharacters
from ..lexer import Token
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal, Terminal
from .earley import ApplyCallbacks, Parser as BaseParser
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestToTreeVisitor, ForestToAmbiguousTreeVisitor, ForestSumVisitor, ForestToPyDotVisitor, SymbolNode


class Parser(BaseParser):
    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore=(), complete_lex=False):
        BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity)
        self.ignore = [Terminal(t) for t in ignore]
        self.complete_lex = complete_lex

    def parse(self, stream, start_symbol=None):
        start_symbol = NonTerminal(start_symbol or self.parser_conf.start)
        delayed_matches = defaultdict(list)
        match = self.term_matcher

        # Cache for nodes & tokens created in a particular parse step.
        token_cache = {}
        columns = []
        transitives = []

        text_line = 1
        text_column = 1

        def is_quasi_complete(item):
            if item.is_complete:
                return True

            quasi = item.advance()
            while not quasi.is_complete:
                if quasi.expect not in self.NULLABLE:
                    return False
                if quasi.rule.origin == start_symbol and quasi.expect == start_symbol:
                    return False
                quasi = quasi.advance()
            return True
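
        # Illustration (hypothetical rule): given A -> B C D where C and D are
        # both nullable, the item (A -> B . C D) is quasi-complete: every
        # remaining symbol can derive the empty string, so the item behaves like
        # a completed one. Judging by the TransitiveItem import above, this
        # feeds Leo's optimization for right-recursion in the predict/complete
        # cycle.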

        def scan(i, to_scan):
            """The core Earley Scanner.

            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""
            node_cache = {}

            # 1) Loop over the expectations and ask the lexer to match.
            # Since a regexp looks forward on the input stream, and we only want
            # to process tokens at the point in the stream at which they complete,
            # we push all tokens into a buffer (delayed_matches), to be held,
            # possibly for a later parse step, until we reach the point in the
            # input stream at which they complete.
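            # For example (illustrative): if the terminal NAME matches "abc" at
            # absolute position i, then m.end() == i+3, so the Token is buffered
            # under delayed_matches[i+3] and is only consumed when the parse
            # reaches the position at which the token ends.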
            for item in set(to_scan):
                m = match(item.expect, stream, i)
                if m:
                    t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                    delayed_matches[m.end()].append((item, i, t))
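
                    # With complete_lex, every shorter prefix of the maximal
                    # match is buffered as well (the loop below), so e.g. a
                    # terminal /a+/ over "aaa" also yields the candidate tokens
                    # "aa" and "a". Note the i+m.end() key: these matches run
                    # against a slice, so their end offsets are relative and
                    # must be rebased onto the stream.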
                    if self.complete_lex:
                        s = m.group(0)
                        for j in range(1, len(s)):
                            m = match(item.expect, s[:-j])
                            if m:
                                t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                                delayed_matches[i+m.end()].append((item, i, t))

                    # Remove any items that successfully matched in this pass from the to_scan buffer.
                    # This ensures we don't carry over tokens that already matched, if we're ignoring below.
                    to_scan.remove(item)

            # 2) Process any ignores. This is typically used for e.g. whitespace.
            # We carry over any unmatched items from the to_scan buffer to be matched again after
            # the ignore. This should allow us to use ignored symbols in non-terminals to implement
            # e.g. mandatory spacing.
            for x in self.ignore:
                m = match(x, stream, i)
                if m:
                    # Carry over any items still in the scan buffer, to past the end of the ignored items.
                    delayed_matches[m.end()].extend([(item, i, None) for item in to_scan])

                    # If we're ignoring up to the end of the file, carry over the start symbol if it already completed.
                    delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol])
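
            # E.g. (illustrative) with '%ignore " "': a space at position i
            # re-queues every still-unmatched item at delayed_matches[m.end()]
            # with token=None, so those items step over the whitespace without
            # adding anything to the SPPF tree.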

            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            next_transitives = dict()
            transitives.append(next_transitives)

            ## 3) Process Tokens from delayed_matches.
            # This is the core of the Earley scanner. Create an SPPF node for each Token,
            # and create the symbol node in the SPPF tree. Advance the item that completed,
            # and add the resulting new item to either the Earley set (for processing by the
            # completer/predictor) or the to_scan buffer for the next parse step.
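            # The node_cache label is the triple (symbol, start, end): two
            # derivations of the same symbol over the same span share one
            # SymbolNode (the "shared" in shared packed parse forest), and
            # ambiguity is recorded as additional families on that node via
            # add_family, rather than as duplicate nodes.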
            for item, start, token in delayed_matches[i+1]:
                if token is not None:
                    token.end_line = text_line
                    token.end_column = text_column + 1

                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)
                else:
                    new_item = item

                if new_item.expect in self.TERMINALS:
                    # add (B ::= α a_{i+1} · β, h, y) to Q'
                    next_to_scan.add(new_item)
                else:
                    # add (B ::= α a_{i+1} · β, h, y) to E_{i+1}
                    next_set.add(new_item)

            del delayed_matches[i+1]    # No longer needed, so unburden memory

            if not next_set and not delayed_matches and not next_to_scan:
                raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect for item in to_scan}, set(to_scan))

            return next_to_scan

        # Main loop starts
        columns.append(set())
        transitives.append(dict())

        ## The scan buffer. 'Q' in E. Scott's paper.
        to_scan = set()

        ## Predict for the start_symbol.
        # Add predicted items to the first Earley set (for the predictor) if they
        # result in a non-terminal, or to the scanner if they result in a terminal.
        for rule in self.predictions[start_symbol]:
            item = Item(rule, 0, 0)
            if item.expect in self.TERMINALS:
                to_scan.add(item)
            else:
                columns[0].add(item)
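
        # E.g. (illustrative): for a start rule 'start: WORD sentence' the
        # initial item expects the terminal WORD and goes to the scan buffer,
        # while for 'start: sentence WORD' it expects a non-terminal and waits
        # in columns[0] for the predictor.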

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
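        # Note: this is where the lexer is "dynamic". The input is consumed one
        # character per Earley step, with i as the absolute offset; tokens that
        # span several characters surface from delayed_matches at the step
        # where they end.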
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            # Clear the token_cache, which is only relevant within a single step
            # of the Earley pass (node_cache is rebuilt inside scan for the same
            # reason).
            token_cache.clear()
            to_scan = scan(i, to_scan)

            if token == '\n':
                text_line += 1
                text_column = 1
            else:
                text_column += 1
            i += 1

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse. If the parse was successful,
        # the start symbol will have been completed in the last step of the Earley
        # cycle, and will be in this column. Find the item for the start_symbol:
        # its node is the root of the SPPF tree.
        solutions = [n.node for n in columns[i] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]

        if not solutions:
            expected_tokens = [t.expect for t in to_scan]
            raise ParseError('Unexpected end of input! Expecting a terminal of: %s' % expected_tokens)
        elif len(solutions) > 1:
            raise Exception('Earley should not generate more than one start symbol - bug')

        # Perform our SPPF -> AST conversion using the right ForestVisitor.
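        # An assumption based on the imports above: BaseParser chooses this
        # visitor, ForestToTreeVisitor when resolve_ambiguity is set, otherwise
        # ForestToAmbiguousTreeVisitor, which preserves ambiguities as '_ambig'
        # nodes in the returned tree.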
        return self.forest_tree_visitor.go(solutions[0])