import re
from functools import partial

from .utils import get_regexp_width, Serialize
from .parsers.grammar_analysis import GrammarAnalyzer
from .lexer import TraditionalLexer, ContextualLexer, Lexer, Token
from .parsers import earley, xearley, cyk
from .parsers.lalr_parser import LALR_Parser
from .grammar import Rule
from .tree import Tree

###{standalone

def get_frontend(parser, lexer):
    if parser == 'lalr':
        if lexer is None:
            raise ValueError('The LALR parser requires use of a lexer')
        elif lexer == 'standard':
            return LALR_TraditionalLexer
        elif lexer == 'contextual':
            return LALR_ContextualLexer
        elif issubclass(lexer, Lexer):
            return partial(LALR_CustomLexer, lexer)
        else:
            raise ValueError('Unknown lexer: %s' % lexer)
    elif parser == 'earley':
        if lexer == 'standard':
            return Earley
        elif lexer == 'dynamic':
            return XEarley
        elif lexer == 'dynamic_complete':
            return XEarley_CompleteLex
        elif lexer == 'contextual':
            raise ValueError('The Earley parser does not support the contextual lexer')
        else:
            raise ValueError('Unknown lexer: %s' % lexer)
    elif parser == 'cyk':
        if lexer == 'standard':
            return CYK
        else:
            raise ValueError('CYK parser requires using standard lexer.')
    else:
        raise ValueError('Unknown parser: %s' % parser)
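
# A minimal usage sketch (illustrative only; lexer_conf and parser_conf are
# normally built by lark's grammar loader rather than by hand):
#
#     frontend_cls = get_frontend('lalr', 'contextual')
#     frontend = frontend_cls(lexer_conf, parser_conf)
#     tree = frontend.parse('some input text')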


class WithLexer(Serialize):
    lexer = None
    parser = None
    lexer_conf = None

    __serialize_fields__ = 'parser', 'lexer'
    __serialize_namespace__ = Rule, ContextualLexer, TraditionalLexer

    @classmethod
    def deserialize(cls, data, memo, callbacks, postlex):
        inst = super(WithLexer, cls).deserialize(data, memo)
        inst.postlex = postlex
        inst.parser = LALR_Parser.deserialize(inst.parser, memo, callbacks)
        return inst

    def _serialize(self, data, memo):
        data['parser'] = data['parser'].serialize(memo)

    def init_traditional_lexer(self, lexer_conf):
        self.lexer_conf = lexer_conf
        self.lexer = TraditionalLexer(lexer_conf.tokens, ignore=lexer_conf.ignore, user_callbacks=lexer_conf.callbacks)
        self.postlex = lexer_conf.postlex

    def init_contextual_lexer(self, lexer_conf):
        self.lexer_conf = lexer_conf
        self.postlex = lexer_conf.postlex

        # Map each parser state to the tokens it can accept, so the contextual
        # lexer only considers tokens that are valid in the current state.
        states = {idx: list(t.keys()) for idx, t in self.parser._parse_table.states.items()}
        always_accept = self.postlex.always_accept if self.postlex else ()
        self.lexer = ContextualLexer(lexer_conf.tokens, states,
                                     ignore=lexer_conf.ignore,
                                     always_accept=always_accept,
                                     user_callbacks=lexer_conf.callbacks)

    def lex(self, text):
        stream = self.lexer.lex(text)
        return self.postlex.process(stream) if self.postlex else stream

    def parse(self, text):
        token_stream = self.lex(text)
        # set_parser_state is NotImplemented on lexers that don't track the
        # parser's state; only the contextual lexer needs it.
        sps = self.lexer.set_parser_state
        return self.parser.parse(token_stream, *[sps] if sps is not NotImplemented else [])
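
# Sketch of the lex/parse split (hedged; assumes a fully constructed frontend):
#
#     tokens = list(frontend.lex('1 + 2'))   # token stream, after postlex if any
#     tree = frontend.parse('1 + 2')         # lexes internally, then parses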


class LALR_TraditionalLexer(WithLexer):
    def __init__(self, lexer_conf, parser_conf, options=None):
        debug = options.debug if options else False
        self.parser = LALR_Parser(parser_conf, debug=debug)
        self.init_traditional_lexer(lexer_conf)


class LALR_ContextualLexer(WithLexer):
    def __init__(self, lexer_conf, parser_conf, options=None):
        debug = options.debug if options else False
        self.parser = LALR_Parser(parser_conf, debug=debug)
        self.init_contextual_lexer(lexer_conf)
###}


class LALR_CustomLexer(WithLexer):
    def __init__(self, lexer_cls, lexer_conf, parser_conf, options=None):
        self.parser = LALR_Parser(parser_conf)
        self.lexer_conf = lexer_conf
        self.postlex = lexer_conf.postlex   # required by WithLexer.lex
        self.lexer = lexer_cls(lexer_conf)
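
# Sketch of plugging in a custom lexer (hedged; MyLexer is hypothetical and
# only needs to satisfy the Lexer interface, i.e. lex(text) yielding Tokens):
#
#     class MyLexer(Lexer):
#         def __init__(self, lexer_conf):
#             pass
#         def lex(self, text):
#             yield Token('CHAR', text[0], line=1, column=0)
#
#     frontend_cls = get_frontend('lalr', MyLexer)   # partial(LALR_CustomLexer, MyLexer)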


def tokenize_text(text):
    # Yield one CHAR token per character, tracking line and 0-based column.
    # Since iteration is per character, `'\n' in ch` is just `ch == '\n'`,
    # so count() is 1 and rindex() is 0 on the branch below.
    line = 1
    col_start_pos = 0
    for i, ch in enumerate(text):
        if '\n' in ch:
            line += ch.count('\n')
            col_start_pos = i + ch.rindex('\n')
        yield Token('CHAR', ch, line=line, column=i - col_start_pos)
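
# Worked example: tokenize_text('a\nb') yields
#     Token('CHAR', 'a',  line=1, column=0)
#     Token('CHAR', '\n', line=2, column=0)   # line is bumped before the yield
#     Token('CHAR', 'b',  line=2, column=1)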


class Earley(WithLexer):
    def __init__(self, lexer_conf, parser_conf, options=None):
        self.init_traditional_lexer(lexer_conf)

        resolve_ambiguity = options.ambiguity == 'resolve'
        self.parser = earley.Parser(parser_conf, self.match, resolve_ambiguity=resolve_ambiguity)

    def match(self, term, token):
        # The standard lexer has already classified the token, so matching
        # reduces to comparing the terminal's name against the token's type.
        return term.name == token.type


class XEarley:
    def __init__(self, lexer_conf, parser_conf, options=None, **kw):
        self.token_by_name = {t.name: t for t in lexer_conf.tokens}

        self._prepare_match(lexer_conf)
        resolve_ambiguity = options.ambiguity == 'resolve'
        self.parser = xearley.Parser(parser_conf,
                                     self.match,
                                     ignore=lexer_conf.ignore,
                                     resolve_ambiguity=resolve_ambiguity,
                                     **kw)

    def match(self, term, text, index=0):
        # Dynamic lexing: match the terminal's regexp directly against the
        # input text at the given position.
        return self.regexps[term.name].match(text, index)

    def _prepare_match(self, lexer_conf):
        self.regexps = {}
        for t in lexer_conf.tokens:
            if t.priority != 1:
                raise ValueError("Dynamic Earley doesn't support weights on terminals", t, t.priority)
            regexp = t.pattern.to_regexp()
            try:
                width = get_regexp_width(regexp)[0]
            except ValueError:
                raise ValueError("Bad regexp in token %s: %s" % (t.name, regexp))
            else:
                if width == 0:
                    raise ValueError("Dynamic Earley doesn't allow zero-width regexps", t)

            self.regexps[t.name] = re.compile(regexp)

    def parse(self, text):
        return self.parser.parse(text)


class XEarley_CompleteLex(XEarley):
    def __init__(self, *args, **kw):
        XEarley.__init__(self, *args, complete_lex=True, **kw)
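
# Usage sketch (hedged): the dynamic-lexer frontends take raw text, since
# terminal matching happens inside the Earley parser itself. Note that
# `options` must provide an `ambiguity` attribute, per __init__ above:
#
#     frontend_cls = get_frontend('earley', 'dynamic')
#     tree = frontend_cls(lexer_conf, parser_conf, options).parse('1 + 2')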


class CYK(WithLexer):
    def __init__(self, lexer_conf, parser_conf, options=None):
        self.init_traditional_lexer(lexer_conf)

        self._analysis = GrammarAnalyzer(parser_conf)
        self._parser = cyk.Parser(parser_conf.rules, parser_conf.start)

        self.callbacks = parser_conf.callbacks

    def parse(self, text):
        tokens = list(self.lex(text))
        parse = self._parser.parse(tokens)
        parse = self._transform(parse)
        return parse

    def _transform(self, tree):
        subtrees = list(tree.iter_subtrees())
        for subtree in subtrees:
            subtree.children = [self._apply_callback(c) if isinstance(c, Tree) else c for c in subtree.children]

        return self._apply_callback(tree)

    def _apply_callback(self, tree):
        return self.callbacks[tree.rule](tree.children)
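
# Usage sketch (hedged; CYK accepts only the standard lexer, per get_frontend):
#
#     frontend = CYK(lexer_conf, parser_conf)
#     tree = frontend.parse('some input text')
#
# _transform rewrites the raw CYK parse by replacing each Tree child with the
# result of its rule's callback, then applies the root's callback, so the
# final result lines up with what the other frontends return.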