import re
import sre_parse

from .lexer import Lexer, ContextualLexer, Token
from .common import is_terminal, GrammarError, ParserConf
from .parsers import lalr_parser, earley, nearley
from .tree import Transformer
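
# WithLexer is the shared base for frontends that run a traditional lexer:
# it owns the Lexer instance and applies the optional post-lexing hook
# (lexer_conf.postlex) to the token stream.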
class WithLexer:
    def __init__(self, lexer_conf):
        self.lexer_conf = lexer_conf
        self.lexer = Lexer(lexer_conf.tokens, ignore=lexer_conf.ignore)

    def lex(self, text):
        stream = self.lexer.lex(text)
        if self.lexer_conf.postlex:
            return self.lexer_conf.postlex.process(stream)
        else:
            return stream
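
# LALR(1) frontend over a standard token stream: the input is fully lexed
# first, then the token list is handed to lalr_parser.Parser.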
class LALR(WithLexer):
    def __init__(self, lexer_conf, parser_conf):
        WithLexer.__init__(self, lexer_conf)
        self.parser_conf = parser_conf
        self.parser = lalr_parser.Parser(parser_conf)

    def parse(self, text):
        tokens = list(self.lex(text))
        return self.parser.parse(tokens)
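
# LALR frontend with a contextual lexer: `d` maps each parser state to the
# tokens acceptable in that state, so the lexer can use the parser's
# position to resolve tokenizing ambiguities.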
class LALR_ContextualLexer:
    def __init__(self, lexer_conf, parser_conf):
        self.lexer_conf = lexer_conf
        self.parser_conf = parser_conf
        self.parser = lalr_parser.Parser(parser_conf)
        d = {idx: t.keys() for idx, t in self.parser.analysis.states_idx.items()}
        self.lexer = ContextualLexer(lexer_conf.tokens, d, ignore=lexer_conf.ignore,
                                     always_accept=lexer_conf.postlex.always_accept
                                                   if lexer_conf.postlex else ())

    def parse(self, text):
        tokens = self.lexer.lex(text)
        if self.lexer_conf.postlex:
            tokens = self.lexer_conf.postlex.process(tokens)
        return self.parser.parse(tokens, self.lexer.set_parser_state)
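
# Nearley-style frontend over the standard lexer; rules are converted to
# Nearley's dict format, with each terminal wrapped as a (symbol, None)
# pair by _prepare_expansion.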
class Nearley(WithLexer):
    def __init__(self, lexer_conf, parser_conf):
        WithLexer.__init__(self, lexer_conf)

        rules = [{'name': n,
                  'symbols': self._prepare_expansion(x),
                  'postprocess': getattr(parser_conf.callback, a)}
                 for n, x, a in parser_conf.rules]

        self.parser = nearley.Parser(rules, parser_conf.start)

    def _prepare_expansion(self, expansion):
        return [(sym, None) if is_terminal(sym) else sym for sym in expansion]

    def parse(self, text):
        tokens = list(self.lex(text))
        res = self.parser.parse(tokens)
        assert len(res) == 1, 'Ambiguous Parse! Not handled yet'
        return res[0]
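
# Earley frontend over the standard lexer; terminals are wrapped in
# 1-tuples by _prepare_expansion so the parser can tell them apart from
# rule names.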
class Earley(WithLexer):
    def __init__(self, lexer_conf, parser_conf):
        WithLexer.__init__(self, lexer_conf)

        rules = [(n, self._prepare_expansion(x), a)
                 for n, x, a in parser_conf.rules]

        self.parser = earley.Parser(ParserConf(rules, parser_conf.callback, parser_conf.start))

    def _prepare_expansion(self, expansion):
        return [(sym,) if is_terminal(sym) else sym for sym in expansion]

    def parse(self, text):
        tokens = list(self.lex(text))
        res = self.parser.parse(tokens)
        assert len(res) == 1, 'Ambiguous Parse! Not handled yet'
        return res[0]
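
# Scannerless helper: emits one CHAR token per input character, tracking
# line and column so positions stay meaningful in error messages.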
def tokenize_text(text):
    new_text = []
    line = 1
    col_start_pos = 0
    for i, ch in enumerate(text):
        if '\n' in ch:
            line += ch.count('\n')
            col_start_pos = i + ch.rindex('\n')
        new_text.append(Token('CHAR', ch, line=line, column=i - col_start_pos))
    return new_text
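
# Scannerless Nearley frontend: terminal rules are renamed with a
# '__token_' prefix and matched character by character; after parsing, a
# Transformer joins the matched characters back into token strings.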
class Nearley_NoLex:
    def __init__(self, lexer_conf, parser_conf):
        self.tokens_to_convert = {name: '__token_' + name for name, tree, _ in parser_conf.rules if is_terminal(name)}
        rules = []
        for name, exp, alias in parser_conf.rules:
            name = self.tokens_to_convert.get(name, name)
            exp = [self.tokens_to_convert.get(x, x) for x in exp]
            rules.append((name, exp, alias))

        self.token_by_name = {t.name: t for t in lexer_conf.tokens}

        rules = [{'name': n,
                  'symbols': list(self._prepare_expansion(x)),
                  'postprocess': getattr(parser_conf.callback, a)}
                 for n, x, a in rules]

        self.parser = nearley.Parser(rules, parser_conf.start)

    def _prepare_expansion(self, expansion):
        for sym in expansion:
            if is_terminal(sym):
                regexp = self.token_by_name[sym].pattern.to_regexp()
                width = sre_parse.parse(regexp).getwidth()
                if width != (1, 1):
                    raise GrammarError('Dynamic lexing requires all tokens to have a width of 1 (%s is %s)' % (regexp, width))
                yield sym, re.compile(regexp)
            else:
                yield sym

    def parse(self, text):
        new_text = tokenize_text(text)
        res = self.parser.parse(new_text)
        assert len(res) == 1, 'Ambiguous Parse! Not handled yet'
        res = res[0]

        class RestoreTokens(Transformer):
            pass

        for t in self.tokens_to_convert:
            setattr(RestoreTokens, t, ''.join)

        res = RestoreTokens().transform(res)
        return res
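
# Scannerless Earley frontend: every terminal's regexp must have width
# exactly (1, 1), i.e. match a single character, and is compiled into a
# match function for the parser.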
class Earley_NoLex:
    def __init__(self, lexer_conf, parser_conf):
        self.token_by_name = {t.name: t for t in lexer_conf.tokens}

        rules = [(n, list(self._prepare_expansion(x)), a) for n, x, a in parser_conf.rules]

        self.parser = earley.Parser(ParserConf(rules, parser_conf.callback, parser_conf.start))

    def _prepare_expansion(self, expansion):
        for sym in expansion:
            if is_terminal(sym):
                regexp = self.token_by_name[sym].pattern.to_regexp()
                width = sre_parse.parse(regexp).getwidth()
                if width != (1, 1):
                    raise GrammarError('Scanless parsing (lexer=None) requires all tokens to have a width of 1 (terminal %s: %s is %s)' % (sym, regexp, width))
                yield (re.compile(regexp).match, regexp)
            else:
                yield sym

    def parse(self, text):
        new_text = tokenize_text(text)
        res = self.parser.parse(new_text)
        assert len(res) == 1, 'Ambiguous Parse! Not handled yet'
        return res[0]
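
# Selects a frontend class from the (parser, lexer) option pair, rejecting
# unsupported combinations.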
def get_frontend(parser, lexer):
    if parser == 'lalr':
        if lexer is None:
            raise ValueError('The LALR parser requires use of a lexer')
        elif lexer == 'standard':
            return LALR
        elif lexer == 'contextual':
            return LALR_ContextualLexer
        else:
            raise ValueError('Unknown lexer: %s' % lexer)
    elif parser == 'earley':
        if lexer is None:
            return Earley_NoLex
        elif lexer == 'standard':
            return Earley
        elif lexer == 'contextual':
            raise ValueError('The Earley parser does not support the contextual lexer')
        else:
            raise ValueError('Unknown lexer: %s' % lexer)
    else:
        raise ValueError('Unknown parser: %s' % parser)
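
# A minimal usage sketch (illustrative only; `my_lexer_conf` and
# `my_parser_conf` are assumed to be lexer/parser configuration objects
# built elsewhere in the package):
#
#     frontend_cls = get_frontend('lalr', 'standard')   # -> LALR
#     frontend = frontend_cls(my_lexer_conf, my_parser_conf)
#     tree = frontend.parse('some input text')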