This repo contains the code used to mirror other repos, as well as the code being mirrored. The mirrored file below defines the parser frontends: classes that pair a lexer with a parsing algorithm and are selected by name through ENGINE_DICT.

import re
import sre_parse

from .lexer import Lexer, ContextualLexer
from .common import is_terminal, GrammarError
from .parsers import lalr_parser, earley


class WithLexer:
    "Base class for frontends that tokenize with a standard lexer, optionally post-processed."

    def __init__(self, lexer_conf):
        self.lexer_conf = lexer_conf
        self.lexer = Lexer(lexer_conf.tokens, ignore=lexer_conf.ignore)

    def lex(self, text):
        stream = self.lexer.lex(text)
        if self.lexer_conf.postlex:
            return self.lexer_conf.postlex.process(stream)
        else:
            return stream


class LALR(WithLexer):
    def __init__(self, lexer_conf, parser_conf):
        WithLexer.__init__(self, lexer_conf)
        self.parser_conf = parser_conf
        self.parser = lalr_parser.Parser(parser_conf)

    def parse(self, text):
        tokens = list(self.lex(text))
        return self.parser.parse(tokens)


class LALR_ContextualLexer:
    def __init__(self, lexer_conf, parser_conf):
        self.lexer_conf = lexer_conf
        self.parser_conf = parser_conf
        self.parser = lalr_parser.Parser(parser_conf)

        # Map each LALR state to the token types it can accept, so the lexer
        # can restrict its matches according to the parser's current state.
        d = {idx: t.keys() for idx, t in self.parser.analysis.states_idx.items()}
        self.lexer = ContextualLexer(lexer_conf.tokens, d, ignore=lexer_conf.ignore,
                                     always_accept=lexer_conf.postlex.always_accept
                                                   if lexer_conf.postlex else ())

    def parse(self, text):
        tokens = self.lexer.lex(text)
        if self.lexer_conf.postlex:
            tokens = self.lexer_conf.postlex.process(tokens)
        # The lexer tracks the parser's state to decide which tokens to match.
        return self.parser.parse(tokens, self.lexer.set_parser_state)


class Earley(WithLexer):
    def __init__(self, lexer_conf, parser_conf):
        WithLexer.__init__(self, lexer_conf)

        rules = [{'name': n,
                  'symbols': list(self._prepare_expansion(x)),
                  'postprocess': getattr(parser_conf.callback, a)}
                 for n, x, a in parser_conf.rules]

        self.parser = earley.Parser(rules, parser_conf.start)

    def _prepare_expansion(self, expansion):
        # Terminals are wrapped as (name, None) pairs for the Earley parser.
        for sym in expansion:
            if is_terminal(sym):
                yield sym, None
            else:
                yield sym

    def parse(self, text):
        tokens = list(self.lex(text))
        res = self.parser.parse(tokens)
        assert len(res) == 1, 'Ambiguous parse! Not handled yet'
        return res[0]


class Earley_NoLex:
    "Scannerless Earley: matches terminal regexps directly against the input text."

    def __init__(self, lexer_conf, parser_conf):
        self.token_by_name = {t.name: t for t in lexer_conf.tokens}

        rules = [{'name': n,
                  'symbols': list(self._prepare_expansion(x)),
                  'postprocess': getattr(parser_conf.callback, a)}
                 for n, x, a in parser_conf.rules]

        self.parser = earley.Parser(rules, parser_conf.start)

    def _prepare_expansion(self, expansion):
        for sym in expansion:
            if is_terminal(sym):
                regexp = self.token_by_name[sym].to_regexp()
                # Scannerless parsing consumes one character at a time, so
                # every terminal's regexp must match exactly one character.
                width = sre_parse.parse(regexp).getwidth()
                if width != (1, 1):
                    raise GrammarError('Dynamic lexing requires all tokens to have a width of 1 (%s is %s)' % (regexp, width))
                yield sym, re.compile(regexp)
            else:
                yield sym

    def parse(self, text):
        res = self.parser.parse(text)
        assert len(res) == 1, 'Ambiguous parse! Not handled yet'
        return res[0]


ENGINE_DICT = {
    'lalr': LALR,
    'earley': Earley,
    'earley_nolex': Earley_NoLex,
    'lalr_contextual_lexer': LALR_ContextualLexer,
}
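
For context, here is a minimal sketch of how a caller might select and drive one of these frontends through ENGINE_DICT. The make_engine helper is hypothetical, and lexer_conf / parser_conf are assumed to be built elsewhere (e.g. by the library's grammar loader); the sketch relies only on the fact that every frontend above takes (lexer_conf, parser_conf) and exposes parse(text).

def make_engine(name, lexer_conf, parser_conf):
    # Hypothetical helper: look up a frontend class by name and instantiate it.
    try:
        engine_class = ENGINE_DICT[name]
    except KeyError:
        raise GrammarError('Unknown engine: %s (expected one of %s)'
                           % (name, ', '.join(sorted(ENGINE_DICT))))
    return engine_class(lexer_conf, parser_conf)

# Usage (assuming lexer_conf and parser_conf already exist):
#   engine = make_engine('lalr', lexer_conf, parser_conf)
#   tree = engine.parse('some input text')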