  1. """This module implements a LALR(1) Parser
  2. """
  3. # Author: Erez Shinan (2017)
  4. # Email : erezshin@gmail.com
  5. from ..exceptions import UnexpectedToken
  6. from ..lexer import Token
  7. from ..grammar import Rule
  8. from ..utils import Enumerator
  9. from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
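

# Two classes share the work: `Parser` runs the LALR(1) analysis on a grammar
# and owns (de)serialization of the resulting parse table, while `_Parser` is
# the table-driven engine that actually consumes tokens. The `###{standalone`
# / `###}` markers further down delimit the region that is extracted for
# standalone parser generation.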
class Parser(object):
    def __init__(self, parser_conf, debug=False):
        assert all(r.options is None or r.options.priority is None
                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        analysis.compute_lookahead()
        callbacks = parser_conf.callbacks

        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks)
        self.parse = self.parser.parse
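
    # `serialize`/`deserialize` below store the parse table in a compact,
    # plain-data form: tokens and rules are interned as small integer ids
    # (via Enumerator), and each action is tagged 0 for Shift (arg = next
    # state) or 1 for Reduce (arg = rule id).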
    def serialize(self):
        tokens = Enumerator()
        rules = Enumerator()

        states = {
            state: {tokens.get(token): ((1, rules.get(arg)) if action is Reduce else (0, arg))
                    for token, (action, arg) in actions.items()}
            for state, actions in self._parse_table.states.items()
        }

        return {
            'tokens': tokens.reversed(),
            'rules': {idx: r.serialize() for idx, r in rules.reversed().items()},
            'states': states,
            'start_state': self._parse_table.start_state,
            'end_state': self._parse_table.end_state,
        }

    @classmethod
    def deserialize(cls, data, callbacks):
        tokens = data['tokens']
        rules = {idx: Rule.deserialize(r) for idx, r in data['rules'].items()}
        states = {
            state: {tokens[token]: ((Reduce, rules[arg]) if action == 1 else (Shift, arg))
                    for token, (action, arg) in actions.items()}
            for state, actions in data['states'].items()
        }
        parse_table = IntParseTable(states, data['start_state'], data['end_state'])
        inst = cls.__new__(cls)
        inst.parser = _Parser(parse_table, callbacks)
        inst.parse = inst.parser.parse
        return inst
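
    # A round-trip sketch (`parser_conf`, `callbacks`, and `token_stream` are
    # hypothetical stand-ins for objects the surrounding library provides):
    #
    #     data = Parser(parser_conf).serialize()
    #     parser = Parser.deserialize(data, callbacks)
    #     result = parser.parse(token_stream)
    #
    # Note that `deserialize` bypasses `__init__` via `cls.__new__`, so the
    # LALR analysis is never re-run on the receiving side.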

###{standalone

class _Parser:
    def __init__(self, parse_table, callbacks):
        self.states = parse_table.states
        self.start_state = parse_table.start_state
        self.end_state = parse_table.end_state
        self.callbacks = callbacks

    def parse(self, seq, set_state=None):
        token = None
        stream = iter(seq)
        states = self.states

        state_stack = [self.start_state]
        value_stack = []

        if set_state: set_state(self.start_state)

        def get_action(token):
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                expected = [s for s in states[state].keys() if s.isupper()]
                raise UnexpectedToken(token, expected, state=state)
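
        # `reduce` pops one state/value pair per symbol on the rule's
        # right-hand side, builds the reduced value through the rule's
        # callback, and then performs the "goto": the table entry for the
        # rule's origin in the uncovered state, stored as a Shift action.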
        def reduce(rule):
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        for token in stream:
            while True:
                action, arg = get_action(token)
                assert arg != self.end_state

                if action is Shift:
                    state_stack.append(arg)
                    value_stack.append(token)
                    if set_state: set_state(arg)
                    break  # next token
                else:
                    reduce(arg)
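
        # End of input: synthesize an $END token (borrowing position info
        # from the last real token, if any) and keep reducing until the end
        # state is shifted. At that point the value stack holds exactly one
        # item: the final parse result.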
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            if _action is Shift:
                assert arg == self.end_state
                val, = value_stack
                return val
            else:
                reduce(arg)

###}