This repo contains code to mirror other repos. It also contains the code that is getting mirrored.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

105 lines
3.2 KiB

  1. """This module implements a LALR(1) Parser
  2. """
  3. # Author: Erez Shinan (2017)
  4. # Email : erezshin@gmail.com
  5. from ..exceptions import UnexpectedToken
  6. from ..lexer import Token
  7. from ..grammar import Rule
  8. from ..utils import Enumerator, Serialize
  9. from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
  10. class Parser:
  11. def __init__(self, parser_conf, debug=False):
  12. assert all(r.options is None or r.options.priority is None
  13. for r in parser_conf.rules), "LALR doesn't yet support prioritization"
  14. analysis = LALR_Analyzer(parser_conf, debug=debug)
  15. analysis.compute_lookahead()
  16. callbacks = parser_conf.callbacks
  17. self._parse_table = analysis.parse_table
  18. self.parser_conf = parser_conf
  19. self.parser = _Parser(analysis.parse_table, callbacks)
  20. @classmethod
  21. def deserialize(cls, data, callbacks):
  22. inst = cls.__new__(cls)
  23. inst.parser = _Parser(IntParseTable.deserialize(data), callbacks)
  24. return inst
  25. def serialize(self):
  26. return self._parse_table.serialize()
  27. def parse(self, *args):
  28. return self.parser.parse(*args)
  29. ###{standalone
  30. class _Parser:
  31. def __init__(self, parse_table, callbacks):
  32. self.states = parse_table.states
  33. self.start_state = parse_table.start_state
  34. self.end_state = parse_table.end_state
  35. self.callbacks = callbacks
  36. def parse(self, seq, set_state=None):
  37. token = None
  38. stream = iter(seq)
  39. states = self.states
  40. state_stack = [self.start_state]
  41. value_stack = []
  42. if set_state: set_state(self.start_state)
  43. def get_action(token):
  44. state = state_stack[-1]
  45. try:
  46. return states[state][token.type]
  47. except KeyError:
  48. expected = [s for s in states[state].keys() if s.isupper()]
  49. raise UnexpectedToken(token, expected, state=state)
  50. def reduce(rule):
  51. size = len(rule.expansion)
  52. if size:
  53. s = value_stack[-size:]
  54. del state_stack[-size:]
  55. del value_stack[-size:]
  56. else:
  57. s = []
  58. value = self.callbacks[rule](s)
  59. _action, new_state = states[state_stack[-1]][rule.origin.name]
  60. assert _action is Shift
  61. state_stack.append(new_state)
  62. value_stack.append(value)
  63. # Main LALR-parser loop
  64. for token in stream:
  65. while True:
  66. action, arg = get_action(token)
  67. assert arg != self.end_state
  68. if action is Shift:
  69. state_stack.append(arg)
  70. value_stack.append(token)
  71. if set_state: set_state(arg)
  72. break # next token
  73. else:
  74. reduce(arg)
  75. token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
  76. while True:
  77. _action, arg = get_action(token)
  78. if _action is Shift:
  79. assert arg == self.end_state
  80. val ,= value_stack
  81. return val
  82. else:
  83. reduce(arg)
  84. ###}