  1. """This module implements a LALR(1) Parser
  2. """
  3. # Author: Erez Shinan (2017)
  4. # Email : erezshin@gmail.com
  5. from ..exceptions import UnexpectedToken
  6. from ..lexer import Token
  7. from ..utils import Enumerator, Serialize
  8. from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
  9. from .lalr_puppet import ParserPuppet
  10. ###{standalone

class LALR_Parser(object):
    def __init__(self, parser_conf, debug=False):
        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        analysis.compute_lalr()
        callbacks = parser_conf.callbacks

        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks, debug)

    @classmethod
    def deserialize(cls, data, memo, callbacks):
        # Rebuild a parser from a previously serialized parse table
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks)
        return inst

    def serialize(self, memo):
        return self._parse_table.serialize(memo)

    def parse(self, *args):
        return self.parser.parse(*args)


class _Parser:
    def __init__(self, parse_table, callbacks, debug=False):
        self.parse_table = parse_table
        self.callbacks = callbacks
        self.debug = debug

    def parse(self, seq, start, set_state=None, value_stack=None, state_stack=None):
        token = None
        stream = iter(seq)
        states = self.parse_table.states
        start_state = self.parse_table.start_states[start]
        end_state = self.parse_table.end_states[start]

        state_stack = state_stack or [start_state]
        value_stack = value_stack or []

        if set_state: set_state(start_state)

        def get_action(token):
            # Look up the table action for the current state and token type.
            # On a miss, raise UnexpectedToken with the set of expected terminals,
            # attaching a ParserPuppet when available (the standalone parser omits it).
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                expected = {s for s in states[state].keys() if s.isupper()}
                try:
                    puppet = ParserPuppet(self, state_stack, value_stack, start, stream, set_state)
                except NameError:   # For standalone parser
                    puppet = None
                raise UnexpectedToken(token, expected, state=state, puppet=puppet)

        def reduce(rule):
            # Pop the rule's right-hand side off both stacks, build the value via
            # the rule callback, then shift over the rule's origin (the "goto" step).
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        try:
            for token in stream:
                while True:
                    action, arg = get_action(token)
                    assert arg != end_state
                    if action is Shift:
                        state_stack.append(arg)
                        value_stack.append(token)
                        if set_state: set_state(arg)
                        break  # next token
                    else:
                        reduce(arg)
        except Exception:
            if self.debug:
                print("")
                print("STATE STACK DUMP")
                print("----------------")
                for i, s in enumerate(state_stack):
                    print('%d)' % i, s)
                print("")

            raise

        # End of input: feed a synthetic $END token and keep reducing
        # until the end state is reached, then return the final value.
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            assert _action is Reduce
            reduce(arg)
            if state_stack[-1] == end_state:
                return value_stack[-1]

###}
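
Usage sketch (not part of the module above): a minimal way to exercise this parser is through lark's public API, where passing parser="lalr" selects the LALR(1) engine. This assumes the lark package is installed; the grammar and input are illustrative only.

    from lark import Lark

    # Illustrative grammar; any LALR(1)-compatible grammar works here.
    parser = Lark('start: "a"+', parser="lalr")

    tree = parser.parse("aaa")
    print(tree.pretty())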