  1. """This module implements a LALR(1) Parser
  2. """
  3. # Author: Erez Shinan (2017)
  4. # Email : erezshin@gmail.com
  5. from ..exceptions import UnexpectedToken
  6. from ..lexer import Token
  7. from ..utils import Enumerator, Serialize
  8. from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
  9. ###{standalone
class LALR_Parser(object):
    def __init__(self, parser_conf, debug=False):
        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        analysis.compute_lalr()
        callbacks = parser_conf.callbacks

        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks, debug)

    @classmethod
    def deserialize(cls, data, memo, callbacks):
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks)
        return inst

    def serialize(self, memo):
        return self._parse_table.serialize(memo)

    def parse(self, *args):
        return self.parser.parse(*args)
class _Parser:
    def __init__(self, parse_table, callbacks, debug=False):
        self.states = parse_table.states
        self.start_states = parse_table.start_states
        self.end_states = parse_table.end_states
        self.callbacks = callbacks
        self.debug = debug
    def parse(self, seq, start, set_state=None):
        token = None
        stream = iter(seq)
        states = self.states
        start_state = self.start_states[start]
        end_state = self.end_states[start]

        state_stack = [start_state]
        value_stack = []

        if set_state: set_state(start_state)

        def get_action(token):
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                # Terminal names are uppercase; non-terminal (goto) entries are not.
                expected = [s for s in states[state].keys() if s.isupper()]
                raise UnexpectedToken(token, expected, state=state)
        def reduce(rule):
            # Pop one value/state per symbol on the rule's right-hand side,
            # run the rule's callback on the popped values, then follow the
            # goto entry for the reduced non-terminal from the exposed state.
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)
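
        # Example (hypothetical grammar): reducing by a rule such as
        #   sum -> sum PLUS product
        # pops the last 3 entries from both stacks, passes the 3 popped
        # values to self.callbacks[rule] to build the result (e.g. a tree
        # node), and then shifts the goto state stored under 'sum' in the
        # state now on top of state_stack.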
        # Main LALR-parser loop
        try:
            for token in stream:
                while True:
                    action, arg = get_action(token)
                    assert arg != end_state
                    if action is Shift:
                        state_stack.append(arg)
                        value_stack.append(token)
                        if set_state: set_state(arg)
                        break # next token
                    else:
                        reduce(arg)
        except Exception as e:
            if self.debug:
                print("")
                print("STATE STACK DUMP")
                print("----------------")
                for i, s in enumerate(state_stack):
                    print('%d)' % i, s)
                print("")

            raise
        # End of input: feed a synthetic $END token and keep reducing
        # until the end state for this start symbol is reached.
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            assert _action is Reduce
            reduce(arg)
            if state_stack[-1] == end_state:
                return value_stack[-1]

###}
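
The classes above are internal and are normally driven through lark's public Lark front end, which builds the grammar analysis and parse table and ends up calling _Parser.parse() on the token stream. A minimal usage sketch follows; the grammar and input are made up for illustration:

from lark import Lark

# parser='lalr' selects the LALR(1) implementation in this module.
grammar = r"""
    start: NUMBER "+" NUMBER

    %import common.NUMBER
    %import common.WS
    %ignore WS
"""

parser = Lark(grammar, parser='lalr')
tree = parser.parse("1 + 2")   # a tree built by the rule callbacks
print(tree.pretty())

Serialization follows the same split as the code above: LALR_Parser.serialize() stores only the parse table, and deserialize() rebuilds a parser from that table plus freshly supplied callbacks.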