This repo contains code to mirror other repos. It also contains the code that is getting mirrored.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

105 lines
3.2 KiB

  1. """This module implements a LALR(1) Parser
  2. """
  3. # Author: Erez Shinan (2017)
  4. # Email : erezshin@gmail.com
  5. from ..exceptions import UnexpectedToken
  6. from ..lexer import Token
  7. from ..utils import Enumerator, Serialize
  8. from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
  9. ###{standalone
  10. class LALR_Parser(object):
  11. def __init__(self, parser_conf, debug=False):
  12. assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
  13. analysis = LALR_Analyzer(parser_conf, debug=debug)
  14. analysis.compute_lalr()
  15. callbacks = parser_conf.callbacks
  16. self._parse_table = analysis.parse_table
  17. self.parser_conf = parser_conf
  18. self.parser = _Parser(analysis.parse_table, callbacks)
  19. @classmethod
  20. def deserialize(cls, data, memo, callbacks):
  21. inst = cls.__new__(cls)
  22. inst._parse_table = IntParseTable.deserialize(data, memo)
  23. inst.parser = _Parser(inst._parse_table, callbacks)
  24. return inst
  25. def serialize(self, memo):
  26. return self._parse_table.serialize(memo)
  27. def parse(self, *args):
  28. return self.parser.parse(*args)
class _Parser:
    """Table-driven LALR(1) shift/reduce engine.

    Holds the action/goto tables extracted from a parse table and runs the
    classic two-stack (state stack + value stack) parsing loop.
    """

    def __init__(self, parse_table, callbacks):
        # Action/goto tables: states[state][token_type] -> (action, argument).
        self.states = parse_table.states
        # Per-start-symbol entry and accepting states.
        self.start_states = parse_table.start_states
        self.end_states = parse_table.end_states
        # callbacks[rule] builds the parse-tree value when `rule` is reduced.
        self.callbacks = callbacks

    def parse(self, seq, start, set_state=None):
        """Parse the token iterable ``seq`` for start symbol ``start``.

        ``set_state``, if given, is called with each new parser state after
        the initial state and after every shift (used by contextual lexers
        to follow the parser).  Returns the value left on the value stack
        when the accepting state is reached; raises ``UnexpectedToken`` on
        a token with no table entry.
        """
        token = None
        stream = iter(seq)
        states = self.states
        start_state = self.start_states[start]
        end_state = self.end_states[start]

        # Parallel stacks: parser states and the semantic values built so far.
        state_stack = [start_state]
        value_stack = []

        if set_state: set_state(start_state)

        def get_action(token):
            # Look up the table action for `token` in the current state.
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                # No entry: report which terminals (uppercase keys) were viable here.
                expected = [s for s in states[state].keys() if s.isupper()]
                raise UnexpectedToken(token, expected, state=state)

        def reduce(rule):
            # Pop one stack entry per symbol on the rule's right-hand side.
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            # Build the semantic value for this rule from the popped children.
            value = self.callbacks[rule](s)

            # Goto step: transition on the rule's origin nonterminal.
            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        for token in stream:
            while True:
                action, arg = get_action(token)
                # The accepting state is only reachable via the $END token below.
                assert arg != end_state
                if action is Shift:
                    state_stack.append(arg)
                    value_stack.append(token)
                    if set_state: set_state(arg)
                    break # next token
                else:
                    reduce(arg)

        # Input exhausted: synthesize a $END token (borrowing position info
        # from the last real token when there was one) and reduce to completion.
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            assert(_action is Reduce)
            reduce(arg)
            if state_stack[-1] == end_state:
                return value_stack[-1]
  83. ###}