This repo contains code to mirror other repos, as well as the code being mirrored.

"""This module implements a LALR(1) Parser
"""
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com
from ..exceptions import UnexpectedToken
from ..lexer import Token
from ..utils import Enumerator, Serialize

from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable

import time

###{standalone
# The ###{standalone ... ###} markers delimit the code that lark's
# standalone tool copies into generated self-contained parsers.

class LALR_Parser(object):
    def __init__(self, parser_conf, debug=False):
        assert all(r.options is None or r.options.priority is None
                   for r in parser_conf.rules), "LALR doesn't yet support prioritization"
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        # LALR(1) analysis pipeline: build the LR(0) automaton, then run
        # the lookahead computations that refine it into LALR(1) states.
        analysis.compute_lr0_states()
        analysis.compute_reads_relations()
        analysis.compute_read_sets()
        analysis.compute_includes_lookback()
        analysis.compute_follow_sets()
        analysis.compute_lookaheads()
        analysis.compute_lalr1_states()
        callbacks = parser_conf.callbacks

        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks)

    @classmethod
    def deserialize(cls, data, memo, callbacks):
        # Rebuild a parser from a serialized parse table, skipping the
        # (comparatively expensive) grammar analysis above.
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks)
        return inst

    def serialize(self, memo):
        return self._parse_table.serialize(memo)
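    # Hedged sketch of the intended round trip (the memo argument is
    # lark's serialization memo; the exact call sites live elsewhere in
    # lark, so treat this as an assumption):
    #
    #   data = lalr.serialize(memo)
    #   restored = LALR_Parser.deserialize(data, memo, callbacks)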
    def parse(self, *args):
        return self.parser.parse(*args)
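# Illustrative wiring (an assumption, not the actual call site): lark's
# parser frontends build a ParserConf from the grammar's rules and
# callbacks, construct this class once, and call parse() per input:
#
#   conf = ParserConf(rules, callbacks, ['start'])
#   lalr = LALR_Parser(conf)
#   tree = lalr.parse(iter(tokens), 'start')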
class _Parser:
    def __init__(self, parse_table, callbacks):
        self.states = parse_table.states
        self.start_states = parse_table.start_states
        self.end_states = parse_table.end_states
        self.callbacks = callbacks

    def parse(self, seq, start, set_state=None):
        token = None
        stream = iter(seq)
        states = self.states

        start_state = self.start_states[start]
        end_state = self.end_states[start]

        state_stack = [start_state]
        value_stack = []

        if set_state: set_state(start_state)

        def get_action(token):
            # Look up the (action, argument) table entry for the current
            # state and token type; a missing entry is a syntax error.
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                # Uppercase keys are terminal names; report them as the
                # set of tokens that would have been accepted here.
                expected = [s for s in states[state].keys() if s.isupper()]
                raise UnexpectedToken(token, expected, state=state)

        def reduce(rule):
            # Pop the rule's right-hand side off both stacks, build its
            # value with the rule callback, then follow the goto entry
            # for the rule's origin (stored as a Shift action).
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

            # True once we have reduced all the way to the end state.
            if state_stack[-1] == end_state:
                return True
            return False
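        # Worked example (illustrative rule, not from a real grammar):
        # reducing by   sum: sum PLUS atom   (expansion size 3) pops the
        # top three states and values, calls the rule's callback on the
        # three popped values, then pushes the state reached via the
        # goto entry states[top]['sum'] along with the new value.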
        # Main LALR parser loop: shift tokens until the table demands a
        # reduction, then reduce and retry the same token.
        for token in stream:
            while True:
                action, arg = get_action(token)
                assert arg != end_state
                if action is Shift:
                    state_stack.append(arg)
                    value_stack.append(token)
                    if set_state: set_state(arg)
                    break  # next token
                else:
                    reduce(arg)

        # The stream is exhausted: synthesize the $END token and keep
        # reducing until the end state is reached; the value left on top
        # of the stack is the parse result.
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            assert _action is Reduce
            if reduce(arg):
                return value_stack[-1]

###}
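
For readers who want to see this module in action, here is a minimal sketch that drives it through lark's public API rather than the internal classes above. It assumes lark is installed, and the grammar is made up purely for illustration; passing parser='lalr' is what routes parsing through the LALR_Parser defined in this file.

from lark import Lark

# parser='lalr' selects the LALR(1) parser implemented in this module.
parser = Lark(r"""
    start: NUMBER ("+" NUMBER)*
    %import common.NUMBER
    %ignore " "
""", parser='lalr')

print(parser.parse("1 + 2 + 3").pretty())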