"An Earley parser, using a chart of Column objects (one per input position)."

from ..common import ParseError, UnexpectedToken, is_terminal
from .grammar_analysis import GrammarAnalyzer


class EndToken(str):
    # Sentinel fed to the final column; its '$end' type suppresses the
    # end-of-input UnexpectedToken check in process_column.
    type = '$end'

class Item:
    "A dotted rule: `rule` with the dot at position `ptr`, started at column `start`."
    def __init__(self, rule, ptr, start, data):
        self.rule = rule
        self.ptr = ptr
        self.start = start
        self.data = data    # Children matched so far (one entry per symbol left of the dot)

    @property
    def expect(self):
        "The symbol immediately after the dot."
        return self.rule.expansion[self.ptr]

    @property
    def is_complete(self):
        return self.ptr == len(self.rule.expansion)

    def advance(self, data):
        "Return a new item with the dot moved one symbol to the right."
        return Item(self.rule, self.ptr + 1, self.start, self.data + [data])

    def __eq__(self, other):
        return self.start == other.start and self.ptr == other.ptr and self.rule == other.rule

    def __hash__(self):
        return hash((self.rule, self.ptr, self.start))

    def __repr__(self):
        before = map(str, self.rule.expansion[:self.ptr])
        after = map(str, self.rule.expansion[self.ptr:])
        return '<(%d) %s : %s * %s>' % (self.start, self.rule.origin, ' '.join(before), ' '.join(after))
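
# Illustration of the dotted-rule reading (the Rule stand-in here is
# hypothetical; real rules come from GrammarAnalyzer and carry at least
# .origin and .expansion):
#
#     rule = Rule(origin='sum', expansion=['sum', 'PLUS', 'atom'])
#     item = Item(rule, 2, 0, data=['1+2', '+'])
#     item.expect        # 'atom' -- the symbol after the dot
#     item.is_complete   # False -- the dot is not at the end yet
#     repr(item)         # '<(0) sum : sum PLUS * atom>'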

class NewsList(list):
    "An append-only list that can report which items were added since the last poll."
    def __init__(self, initial=None):
        list.__init__(self, initial or [])
        self.last_iter = 0

    def get_news(self):
        "Return the slice of items appended since the previous call."
        i = self.last_iter
        self.last_iter = len(self)
        return self[i:]
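
# Behavior sketch (values are illustrative):
#
#     >>> news = NewsList([1, 2])
#     >>> news.get_news()    # everything is news on the first poll
#     [1, 2]
#     >>> news.append(3)
#     >>> news.get_news()    # later polls yield only the fresh entries
#     [3]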

class Column:
    "One entry in the parse table: the Earley chart column for a single input position."
    def __init__(self):
        self.to_reduce = NewsList()
        self.to_predict = NewsList()
        self.to_scan = NewsList()
        self.item_count = 0

    def add(self, items):
        "File each item into the bucket of the Earley operation that will consume it."
        self.item_count += len(items)
        for item in items:
            if item.is_complete:
                if item not in self.to_reduce:    # Avoid infinite loop
                    self.to_reduce.append(item)
            elif is_terminal(item.expect):
                self.to_scan.append(item)
            else:
                self.to_predict.append(item)

    def __bool__(self):
        return bool(self.item_count)

    __nonzero__ = __bool__    # Python 2 spelling, kept for compatibility
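
# Dispatch sketch: given hypothetical items A (complete), B (dot before a
# terminal) and C (dot before a nonterminal),
#
#     col = Column()
#     col.add([A, B, C])
#
# leaves A in col.to_reduce, B in col.to_scan, C in col.to_predict, and
# bool(col) is True because item_count is now 3.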

class Parser:
    def __init__(self, parser_conf):
        self.analysis = GrammarAnalyzer(parser_conf.rules, parser_conf.start)
        self.start = parser_conf.start

        # Map each rule to its tree-building callback, and each nonterminal
        # to every rule it can (transitively) expand to.
        self.postprocess = {}
        self.predictions = {}
        for rule in self.analysis.rules:
            if rule.origin != '$root':    # XXX kinda ugly
                a = rule.alias
                self.postprocess[rule] = a if callable(a) else getattr(parser_conf.callback, a)
                self.predictions[rule.origin] = [x.rule for x in self.analysis.expand_rule(rule.origin)]
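
    # Shape of the precomputed tables (illustrative, hypothetical rules):
    #     self.predictions == {'sum': [<rule sum : sum PLUS atom>, <rule sum : atom>], ...}
    #     self.postprocess == {rule: callable_that_builds_a_tree_node, ...}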

    def parse(self, stream):
        # Define parser functions

        def predict(nonterm, i):
            "Add an item at column i for every rule that nonterm can expand to."
            assert not is_terminal(nonterm), nonterm
            return [Item(rule, 0, i, []) for rule in self.predictions[nonterm]]

        def complete(item, table):
            "Advance every item that was waiting on this item's now-complete rule."
            name = item.rule.origin
            item.data = self.postprocess[item.rule](item.data)
            return [i.advance(item.data) for i in table[item.start].to_predict
                    if i.expect == name]

        def process_column(i, token):
            assert i == len(table) - 1
            cur_set = table[i]
            next_set = Column()

            # Run predictions and completions until a fixed point is reached
            while True:
                to_predict = {x.expect for x in cur_set.to_predict.get_news()
                              if x.ptr}    # if not part of an already predicted batch
                to_reduce = cur_set.to_reduce.get_news()
                if not (to_predict or to_reduce):
                    break

                for nonterm in to_predict:
                    cur_set.add( predict(nonterm, i) )
                for item in to_reduce:
                    cur_set.add( complete(item, table) )

            # Scan: advance items whose expected terminal matches the token
            for item in cur_set.to_scan.get_news():
                match = item.expect[0](token) if callable(item.expect[0]) else item.expect[0] == token.type
                if match:
                    next_set.add([item.advance(stream[i])])

            if not next_set and token.type != '$end':
                expect = [i.expect for i in cur_set.to_scan]
                raise UnexpectedToken(token, expect, stream, i)

            table.append(next_set)
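
        # Worked micro-trace (hypothetical grammar `a : A A`, input 'A A'):
        #     column 0: predict 'a'  ->  item (0) a : * A A  goes to to_scan
        #     scan first 'A'         ->  item (0) a : A * A  seeds column 1
        #     scan second 'A'        ->  item (0) a : A A *  completes in column 2
        #     column 2: the complete item has origin == start and start == 0,
        #     so its postprocessed data becomes a solution.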

        # Main loop starts
        table = [Column()]
        table[0].add(predict(self.start, 0))

        for i, token in enumerate(stream):
            process_column(i, token)

        # One more pass with a sentinel token, so the last column gets
        # its completions processed too
        process_column(len(stream), EndToken())

        # Parse ended. Now build a parse tree
        solutions = [n.data for n in table[len(stream)].to_reduce
                     if n.rule.origin == self.start and n.start == 0]

        if not solutions:
            raise ParseError('Incomplete parse: Could not find a solution to input')

        return solutions
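
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): the parser_conf object
# is built elsewhere in the package and carries .rules, .callback and .start,
# exactly as __init__ above reads them; tokens need a .type attribute, as
# process_column's match test shows.
#
#     parser = Parser(parser_conf)
#     trees = parser.parse(tokens)    # postprocessed results of the start rule
#
# Each returned solution is the data of a completed start rule that spans the
# whole input (see the `solutions` comprehension above).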