This repo contains code to mirror other repos, as well as the code being mirrored.

```python
## Lexer Implementation

import re
import sre_parse

from .utils import Str, classify
from .common import is_terminal, PatternStr, PatternRE, TokenDef


class LexError(Exception):
    pass


class UnexpectedInput(LexError):
    def __init__(self, seq, lex_pos, line, column):
        context = seq[lex_pos:lex_pos+5]
        message = "No token defined for: '%s' in %r at line %d" % (seq[lex_pos], context, line)

        super(UnexpectedInput, self).__init__(message)

        self.line = line
        self.column = column
        self.context = context


class Token(Str):
    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None):
        inst = Str.__new__(cls, value)
        inst.type = type_
        inst.pos_in_stream = pos_in_stream
        inst.value = value
        inst.line = line
        inst.column = column
        return inst

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        inst = cls(type_, value, borrow_t.pos_in_stream)
        inst.line = borrow_t.line
        inst.column = borrow_t.column
        return inst

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)
```
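Because `Token` subclasses `Str`, each token compares and behaves like the raw string it matched while also carrying its type and position. A minimal sketch of that idea, using a trivial `str` subclass as a stand-in for the real `Str` from `.utils` (an assumption; the actual class is not shown in this file):

```python
class Str(str):
    """Stand-in for .utils.Str: assumed to be a str subclass that allows attributes."""

tok = Str.__new__(Str, 'foo')   # roughly what Token.__new__ does
tok.type = 'NAME'
tok.line, tok.column = 2, 4

assert tok == 'foo'                       # compares like the raw string
assert tok.upper() == 'FOO'               # normal str methods still work
assert (tok.type, tok.line) == ('NAME', 2)  # but metadata rides along
```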
```python
class Regex:
    def __init__(self, pattern, flags=()):
        self.pattern = pattern
        self.flags = flags


def _regexp_has_newline(r):
    return '\n' in r or '\\n' in r or ('(?s)' in r and '.' in r)


def _create_unless_callback(strs):
    def unless_callback(t):
        if t in strs:
            t.type = strs[t]
        return t
    return unless_callback


def _create_unless(tokens):
    tokens_by_type = classify(tokens, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()

    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = {}
        for strtok in tokens_by_type.get(PatternStr, []):
            s = strtok.pattern.value
            m = re.match(retok.pattern.value, s)
            if m and m.group(0) == s:
                embedded_strs.add(strtok.name)
                unless[s] = strtok.name
        if unless:
            callback[retok.name] = _create_unless_callback(unless)

    tokens = [t for t in tokens if t.name not in embedded_strs]
    return tokens, callback
```
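`_create_unless` handles string-literal tokens that are entirely swallowed by a broader regex token (a keyword like `"if"` matched by a generic name pattern): the literal is dropped from the token list, and a callback re-tags matches of the regex token whose text equals the literal. A standalone sketch of that reclassification, with toy names rather than the module's `PatternStr`/`PatternRE` objects:

```python
# Toy example: the literal tokens IF and WHILE are fully matched by the
# NAME regex, so they are folded into NAME and recovered by a callback.
import re

name_regexp = r'[a-z]\w*'
literals = {'IF': 'if', 'WHILE': 'while'}

unless = {}
for tok_name, s in literals.items():
    m = re.match(name_regexp, s)
    if m and m.group(0) == s:      # literal is swallowed by the regex
        unless[s] = tok_name

def unless_callback(value, type_):
    # Re-tag a NAME match if its text is one of the embedded literals.
    return (unless[value], value) if value in unless else (type_, value)

assert unless_callback('if', 'NAME') == ('IF', 'if')
assert unless_callback('foo', 'NAME') == ('NAME', 'foo')
```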
```python
class Lexer(object):
    def __init__(self, tokens, ignore=()):
        assert all(isinstance(t, TokenDef) for t in tokens), tokens

        self.ignore = ignore
        self.newline_char = '\n'
        tokens = list(tokens)

        # Sanitization
        for t in tokens:
            try:
                re.compile(t.pattern.to_regexp())
            except:
                raise LexError("Cannot compile token: %s: %s" % (t.name, t.pattern))

            width = sre_parse.parse(t.pattern.to_regexp()).getwidth()
            if width[0] == 0:
                raise LexError("Lexer does not allow zero-width tokens. (%s: %s)" % (t.name, t.pattern))

        token_names = {t.name for t in tokens}
        for t in ignore:
            if t not in token_names:
                raise LexError("Token '%s' was marked to ignore but it is not defined!" % t)

        # Init
        self.newline_types = [t.name for t in tokens if _regexp_has_newline(t.pattern.to_regexp())]
        self.ignore_types = [t for t in ignore]

        tokens, self.callback = _create_unless(tokens)
        assert all(self.callback.values())

        tokens.sort(key=lambda x: (x.pattern.priority, len(x.pattern.value)), reverse=True)

        self.tokens = tokens

        self.mres = self._build_mres(tokens, len(tokens))
```
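The sanitization step rejects zero-width tokens because a pattern that can match the empty string would never advance the lexer position. It uses `sre_parse.parse(...).getwidth()`, which reports the minimum and maximum length a pattern can match. A quick illustration (note that `sre_parse` is an undocumented CPython module and is deprecated in recent Python versions, so treat this as illustrative):

```python
import sre_parse

assert sre_parse.parse(r'\d+').getwidth()[0] == 1   # matches at least one char: accepted
assert sre_parse.parse(r'\d*').getwidth()[0] == 0   # can match '': would be rejected
```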
```python
    def _build_mres(self, tokens, max_size):
        # Python sets an unreasonable group limit (currently 100) in its re module
        # Worse, the only way to know we reached it is by catching an AssertionError!
        # This function recursively tries less and less groups until it's successful.
        mres = []
        while tokens:
            try:
                mre = re.compile(u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp()) for t in tokens[:max_size]))
            except AssertionError:  # Yes, this is what Python provides us.. :/
                return self._build_mres(tokens, max_size//2)

            mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
            tokens = tokens[max_size:]
        return mres
```
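`_build_mres` joins every token pattern into one big alternation of named groups, then halves the chunk size whenever the interpreter refuses to compile that many groups. A standalone sketch of the same chunking strategy with toy patterns (older CPython capped named groups at 100 and signalled it with `AssertionError`; newer versions have lifted that cap, so the fallback rarely fires in practice):

```python
import re

patterns = [('NUMBER', r'\d+'), ('NAME', r'[a-z]\w*'), ('OP', r'[+\-*/]')]

def build_mres(patterns, max_size):
    mres = []
    while patterns:
        try:
            mre = re.compile('|'.join('(?P<%s>%s)' % p for p in patterns[:max_size]))
        except (AssertionError, re.error):
            # Too many groups for this interpreter: retry with smaller chunks.
            return build_mres(patterns, max_size // 2)
        mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
        patterns = patterns[max_size:]
    return mres

mres = build_mres(patterns, len(patterns))
m = mres[0][0].match('42 + x')
assert mres[0][1][m.lastindex] == 'NUMBER'
```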
```python
    def lex(self, stream):
        lex_pos = 0
        line = 1
        col_start_pos = 0
        newline_types = list(self.newline_types)
        ignore_types = list(self.ignore_types)
        while True:
            for mre, type_from_index in self.mres:
                m = mre.match(stream, lex_pos)
                if m:
                    value = m.group(0)
                    type_ = type_from_index[m.lastindex]
                    if type_ not in ignore_types:
                        t = Token(type_, value, lex_pos, line, lex_pos - col_start_pos)
                        if t.type in self.callback:
                            t = self.callback[t.type](t)
                        yield t

                    if type_ in newline_types:
                        newlines = value.count(self.newline_char)
                        if newlines:
                            line += newlines
                            col_start_pos = lex_pos + value.rindex(self.newline_char)

                    lex_pos += len(value)
                    break
            else:
                if lex_pos < len(stream):
                    raise UnexpectedInput(stream, lex_pos, line, lex_pos - col_start_pos)
                break
```
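The scanning loop anchors each combined regex at `lex_pos`, recovers the matched token type via `m.lastindex` and the inverted `groupindex` map, skips ignored types, tracks line and column by counting newlines, and the `for ... else` raises `UnexpectedInput` if nothing matched before the end of input. A minimal standalone sketch of that match-at-position mechanism (toy patterns, not the module's real token definitions):

```python
import re

mre = re.compile(r'(?P<NUMBER>\d+)|(?P<NAME>[a-z]\w*)|(?P<WS>\s+)')
type_from_index = {i: n for n, i in mre.groupindex.items()}

stream, lex_pos, out = 'x1 42', 0, []
while lex_pos < len(stream):
    m = mre.match(stream, lex_pos)          # anchored at the current position
    if not m:
        raise ValueError('no token at %d' % lex_pos)   # UnexpectedInput in the real lexer
    type_, value = type_from_index[m.lastindex], m.group(0)
    if type_ != 'WS':                        # plays the role of ignore_types
        out.append((type_, value))
    lex_pos += len(value)

assert out == [('NAME', 'x1'), ('NUMBER', '42')]
```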
```python
class ContextualLexer:
    def __init__(self, tokens, states, ignore=(), always_accept=()):
        tokens_by_name = {}
        for t in tokens:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts)  # For python3
                accepts |= set(ignore)
                accepts |= set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if is_terminal(n) and n != '$end']
                lexer = Lexer(state_tokens, ignore=ignore)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        self.root_lexer = Lexer(tokens, ignore=ignore)

        self.set_parser_state(None)  # Needs to be set on the outside

    def set_parser_state(self, state):
        self.parser_state = state

    def lex(self, stream):
        lex_pos = 0
        line = 1
        col_start_pos = 0
        newline_types = list(self.root_lexer.newline_types)
        ignore_types = list(self.root_lexer.ignore_types)
        while True:
            lexer = self.lexers[self.parser_state]
            for mre, type_from_index in lexer.mres:
                m = mre.match(stream, lex_pos)
                if m:
                    value = m.group(0)
                    type_ = type_from_index[m.lastindex]
                    if type_ not in ignore_types:
                        t = Token(type_, value, lex_pos, line, lex_pos - col_start_pos)
                        if t.type in lexer.callback:
                            t = lexer.callback[t.type](t)
                        yield t

                    if type_ in newline_types:
                        newlines = value.count(lexer.newline_char)
                        if newlines:
                            line += newlines
                            col_start_pos = lex_pos + value.rindex(lexer.newline_char)

                    lex_pos += len(value)
                    break
            else:
                if lex_pos < len(stream):
                    print("Allowed tokens:", lexer.tokens)
                    raise UnexpectedInput(stream, lex_pos, line, lex_pos - col_start_pos)
                break
```
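`ContextualLexer` builds one `Lexer` per parser state, deduplicated by the frozenset of accepted terminals, and on every token it consults `self.parser_state` to pick the lexer whose token set matches what the parser can currently accept; as the inline comment notes, the parser is expected to call `set_parser_state` from the outside while consuming the token stream. A standalone sketch of the frozenset-based sharing, with toy state data instead of real parser states:

```python
# States with identical accept sets reuse a single compiled lexer.
states = {
    0: ['NAME', 'NUMBER'],
    1: ['NAME', 'NUMBER'],   # same accept set as state 0
    2: ['STRING'],
}

lexer_by_tokens, lexers = {}, {}
for state, accepts in states.items():
    key = frozenset(accepts)
    if key not in lexer_by_tokens:
        lexer_by_tokens[key] = object()   # stands in for Lexer(state_tokens, ...)
    lexers[state] = lexer_by_tokens[key]

assert lexers[0] is lexers[1]       # shared lexer
assert lexers[0] is not lexers[2]   # different accept set, different lexer
```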