## Lexer Implementation

import re

from .utils import Str, classify, get_regexp_width, Py36, Serialize
from .exceptions import UnexpectedCharacters, LexError

###{standalone

class Pattern(Serialize):
    __serialize_fields__ = 'value', 'flags'

    def __init__(self, value, flags=()):
        self.value = value
        self.flags = frozenset(flags)

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    def to_regexp(self):
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed the syntax for flags in regular expressions
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value


class PatternStr(Pattern):
    def to_regexp(self):
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self):
        return len(self.value)
    max_width = min_width


class PatternRE(Pattern):
    def to_regexp(self):
        return self._get_flags(self.value)

    @property
    def min_width(self):
        return get_regexp_width(self.to_regexp())[0]

    @property
    def max_width(self):
        return get_regexp_width(self.to_regexp())[1]
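
# Illustrative note (not in the original file): PatternStr escapes its value,
# while PatternRE uses it verbatim, e.g.:
#   PatternStr('a+b').to_regexp()  ->  'a\\+b'
#   PatternRE('a+b').to_regexp()   ->  'a+b'
#   PatternStr('if', flags=['i']).to_regexp()  ->  '(?i:if)' on Python 3.6+
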
class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE

    def __init__(self, name, pattern, priority=1):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)


class Token(Str):
    __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column')

    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)

        self.type = type_
        self.pos_in_stream = pos_in_stream
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        return self

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column, ))

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__
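
# Illustrative note (not in the original file): a Token compares like its
# string value, except against another Token, where the types must match too:
#   Token('NAME', 'x') == 'x'                   ->  True
#   Token('NAME', 'x') == Token('STRING', 'x')  ->  False
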
class LineCounter:
    def __init__(self):
        self.newline_char = '\n'
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if the token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
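
# Illustrative example (not in the original file): feeding 'a\nb' from the
# start advances the counter past one newline:
#   lc = LineCounter()
#   lc.feed('a\nb')
#   assert (lc.line, lc.column, lc.char_pos) == (2, 2, 3)
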
class _Lex:
    "Built to serve both Lexer and ContextualLexer"
    def __init__(self, lexer, state=None):
        self.lexer = lexer
        self.state = state

    def lex(self, stream, newline_types, ignore_types):
        newline_types = frozenset(newline_types)
        ignore_types = frozenset(ignore_types)
        line_ctr = LineCounter()
        last_token = None

        while line_ctr.char_pos < len(stream):
            lexer = self.lexer
            for mre, type_from_index in lexer.mres:
                m = mre.match(stream, line_ctr.char_pos)
                if not m:
                    continue

                t = None
                value = m.group(0)
                type_ = type_from_index[m.lastindex]
                if type_ not in ignore_types:
                    t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    if t.type in lexer.callback:
                        t = lexer.callback[t.type](t)
                        if not isinstance(t, Token):
                            raise ValueError("Callbacks must return a token (returned %r)" % t)
                    last_token = t
                    yield t
                else:
                    # Ignored tokens are not yielded, but their callbacks still run
                    if type_ in lexer.callback:
                        t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                        lexer.callback[type_](t)

                line_ctr.feed(value, type_ in newline_types)
                if t:
                    t.end_line = line_ctr.line
                    t.end_column = line_ctr.column

                break
            else:
                # for..else: no regexp matched at the current position
                allowed = {v for m, tfi in lexer.mres for v in tfi.values()}
                raise UnexpectedCharacters(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, state=self.state, token_history=last_token and [last_token])
class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                t.type = type_from_index[m.lastindex]
                break
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2
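
# Illustrative note (not in the original file): TraditionalLexer.build uses
# CallChain to run a user callback after an UnlessCallback, but only when the
# token kept its expected type after the first callback:
#   CallChain(unless_cb, user_cb, lambda t: t.type == type_)
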
def _create_unless(terminals):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = [] # {}
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re.match(retok.pattern.to_regexp(), s)
            if m and m.group(0) == s:
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, match_whole=True))

    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback
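
# Illustrative note (not in the original file): given NAME=/[a-z]+/ and a
# keyword terminal IF="if", the string "if" fully matches NAME's regexp, so IF
# is handled by an UnlessCallback attached to NAME and (flags permitting)
# dropped from the main terminal list.
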
def _build_mres(terminals, max_size, match_whole):
    # Python sets an unreasonable group limit (currently 100) in its re module
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries fewer and fewer groups until it's successful.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        try:
            mre = re.compile(u'|'.join(u'(?P<%s>%s)'%(t.name, t.pattern.to_regexp()+postfix) for t in terminals[:max_size]))
        except AssertionError:  # Yes, this is what Python provides us.. :/
            return _build_mres(terminals, max_size//2, match_whole)

        # terms_from_name = {t.name: t for t in terminals[:max_size]}
        mres.append((mre, {i:n for n,i in mre.groupindex.items()} ))
        terminals = terminals[max_size:]
    return mres

def build_mres(terminals, match_whole=False):
    return _build_mres(terminals, len(terminals), match_whole)
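
# Illustrative example (not in the original file): build_mres returns a list
# of (compiled_regex, {group_index: terminal_name}) pairs, split across
# several regexps when Python's ~100-group limit is exceeded:
#   build_mres([TerminalDef('NUMBER', PatternRE('[0-9]+')),
#               TerminalDef('PLUS', PatternStr('+'))])
#   ->  [(re.compile('(?P<NUMBER>[0-9]+)|(?P<PLUS>\\+)'), {1: 'NUMBER', 2: 'PLUS'})]
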
def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
    """
    return '\n' in r or '\\n' in r or '[^' in r or ('(?s' in r and '.' in r)
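
# Illustrative note (not in the original file): this is a conservative
# heuristic with false positives, e.g. _regexp_has_newline(r'[^\n]+') is True
# even though that pattern can never match a newline.
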
class Lexer(object):
    """Lexer interface

    Method Signatures:
        lex(self, stream) -> Iterator[Token]

        set_parser_state(self, state)   # Optional
    """
    set_parser_state = NotImplemented
    lex = NotImplemented


class TraditionalLexer(Lexer):
    def __init__(self, terminals, ignore=(), user_callbacks={}):
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        terminals = list(terminals)

        # Sanitization
        for t in terminals:
            try:
                re.compile(t.pattern.to_regexp())
            except re.error:
                raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

            if t.pattern.min_width == 0:
                raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

        assert set(ignore) <= {t.name for t in terminals}, (ignore, terminals)

        # Init
        self.newline_types = [t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())]
        self.ignore_types = list(ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = user_callbacks
        self.build()

    def build(self):
        terminals, self.callback = _create_unless(self.terminals)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self.mres = build_mres(terminals)

    def lex(self, stream):
        return _Lex(self).lex(stream, self.newline_types, self.ignore_types)
class ContextualLexer(Lexer):
    def __init__(self, terminals, states, ignore=(), always_accept=(), user_callbacks={}):
        tokens_by_name = {}
        for t in terminals:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(ignore) | set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
                lexer = TraditionalLexer(state_tokens, ignore=ignore, user_callbacks=user_callbacks)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        self.root_lexer = TraditionalLexer(terminals, ignore=ignore, user_callbacks=user_callbacks)

        self.set_parser_state(None) # Needs to be set on the outside

    def set_parser_state(self, state):
        self.parser_state = state

    def lex(self, stream):
        l = _Lex(self.lexers[self.parser_state], self.parser_state)
        for x in l.lex(stream, self.root_lexer.newline_types, self.root_lexer.ignore_types):
            yield x
            # Switch to the lexer for the parser's current state after each token
            l.lexer = self.lexers[self.parser_state]
            l.state = self.parser_state

###}
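
# Usage sketch (illustrative, not part of the original file, and placed
# outside the ###{standalone block): tokenizing a short string with a
# hand-built TraditionalLexer. The terminal names here are made up.
if __name__ == '__main__':
    _terminals = [
        TerminalDef('NUMBER', PatternRE('[0-9]+')),
        TerminalDef('PLUS', PatternStr('+')),
        TerminalDef('WS', PatternRE('[ ]+')),
    ]
    _lexer = TraditionalLexer(_terminals, ignore=['WS'])
    for tok in _lexer.lex('1 + 23'):
        print(repr(tok))   # Token(NUMBER, '1'), Token(PLUS, '+'), Token(NUMBER, '23')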