## Lexer Implementation

import re

from .utils import Str, classify, get_regexp_width, Py36
from .exceptions import UnexpectedCharacters, LexError

class Pattern(object):
    def __init__(self, value, flags=()):
        self.value = value
        self.flags = frozenset(flags)

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    def to_regexp(self):
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed the syntax for flags in regular expressions:
        # they can now be scoped inline, e.g. '(?i:abc)'.
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value

    @classmethod
    def deserialize(cls, data):
        class_ = {
            's': PatternStr,
            're': PatternRE,
        }[data[0]]
        value, flags = data[1:]
        return class_(value, frozenset(flags))

class PatternStr(Pattern):
    def to_regexp(self):
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self):
        return len(self.value)
    max_width = min_width

    def serialize(self):
        return ['s', self.value, list(self.flags)]

class PatternRE(Pattern):
    def to_regexp(self):
        return self._get_flags(self.value)

    @property
    def min_width(self):
        return get_regexp_width(self.to_regexp())[0]

    @property
    def max_width(self):
        return get_regexp_width(self.to_regexp())[1]

    def serialize(self):
        return ['re', self.value, list(self.flags)]
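
# Illustrative sketch (not part of the original module): patterns round-trip
# through serialize()/deserialize(), and flags are baked into the regexp that
# to_regexp() emits. The values below are invented for the example.
def _example_pattern_roundtrip():
    p = PatternStr('if', flags=['i'])
    data = p.serialize()                  # ['s', 'if', ['i']]
    assert Pattern.deserialize(data) == p
    # On Python 3.6+ the flag is scoped inline, i.e. '(?i:if)'
    assert re.match(p.to_regexp(), 'IF')
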
class TerminalDef(object):
    def __init__(self, name, pattern, priority=1):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)

    def serialize(self):
        return [self.name, self.pattern.serialize(), self.priority]

    @classmethod
    def deserialize(cls, data):
        name, pattern, priority = data
        return cls(name, Pattern.deserialize(pattern), priority)

###{standalone

class Token(Str):
    __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column')

    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)

        self.type = type_
        self.pos_in_stream = pos_in_stream
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        return self

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column))

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__
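
# Illustrative sketch (not part of the original module): a Token compares
# equal to a plain string of the same value, but two Tokens with different
# types are never equal.
def _example_token_semantics():
    t = Token('NAME', 'foo', pos_in_stream=0, line=1, column=1)
    assert t == 'foo'                          # compares as its string value
    assert not (t == Token('STRING', 'foo'))   # same value, different type
    t2 = Token.new_borrow_pos('KEYWORD', 'foo', t)
    assert (t2.line, t2.column) == (1, 1)      # position copied from t
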
class LineCounter:
    def __init__(self):
        self.newline_char = '\n'
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if the token is
        guaranteed not to contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
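
# Illustrative sketch (not part of the original module): feeding text through
# a LineCounter advances its 1-based line/column bookkeeping.
def _example_line_counter():
    lc = LineCounter()
    lc.feed('foo\nba')
    assert lc.line == 2        # one newline was consumed
    assert lc.column == 3      # the next character would land at column 3
    assert lc.char_pos == 6
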
class _Lex:
    "Built to serve both Lexer and ContextualLexer"

    def __init__(self, lexer, state=None):
        self.lexer = lexer
        self.state = state

    def lex(self, stream, newline_types, ignore_types):
        newline_types = frozenset(newline_types)
        ignore_types = frozenset(ignore_types)
        line_ctr = LineCounter()

        while line_ctr.char_pos < len(stream):
            lexer = self.lexer
            for mre, type_from_index in lexer.mres:
                m = mre.match(stream, line_ctr.char_pos)
                if not m:
                    continue
                t = None
                value = m.group(0)
                type_ = type_from_index[m.lastindex]
                if type_ not in ignore_types:
                    t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    if t.type in lexer.callback:
                        t = lexer.callback[t.type](t)
                        if not isinstance(t, Token):
                            raise ValueError("Callbacks must return a token (returned %r)" % t)
                    yield t
                else:
                    if type_ in lexer.callback:
                        t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                        lexer.callback[type_](t)

                line_ctr.feed(value, type_ in newline_types)
                if t:
                    t.end_line = line_ctr.line
                    t.end_column = line_ctr.column
                break
            else:
                allowed = [v for m, tfi in lexer.mres for v in tfi.values()]
                raise UnexpectedCharacters(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, state=self.state)

class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                t.type = type_from_index[m.lastindex]
                break
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2

###}

def _create_unless(terminals):
    """For each regexp terminal, find the string terminals of equal or lower
    priority that it matches in full. Register an UnlessCallback on the
    regexp terminal so those matches are re-typed as the string terminal,
    and drop string terminals whose flags are compatible, since the regexp
    terminal now handles them."""
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()

    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re.match(retok.pattern.to_regexp(), s)
            if m and m.group(0) == s:
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, match_whole=True))

    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback
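
# Illustrative sketch (not part of the original module; terminal names and
# patterns are invented): a string terminal fully matched by a regexp
# terminal is folded into it, and the UnlessCallback restores the more
# specific type after matching.
def _example_create_unless():
    name_t = TerminalDef('NAME', PatternRE('[a-z]+'))
    if_t = TerminalDef('IF', PatternStr('if'))
    terminals, callback = _create_unless([name_t, if_t])
    assert terminals == [name_t]                 # 'if' was embedded into NAME
    tok = callback['NAME'](Token('NAME', 'if'))
    assert tok.type == 'IF'                      # callback re-types the keyword
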
def _build_mres(terminals, max_size, match_whole):
    # Python sets an unreasonable group limit (currently 100) in its re module.
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries fewer and fewer groups until it succeeds.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        try:
            mre = re.compile(u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]))
        except AssertionError:  # Yes, this is what Python provides us.. :/
            return _build_mres(terminals, max_size // 2, match_whole)

        mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
        terminals = terminals[max_size:]
    return mres


def build_mres(terminals, match_whole=False):
    return _build_mres(terminals, len(terminals), match_whole)
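
# Illustrative sketch (not part of the original module): each entry of the
# result pairs a compiled alternation with a map from match-group index back
# to terminal name. The terminals below are invented for the example.
def _example_build_mres():
    terminals = [TerminalDef('INT', PatternRE('[0-9]+')),
                 TerminalDef('WS', PatternRE(' +'))]
    (mre, type_from_index), = build_mres(terminals)
    m = mre.match('42 ')
    assert m.group(0) == '42'
    assert type_from_index[m.lastindex] == 'INT'
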
def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
    """
    return '\n' in r or '\\n' in r or '[^' in r or ('(?s' in r and '.' in r)
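
# Illustrative sketch (not part of the original module): the check is a
# heuristic over the regexp's source text, not a full parse.
def _example_regexp_has_newline():
    assert _regexp_has_newline(r'\n')        # escaped newline
    assert _regexp_has_newline('[^"]*')      # negated class may span lines
    assert not _regexp_has_newline('[a-z]+')
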
class Lexer:
    """Lexer interface

    Method Signatures:
        lex(self, stream) -> Iterator[Token]

        set_parser_state(self, state)  # Optional
    """
    set_parser_state = NotImplemented
    lex = NotImplemented

    @classmethod
    def deserialize(cls, data):
        class_ = {
            'traditional': TraditionalLexer,
            'contextual': ContextualLexer,
        }[data['type']]
        return class_.deserialize(data)

class TraditionalLexer(Lexer):
    def __init__(self, terminals, ignore=(), user_callbacks={}):
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        terminals = list(terminals)

        # Sanitization
        for t in terminals:
            try:
                re.compile(t.pattern.to_regexp())
            except re.error:
                raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

            if t.pattern.min_width == 0:
                raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

        assert set(ignore) <= {t.name for t in terminals}

        # Init
        self.newline_types = [t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())]
        self.ignore_types = list(ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        terminals, self.callback = _create_unless(terminals)
        assert all(self.callback.values())

        for type_, f in user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback.
                # (type_ is bound as a default argument to avoid the
                # late-binding pitfall of closures created in a loop.)
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t, type_=type_: t.type == type_)
            else:
                self.callback[type_] = f

        self.terminals = terminals
        self.mres = build_mres(terminals)

    def lex(self, stream):
        return _Lex(self).lex(stream, self.newline_types, self.ignore_types)

    def serialize(self):
        return {
            'type': 'traditional',
            'terminals': [t.serialize() for t in self.terminals],
            'ignore_types': self.ignore_types,
            'newline_types': self.newline_types,
        }

    @classmethod
    def deserialize(cls, data):
        inst = cls.__new__(cls)
        inst.terminals = [TerminalDef.deserialize(t) for t in data['terminals']]
        inst.mres = build_mres(inst.terminals)
        inst.ignore_types = data['ignore_types']
        inst.newline_types = data['newline_types']
        inst.callback = {}  # TODO implement
        return inst
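
# Illustrative usage sketch (not part of the original module; the terminal
# names and patterns are invented for the example):
def _example_traditional_lexer():
    terminals = [
        TerminalDef('INT', PatternRE('[0-9]+')),
        TerminalDef('PLUS', PatternStr('+')),
        TerminalDef('WS', PatternRE(' +')),
    ]
    lexer = TraditionalLexer(terminals, ignore=['WS'])
    tokens = list(lexer.lex('1 + 23'))
    assert [t.type for t in tokens] == ['INT', 'PLUS', 'INT']
    assert [str(t) for t in tokens] == ['1', '+', '23']
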
class ContextualLexer(Lexer):
    def __init__(self, terminals, states, ignore=(), always_accept=(), user_callbacks={}):
        tokens_by_name = {}
        for t in terminals:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(ignore) | set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
                lexer = TraditionalLexer(state_tokens, ignore=ignore, user_callbacks=user_callbacks)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        self.root_lexer = TraditionalLexer(terminals, ignore=ignore, user_callbacks=user_callbacks)

        self.set_parser_state(None)  # Needs to be set on the outside

    def set_parser_state(self, state):
        self.parser_state = state

    def lex(self, stream):
        l = _Lex(self.lexers[self.parser_state], self.parser_state)
        for x in l.lex(stream, self.root_lexer.newline_types, self.root_lexer.ignore_types):
            yield x
            # The parser may have changed state while handling x; switch to
            # the lexer for the current state before matching the next token.
            l.lexer = self.lexers[self.parser_state]
            l.state = self.parser_state

    def serialize(self):
        return {
            'type': 'contextual',
            'root_lexer': self.root_lexer.serialize(),
            'lexers': {state: lexer.serialize() for state, lexer in self.lexers.items()},
        }

    @classmethod
    def deserialize(cls, data):
        inst = cls.__new__(cls)
        inst.lexers = {state: Lexer.deserialize(lexer) for state, lexer in data['lexers'].items()}
        inst.root_lexer = TraditionalLexer.deserialize(data['root_lexer'])
        return inst
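
# Illustrative sketch (not part of the original module): the states mapping
# normally comes from the parser's tables; here it is invented, and the
# parser state is set by hand instead of by a running parser.
def _example_contextual_lexer():
    terminals = [TerminalDef('A', PatternStr('a')),
                 TerminalDef('B', PatternStr('b'))]
    lexer = ContextualLexer(terminals, states={0: ['A'], 1: ['B']})
    lexer.set_parser_state(0)
    tok, = lexer.lex('a')
    assert tok.type == 'A'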