## Lexer Implementation

import re

from .utils import Str, classify, get_regexp_width, Py36, Serialize
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken

###{standalone
from copy import copy


class Pattern(Serialize):

    def __init__(self, value, flags=()):
        self.value = value
        self.flags = frozenset(flags)

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    def to_regexp(self):
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed syntax for flags in regular expression
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value
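
    # For illustration (flag and input chosen arbitrarily): with flags={'i'},
    # _get_flags('abc') returns '(?i:abc)' on Python 3.6+ and '(?i)abc' on
    # older versions; both embed the flag directly in the generated regexp.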


class PatternStr(Pattern):
    __serialize_fields__ = 'value', 'flags'

    type = "str"

    def to_regexp(self):
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self):
        return len(self.value)
    max_width = min_width


class PatternRE(Pattern):
    __serialize_fields__ = 'value', 'flags', '_width'

    type = "re"

    def to_regexp(self):
        return self._get_flags(self.value)

    _width = None
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width

    @property
    def min_width(self):
        return self._get_width()[0]

    @property
    def max_width(self):
        return self._get_width()[1]


class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE

    def __init__(self, name, pattern, priority=1):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)


class Token(Str):
    """Token of a lexer.

    When using a lexer, the resulting tokens in the trees will be of the
    Token class, which inherits from Python's string. So, normal string
    comparisons and operations will work as expected. Tokens also have other
    useful attributes.

    Attributes:
        type_: Name of the token (as specified in grammar)
        pos_in_stream: The index of the token in the text
        line: The line of the token in the text (starting with 1)
        column: The column of the token in the text (starting with 1)
        end_line: The line where the token ends
        end_column: The next column after the end of the token. For example,
            if the token is a single character with a column value of 4,
            end_column will be 5.
        end_pos: The index where the token ends (basically pos_in_stream +
            len(token))
    """
    __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')

    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)

        self.type = type_
        self.pos_in_stream = pos_in_stream
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.end_pos = end_pos
        return self

    def update(self, type_=None, value=None):
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column, ))

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False

        return Str.__eq__(self, other)

    __hash__ = Str.__hash__
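
# A brief Token usage sketch (terminal name and value are arbitrary examples):
#
#     tok = Token('NUMBER', '42', pos_in_stream=0, line=1, column=1)
#     tok == '42'              # True -- compares like a plain string
#     tok.type                 # 'NUMBER'
#     tok.update(value='43')   # new Token, keeping the original position info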


class LineCounter:
    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
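
# For example (input chosen for illustration): feeding 'ab\ncd' to a fresh
# LineCounter('\n') leaves char_pos=5, line=2 and column=3 (the column of the
# next character to be read), since the last line starts right after the
# newline at index 2.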


class _Lex:
    "Built to serve both Lexer and ContextualLexer"

    def __init__(self, lexer, state=None):
        self.lexer = lexer
        self.state = state

    def lex(self, stream, newline_types, ignore_types):
        newline_types = frozenset(newline_types)
        ignore_types = frozenset(ignore_types)
        line_ctr = LineCounter('\n' if not self.lexer.use_bytes else b'\n')
        last_token = None

        while line_ctr.char_pos < len(stream):
            lexer = self.lexer
            res = lexer.match(stream, line_ctr.char_pos)
            if not res:
                allowed = {v for m, tfi in lexer.mres for v in tfi.values()} - ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, state=self.state, token_history=last_token and [last_token])

            value, type_ = res

            if type_ not in ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, type_ in newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in lexer.callback:
                    t = lexer.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise ValueError("Callbacks must return a token (returned %r)" % t)
                yield t
                last_token = t
            else:
                if type_ in lexer.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    lexer.callback[type_](t2)
                line_ctr.feed(value, type_ in newline_types)
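

# UnlessCallback re-types a token when its value also matches one of the string
# terminals that were folded into a broader regexp terminal (see _create_unless
# below).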
class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                t.type = type_from_index[m.lastindex]
                break
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2


def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags)
            if m and m.group(0) == s:
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))

    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback
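
# In effect this implements the usual keyword-vs-identifier rule: a string
# terminal (e.g. "if") that is fully matched by a broader regexp terminal
# (e.g. a NAME pattern) is folded out of the main terminal list when its flags
# are compatible, and an UnlessCallback re-types the broader match whenever the
# matched text equals the string terminal. The example terminal names here are
# illustrative.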


def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_, use_bytes):
    # Python sets an unreasonable group limit (currently 100) in its re module
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries fewer and fewer groups until it's successful.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
        if use_bytes:
            pattern = pattern.encode('latin-1')
        try:
            mre = re_.compile(pattern, g_regex_flags)
        except AssertionError:  # Yes, this is what Python provides us.. :/
            return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_, use_bytes)

        # terms_from_name = {t.name: t for t in terminals[:max_size]}
        mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
        terminals = terminals[max_size:]
    return mres


def build_mres(terminals, g_regex_flags, re_, use_bytes, match_whole=False):
    return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_, use_bytes)
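
# The returned value is a list of (compiled_regexp, {group_index: terminal_name})
# pairs; TraditionalLexer.match() below tries each compiled regexp in turn and
# uses m.lastindex to recover which terminal matched.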


def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
        - spaces (\s)
    """
    return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)
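
# For illustration, _regexp_has_newline(r'\s+') and _regexp_has_newline(r'[^"]*')
# are truthy, while _regexp_has_newline(r'[a-z]+') is not.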


class Lexer(object):
    """Lexer interface

    Method Signatures:
        lex(self, stream) -> Iterator[Token]
    """
    lex = NotImplemented


class TraditionalLexer(Lexer):

    def __init__(self, conf):
        terminals = list(conf.tokens)
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        self.re = conf.re_module

        if not conf.skip_validation:
            # Sanitization
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

                if t.pattern.min_width == 0:
                    raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

            assert set(conf.ignore) <= {t.name for t in terminals}

        # Init
        self.newline_types = [t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())]
        self.ignore_types = list(conf.ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes

        self._mres = None
        # self.build(g_regex_flags)

    def _build(self):
        terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, re_=self.re, use_bytes=self.use_bytes)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self._mres = build_mres(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def mres(self):
        if self._mres is None:
            self._build()
        return self._mres

    def match(self, stream, pos):
        for mre, type_from_index in self.mres:
            m = mre.match(stream, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]

    def lex(self, stream):
        return _Lex(self).lex(stream, self.newline_types, self.ignore_types)
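
# Sketch of the expected flow (conf is assumed to provide .tokens, .ignore,
# .callbacks, .g_regex_flags, .re_module, .use_bytes and .skip_validation, as
# used above): TraditionalLexer(conf).lex(text) yields Token objects, skipping
# ignored terminals and applying any user callbacks along the way.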


class ContextualLexer(Lexer):

    def __init__(self, conf, states, always_accept=()):
        terminals = list(conf.tokens)
        tokens_by_name = {}
        for t in terminals:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t

        trad_conf = copy(conf)
        trad_conf.tokens = terminals

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
                lexer_conf = copy(trad_conf)
                lexer_conf.tokens = state_tokens
                lexer = TraditionalLexer(lexer_conf)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        assert trad_conf.tokens is terminals
        self.root_lexer = TraditionalLexer(trad_conf)
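
    # lex() below calls get_parser_state() again after every yielded token, so
    # the set of acceptable terminals can follow the parser as its state changes.
    # States whose accepted terminal sets are identical share a single
    # TraditionalLexer (see lexer_by_tokens above).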
    def lex(self, stream, get_parser_state):
        parser_state = get_parser_state()
        l = _Lex(self.lexers[parser_state], parser_state)
        try:
            for x in l.lex(stream, self.root_lexer.newline_types, self.root_lexer.ignore_types):
                yield x
                parser_state = get_parser_state()
                l.lexer = self.lexers[parser_state]
                l.state = parser_state  # For debug only, no need to worry about multithreading
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined,
            # but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            root_match = self.root_lexer.match(stream, e.pos_in_stream)
            if not root_match:
                raise

            value, type_ = root_match
            t = Token(type_, value, e.pos_in_stream, e.line, e.column)
            raise UnexpectedToken(t, e.allowed, state=e.state)

###}