# Lexer Implementation

import re

from .utils import Str, classify, get_regexp_width, Py36, Serialize, suppress
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken

###{standalone
from copy import copy


class Pattern(Serialize):

    def __init__(self, value, flags=(), raw=None):
        self.value = value
        self.flags = frozenset(flags)
        self.raw = raw

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    def to_regexp(self):
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed syntax for flags in regular expression
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value

class PatternStr(Pattern):
    __serialize_fields__ = 'value', 'flags'

    type = "str"

    def to_regexp(self):
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self):
        return len(self.value)
    max_width = min_width

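# Illustrative sketch (documentation aid, not part of the original module).
# It shows how a literal terminal becomes a regexp: the value is escaped,
# flags are rendered as an inline group ('(?i:...)') on Python 3.6+ or as a
# prefix ('(?i)') otherwise, and both width bounds equal the literal's length.
def _example_patternstr():  # documentation aid only
    p = PatternStr('if', flags=['i'])
    assert p.min_width == p.max_width == 2
    assert p.to_regexp() == ('(?i:if)' if Py36 else '(?i)if')
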
class PatternRE(Pattern):
    __serialize_fields__ = 'value', 'flags', '_width'

    type = "re"

    def to_regexp(self):
        return self._get_flags(self.value)

    _width = None
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width

    @property
    def min_width(self):
        return self._get_width()[0]

    @property
    def max_width(self):
        return self._get_width()[1]

class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority', 'user_repr'
    __serialize_namespace__ = PatternStr, PatternRE

    def __init__(self, name, pattern, priority=1, user_repr=None):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority
        self.user_repr = user_repr or name

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)

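# Illustrative sketch (documentation aid, not part of the original module):
# a terminal definition simply pairs a grammar-level name with a Pattern.
def _example_terminaldef():  # documentation aid only
    number = TerminalDef('NUMBER', PatternRE(r'\d+'))
    assert number.priority == 1 and number.user_repr == 'NUMBER'
    assert number.pattern.to_regexp() == r'\d+'
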
class Token(Str):
    """A string with meta-information, produced by the lexer.

    When parsing text, the resulting chunks of the input that haven't been discarded
    will end up in the tree as Token instances. The Token class inherits from Python's ``str``,
    so normal string comparisons and operations will work as expected.

    Attributes:
        type: Name of the token (as specified in grammar)
        value: Value of the token (redundant, as ``token.value == token`` will always be true)
        pos_in_stream: The index of the token in the text
        line: The line of the token in the text (starting with 1)
        column: The column of the token in the text (starting with 1)
        end_line: The line where the token ends
        end_column: The next column after the end of the token. For example,
            if the token is a single character with a column value of 4,
            end_column will be 5.
        end_pos: The index where the token ends (basically ``pos_in_stream + len(token)``)
    """
    __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')

    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)

        self.type = type_
        self.pos_in_stream = pos_in_stream
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.end_pos = end_pos
        return self

    def update(self, type_=None, value=None):
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column))

    def __repr__(self):
        return 'Token(%r, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__

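# Illustrative sketch (documentation aid, not part of the original module):
# a Token compares equal to a plain string with the same text, but two Tokens
# only compare equal when their types also match; update() keeps the position
# info while swapping the type and/or value.
def _example_token():  # documentation aid only
    num = Token('NUMBER', '42', pos_in_stream=0, line=1, column=1)
    assert num == '42' and num.type == 'NUMBER'
    assert num != Token('STRING', '42')
    renamed = num.update(type_='INT')
    assert renamed == '42' and renamed.type == 'INT' and renamed.line == 1
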
class LineCounter:
    __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'

    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1

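# Illustrative sketch (documentation aid, not part of the original module):
# feeding text through a LineCounter advances the absolute position and the
# 1-based line/column at which the *next* token would start.
def _example_linecounter():  # documentation aid only
    lc = LineCounter('\n')
    lc.feed('a = 1\n')
    assert (lc.char_pos, lc.line, lc.column) == (6, 2, 1)
    lc.feed('b', test_newline=False)   # safe: this chunk contains no newline
    assert (lc.line, lc.column) == (2, 2)
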
class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                t.type = type_from_index[m.lastindex]
                break
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2

def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags)
            if m and m.group(0) == s:
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))

    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback

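# Illustrative sketch (documentation aid, not part of the original module):
# _create_unless handles literals that are also matched by a broader regexp.
# With NAME = /\w+/ and IF = "if", the literal "if" would normally be swallowed
# by NAME; instead, IF is dropped from the terminal list and an UnlessCallback
# is registered for NAME that re-types an exact "if" match back to IF.
def _example_create_unless(re_module=re):  # documentation aid only
    name = TerminalDef('NAME', PatternRE(r'\w+'))
    if_ = TerminalDef('IF', PatternStr('if'))
    terminals, callback = _create_unless([name, if_], 0, re_module, use_bytes=False)
    assert terminals == [name] and 'NAME' in callback
    assert callback['NAME'](Token('NAME', 'if')).type == 'IF'
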
def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_, use_bytes):
    # Python sets an unreasonable group limit (currently 100) in its re module
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries less and less groups until it's successful.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
        if use_bytes:
            pattern = pattern.encode('latin-1')
        try:
            mre = re_.compile(pattern, g_regex_flags)
        except AssertionError:  # Yes, this is what Python provides us.. :/
            return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_, use_bytes)

        mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
        terminals = terminals[max_size:]
    return mres


def build_mres(terminals, g_regex_flags, re_, use_bytes, match_whole=False):
    return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_, use_bytes)

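# Illustrative sketch (documentation aid, not part of the original module):
# each compiled "mre" is one big alternation of named groups, paired with a
# reverse group-index map so a match can be turned back into a terminal name.
def _example_build_mres(re_module=re):  # documentation aid only
    terms = [TerminalDef('NAME', PatternRE(r'\w+')), TerminalDef('NUMBER', PatternRE(r'\d+'))]
    (mre, by_index), = build_mres(terms, 0, re_module, use_bytes=False)
    assert by_index == {1: 'NAME', 2: 'NUMBER'}
    m = mre.match('abc')
    assert m.group(0) == 'abc' and by_index[m.lastindex] == 'NAME'
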
def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
        - spaces (\s)
    """
    return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)

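# Illustrative sketch (documentation aid, not part of the original module):
# this is a conservative textual heuristic over the regexp source, not a full
# analysis, so it may report newlines for patterns that cannot match one.
def _example_regexp_has_newline():  # documentation aid only
    assert _regexp_has_newline(r'\s+')          # \s can match '\n'
    assert _regexp_has_newline(r'[^"]*')        # a negated class might too
    assert not _regexp_has_newline(r'[a-z]+')   # a plain character class cannot
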
class Lexer(object):
    """Lexer interface

    Method Signatures:
        lex(self, text) -> Iterator[Token]
    """
    lex = NotImplemented

    def make_lexer_state(self, text):
        line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n')
        return LexerState(text, line_ctr)

class TraditionalLexer(Lexer):

    def __init__(self, conf):
        terminals = list(conf.terminals)
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        self.re = conf.re_module

        if not conf.skip_validation:
            # Sanitization
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

                if t.pattern.min_width == 0:
                    raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

            assert set(conf.ignore) <= {t.name for t in terminals}

        # Init
        self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))
        self.ignore_types = frozenset(conf.ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes

        self._mres = None

    def _build(self):
        terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self._mres = build_mres(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def mres(self):
        if self._mres is None:
            self._build()
        return self._mres

    def match(self, text, pos):
        for mre, type_from_index in self.mres:
            m = mre.match(text, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]

    def lex(self, state, parser_state):
        with suppress(EOFError):
            while True:
                yield self.next_token(state, parser_state)

    def next_token(self, lex_state, parser_state=None):
        line_ctr = lex_state.line_ctr
        while line_ctr.char_pos < len(lex_state.text):
            res = self.match(lex_state.text, line_ctr.char_pos)
            if not res:
                allowed = {v for m, tfi in self.mres for v in tfi.values()} - self.ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
                                           allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],
                                           state=parser_state, _all_terminals=self.terminals)

            value, type_ = res
            if type_ not in self.ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, type_ in self.newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in self.callback:
                    t = self.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise LexError("Callbacks must return a token (returned %r)" % t)
                lex_state.last_token = t
                return t
            else:
                if type_ in self.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    self.callback[type_](t2)
                line_ctr.feed(value, type_ in self.newline_types)

        # EOF
        raise EOFError(self)

class LexerState:
    __slots__ = 'text', 'line_ctr', 'last_token'

    def __init__(self, text, line_ctr, last_token=None):
        self.text = text
        self.line_ctr = line_ctr
        self.last_token = last_token

    def __copy__(self):
        return type(self)(self.text, copy(self.line_ctr), self.last_token)

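# Illustrative sketch (documentation aid, not part of the original module).
# A TraditionalLexer normally receives a LexerConf built by lark itself; the
# SimpleNamespace below is a hypothetical stand-in that only supplies the
# attributes __init__ actually reads. Ignored terminals (here WS) are consumed
# but never yielded.
def _example_traditional_lexer():  # documentation aid only
    from types import SimpleNamespace
    conf = SimpleNamespace(
        terminals=[TerminalDef('NUMBER', PatternRE(r'\d+')),
                   TerminalDef('WS', PatternRE(r'\s+'))],
        re_module=re, ignore=['WS'], callbacks={},
        g_regex_flags=0, skip_validation=False, use_bytes=False)
    lexer = TraditionalLexer(conf)
    state = lexer.make_lexer_state('12 34')
    assert [(t.type, t.value) for t in lexer.lex(state, None)] == [('NUMBER', '12'), ('NUMBER', '34')]
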
class ContextualLexer(Lexer):

    def __init__(self, conf, states, always_accept=()):
        terminals = list(conf.terminals)
        tokens_by_name = {}
        for t in terminals:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t

        trad_conf = copy(conf)
        trad_conf.terminals = terminals

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
                lexer_conf = copy(trad_conf)
                lexer_conf.terminals = state_tokens
                lexer = TraditionalLexer(lexer_conf)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        assert trad_conf.terminals is terminals
        self.root_lexer = TraditionalLexer(trad_conf)

    def make_lexer_state(self, text):
        return self.root_lexer.make_lexer_state(text)

    def lex(self, lexer_state, parser_state):
        try:
            while True:
                lexer = self.lexers[parser_state.position]
                yield lexer.next_token(lexer_state, parser_state)
        except EOFError:
            pass
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            token = self.root_lexer.next_token(lexer_state)
            raise UnexpectedToken(token, e.allowed, state=parser_state.position, token_history=[lexer_state.last_token], all_terminals=self.root_lexer.terminals)

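# Note (documentation aid, not part of the original module): `states` maps each
# parser state to the terminal names it can accept, and parser states with the
# same accept set (keyed as a frozenset) share a single TraditionalLexer, so
# the number of compiled lexers is bounded by the number of distinct accept
# sets rather than by the number of parser states.
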
class LexerThread:
    """A thread that ties a lexer instance and a lexer state, to be used by the parser"""

    def __init__(self, lexer, text):
        self.lexer = lexer
        self.state = lexer.make_lexer_state(text)

    def lex(self, parser_state):
        return self.lexer.lex(self.state, parser_state)
###}