This repo contains code to mirror other repos, as well as the code being mirrored.


# Lexer Implementation

from abc import abstractmethod, ABC
import re
from contextlib import suppress

from .utils import classify, get_regexp_width, Py36, Serialize
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken

###{standalone
from copy import copy
from types import ModuleType

from typing import (
    TypeVar, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any,
    Pattern as REPattern, ClassVar, TYPE_CHECKING
)

if TYPE_CHECKING:
    from .common import LexerConf

class Pattern(Serialize, ABC):

    value: str
    flags: Collection[str]
    raw: Optional[str]
    type: ClassVar[str]

    def __init__(self, value: str, flags: Collection[str]=(), raw: Optional[str]=None) -> None:
        self.value = value
        self.flags = frozenset(flags)
        self.raw = raw

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    @abstractmethod
    def to_regexp(self) -> str:
        raise NotImplementedError()

    @property
    @abstractmethod
    def min_width(self) -> int:
        raise NotImplementedError()

    @property
    @abstractmethod
    def max_width(self) -> int:
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed the syntax for flags in regular expressions
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value

class PatternStr(Pattern):
    __serialize_fields__ = 'value', 'flags'

    type: ClassVar[str] = "str"

    def to_regexp(self) -> str:
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self) -> int:
        return len(self.value)

    @property
    def max_width(self) -> int:
        return len(self.value)


class PatternRE(Pattern):
    __serialize_fields__ = 'value', 'flags', '_width'

    type: ClassVar[str] = "re"

    def to_regexp(self) -> str:
        return self._get_flags(self.value)

    _width = None
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width

    @property
    def min_width(self) -> int:
        return self._get_width()[0]

    @property
    def max_width(self) -> int:
        return self._get_width()[1]
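
# Illustrative sketch (added for exposition; not part of the original module).
# How the two Pattern subclasses turn into regexp source, including the
# Py36-dependent flag handling above:
#
#     PatternStr('a+b').to_regexp()             # 'a\\+b'  -- literal, escaped
#     PatternRE('a+b').to_regexp()              # 'a+b'    -- used verbatim
#     PatternRE('a+', flags=['i']).to_regexp()  # '(?i:a+)' on 3.6+, '(?i)a+' before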

class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE

    name: str
    pattern: Pattern
    priority: int

    def __init__(self, name: str, pattern: Pattern, priority: int=1) -> None:
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)

    def user_repr(self) -> str:
        if self.name.startswith('__'):  # We represent a generated terminal
            return self.pattern.raw or self.name
        else:
            return self.name


_T = TypeVar('_T')

class Token(str):
    """A string with meta-information, produced by the lexer.

    When parsing text, the resulting chunks of the input that haven't been
    discarded will end up in the tree as Token instances. The Token class
    inherits from Python's ``str``, so normal string comparisons and
    operations will work as expected.

    Attributes:
        type: Name of the token (as specified in grammar)
        value: Value of the token (redundant, as ``token.value == token`` will always be true)
        start_pos: The index of the token in the text
        line: The line of the token in the text (starting with 1)
        column: The column of the token in the text (starting with 1)
        end_line: The line where the token ends
        end_column: The next column after the end of the token. For example,
            if the token is a single character with a column value of 4,
            end_column will be 5.
        end_pos: the index where the token ends (basically ``start_pos + len(token)``)
    """
    __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')

    type: str
    start_pos: int
    value: Any
    line: int
    column: int
    end_line: int
    end_column: int
    end_pos: int

    def __new__(cls, type_, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)

        self.type = type_
        self.start_pos = start_pos
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.end_pos = end_pos
        return self

    def update(self, type_: Optional[str]=None, value: Optional[Any]=None) -> 'Token':
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )

    @classmethod
    def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T:
        return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column))

    def __repr__(self):
        return 'Token(%r, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.start_pos, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False

        return str.__eq__(self, other)

    __hash__ = str.__hash__
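
# Usage sketch (added for exposition; not part of the original module):
# a Token compares like a plain str, and update() produces a re-typed copy
# that borrows all of the position attributes:
#
#     tok = Token('NAME', 'foo', start_pos=0, line=1, column=1)
#     assert tok == 'foo' and tok.type == 'NAME'
#     tok2 = tok.update(type_='KEYWORD')   # same text and position, new type
#     assert (tok2.line, tok2.column) == (1, 1)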

class LineCounter:
    __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'

    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def __eq__(self, other):
        if not isinstance(other, LineCounter):
            return NotImplemented

        return self.char_pos == other.char_pos and self.newline_char == other.newline_char

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
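
# Illustrative sketch (added for exposition): char_pos is 0-based while
# line/column are 1-based, and feed() advances all three at once:
#
#     lc = LineCounter('\n')
#     lc.feed('ab\ncd')
#     assert (lc.line, lc.column, lc.char_pos) == (2, 3, 5)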

class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                t.type = type_from_index[m.lastindex]
                break
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2


def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags)
            if m and m.group(0) == s:
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))

    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback
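
# Illustrative note (added for exposition): given hypothetical terminals
# IF = TerminalDef('IF', PatternStr('if')) and NAME = TerminalDef('NAME',
# PatternRE('[a-z]+')), the literal 'if' fully matches the NAME regexp, so:
#
#     terminals, callback = _create_unless([IF, NAME], 0, re, use_bytes=False)
#     # IF is dropped from the returned terminals; callback['NAME'] is an
#     # UnlessCallback that re-tags an exact 'if' match back to type 'IF'.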

def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_, use_bytes):
    # Python sets an unreasonable group limit (currently 100) in its re module
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries fewer and fewer groups until it's successful.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
        if use_bytes:
            pattern = pattern.encode('latin-1')
        try:
            mre = re_.compile(pattern, g_regex_flags)
        except AssertionError:  # Yes, this is what Python provides us.. :/
            return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_, use_bytes)

        mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
        terminals = terminals[max_size:]
    return mres


def build_mres(terminals, g_regex_flags, re_, use_bytes, match_whole=False):
    return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_, use_bytes)
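
# Illustrative note (added for exposition): build_mres() starts from a single
# alternation of named groups, and _build_mres() halves max_size on every
# AssertionError, so a grammar with several hundred terminals ends up as a
# list of smaller compiled regexps that match() tries in order:
#
#     mres = build_mres(terminals, 0, re, use_bytes=False)
#     # -> [(compiled_pattern, {group_index: terminal_name}), ...]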

def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
        - spaces (\s)
    """
    return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)
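
# Examples (added for exposition) of what this heuristic flags:
#
#     _regexp_has_newline(r'\n+')      # True: (escaped) newline
#     _regexp_has_newline(r'[^"]+')    # True: negated character class
#     _regexp_has_newline(r'(?s).*')   # True: any-char under DOTALL
#     _regexp_has_newline(r'[a-z]+')   # False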

class LexerState(object):
    __slots__ = 'text', 'line_ctr', 'last_token'

    def __init__(self, text, line_ctr, last_token=None):
        self.text = text
        self.line_ctr = line_ctr
        self.last_token = last_token

    def __eq__(self, other):
        if not isinstance(other, LexerState):
            return NotImplemented

        return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token

    def __copy__(self):
        return type(self)(self.text, copy(self.line_ctr), self.last_token)


_Callback = Callable[[Token], Token]


class Lexer(ABC):
    """Lexer interface

    Method Signatures:
        lex(self, lexer_state, parser_state) -> Iterator[Token]
    """
    @abstractmethod
    def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
        ...

    def make_lexer_state(self, text):
        line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n')
        return LexerState(text, line_ctr)

class TraditionalLexer(Lexer):

    terminals: Collection[TerminalDef]
    ignore_types: FrozenSet[str]
    newline_types: FrozenSet[str]
    user_callbacks: Dict[str, _Callback]
    callback: Dict[str, _Callback]
    re: ModuleType

    def __init__(self, conf: 'LexerConf') -> None:
        terminals = list(conf.terminals)
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        self.re = conf.re_module

        if not conf.skip_validation:
            # Sanitization
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

                if t.pattern.min_width == 0:
                    raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

            if not (set(conf.ignore) <= {t.name for t in terminals}):
                raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals}))

        # Init
        self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))
        self.ignore_types = frozenset(conf.ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes
        self.terminals_by_name = conf.terminals_by_name

        self._mres = None

    def _build(self) -> None:
        terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self._mres = build_mres(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def mres(self) -> List[Tuple[REPattern, Dict[int, str]]]:
        if self._mres is None:
            self._build()
            assert self._mres is not None
        return self._mres

    def match(self, text: str, pos: int) -> Optional[Tuple[str, str]]:
        for mre, type_from_index in self.mres:
            m = mre.match(text, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]

    def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:
        with suppress(EOFError):
            while True:
                yield self.next_token(state, parser_state)

    def next_token(self, lex_state: LexerState, parser_state: Any=None) -> Token:
        line_ctr = lex_state.line_ctr
        while line_ctr.char_pos < len(lex_state.text):
            res = self.match(lex_state.text, line_ctr.char_pos)
            if not res:
                allowed = {v for m, tfi in self.mres for v in tfi.values()} - self.ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
                                           allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],
                                           state=parser_state, terminals_by_name=self.terminals_by_name)

            value, type_ = res

            if type_ not in self.ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, type_ in self.newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in self.callback:
                    t = self.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise LexError("Callbacks must return a token (returned %r)" % t)
                lex_state.last_token = t
                return t
            else:
                if type_ in self.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    self.callback[type_](t2)
                line_ctr.feed(value, type_ in self.newline_types)

        # EOF
        raise EOFError(self)
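
# Usage sketch (added for exposition; the LexerConf fields are assumed from
# .common and its constructor is not shown here):
#
#     conf = LexerConf(...)            # terminals, re_module, ignore, etc.
#     lexer = TraditionalLexer(conf)
#     state = lexer.make_lexer_state('x = 1')
#     for tok in lexer.lex(state, parser_state=None):
#         print(tok.type, repr(tok), tok.line, tok.column)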

class ContextualLexer(Lexer):

    lexers: Dict[str, TraditionalLexer]
    root_lexer: TraditionalLexer

    def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None:
        terminals = list(conf.terminals)
        terminals_by_name = conf.terminals_by_name

        trad_conf = copy(conf)
        trad_conf.terminals = terminals

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
                lexer_conf = copy(trad_conf)
                lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
                lexer = TraditionalLexer(lexer_conf)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        assert trad_conf.terminals is terminals
        self.root_lexer = TraditionalLexer(trad_conf)

    def make_lexer_state(self, text):
        return self.root_lexer.make_lexer_state(text)

    def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
        try:
            while True:
                lexer = self.lexers[parser_state.position]
                yield lexer.next_token(lexer_state, parser_state)
        except EOFError:
            pass
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            try:
                last_token = lexer_state.last_token  # Save last_token. Calling root_lexer.next_token will change this to the wrong token
                token = self.root_lexer.next_token(lexer_state, parser_state)
                raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)
            except UnexpectedCharacters:
                raise e  # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set.
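
# Note (added for exposition): each parser state gets a TraditionalLexer
# restricted to the terminals acceptable in that state (plus ignored and
# always-accepted ones); states that accept the same terminal set share one
# lexer. When a restricted lexer fails, re-scanning with the unrestricted
# root_lexer distinguishes "unknown character" (re-raise the original error)
# from "valid terminal, wrong context" (raise UnexpectedToken instead).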

class LexerThread(object):
    """A thread that ties a lexer instance and a lexer state, to be used by the parser"""

    def __init__(self, lexer, text):
        self.lexer = lexer
        self.state = lexer.make_lexer_state(text)

    def lex(self, parser_state):
        return self.lexer.lex(self.state, parser_state)

    def __copy__(self):
        copied = object.__new__(LexerThread)
        copied.lexer = self.lexer
        copied.state = copy(self.state)
        return copied
###}