This repository contains the code used to mirror other repositories, as well as the code that is being mirrored.
# Lexer Implementation

from abc import abstractmethod, ABC
import re
from contextlib import suppress
from typing import (
    TypeVar, Type, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any,
    Pattern as REPattern, ClassVar, TYPE_CHECKING
)
from types import ModuleType

if TYPE_CHECKING:
    from .common import LexerConf

from .utils import classify, get_regexp_width, Serialize
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken
from .grammar import TOKEN_DEFAULT_PRIORITY

###{standalone
from copy import copy


class Pattern(Serialize, ABC):

    value: str
    flags: Collection[str]
    raw: Optional[str]
    type: ClassVar[str]

    def __init__(self, value: str, flags: Collection[str]=(), raw: Optional[str]=None) -> None:
        self.value = value
        self.flags = frozenset(flags)
        self.raw = raw

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    @abstractmethod
    def to_regexp(self) -> str:
        raise NotImplementedError()

    @property
    @abstractmethod
    def min_width(self) -> int:
        raise NotImplementedError()

    @property
    @abstractmethod
    def max_width(self) -> int:
        raise NotImplementedError()

    def _get_flags(self, value):
        for f in self.flags:
            value = ('(?%s:%s)' % (f, value))
        return value


class PatternStr(Pattern):
    __serialize_fields__ = 'value', 'flags'

    type: ClassVar[str] = "str"

    def to_regexp(self) -> str:
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self) -> int:
        return len(self.value)

    @property
    def max_width(self) -> int:
        return len(self.value)


class PatternRE(Pattern):
    __serialize_fields__ = 'value', 'flags', '_width'

    type: ClassVar[str] = "re"

    def to_regexp(self) -> str:
        return self._get_flags(self.value)

    _width = None
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width

    @property
    def min_width(self) -> int:
        return self._get_width()[0]

    @property
    def max_width(self) -> int:
        return self._get_width()[1]


class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE

    name: str
    pattern: Pattern
    priority: int

    def __init__(self, name: str, pattern: Pattern, priority: int=TOKEN_DEFAULT_PRIORITY) -> None:
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)

    def user_repr(self) -> str:
        if self.name.startswith('__'):  # We represent a generated terminal
            return self.pattern.raw or self.name
        else:
            return self.name


_T = TypeVar('_T')

class Token(str):
    """A string with meta-information, that is produced by the lexer.

    When parsing text, the resulting chunks of the input that haven't been discarded
    will end up in the tree as Token instances. The Token class inherits from Python's ``str``,
    so normal string comparisons and operations will work as expected.

    Attributes:
        type: Name of the token (as specified in grammar)
        value: Value of the token (redundant, as ``token.value == token`` will always be true)
        start_pos: The index of the token in the text
        line: The line of the token in the text (starting with 1)
        column: The column of the token in the text (starting with 1)
        end_line: The line where the token ends
        end_column: The next column after the end of the token. For example,
            if the token is a single character with a column value of 4,
            end_column will be 5.
        end_pos: the index where the token ends (basically ``start_pos + len(token)``)
    """
    __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')

    type: str
    start_pos: int
    value: Any
    line: int
    column: int
    end_line: int
    end_column: int
    end_pos: int

    def __new__(cls, type_, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
        inst = super(Token, cls).__new__(cls, value)
        inst.type = type_
        inst.start_pos = start_pos
        inst.value = value
        inst.line = line
        inst.column = column
        inst.end_line = end_line
        inst.end_column = end_column
        inst.end_pos = end_pos
        return inst

    def update(self, type_: Optional[str]=None, value: Optional[Any]=None) -> 'Token':
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )

    @classmethod
    def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T:
        return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column))

    def __repr__(self):
        return 'Token(%r, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.start_pos, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False

        return str.__eq__(self, other)

    __hash__ = str.__hash__
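
# Illustrative note (not part of the original file): a Token compares like its string
# value but also carries its terminal name, so
#   Token('NUMBER', '42') == '42'                 -> True
#   Token('NUMBER', '42') == Token('INT', '42')   -> False  (the types differ)
# while update()/new_borrow_pos() return a new Token that keeps the borrowed position info.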

class LineCounter:
    __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'

    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def __eq__(self, other):
        if not isinstance(other, LineCounter):
            return NotImplemented

        return self.char_pos == other.char_pos and self.newline_char == other.newline_char

    def feed(self, token: Token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1


class UnlessCallback:
    def __init__(self, scanner):
        self.scanner = scanner

    def __call__(self, t):
        res = self.scanner.match(t.value, 0)
        if res:
            _value, t.type = res
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2


def _get_match(re_, regexp, s, flags):
    m = re_.match(regexp, s, flags)
    if m:
        return m.group(0)


def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority != retok.priority:
                continue
            s = strtok.pattern.value
            if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags):
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))

    new_terminals = [t for t in terminals if t not in embedded_strs]
    return new_terminals, callback


class Scanner:
    def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False):
        self.terminals = terminals
        self.g_regex_flags = g_regex_flags
        self.re_ = re_
        self.use_bytes = use_bytes
        self.match_whole = match_whole

        self.allowed_types = {t.name for t in self.terminals}

        self._mres = self._build_mres(terminals, len(terminals))

    def _build_mres(self, terminals, max_size):
        # Python sets an unreasonable group limit (currently 100) in its re module
        # Worse, the only way to know we reached it is by catching an AssertionError!
        # This function recursively tries fewer and fewer groups until it's successful.
        postfix = '$' if self.match_whole else ''
        mres = []
        while terminals:
            pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
            if self.use_bytes:
                pattern = pattern.encode('latin-1')
            try:
                mre = self.re_.compile(pattern, self.g_regex_flags)
            except AssertionError:  # Yes, this is what Python provides us.. :/
                return self._build_mres(terminals, max_size//2)

            mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
            terminals = terminals[max_size:]
        return mres

    def match(self, text, pos):
        for mre, type_from_index in self._mres:
            m = mre.match(text, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]
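
# Illustrative note (not part of the original file): Scanner joins all terminal patterns
# into one alternation of named groups, so e.g.
#   Scanner([TerminalDef('INT', PatternRE(r'[0-9]+'))], 0, re, use_bytes=False).match('12ab', 0)
# would return ('12', 'INT'), and match() returns None when no terminal matches at `pos`.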

def _regexp_has_newline(r: str):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
        - spaces (\s)
    """
    return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)


class LexerState:
    __slots__ = 'text', 'line_ctr', 'last_token'

    def __init__(self, text, line_ctr, last_token=None):
        self.text = text
        self.line_ctr = line_ctr
        self.last_token = last_token

    def __eq__(self, other):
        if not isinstance(other, LexerState):
            return NotImplemented

        return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token

    def __copy__(self):
        return type(self)(self.text, copy(self.line_ctr), self.last_token)


_Callback = Callable[[Token], Token]

class Lexer(ABC):
    """Lexer interface

    Method Signatures:
        lex(self, lexer_state, parser_state) -> Iterator[Token]
    """
    @abstractmethod
    def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
        return NotImplemented

    def make_lexer_state(self, text):
        line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n')
        return LexerState(text, line_ctr)


class BasicLexer(Lexer):

    terminals: Collection[TerminalDef]
    ignore_types: FrozenSet[str]
    newline_types: FrozenSet[str]
    user_callbacks: Dict[str, _Callback]
    callback: Dict[str, _Callback]
    re: ModuleType

    def __init__(self, conf: 'LexerConf') -> None:
        terminals = list(conf.terminals)
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        self.re = conf.re_module

        if not conf.skip_validation:
            # Sanitization
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

                if t.pattern.min_width == 0:
                    raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

            if not (set(conf.ignore) <= {t.name for t in terminals}):
                raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals}))

        # Init
        self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))
        self.ignore_types = frozenset(conf.ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes
        self.terminals_by_name = conf.terminals_by_name

        self._scanner = None

    def _build_scanner(self):
        terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def scanner(self):
        if self._scanner is None:
            self._build_scanner()
        return self._scanner

    def match(self, text, pos):
        return self.scanner.match(text, pos)

    def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:
        with suppress(EOFError):
            while True:
                yield self.next_token(state, parser_state)

    def next_token(self, lex_state: LexerState, parser_state: Any=None) -> Token:
        line_ctr = lex_state.line_ctr
        while line_ctr.char_pos < len(lex_state.text):
            res = self.match(lex_state.text, line_ctr.char_pos)
            if not res:
                allowed = self.scanner.allowed_types - self.ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
                                           allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],
                                           state=parser_state, terminals_by_name=self.terminals_by_name)

            value, type_ = res

            if type_ not in self.ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, type_ in self.newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in self.callback:
                    t = self.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise LexError("Callbacks must return a token (returned %r)" % t)
                lex_state.last_token = t
                return t
            else:
                if type_ in self.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    self.callback[type_](t2)
                line_ctr.feed(value, type_ in self.newline_types)

        # EOF
        raise EOFError(self)
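
# Note (added for clarity, not in the original file): because __init__ sorts terminals by
# (-priority, -max_width, ...) and Python's `re` alternation uses the first alternative that
# matches at a position, terminals with higher priority (and, on equal priority, greater
# maximum width) appear earlier in the compiled pattern and are therefore tried first.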

class ContextualLexer(Lexer):

    lexers: Dict[str, BasicLexer]
    root_lexer: BasicLexer

    def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None:
        terminals = list(conf.terminals)
        terminals_by_name = conf.terminals_by_name

        trad_conf = copy(conf)
        trad_conf.terminals = terminals

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
                lexer_conf = copy(trad_conf)
                lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
                lexer = BasicLexer(lexer_conf)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        assert trad_conf.terminals is terminals
        self.root_lexer = BasicLexer(trad_conf)

    def make_lexer_state(self, text):
        return self.root_lexer.make_lexer_state(text)

    def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:
        try:
            while True:
                lexer = self.lexers[parser_state.position]
                yield lexer.next_token(lexer_state, parser_state)
        except EOFError:
            pass
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            try:
                last_token = lexer_state.last_token  # Save last_token. Calling root_lexer.next_token will change this to the wrong token
                token = self.root_lexer.next_token(lexer_state, parser_state)
                raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)
            except UnexpectedCharacters:
                raise e  # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set.


class LexerThread:
    """A thread that ties a lexer instance and a lexer state, to be used by the parser"""

    def __init__(self, lexer, text):
        self.lexer = lexer
        self.state = lexer.make_lexer_state(text)

    def lex(self, parser_state):
        return self.lexer.lex(self.state, parser_state)

    def __copy__(self):
        copied = object.__new__(LexerThread)
        copied.lexer = self.lexer
        copied.state = copy(self.state)
        return copied
###}
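
For orientation, here is a minimal sketch of how the standalone pieces above fit together, assuming the module is importable as lark.lexer (as in upstream Lark); the terminal names are invented for illustration:

    import re
    from lark.lexer import TerminalDef, PatternStr, PatternRE, Scanner

    # Two terminals: a fixed keyword and a general lowercase name.
    terminals = [
        TerminalDef("IF", PatternStr("if"), priority=1),
        TerminalDef("NAME", PatternRE(r"[a-z]+")),
    ]

    # Scanner compiles them into a single alternation of named groups.
    scanner = Scanner(terminals, g_regex_flags=0, re_=re, use_bytes=False)

    text = "if foo"
    pos = 0
    while pos < len(text):
        res = scanner.match(text, pos)
        if res is None:            # nothing matches here (e.g. the space) -- skip one character
            pos += 1
            continue
        value, type_ = res
        print(type_, repr(value))  # prints: IF 'if', then NAME 'foo'
        pos += len(value)

This loop simply skips unmatched characters; BasicLexer instead raises UnexpectedCharacters in that case and handles ignored terminals, callbacks, and line/column tracking via LineCounter.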