This repository contains code for mirroring other repositories, as well as the code being mirrored.
# Lexer Implementation

import re

from .utils import Str, classify, get_regexp_width, Py36, Serialize, suppress
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken

###{standalone
from warnings import warn
from copy import copy


class Pattern(Serialize):
    raw = None
    type = None

    def __init__(self, value, flags=(), raw=None):
        self.value = value
        self.flags = frozenset(flags)
        self.raw = raw

    def __repr__(self):
        return repr(self.to_regexp())

    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags

    def to_regexp(self):
        raise NotImplementedError()

    def min_width(self):
        raise NotImplementedError()

    def max_width(self):
        raise NotImplementedError()

    if Py36:
        # Python 3.6 changed syntax for flags in regular expression
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value


class PatternStr(Pattern):
    __serialize_fields__ = 'value', 'flags'

    type = "str"

    def to_regexp(self):
        return self._get_flags(re.escape(self.value))

    @property
    def min_width(self):
        return len(self.value)
    max_width = min_width


class PatternRE(Pattern):
    __serialize_fields__ = 'value', 'flags', '_width'

    type = "re"

    def to_regexp(self):
        return self._get_flags(self.value)

    _width = None
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width

    @property
    def min_width(self):
        return self._get_width()[0]

    @property
    def max_width(self):
        return self._get_width()[1]

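# A minimal usage sketch (illustrative only, not used by the lexer): shows how
# a flag such as 'i' is embedded into the generated regexp by _get_flags, using
# the inline-group syntax on Python 3.6+ and the prefix syntax otherwise.
# The pattern text is hypothetical.
def _example_pattern_usage():
    p = PatternStr('if', flags=('i',))
    assert p.to_regexp() in ('(?i:if)', '(?i)if')   # form depends on Py36
    assert (p.min_width, p.max_width) == (2, 2)     # a literal string has a fixed width
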
class TerminalDef(Serialize):
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE

    def __init__(self, name, pattern, priority=1):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)

    def user_repr(self):
        if self.name.startswith('__'): # We represent a generated terminal
            return self.pattern.raw or self.name
        else:
            return self.name


class Token(Str):
    """A string with meta-information, produced by the lexer.

    When parsing text, the resulting chunks of the input that haven't been discarded
    will end up in the tree as Token instances. The Token class inherits from Python's ``str``,
    so normal string comparisons and operations will work as expected.

    Attributes:
        type: Name of the token (as specified in grammar)
        value: Value of the token (redundant, as ``token.value == token`` will always be true)
        start_pos: The index of the token in the text
        line: The line of the token in the text (starting with 1)
        column: The column of the token in the text (starting with 1)
        end_line: The line where the token ends
        end_column: The next column after the end of the token. For example,
            if the token is a single character with a column value of 4,
            end_column will be 5.
        end_pos: the index where the token ends (basically ``start_pos + len(token)``)
    """
    __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')

    def __new__(cls, type_, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None, pos_in_stream=None):
        try:
            inst = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            value = value.decode('latin1')
            inst = super(Token, cls).__new__(cls, value)

        inst.type = type_
        inst.start_pos = start_pos if start_pos is not None else pos_in_stream
        inst.value = value
        inst.line = line
        inst.column = column
        inst.end_line = end_line
        inst.end_column = end_column
        inst.end_pos = end_pos
        return inst

    @property
    def pos_in_stream(self):
        warn("Attribute Token.pos_in_stream was renamed to Token.start_pos", DeprecationWarning, 2)
        return self.start_pos

    def update(self, type_=None, value=None):
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)

    def __reduce__(self):
        return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column))

    def __repr__(self):
        return 'Token(%r, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.start_pos, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__

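# A minimal usage sketch (illustrative only, not used by the lexer): a Token
# compares and behaves like a plain str while carrying its type and position.
# The token type, value and positions below are hypothetical.
def _example_token_usage():
    tok = Token('NAME', 'foo', start_pos=10, line=2, column=3)
    assert tok == 'foo' and tok.type == 'NAME'      # str comparison still works
    renamed = tok.update(type_='IDENT')             # new Token, same position info
    assert renamed.start_pos == 10 and renamed.line == 2
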
class LineCounter:
    __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'

    def __init__(self, newline_char):
        self.newline_char = newline_char
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def __eq__(self, other):
        if not isinstance(other, LineCounter):
            return NotImplemented

        return self.char_pos == other.char_pos and self.newline_char == other.newline_char

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1

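# A minimal usage sketch (illustrative only, not used by the lexer): feeding
# matched text through LineCounter advances char_pos and recomputes line/column.
# The input string is hypothetical.
def _example_line_counter_usage():
    lc = LineCounter('\n')
    lc.feed('ab\ncd')                       # one newline, then two more characters
    assert (lc.char_pos, lc.line, lc.column) == (5, 2, 3)
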
class UnlessCallback:
    def __init__(self, scanner):
        self.scanner = scanner

    def __call__(self, t):
        res = self.scanner.match(t.value, 0)
        if res:
            _value, t.type = res
        return t


class CallChain:
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        t2 = self.callback1(t)
        return self.callback2(t) if self.cond(t2) else t2


def _get_match(re_, regexp, s, flags):
    m = re_.match(regexp, s, flags)
    if m:
        return m.group(0)


def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = []
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags):
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))

    new_terminals = [t for t in terminals if t not in embedded_strs]
    return new_terminals, callback


class Scanner:
    def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False):
        self.terminals = terminals
        self.g_regex_flags = g_regex_flags
        self.re_ = re_
        self.use_bytes = use_bytes
        self.match_whole = match_whole

        self.allowed_types = {t.name for t in self.terminals}

        self._mres = self._build_mres(terminals, len(terminals))

    def _build_mres(self, terminals, max_size):
        # Python sets an unreasonable group limit (currently 100) in its re module.
        # Worse, the only way to know we reached it is by catching an AssertionError!
        # This function recursively tries fewer and fewer groups until it's successful.
        postfix = '$' if self.match_whole else ''
        mres = []
        while terminals:
            pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
            if self.use_bytes:
                pattern = pattern.encode('latin-1')
            try:
                mre = self.re_.compile(pattern, self.g_regex_flags)
            except AssertionError:  # Yes, this is what Python provides us.. :/
                return self._build_mres(terminals, max_size//2)

            mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
            terminals = terminals[max_size:]
        return mres

    def match(self, text, pos):
        for mre, type_from_index in self._mres:
            m = mre.match(text, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]

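# A minimal usage sketch (illustrative only, not used by the lexer): Scanner
# compiles the terminals into alternations of named groups and reports the
# match at a given position, splitting into smaller batches if Python's
# group limit is hit. The terminal names and input are hypothetical.
def _example_scanner_usage():
    terminals = [
        TerminalDef('NUMBER', PatternRE(r'\d+')),
        TerminalDef('PLUS', PatternStr('+')),
    ]
    scanner = Scanner(terminals, g_regex_flags=0, re_=re, use_bytes=False)
    assert scanner.match('12+3', 0) == ('12', 'NUMBER')
    assert scanner.match('12+3', 2) == ('+', 'PLUS')
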
def _regexp_has_newline(r):
    r"""Expressions that may indicate newlines in a regexp:
        - newlines (\n)
        - escaped newline (\\n)
        - anything but ([^...])
        - any-char (.) when the flag (?s) exists
        - spaces (\s)
    """
    return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)

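# A minimal sketch (illustrative only, not used by the lexer) of how the
# heuristic above classifies a few hypothetical patterns.
def _example_regexp_has_newline():
    assert _regexp_has_newline(r'\n')            # literal newline escape
    assert _regexp_has_newline(r'[^"]+')         # negated class may span lines
    assert _regexp_has_newline(r'(?s).*')        # DOTALL makes '.' match '\n'
    assert not _regexp_has_newline(r'[a-z]+')
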
class Lexer(object):
    """Lexer interface

    Method Signatures:
        lex(self, text) -> Iterator[Token]
    """
    lex = NotImplemented

    def make_lexer_state(self, text):
        line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n')
        return LexerState(text, line_ctr)


class TraditionalLexer(Lexer):

    def __init__(self, conf):
        terminals = list(conf.terminals)
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals

        self.re = conf.re_module

        if not conf.skip_validation:
            # Sanitization
            for t in terminals:
                try:
                    self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
                except self.re.error:
                    raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))

                if t.pattern.min_width == 0:
                    raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))

            if not (set(conf.ignore) <= {t.name for t in terminals}):
                raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals}))

        # Init
        self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))
        self.ignore_types = frozenset(conf.ignore)

        terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = conf.callbacks
        self.g_regex_flags = conf.g_regex_flags
        self.use_bytes = conf.use_bytes
        self.terminals_by_name = conf.terminals_by_name

        self._scanner = None

    def _build_scanner(self):
        terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
        assert all(self.callback.values())

        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f

        self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes)

    @property
    def scanner(self):
        if self._scanner is None:
            self._build_scanner()
        return self._scanner

    def match(self, text, pos):
        return self.scanner.match(text, pos)

    def lex(self, state, parser_state):
        with suppress(EOFError):
            while True:
                yield self.next_token(state, parser_state)

    def next_token(self, lex_state, parser_state=None):
        line_ctr = lex_state.line_ctr
        while line_ctr.char_pos < len(lex_state.text):
            res = self.match(lex_state.text, line_ctr.char_pos)
            if not res:
                allowed = self.scanner.allowed_types - self.ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
                                           allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],
                                           state=parser_state, terminals_by_name=self.terminals_by_name)

            value, type_ = res

            if type_ not in self.ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                line_ctr.feed(value, type_ in self.newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in self.callback:
                    t = self.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise LexError("Callbacks must return a token (returned %r)" % t)
                lex_state.last_token = t
                return t
            else:
                if type_ in self.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    self.callback[type_](t2)
                line_ctr.feed(value, type_ in self.newline_types)

        # EOF
        raise EOFError(self)

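# A minimal end-to-end sketch (illustrative only, not used by the lexer):
# TraditionalLexer normally receives a lark LexerConf; here a SimpleNamespace
# stands in for it, providing only the attributes that __init__ reads above.
# Terminal names, patterns and the input string are hypothetical.
def _example_traditional_lexer_usage():
    from types import SimpleNamespace
    terminals = [
        TerminalDef('NUMBER', PatternRE(r'\d+')),
        TerminalDef('PLUS', PatternStr('+')),
        TerminalDef('WS', PatternRE(r'\s+')),
    ]
    conf = SimpleNamespace(
        terminals=terminals,
        re_module=re,
        skip_validation=False,
        g_regex_flags=0,
        ignore=['WS'],
        callbacks={},
        use_bytes=False,
        terminals_by_name={t.name: t for t in terminals},
    )
    lexer = TraditionalLexer(conf)
    state = lexer.make_lexer_state('1 + 2')
    tokens = list(lexer.lex(state, None))
    assert [(t.type, str(t)) for t in tokens] == [('NUMBER', '1'), ('PLUS', '+'), ('NUMBER', '2')]
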
class LexerState(object):
    __slots__ = 'text', 'line_ctr', 'last_token'

    def __init__(self, text, line_ctr, last_token=None):
        self.text = text
        self.line_ctr = line_ctr
        self.last_token = last_token

    def __eq__(self, other):
        if not isinstance(other, LexerState):
            return NotImplemented

        return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token

    def __copy__(self):
        return type(self)(self.text, copy(self.line_ctr), self.last_token)


class ContextualLexer(Lexer):

    def __init__(self, conf, states, always_accept=()):
        terminals = list(conf.terminals)
        terminals_by_name = conf.terminals_by_name

        trad_conf = copy(conf)
        trad_conf.terminals = terminals

        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(conf.ignore) | set(always_accept)
                lexer_conf = copy(trad_conf)
                lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
                lexer = TraditionalLexer(lexer_conf)
                lexer_by_tokens[key] = lexer

            self.lexers[state] = lexer

        assert trad_conf.terminals is terminals
        self.root_lexer = TraditionalLexer(trad_conf)

    def make_lexer_state(self, text):
        return self.root_lexer.make_lexer_state(text)

    def lex(self, lexer_state, parser_state):
        try:
            while True:
                lexer = self.lexers[parser_state.position]
                yield lexer.next_token(lexer_state, parser_state)
        except EOFError:
            pass
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            try:
                last_token = lexer_state.last_token  # Save last_token. Calling root_lexer.next_token will change this to the wrong token
                token = self.root_lexer.next_token(lexer_state, parser_state)
                raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)
            except UnexpectedCharacters:
                raise e  # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set.


class LexerThread(object):
    """A thread that ties a lexer instance and a lexer state, to be used by the parser"""

    def __init__(self, lexer, text):
        self.lexer = lexer
        self.state = lexer.make_lexer_state(text)

    def lex(self, parser_state):
        return self.lexer.lex(self.state, parser_state)

    def __copy__(self):
        copied = object.__new__(LexerThread)
        copied.lexer = self.lexer
        copied.state = copy(self.state)
        return copied
###}