This repo contains the code used to mirror other repos, as well as the code being mirrored.

# The file was automatically generated by Lark v0.5.2
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Generates a stand-alone LALR(1) parser with a standard lexer
#
# Git: https://github.com/erezsh/lark
# Author: Erez Shinan (erezshin@gmail.com)
#
#
# >>> LICENSE
#
# This tool and its generated code use a separate license from Lark.
#
# It is licensed under GPLv2 or above.
#
# If you wish to purchase a commercial license for this tool and its
# generated code, contact me via email.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/>.
#
#

import types
import functools
from contextlib import contextmanager

Str = type(u'')

def inline_args(f):
    # print '@@', f.__name__, type(f), isinstance(f, types.FunctionType), isinstance(f, types.TypeType), isinstance(f, types.BuiltinFunctionType)
    if isinstance(f, types.FunctionType):
        @functools.wraps(f)
        def _f_func(self, args):
            return f(self, *args)
        return _f_func
    elif isinstance(f, (type, types.BuiltinFunctionType)):
        @functools.wraps(f)
        def _f_builtin(_self, args):
            return f(*args)
        return _f_builtin
    elif isinstance(f, types.MethodType):
        @functools.wraps(f.__func__)
        def _f(self, args):
            return f.__func__(self, *args)
        return _f
    else:
        @functools.wraps(f.__call__.__func__)
        def _f(self, args):
            return f.__call__.__func__(self, *args)
        return _f

try:
    from contextlib import suppress     # Python 3
except ImportError:
    @contextmanager
    def suppress(*excs):
        '''Catch and dismiss the provided exception
        >>> x = 'hello'
        >>> with suppress(IndexError):
        ...     x = x[10]
        >>> x
        'hello'
        '''
        try:
            yield
        except excs:
            pass

def is_terminal(sym):
    return sym.isupper()

class GrammarError(Exception):
    pass

class ParseError(Exception):
    pass

class UnexpectedToken(ParseError):
    def __init__(self, token, expected, seq, index):
        self.token = token
        self.expected = expected
        self.line = getattr(token, 'line', '?')
        self.column = getattr(token, 'column', '?')

        try:
            context = ' '.join(['%r(%s)' % (t.value, t.type) for t in seq[index:index+5]])
        except AttributeError:
            context = seq[index:index+5]
        except TypeError:
            context = "<no context>"
        message = ("Unexpected token %r at line %s, column %s.\n"
                   "Expected: %s\n"
                   "Context: %s" % (token, self.line, self.column, expected, context))

        super(UnexpectedToken, self).__init__(message)

class Tree(object):
    def __init__(self, data, children):
        self.data = data
        self.children = list(children)

    def __repr__(self):
        return 'Tree(%s, %s)' % (self.data, self.children)

    def _pretty_label(self):
        return self.data

    def _pretty(self, level, indent_str):
        if len(self.children) == 1 and not isinstance(self.children[0], Tree):
            return [indent_str*level, self._pretty_label(), '\t', '%s' % self.children[0], '\n']

        l = [indent_str*level, self._pretty_label(), '\n']
        for n in self.children:
            if isinstance(n, Tree):
                l += n._pretty(level+1, indent_str)
            else:
                l += [indent_str*(level+1), '%s' % n, '\n']

        return l

    def pretty(self, indent_str='  '):
        return ''.join(self._pretty(0, indent_str))

class Transformer(object):
    def _get_func(self, name):
        return getattr(self, name)

    def transform(self, tree):
        items = []
        for c in tree.children:
            try:
                items.append(self.transform(c) if isinstance(c, Tree) else c)
            except Discard:
                pass
        try:
            f = self._get_func(tree.data)
        except AttributeError:
            return self.__default__(tree.data, items)
        else:
            return f(items)

    def __default__(self, data, children):
        return Tree(data, children)

    def __mul__(self, other):
        return TransformerChain(self, other)

class Discard(Exception):
    pass

class TransformerChain(object):
    def __init__(self, *transformers):
        self.transformers = transformers

    def transform(self, tree):
        for t in self.transformers:
            tree = t.transform(tree)
        return tree

    def __mul__(self, other):
        return TransformerChain(*self.transformers + (other,))

class InlineTransformer(Transformer):
    def _get_func(self, name):  # use super()._get_func
        return inline_args(getattr(self, name)).__get__(self)
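
# Illustrative sketch (not part of the generated code): a Transformer subclass
# defines one method per rule or alias name, and `*` chains transformers so
# that (a * b).transform(tree) == b.transform(a.transform(tree)).
#
#     class ToNumber(Transformer):   # hypothetical example class
#         def number(self, items):   # 'number' is an alias defined in RULES below
#             return float(items[0])
#
#     ToNumber().transform(tree)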

class Visitor(object):
    def visit(self, tree):
        for child in tree.children:
            if isinstance(child, Tree):
                self.visit(child)

        f = getattr(self, tree.data, self.__default__)
        f(tree)
        return tree

    def __default__(self, tree):
        pass

class Visitor_NoRecurse(Visitor):
    def visit(self, tree):
        subtrees = list(tree.iter_subtrees())

        for subtree in subtrees:
            getattr(self, subtree.data, self.__default__)(subtree)
        return tree

class Transformer_NoRecurse(Transformer):
    def transform(self, tree):
        subtrees = list(tree.iter_subtrees())

        def _t(t):
            # Assumes t is already transformed
            try:
                f = self._get_func(t.data)
            except AttributeError:
                return self.__default__(t)
            else:
                return f(t)

        for subtree in subtrees:
            children = []
            for c in subtree.children:
                try:
                    children.append(_t(c) if isinstance(c, Tree) else c)
                except Discard:
                    pass
            subtree.children = children

        return _t(tree)

    def __default__(self, t):
        return t

class Indenter:
    def __init__(self):
        self.paren_level = 0
        self.indent_level = [0]

    def handle_NL(self, token):
        if self.paren_level > 0:
            return

        yield token

        indent_str = token.rsplit('\n', 1)[1]   # Tabs and spaces
        indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len

        if indent > self.indent_level[-1]:
            self.indent_level.append(indent)
            yield Token.new_borrow_pos(self.INDENT_type, indent_str, token)
        else:
            while indent < self.indent_level[-1]:
                self.indent_level.pop()
                yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token)

            assert indent == self.indent_level[-1], '%s != %s' % (indent, self.indent_level[-1])

    def process(self, stream):
        for token in stream:
            if token.type == self.NL_type:
                for t in self.handle_NL(token):
                    yield t
            else:
                yield token

            if token.type in self.OPEN_PAREN_types:
                self.paren_level += 1
            elif token.type in self.CLOSE_PAREN_types:
                self.paren_level -= 1
                assert self.paren_level >= 0

        while len(self.indent_level) > 1:
            self.indent_level.pop()
            yield Token(self.DEDENT_type, '')

        assert self.indent_level == [0], self.indent_level

    # XXX Hack for ContextualLexer. Maybe there's a more elegant solution?
    @property
    def always_accept(self):
        return (self.NL_type,)
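
# Usage sketch (assumption: Indenter is meant to be subclassed; the attribute
# names below are exactly the ones handle_NL() and process() read):
#
#     class MyIndenter(Indenter):    # hypothetical example class
#         NL_type = '_NEWLINE'
#         OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE']
#         CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE']
#         INDENT_type = '_INDENT'
#         DEDENT_type = '_DEDENT'
#         tab_len = 8
#
# An instance can then be passed as `postlex` to Lark_StandAlone below.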

class LexError(Exception):
    pass

class UnexpectedInput(LexError):
    def __init__(self, seq, lex_pos, line, column, allowed=None):
        context = seq[lex_pos:lex_pos+5]
        message = "No token defined for: '%s' in %r at line %d col %d" % (seq[lex_pos], context, line, column)

        super(UnexpectedInput, self).__init__(message)

        self.line = line
        self.column = column
        self.context = context
        self.allowed = allowed

class Token(Str):
    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None):
        inst = Str.__new__(cls, value)
        inst.type = type_
        inst.pos_in_stream = pos_in_stream
        inst.value = value
        inst.line = line
        inst.column = column
        return inst

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, line=borrow_t.line, column=borrow_t.column)

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__

class LineCounter:
    def __init__(self):
        self.newline_char = '\n'
        self.char_pos = 0
        self.line = 1
        self.column = 0
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if the token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos

class _Lex:
    "Built to serve both Lexer and ContextualLexer"
    def __init__(self, lexer):
        self.lexer = lexer

    def lex(self, stream, newline_types, ignore_types):
        newline_types = list(newline_types)
        ignore_types = list(ignore_types)
        line_ctr = LineCounter()

        while True:
            lexer = self.lexer
            for mre, type_from_index in lexer.mres:
                m = mre.match(stream, line_ctr.char_pos)
                if m:
                    value = m.group(0)
                    type_ = type_from_index[m.lastindex]
                    if type_ not in ignore_types:
                        t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                        if t.type in lexer.callback:
                            t = lexer.callback[t.type](t)
                        # self.lexer is re-read on every pass of the while-loop,
                        # so a ContextualLexer can swap it between tokens
                        lexer = yield t
                    line_ctr.feed(value, type_ in newline_types)
                    break
            else:
                if line_ctr.char_pos < len(stream):
                    raise UnexpectedInput(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                break

class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                value = m.group(0)
                t.type = type_from_index[m.lastindex]
                break
        return t

class NodeBuilder:
    def __init__(self, tree_class, name):
        self.tree_class = tree_class
        self.name = name

    def __call__(self, children):
        return self.tree_class(self.name, children)

class Expand1:
    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        if len(children) == 1:
            return children[0]
        else:
            return self.node_builder(children)

class Factory:
    def __init__(self, cls, *args):
        self.cls = cls
        self.args = args

    def __call__(self, node_builder):
        return self.cls(node_builder, *self.args)

class TokenWrapper:
    "Used for fixing the results of scanless parsing"
    def __init__(self, node_builder, token_name):
        self.node_builder = node_builder
        self.token_name = token_name

    def __call__(self, children):
        return self.node_builder([Token(self.token_name, ''.join(children))])

def identity(node_builder):
    return node_builder

class ChildFilter:
    def __init__(self, node_builder, to_include):
        self.node_builder = node_builder
        self.to_include = to_include

    def __call__(self, children):
        filtered = []
        for i, to_expand in self.to_include:
            if to_expand:
                filtered += children[i].children
            else:
                filtered.append(children[i])

        return self.node_builder(filtered)

def create_rule_handler(expansion, keep_all_tokens, filter_out):
    # if not keep_all_tokens:
    to_include = [(i, not is_terminal(sym) and sym.startswith('_'))
                  for i, sym in enumerate(expansion)
                  if keep_all_tokens
                  or not ((is_terminal(sym) and sym.startswith('_')) or sym in filter_out)
                  ]

    if len(to_include) < len(expansion) or any(to_expand for i, to_expand in to_include):
        return Factory(ChildFilter, to_include)

    # else, if no filtering required..
    return identity

class PropagatePositions:
    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        res = self.node_builder(children)

        if children:
            for a in children:
                with suppress(AttributeError):
                    res.line = a.line
                    res.column = a.column
                    break

            for a in reversed(children):
                with suppress(AttributeError):
                    res.end_line = a.end_line
                    res.end_col = a.end_col
                    break

        return res

class Callback(object):
    pass

class ParseTreeBuilder:
    def __init__(self, rules, tree_class, propagate_positions=False, keep_all_tokens=False):
        self.tree_class = tree_class
        self.propagate_positions = propagate_positions
        self.always_keep_all_tokens = keep_all_tokens

        self.rule_builders = list(self._init_builders(rules))

        self.user_aliases = {}

    def _init_builders(self, rules):
        filter_out = set()
        for rule in rules:
            if rule.options and rule.options.filter_out:
                assert rule.origin.startswith('_')   # Just to make sure
                filter_out.add(rule.origin)

        for rule in rules:
            options = rule.options
            keep_all_tokens = self.always_keep_all_tokens or (options.keep_all_tokens if options else False)
            expand1 = options.expand1 if options else False
            create_token = options.create_token if options else False

            # list() so the chain can be iterated more than once (filter() is lazy on Python 3)
            wrapper_chain = list(filter(None, [
                (expand1 and not rule.alias) and Expand1,
                create_token and Factory(TokenWrapper, create_token),
                create_rule_handler(rule.expansion, keep_all_tokens, filter_out),
                self.propagate_positions and PropagatePositions,
            ]))

            yield rule, wrapper_chain

    def create_callback(self, transformer=None):
        callback = Callback()

        for rule, wrapper_chain in self.rule_builders:
            internal_callback_name = '_callback_%s_%s' % (rule.origin, '_'.join(rule.expansion))

            user_callback_name = rule.alias or rule.origin
            try:
                f = transformer._get_func(user_callback_name)
            except AttributeError:
                f = NodeBuilder(self.tree_class, user_callback_name)

            self.user_aliases[rule] = rule.alias
            rule.alias = internal_callback_name

            for w in wrapper_chain:
                f = w(f)

            if hasattr(callback, internal_callback_name):
                raise GrammarError("Rule '%s' already exists" % (rule,))
            setattr(callback, internal_callback_name, f)

        return callback

class _Parser:
    def __init__(self, parse_table, callbacks):
        self.states = parse_table.states
        self.start_state = parse_table.start_state
        self.end_state = parse_table.end_state
        self.callbacks = callbacks

    def parse(self, seq, set_state=None):
        i = 0
        token = None
        stream = iter(seq)
        states = self.states

        state_stack = [self.start_state]
        value_stack = []

        if set_state: set_state(self.start_state)

        def get_action(key):
            state = state_stack[-1]
            try:
                return states[state][key]
            except KeyError:
                expected = states[state].keys()
                raise UnexpectedToken(token, expected, seq, i)

        def reduce(rule):
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = get_action(rule.origin)
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        try:
            token = next(stream)
            i += 1
            while True:
                action, arg = get_action(token.type)
                assert arg != self.end_state

                if action is Shift:
                    state_stack.append(arg)
                    value_stack.append(token)
                    if set_state: set_state(arg)
                    token = next(stream)
                    i += 1
                else:
                    reduce(arg)
        except StopIteration:
            pass

        while True:
            _action, arg = get_action('$END')
            if _action is Shift:
                assert arg == self.end_state
                val, = value_stack
                return val
            else:
                reduce(arg)

class Rule(object):
    """
        origin : a symbol
        expansion : a list of symbols
    """
    def __init__(self, origin, expansion, alias=None, options=None):
        self.origin = origin
        self.expansion = expansion
        self.alias = alias
        self.options = options

    def __str__(self):
        return '<%s : %s>' % (self.origin, ' '.join(map(str, self.expansion)))

    def __repr__(self):
        return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)

class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False, priority=None):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.create_token = create_token   # used for scanless postprocessing
        self.priority = priority
        self.filter_out = filter_out   # remove this rule from the tree
                                       # used for "token"-rules in scanless

    def __repr__(self):
        return 'RuleOptions(%r, %r, %r, %r, %r)' % (
            self.keep_all_tokens,
            self.expand1,
            self.create_token,
            self.priority,
            self.filter_out
        )

Shift = 0
Reduce = 1

import re

MRES = (
[('(?P<SIGNED_NUMBER>(?:(?:\\+|\\-))?(?:(?:(?:[0-9])+(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+|(?:(?:[0-9])+\\.(?:(?:[0-9])+)?|\\.(?:[0-9])+)(?:(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+)?)|(?:[0-9])+))|(?P<ESCAPED_STRING>\\"(?:(?:\\\\\\"|[^"]))*\\")|(?P<WS>(?:[ \t\x0c'
'\r\n'
'])+)|(?P<__FALSE1>false)|(?P<__NULL2>null)|(?P<__TRUE0>true)|(?P<__COLON>\\:)|(?P<__COMMA>\\,)|(?P<__LBRACE>\\{)|(?P<__LSQB>\\[)|(?P<__RBRACE>\\})|(?P<__RSQB>\\])',
  {1: 'SIGNED_NUMBER',
   2: 'ESCAPED_STRING',
   3: 'WS',
   4: '__FALSE1',
   5: '__NULL2',
   6: '__TRUE0',
   7: '__COLON',
   8: '__COMMA',
   9: '__LBRACE',
   10: '__LSQB',
   11: '__RBRACE',
   12: '__RSQB'})]
)
LEXER_CALLBACK = (
{}
)
NEWLINE_TYPES = ['WS']
IGNORE_TYPES = ['WS']

class LexerRegexps: pass
lexer_regexps = LexerRegexps()
lexer_regexps.mres = [(re.compile(p), d) for p, d in MRES]
lexer_regexps.callback = {n: UnlessCallback([(re.compile(p), d) for p, d in mres])
                          for n, mres in LEXER_CALLBACK.items()}

lexer = _Lex(lexer_regexps)

def lex(stream):
    return lexer.lex(stream, NEWLINE_TYPES, IGNORE_TYPES)
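
# Illustrative note (derived from MRES and IGNORE_TYPES above): WS matches are
# dropped, so lexing '[1, true]' yields the token types
# __LSQB, SIGNED_NUMBER, __COMMA, __TRUE0, __RSQB.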

RULES = {
  0: Rule('start', ['value'], None, RuleOptions(False, True, None, None, False)),
  1: Rule('value', ['object'], None, RuleOptions(False, True, None, None, False)),
  2: Rule('value', ['array'], None, RuleOptions(False, True, None, None, False)),
  3: Rule('value', ['string'], None, RuleOptions(False, True, None, None, False)),
  4: Rule('value', ['SIGNED_NUMBER'], 'number', RuleOptions(False, True, None, None, False)),
  5: Rule('value', ['__TRUE0'], 'true', RuleOptions(False, True, None, None, False)),
  6: Rule('value', ['__FALSE1'], 'false', RuleOptions(False, True, None, None, False)),
  7: Rule('value', ['__NULL2'], 'null', RuleOptions(False, True, None, None, False)),
  8: Rule('array', ['__LSQB', 'value', '__anon_star_0', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
  9: Rule('array', ['__LSQB', 'value', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
  10: Rule('array', ['__LSQB', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
  11: Rule('object', ['__LBRACE', 'pair', '__anon_star_1', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
  12: Rule('object', ['__LBRACE', 'pair', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
  13: Rule('object', ['__LBRACE', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
  14: Rule('pair', ['string', '__COLON', 'value'], None, RuleOptions(False, False, None, None, False)),
  15: Rule('string', ['ESCAPED_STRING'], None, RuleOptions(False, False, None, None, False)),
  16: Rule('__anon_star_0', ['__COMMA', 'value'], None, None),
  17: Rule('__anon_star_0', ['__anon_star_0', '__COMMA', 'value'], None, None),
  18: Rule('__anon_star_1', ['__COMMA', 'pair'], None, None),
  19: Rule('__anon_star_1', ['__anon_star_1', '__COMMA', 'pair'], None, None),
}

parse_tree_builder = ParseTreeBuilder(RULES.values(), Tree)

class ParseTable: pass
parse_table = ParseTable()

STATES = {
  0: {0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (0, 4), 4: (0, 5), 5: (0, 6), 6: (0, 7), 7: (0, 8), 8: (0, 9), 9: (0, 10), 10: (0, 11), 11: (0, 12)},
  1: {12: (1, 5), 13: (1, 5), 14: (1, 5), 15: (1, 5)},
  2: {9: (0, 10), 14: (0, 13), 16: (0, 14), 11: (0, 15)},
  3: {12: (1, 2), 13: (1, 2), 14: (1, 2), 15: (1, 2)},
  4: {12: (1, 1), 13: (1, 1), 14: (1, 1), 15: (1, 1)},
  5: {12: (0, 16)},
  6: {7: (0, 17), 0: (0, 1), 1: (0, 2), 2: (0, 3), 3: (0, 4), 5: (0, 6), 6: (0, 7), 8: (0, 9), 9: (0, 10), 15: (0, 18), 10: (0, 11), 11: (0, 12)},
  7: {12: (1, 4), 13: (1, 4), 14: (1, 4), 15: (1, 4)},
  8: {12: (1, 0)},
  9: {12: (1, 7), 13: (1, 7), 14: (1, 7), 15: (1, 7)},
  10: {12: (1, 15), 17: (1, 15), 13: (1, 15), 14: (1, 15), 15: (1, 15)},
  11: {12: (1, 6), 13: (1, 6), 14: (1, 6), 15: (1, 6)},
  12: {12: (1, 3), 13: (1, 3), 14: (1, 3), 15: (1, 3)},
  13: {13: (1, 13), 12: (1, 13), 14: (1, 13), 15: (1, 13)},
  14: {14: (0, 19), 13: (0, 20), 18: (0, 21)},
  15: {17: (0, 22)},
  16: {},
  17: {19: (0, 23), 15: (0, 24), 13: (0, 25)},
  18: {13: (1, 10), 12: (1, 10), 14: (1, 10), 15: (1, 10)},
  19: {13: (1, 12), 12: (1, 12), 14: (1, 12), 15: (1, 12)},
  20: {9: (0, 10), 11: (0, 15), 16: (0, 26)},
  21: {14: (0, 27), 13: (0, 28)},
  22: {5: (0, 6), 1: (0, 2), 0: (0, 1), 8: (0, 9), 2: (0, 3), 3: (0, 4), 9: (0, 10), 6: (0, 7), 10: (0, 11), 11: (0, 12), 7: (0, 29)},
  23: {15: (0, 30), 13: (0, 31)},
  24: {13: (1, 9), 12: (1, 9), 14: (1, 9), 15: (1, 9)},
  25: {5: (0, 6), 1: (0, 2), 0: (0, 1), 8: (0, 9), 2: (0, 3), 3: (0, 4), 7: (0, 32), 9: (0, 10), 6: (0, 7), 10: (0, 11), 11: (0, 12)},
  26: {13: (1, 18), 14: (1, 18)},
  27: {13: (1, 11), 12: (1, 11), 14: (1, 11), 15: (1, 11)},
  28: {16: (0, 33), 9: (0, 10), 11: (0, 15)},
  29: {13: (1, 14), 14: (1, 14)},
  30: {13: (1, 8), 12: (1, 8), 14: (1, 8), 15: (1, 8)},
  31: {5: (0, 6), 1: (0, 2), 0: (0, 1), 7: (0, 34), 8: (0, 9), 2: (0, 3), 3: (0, 4), 9: (0, 10), 6: (0, 7), 10: (0, 11), 11: (0, 12)},
  32: {15: (1, 16), 13: (1, 16)},
  33: {13: (1, 19), 14: (1, 19)},
  34: {15: (1, 17), 13: (1, 17)},
}

TOKEN_TYPES = (
{0: '__TRUE0',
 1: '__LBRACE',
 2: 'array',
 3: 'object',
 4: 'start',
 5: '__LSQB',
 6: 'SIGNED_NUMBER',
 7: 'value',
 8: '__NULL2',
 9: 'ESCAPED_STRING',
 10: '__FALSE1',
 11: 'string',
 12: '$END',
 13: '__COMMA',
 14: '__RBRACE',
 15: '__RSQB',
 16: 'pair',
 17: '__COLON',
 18: '__anon_star_1',
 19: '__anon_star_0'}
)

parse_table.states = {s: {TOKEN_TYPES[t]: (a, RULES[x] if a is Reduce else x) for t, (a, x) in acts.items()}
                      for s, acts in STATES.items()}
parse_table.start_state = 0
parse_table.end_state = 16

class Lark_StandAlone:
    def __init__(self, transformer=None, postlex=None):
        callback = parse_tree_builder.create_callback(transformer=transformer)
        callbacks = {rule: getattr(callback, rule.alias or rule.origin, None) for rule in RULES.values()}
        self.parser = _Parser(parse_table, callbacks)
        self.postlex = postlex

    def parse(self, stream):
        tokens = lex(stream)
        if self.postlex: tokens = self.postlex.process(tokens)
        return self.parser.parse(tokens)
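
# Usage sketch (not part of the generated file): the embedded grammar is the
# Lark JSON example, so the module can be exercised directly, e.g.:
#
#     parser = Lark_StandAlone()
#     tree = parser.parse('{"key": [1, 2.5, true, null]}')
#     print(tree.pretty())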