# The file was automatically generated by Lark v0.5.5
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Generates a stand-alone LALR(1) parser with a standard lexer
#
# Git: https://github.com/erezsh/lark
# Author: Erez Shinan (erezshin@gmail.com)
#
#
# >>> LICENSE
#
# This tool and its generated code use a separate license from Lark.
#
# It is licensed under GPLv2 or above.
#
# If you wish to purchase a commercial license for this tool and its
# generated code, contact me via email.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/>.
#
#
import types
import functools
from contextlib import contextmanager

Str = type(u'')

def inline_args(f):
    # print '@@', f.__name__, type(f), isinstance(f, types.FunctionType), isinstance(f, types.TypeType), isinstance(f, types.BuiltinFunctionType)
    if isinstance(f, types.FunctionType):
        @functools.wraps(f)
        def _f_func(self, args):
            return f(self, *args)
        return _f_func
    elif isinstance(f, (type, types.BuiltinFunctionType)):
        @functools.wraps(f)
        def _f_builtin(_self, args):
            return f(*args)
        return _f_builtin
    elif isinstance(f, types.MethodType):
        @functools.wraps(f.__func__)
        def _f(self, args):
            return f.__func__(self, *args)
        return _f
    else:
        @functools.wraps(f.__call__.__func__)
        def _f(self, args):
            return f.__call__.__func__(self, *args)
        return _f
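# Illustrative sketch (added commentary, not part of the generated output):
# inline_args adapts a callback that would receive a single list of children
# into one whose children arrive unpacked as positional arguments. The class
# and method names below are hypothetical.
#
#     class Calc(object):
#         def add(self, a, b):
#             return a + b
#         add = inline_args(add)
#
#     Calc().add([1, 2])  # -> 3; the list [1, 2] is unpacked into (a, b)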
try:
    from contextlib import suppress     # Python 3
except ImportError:
    @contextmanager
    def suppress(*excs):
        '''Catch and dismiss the provided exception
        >>> x = 'hello'
        >>> with suppress(IndexError):
        ...     x = x[10]
        >>> x
        'hello'
        '''
        try:
            yield
        except excs:
            pass
def is_terminal(sym):
    return sym.isupper()

class GrammarError(Exception):
    pass

class ParseError(Exception):
    pass

class UnexpectedToken(ParseError):
    def __init__(self, token, expected, seq, index, considered_rules=None):
        self.token = token
        self.expected = expected
        self.line = getattr(token, 'line', '?')
        self.column = getattr(token, 'column', '?')
        self.considered_rules = considered_rules

        try:
            context = ' '.join(['%r(%s)' % (t.value, t.type) for t in seq[index:index+5]])
        except AttributeError:
            context = seq[index:index+5]
        except TypeError:
            context = "<no context>"
        message = ("Unexpected token %r at line %s, column %s.\n"
                   "Expected: %s\n"
                   "Context: %s" % (token, self.line, self.column, expected, context))

        super(UnexpectedToken, self).__init__(message)
class Tree(object):
    def __init__(self, data, children):
        self.data = data
        self.children = children

    def __repr__(self):
        return 'Tree(%s, %s)' % (self.data, self.children)

    def _pretty_label(self):
        return self.data

    def _pretty(self, level, indent_str):
        if len(self.children) == 1 and not isinstance(self.children[0], Tree):
            return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n']

        l = [indent_str*level, self._pretty_label(), '\n']
        for n in self.children:
            if isinstance(n, Tree):
                l += n._pretty(level+1, indent_str)
            else:
                l += [indent_str*(level+1), '%s' % (n,), '\n']
        return l

    def pretty(self, indent_str='  '):
        return ''.join(self._pretty(0, indent_str))
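# Illustrative sketch (added commentary, not part of the generated output):
# building a Tree by hand and pretty-printing it.
#
#     >>> t = Tree('pair', [Token('ESCAPED_STRING', '"key"'),
#     ...                   Tree('number', [Token('SIGNED_NUMBER', '3.14')])])
#     >>> print(t.pretty('  '))
#     pair
#       "key"
#       number	3.14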
class Transformer(object):
    def _get_func(self, name):
        return getattr(self, name)

    def transform(self, tree):
        items = []
        for c in tree.children:
            try:
                items.append(self.transform(c) if isinstance(c, Tree) else c)
            except Discard:
                pass
        try:
            f = self._get_func(tree.data)
        except AttributeError:
            return self.__default__(tree.data, items)
        else:
            return f(items)

    def __default__(self, data, children):
        return Tree(data, children)

    def __mul__(self, other):
        return TransformerChain(self, other)

class Discard(Exception):
    pass

class TransformerChain(object):
    def __init__(self, *transformers):
        self.transformers = transformers

    def transform(self, tree):
        for t in self.transformers:
            tree = t.transform(tree)
        return tree

    def __mul__(self, other):
        return TransformerChain(*self.transformers + (other,))

class InlineTransformer(Transformer):
    def _get_func(self, name):  # use super()._get_func
        return inline_args(getattr(self, name)).__get__(self)
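# Illustrative sketch (added commentary, not part of the generated output):
# a transformer that evaluates the JSON parse tree this module produces into
# plain Python values. The method names match the rule names and aliases in
# RULES below; the class name itself is hypothetical.
#
#     class TreeToJson(Transformer):
#         def string(self, s):
#             (s,) = s
#             return s[1:-1]          # strip the surrounding quotes
#         def number(self, n):
#             (n,) = n
#             return float(n)
#         def array(self, items):
#             return list(items)
#         def pair(self, kv):
#             return tuple(kv)
#         def object(self, pairs):
#             return dict(pairs)
#         def null(self, _):
#             return None
#         def true(self, _):
#             return True
#         def false(self, _):
#             return False
#
#     Lark_StandAlone(transformer=TreeToJson()).parse('{"a": [1, true]}')
#     # -> {'a': [1.0, True]}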
class Visitor(object):
    def visit(self, tree):
        for child in tree.children:
            if isinstance(child, Tree):
                self.visit(child)
        f = getattr(self, tree.data, self.__default__)
        f(tree)
        return tree

    def __default__(self, tree):
        pass

class Visitor_NoRecurse(Visitor):
    def visit(self, tree):
        # Note: relies on Tree.iter_subtrees(), which the Tree class included
        # in this file does not define.
        subtrees = list(tree.iter_subtrees())
        for subtree in subtrees:
            getattr(self, subtree.data, self.__default__)(subtree)
        return tree

class Transformer_NoRecurse(Transformer):
    def transform(self, tree):
        subtrees = list(tree.iter_subtrees())

        def _t(t):
            # Assumes t is already transformed
            try:
                f = self._get_func(t.data)
            except AttributeError:
                return self.__default__(t)
            else:
                return f(t)

        for subtree in subtrees:
            children = []
            for c in subtree.children:
                try:
                    children.append(_t(c) if isinstance(c, Tree) else c)
                except Discard:
                    pass
            subtree.children = children

        return _t(tree)

    def __default__(self, t):
        return t
class Indenter:
    def __init__(self):
        self.paren_level = 0
        self.indent_level = [0]

    def handle_NL(self, token):
        if self.paren_level > 0:
            return

        yield token

        indent_str = token.rsplit('\n', 1)[1]  # Tabs and spaces
        indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len

        if indent > self.indent_level[-1]:
            self.indent_level.append(indent)
            yield Token.new_borrow_pos(self.INDENT_type, indent_str, token)
        else:
            while indent < self.indent_level[-1]:
                self.indent_level.pop()
                yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token)

            assert indent == self.indent_level[-1], '%s != %s' % (indent, self.indent_level[-1])

    def process(self, stream):
        for token in stream:
            if token.type == self.NL_type:
                for t in self.handle_NL(token):
                    yield t
            else:
                yield token

            if token.type in self.OPEN_PAREN_types:
                self.paren_level += 1
            elif token.type in self.CLOSE_PAREN_types:
                self.paren_level -= 1
                assert self.paren_level >= 0

        while len(self.indent_level) > 1:
            self.indent_level.pop()
            yield Token(self.DEDENT_type, '')

        assert self.indent_level == [0], self.indent_level

    # XXX Hack for ContextualLexer. Maybe there's a more elegant solution?
    @property
    def always_accept(self):
        return (self.NL_type,)
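# Illustrative sketch (added commentary, not part of the generated output):
# Indenter is abstract; a subclass supplies the token-type names. The names
# below are hypothetical and would have to match the compiled grammar (this
# module's JSON grammar has no newline/indent tokens, so this is only a
# pattern, not something usable with the grammar below).
#
#     class MyIndenter(Indenter):
#         NL_type = '_NEWLINE'
#         OPEN_PAREN_types = ['__LPAR', '__LSQB', '__LBRACE']
#         CLOSE_PAREN_types = ['__RPAR', '__RSQB', '__RBRACE']
#         INDENT_type = '_INDENT'
#         DEDENT_type = '_DEDENT'
#         tab_len = 8
#
# An instance would then be passed as postlex to Lark_StandAlone (see below).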
class LexError(Exception):
    pass

class UnexpectedInput(LexError):
    def __init__(self, seq, lex_pos, line, column, allowed=None, considered_rules=None):
        context = seq[lex_pos:lex_pos+5]
        message = "No token defined for: '%s' in %r at line %d col %d" % (seq[lex_pos], context, line, column)
        if allowed:
            message += '\n\nExpecting: %s\n' % allowed

        super(UnexpectedInput, self).__init__(message)

        self.line = line
        self.column = column
        self.context = context
        self.allowed = allowed
        self.considered_rules = considered_rules
class Token(Str):
    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None):
        inst = Str.__new__(cls, value)
        inst.type = type_
        inst.pos_in_stream = pos_in_stream
        inst.value = value
        inst.line = line
        inst.column = column
        return inst

    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        return cls(type_, value, borrow_t.pos_in_stream, line=borrow_t.line, column=borrow_t.column)

    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)

    def __deepcopy__(self, memo):
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)

    def __eq__(self, other):
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)

    __hash__ = Str.__hash__
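# Illustrative sketch (added commentary, not part of the generated output):
# Token subclasses the string type, so it compares equal to its plain value,
# while comparison between two Tokens is type-aware.
#
#     >>> t = Token('SIGNED_NUMBER', '3.14', pos_in_stream=0, line=1, column=0)
#     >>> t == '3.14'                           # plain-string comparison
#     True
#     >>> t == Token('ESCAPED_STRING', '3.14')  # same value, different type
#     False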
class LineCounter:
    def __init__(self):
        self.newline_char = '\n'
        self.char_pos = 0
        self.line = 1
        self.column = 0
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and calculate the new line & column.

        As an optional optimization, set test_newline=False if token doesn't contain a newline.
        """
        if test_newline:
            newlines = token.count(self.newline_char)
            if newlines:
                self.line += newlines
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos
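# Illustrative sketch (added commentary, not part of the generated output):
# feeding text through a LineCounter to track the lexer's position.
#
#     >>> ctr = LineCounter()
#     >>> ctr.feed('{"a":\n')
#     >>> (ctr.line, ctr.column, ctr.char_pos)
#     (2, 0, 6)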
class _Lex:
    "Built to serve both Lexer and ContextualLexer"
    def __init__(self, lexer):
        self.lexer = lexer

    def lex(self, stream, newline_types, ignore_types):
        newline_types = list(newline_types)
        ignore_types = list(ignore_types)
        line_ctr = LineCounter()
        t = None

        while True:
            lexer = self.lexer
            for mre, type_from_index in lexer.mres:
                m = mre.match(stream, line_ctr.char_pos)
                if m:
                    value = m.group(0)
                    type_ = type_from_index[m.lastindex]
                    if type_ not in ignore_types:
                        t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                        if t.type in lexer.callback:
                            t = lexer.callback[t.type](t)
                        yield t
                    else:
                        if type_ in lexer.callback:
                            t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                            lexer.callback[type_](t)
                    line_ctr.feed(value, type_ in newline_types)
                    if t:
                        t.end_line = line_ctr.line
                        t.end_column = line_ctr.column
                    break
            else:
                if line_ctr.char_pos < len(stream):
                    raise UnexpectedInput(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                break

class UnlessCallback:
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for mre, type_from_index in self.mres:
            m = mre.match(t.value)
            if m:
                value = m.group(0)
                t.type = type_from_index[m.lastindex]
                break
        return t
from functools import partial

class ExpandSingleChild:
    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        if len(children) == 1:
            return children[0]
        else:
            return self.node_builder(children)

class CreateToken:
    "Used for fixing the results of scanless parsing"
    def __init__(self, token_name, node_builder):
        self.node_builder = node_builder
        self.token_name = token_name

    def __call__(self, children):
        return self.node_builder([Token(self.token_name, ''.join(children))])

class PropagatePositions:
    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        res = self.node_builder(children)

        if children:
            for a in children:
                with suppress(AttributeError):
                    res.line = a.line
                    res.column = a.column
                break

            for a in reversed(children):
                with suppress(AttributeError):
                    res.end_line = a.end_line
                    res.end_column = a.end_column
                break

        return res
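# Note (added commentary): each loop in PropagatePositions breaks after its
# first iteration, so (line, column) is taken from the first child and
# (end_line, end_column) from the last child when those attributes exist;
# if that child lacks them, suppress(AttributeError) simply leaves the
# result's positions unset.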
class ChildFilter:
    def __init__(self, to_include, node_builder):
        self.node_builder = node_builder
        self.to_include = to_include

    def __call__(self, children):
        filtered = []
        for i, to_expand in self.to_include:
            if to_expand:
                if filtered:
                    filtered += children[i].children
                else:   # Optimize for left-recursion
                    filtered = children[i].children
            else:
                filtered.append(children[i])
        return self.node_builder(filtered)
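# Illustrative sketch (added commentary, not part of the generated output):
# a ChildFilter built with to_include=[(0, False), (2, True)] keeps child 0
# as-is, drops child 1 entirely, and splices in the grandchildren of child 2.
#
#     >>> cf = ChildFilter([(0, False), (2, True)], list)
#     >>> cf(['a', 'dropped', Tree('_seq', ['b', 'c'])])
#     ['a', 'b', 'c']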
def _should_expand(sym):
    return not is_terminal(sym) and sym.startswith('_')

def maybe_create_child_filter(expansion, filter_out):
    to_include = [(i, _should_expand(sym)) for i, sym in enumerate(expansion) if sym not in filter_out]

    if len(to_include) < len(expansion) or any(to_expand for i, to_expand in to_include):
        return partial(ChildFilter, to_include)

class Callback(object):
    pass

class ParseTreeBuilder:
    def __init__(self, rules, tree_class, propagate_positions=False, keep_all_tokens=False):
        self.tree_class = tree_class
        self.propagate_positions = propagate_positions
        self.always_keep_all_tokens = keep_all_tokens

        self.rule_builders = list(self._init_builders(rules))

        self.user_aliases = {}

    def _init_builders(self, rules):
        filter_out = {rule.origin for rule in rules if rule.options and rule.options.filter_out}
        filter_out |= {sym for rule in rules for sym in rule.expansion if is_terminal(sym) and sym.startswith('_')}
        assert all(x.startswith('_') for x in filter_out)

        for rule in rules:
            options = rule.options
            keep_all_tokens = self.always_keep_all_tokens or (options.keep_all_tokens if options else False)
            expand_single_child = options.expand1 if options else False
            create_token = options.create_token if options else False

            wrapper_chain = filter(None, [
                create_token and partial(CreateToken, create_token),
                (expand_single_child and not rule.alias) and ExpandSingleChild,
                maybe_create_child_filter(rule.expansion, () if keep_all_tokens else filter_out),
                self.propagate_positions and PropagatePositions,
            ])

            yield rule, wrapper_chain

    def create_callback(self, transformer=None):
        callback = Callback()

        for rule, wrapper_chain in self.rule_builders:
            internal_callback_name = '_callback_%s_%s' % (rule.origin, '_'.join(rule.expansion))

            user_callback_name = rule.alias or rule.origin
            try:
                f = transformer._get_func(user_callback_name)
            except AttributeError:
                f = partial(self.tree_class, user_callback_name)

            self.user_aliases[rule] = rule.alias
            rule.alias = internal_callback_name

            for w in wrapper_chain:
                f = w(f)

            if hasattr(callback, internal_callback_name):
                raise GrammarError("Rule '%s' already exists" % (rule,))
            setattr(callback, internal_callback_name, f)

        return callback
class _Parser:
    def __init__(self, parse_table, callbacks):
        self.states = parse_table.states
        self.start_state = parse_table.start_state
        self.end_state = parse_table.end_state
        self.callbacks = callbacks

    def parse(self, seq, set_state=None):
        i = 0
        token = None
        stream = iter(seq)
        states = self.states

        state_stack = [self.start_state]
        value_stack = []

        if set_state: set_state(self.start_state)

        def get_action(key):
            state = state_stack[-1]
            try:
                return states[state][key]
            except KeyError:
                expected = states[state].keys()
                raise UnexpectedToken(token, expected, seq, i)

        def reduce(rule):
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = get_action(rule.origin)
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        for i, token in enumerate(stream):
            while True:
                action, arg = get_action(token.type)
                assert arg != self.end_state
                if action is Shift:
                    state_stack.append(arg)
                    value_stack.append(token)
                    if set_state: set_state(arg)
                    break  # next token
                else:
                    reduce(arg)

        while True:
            _action, arg = get_action('$END')
            if _action is Shift:
                assert arg == self.end_state
                val, = value_stack
                return val
            else:
                reduce(arg)
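# Illustrative sketch (added commentary, not part of the generated output):
# parsing '[true]' with the table below proceeds roughly as
#     shift __LSQB -> shift __TRUE0 -> reduce value (alias 'true')
#     -> shift __RSQB -> reduce array -> reduce value -> reduce start
# after which the single remaining entry on value_stack is returned.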
class Rule(object):
    """
    origin : a symbol
    expansion : a list of symbols
    """
    def __init__(self, origin, expansion, alias=None, options=None):
        self.origin = origin
        self.expansion = expansion
        self.alias = alias
        self.options = options

    def __str__(self):
        return '<%s : %s>' % (self.origin, ' '.join(map(str, self.expansion)))

    def __repr__(self):
        return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)

class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False, priority=None):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.create_token = create_token  # used for scanless postprocessing
        self.priority = priority
        self.filter_out = filter_out  # remove this rule from the tree
                                      # used for "token"-rules in scanless

    def __repr__(self):
        return 'RuleOptions(%r, %r, %r, %r, %r)' % (
            self.keep_all_tokens,
            self.expand1,
            self.create_token,
            self.priority,
            self.filter_out
        )

Shift = 0
Reduce = 1
import re

MRES = (
[(u'(?P<SIGNED_NUMBER>(?:(?:\\+|\\-))?(?:(?:(?:[0-9])+(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+|(?:(?:[0-9])+\\.(?:(?:[0-9])+)?|\\.(?:[0-9])+)(?:(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+)?)|(?:[0-9])+))|(?P<ESCAPED_STRING>\\"(?:(?:\\\\\\"|[^"]))*\\")|(?P<WS>(?:[ \t\x0c\r\n])+)|(?P<__FALSE1>false)|(?P<__NULL2>null)|(?P<__TRUE0>true)|(?P<__COLON>\\:)|(?P<__COMMA>\\,)|(?P<__LBRACE>\\{)|(?P<__LSQB>\\[)|(?P<__RBRACE>\\})|(?P<__RSQB>\\])',
  {1: u'SIGNED_NUMBER',
   2: u'ESCAPED_STRING',
   3: u'WS',
   4: u'__FALSE1',
   5: u'__NULL2',
   6: u'__TRUE0',
   7: u'__COLON',
   8: u'__COMMA',
   9: u'__LBRACE',
   10: u'__LSQB',
   11: u'__RBRACE',
   12: u'__RSQB'})]
)

LEXER_CALLBACK = (
{}
)

NEWLINE_TYPES = [u'WS']
IGNORE_TYPES = [u'WS']

class LexerRegexps: pass

lexer_regexps = LexerRegexps()
lexer_regexps.mres = [(re.compile(p), d) for p, d in MRES]
lexer_regexps.callback = {n: UnlessCallback([(re.compile(p), d) for p, d in mres])
                          for n, mres in LEXER_CALLBACK.items()}

lexer = _Lex(lexer_regexps)

def lex(stream):
    return lexer.lex(stream, NEWLINE_TYPES, IGNORE_TYPES)
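# Illustrative sketch (added commentary, not part of the generated output):
# tokenizing a small JSON snippet with the lex() helper. WS tokens are in
# IGNORE_TYPES, so whitespace never reaches the parser.
#
#     >>> [(t.type, t.value) for t in lex('[1, true]')]
#     [('__LSQB', '['), ('SIGNED_NUMBER', '1'), ('__COMMA', ','),
#      ('__TRUE0', 'true'), ('__RSQB', ']')]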
RULES = {
    0: Rule(u'start', [u'value'], None, RuleOptions(False, True, None, None, False)),
    1: Rule(u'value', [u'string'], None, RuleOptions(False, True, None, None, False)),
    2: Rule(u'value', [u'__TRUE0'], u'true', RuleOptions(False, True, None, None, False)),
    3: Rule(u'value', [u'array'], None, RuleOptions(False, True, None, None, False)),
    4: Rule(u'value', [u'__NULL2'], u'null', RuleOptions(False, True, None, None, False)),
    5: Rule(u'value', [u'SIGNED_NUMBER'], u'number', RuleOptions(False, True, None, None, False)),
    6: Rule(u'value', [u'object'], None, RuleOptions(False, True, None, None, False)),
    7: Rule(u'value', [u'__FALSE1'], u'false', RuleOptions(False, True, None, None, False)),
    8: Rule(u'array', ['__LSQB', u'value', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
    9: Rule(u'array', ['__LSQB', u'value', '__anon_star_0', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
    10: Rule(u'array', ['__LSQB', '__RSQB'], None, RuleOptions(False, False, None, None, False)),
    11: Rule(u'object', ['__LBRACE', u'pair', '__anon_star_1', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
    12: Rule(u'object', ['__LBRACE', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
    13: Rule(u'object', ['__LBRACE', u'pair', '__RBRACE'], None, RuleOptions(False, False, None, None, False)),
    14: Rule(u'pair', [u'string', '__COLON', u'value'], None, RuleOptions(False, False, None, None, False)),
    15: Rule(u'string', [u'ESCAPED_STRING'], None, RuleOptions(False, False, None, None, False)),
    16: Rule('__anon_star_0', ['__anon_star_0', '__COMMA', u'value'], None, None),
    17: Rule('__anon_star_0', ['__COMMA', u'value'], None, None),
    18: Rule('__anon_star_1', ['__COMMA', u'pair'], None, None),
    19: Rule('__anon_star_1', ['__anon_star_1', '__COMMA', u'pair'], None, None),
}
parse_tree_builder = ParseTreeBuilder(RULES.values(), Tree)

class ParseTable: pass

parse_table = ParseTable()

STATES = {
    0: {0: (1, 4), 1: (1, 4), 2: (1, 4), 3: (1, 4)},
    1: {1: (1, 14), 2: (1, 14)},
    2: {0: (0, 29), 1: (0, 32), 4: (0, 9)},
    3: {1: (0, 13), 2: (0, 12)},
    4: {0: (1, 1), 1: (1, 1), 2: (1, 1), 3: (1, 1)},
    5: {0: (1, 10), 1: (1, 10), 2: (1, 10), 3: (1, 10)},
    6: {2: (0, 15), 5: (0, 27), 6: (0, 16), 7: (0, 26)},
    7: {5: (0, 34), 6: (0, 16), 7: (0, 26)},
    8: {0: (1, 2), 1: (1, 2), 2: (1, 2), 3: (1, 2)},
    9: {0: (0, 11), 1: (0, 22)},
    10: {0: (1, 6), 1: (1, 6), 2: (1, 6), 3: (1, 6)},
    11: {0: (1, 9), 1: (1, 9), 2: (1, 9), 3: (1, 9)},
    12: {0: (1, 11), 1: (1, 11), 2: (1, 11), 3: (1, 11)},
    13: {5: (0, 20), 6: (0, 16), 7: (0, 26)},
    14: {6: (0, 16), 7: (0, 4), 8: (0, 6), 9: (0, 31), 10: (0, 24), 11: (0, 10), 12: (0, 21), 13: (0, 17), 14: (0, 33), 15: (0, 0), 16: (0, 19), 17: (0, 8)},
    15: {0: (1, 12), 1: (1, 12), 2: (1, 12), 3: (1, 12)},
    16: {0: (1, 15), 1: (1, 15), 2: (1, 15), 3: (1, 15), 18: (1, 15)},
    17: {3: (1, 0)},
    18: {},
    19: {0: (1, 3), 1: (1, 3), 2: (1, 3), 3: (1, 3)},
    20: {1: (1, 19), 2: (1, 19)},
    21: {0: (1, 5), 1: (1, 5), 2: (1, 5), 3: (1, 5)},
    22: {6: (0, 16), 7: (0, 4), 8: (0, 6), 9: (0, 31), 10: (0, 24), 11: (0, 10), 12: (0, 21), 13: (0, 30), 15: (0, 0), 16: (0, 19), 17: (0, 8)},
    23: {6: (0, 16), 7: (0, 4), 8: (0, 6), 9: (0, 31), 10: (0, 24), 11: (0, 10), 12: (0, 21), 13: (0, 1), 15: (0, 0), 16: (0, 19), 17: (0, 8)},
    24: {0: (0, 5), 6: (0, 16), 7: (0, 4), 8: (0, 6), 9: (0, 31), 10: (0, 24), 11: (0, 10), 12: (0, 21), 13: (0, 2), 15: (0, 0), 16: (0, 19), 17: (0, 8)},
    25: {0: (1, 13), 1: (1, 13), 2: (1, 13), 3: (1, 13)},
    26: {18: (0, 23)},
    27: {1: (0, 7), 2: (0, 25), 19: (0, 3)},
    28: {0: (1, 17), 1: (1, 17)},
    29: {0: (1, 8), 1: (1, 8), 2: (1, 8), 3: (1, 8)},
    30: {0: (1, 16), 1: (1, 16)},
    31: {0: (1, 7), 1: (1, 7), 2: (1, 7), 3: (1, 7)},
    32: {6: (0, 16), 7: (0, 4), 8: (0, 6), 9: (0, 31), 10: (0, 24), 11: (0, 10), 12: (0, 21), 13: (0, 28), 15: (0, 0), 16: (0, 19), 17: (0, 8)},
    33: {3: (0, 18)},
    34: {1: (1, 18), 2: (1, 18)},
}

TOKEN_TYPES = (
{0: '__RSQB',
 1: '__COMMA',
 2: '__RBRACE',
 3: '$END',
 4: '__anon_star_0',
 5: u'pair',
 6: u'ESCAPED_STRING',
 7: u'string',
 8: '__LBRACE',
 9: u'__FALSE1',
 10: '__LSQB',
 11: u'object',
 12: u'SIGNED_NUMBER',
 13: u'value',
 14: 'start',
 15: u'__NULL2',
 16: u'array',
 17: u'__TRUE0',
 18: '__COLON',
 19: '__anon_star_1'}
)

parse_table.states = {s: {TOKEN_TYPES[t]: (a, RULES[x] if a is Reduce else x) for t, (a, x) in acts.items()}
                      for s, acts in STATES.items()}
parse_table.start_state = 14
parse_table.end_state = 18
class Lark_StandAlone:
    def __init__(self, transformer=None, postlex=None):
        callback = parse_tree_builder.create_callback(transformer=transformer)
        callbacks = {rule: getattr(callback, rule.alias or rule.origin, None) for rule in RULES.values()}
        self.parser = _Parser(parse_table, callbacks)
        self.postlex = postlex

    def parse(self, stream):
        tokens = lex(stream)
        if self.postlex: tokens = self.postlex.process(tokens)
        return self.parser.parse(tokens)
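# Illustrative usage sketch (added commentary, not part of the generated
# output). With no transformer, parse() returns a Tree of the JSON input;
# the TreeToJson sketch above would turn it into Python values instead.
if __name__ == '__main__':
    parser = Lark_StandAlone()
    tree = parser.parse('{"key": [1, true, null]}')
    print(tree.pretty())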