This repo contains code to mirror other repos, as well as the code being mirrored.

from abc import ABC, abstractmethod
import sys, os, pickle, hashlib
import tempfile

from .exceptions import ConfigurationError, assert_config, UnexpectedInput
from .utils import Serialize, SerializeMemoizer, FS, isascii, logger
from .load_grammar import load_grammar, FromPackageLoader, Grammar, verify_used_files, PackageResource
from .tree import Tree
from .common import LexerConf, ParserConf
from .lexer import Lexer, TraditionalLexer, TerminalDef, LexerThread, Token
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import get_frontend, _get_lexer_callbacks
from .grammar import Rule

import re
try:
    import regex  # type: ignore
except ImportError:
    regex = None

###{standalone
from typing import (
    TypeVar, Type, List, Dict, Iterator, Callable, Union, Optional,
    Tuple, Iterable, IO, Any, TYPE_CHECKING
)

if TYPE_CHECKING:
    from .parsers.lalr_interactive_parser import InteractiveParser
    from .visitors import Transformer
    if sys.version_info >= (3, 8):
        from typing import Literal
    else:
        from typing_extensions import Literal


class PostLex(ABC):
    @abstractmethod
    def process(self, stream: Iterator[Token]) -> Iterator[Token]:
        return stream

    always_accept: Iterable[str] = ()
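

# A minimal sketch of a custom post-lexer (illustrative only; the class name and
# the 'COMMENT' token type are assumptions, not part of Lark's API):
#
#     class CommentStripper(PostLex):
#         always_accept = ()
#
#         def process(self, stream):
#             return (tok for tok in stream if tok.type != 'COMMENT')
#
# An instance would be passed as ``Lark(grammar, postlex=CommentStripper())``;
# post-lexers only work with the standard and contextual lexers (see OPTIONS_DOC below).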


class LarkOptions(Serialize):
    """Specifies the options for Lark

    """

    start: List[str]
    debug: bool
    transformer: 'Optional[Transformer]'
    propagate_positions: Union[bool, str]
    maybe_placeholders: bool
    cache: Union[bool, str]
    regex: bool
    g_regex_flags: int
    keep_all_tokens: bool
    tree_class: Any
    parser: 'Literal["earley", "lalr", "cyk", "auto"]'
    lexer: 'Union[Literal["auto", "standard", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]'
    ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]'
    postlex: Optional[PostLex]
    priority: 'Optional[Literal["auto", "normal", "invert"]]'
    lexer_callbacks: Dict[str, Callable[[Token], Token]]
    use_bytes: bool
    edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]]
    import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]'
    source_path: Optional[str]

    OPTIONS_DOC = """
    **=== General Options ===**

    start
            The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start")
    debug
            Display debug information and extra warnings. Use only when debugging (default: False)
            When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed.
    transformer
            Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster)
    propagate_positions
            Propagates (line, column, end_line, end_column) attributes into all tree branches.
            Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating.
    maybe_placeholders
            When ``True``, the ``[]`` operator returns ``None`` when not matched.
            When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all.
            (default= ``False``. Recommended to set to ``True``)
    cache
            Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now.

            - When ``False``, does nothing (default)
            - When ``True``, caches to a temporary file in the local directory
            - When given a string, caches to the path pointed by the string
    regex
            When True, uses the ``regex`` module instead of the stdlib ``re``.
    g_regex_flags
            Flags that are applied to all terminals (both regex and strings)
    keep_all_tokens
            Prevent the tree builder from automagically removing "punctuation" tokens (default: False)
    tree_class
            Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``.

    **=== Algorithm Options ===**

    parser
            Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley").
            (there is also a "cyk" option, kept for legacy reasons)
    lexer
            Decides whether or not to use a lexer stage

            - "auto" (default): Choose for me based on the parser
            - "standard": Use a standard lexer
            - "contextual": Stronger lexer (only works with parser="lalr")
            - "dynamic": Flexible and powerful (only with parser="earley")
            - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible.
    ambiguity
            Decides how to handle ambiguity in the parse. Only relevant if parser="earley"

            - "resolve": The parser will automatically choose the simplest derivation
              (it chooses consistently: greedy for tokens, non-greedy for rules)
            - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).
            - "forest": The parser will return the root of the shared packed parse forest.

    **=== Misc. / Domain Specific Options ===**

    postlex
            Lexer post-processing (Default: None). Only works with the standard and contextual lexers.
    priority
            How priorities should be evaluated - auto, none, normal, invert (Default: auto)
    lexer_callbacks
            Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.
    use_bytes
            Accept an input of type ``bytes`` instead of ``str`` (Python 3 only).
    edit_terminals
            A callback for editing the terminals before parse.
    import_paths
            A list of either paths or loader functions that specify where grammars are imported from.
    source_path
            Override the path from which the grammar was loaded. Useful for relative imports and unconventional grammar loading.

    **=== End of Options ===**
    """
    if __doc__:
        __doc__ += OPTIONS_DOC
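
    # A quick usage sketch for the options above (illustrative; the grammar and
    # values are arbitrary). With ``maybe_placeholders=True``, the unmatched
    # optional ``[NAME]`` becomes an explicit ``None`` child:
    #
    #     parser = Lark(r'''
    #         start: "hello" [NAME]
    #         %import common.CNAME -> NAME
    #         %ignore " "
    #     ''', parser='lalr', maybe_placeholders=True)
    #
    #     parser.parse("hello")   # expected: Tree('start', [None])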

    # Adding a new option needs to be done in multiple places:
    # - In the dictionary below. This is the primary truth of which options `Lark.__init__` accepts
    # - In the docstring above. It is used both for the docstring of `LarkOptions` and `Lark`, and in readthedocs
    # - As an attribute of `LarkOptions` above
    # - Potentially in `_LOAD_ALLOWED_OPTIONS` below this class, when the option doesn't change how the grammar is loaded
    # - Potentially in `lark.tools.__init__`, if it makes sense, and it can easily be passed as a cmd argument
    _defaults: Dict[str, Any] = {
        'debug': False,
        'keep_all_tokens': False,
        'tree_class': None,
        'cache': False,
        'postlex': None,
        'parser': 'earley',
        'lexer': 'auto',
        'transformer': None,
        'start': 'start',
        'priority': 'auto',
        'ambiguity': 'auto',
        'regex': False,
        'propagate_positions': False,
        'lexer_callbacks': {},
        'maybe_placeholders': False,
        'edit_terminals': None,
        'g_regex_flags': 0,
        'use_bytes': False,
        'import_paths': [],
        'source_path': None,
    }

    def __init__(self, options_dict):
        o = dict(options_dict)

        options = {}
        for name, default in self._defaults.items():
            if name in o:
                value = o.pop(name)
                if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'):
                    value = bool(value)
            else:
                value = default

            options[name] = value

        if isinstance(options['start'], str):
            options['start'] = [options['start']]

        self.__dict__['options'] = options

        assert_config(self.parser, ('earley', 'lalr', 'cyk', None))

        if self.parser == 'earley' and self.transformer:
            raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. '
                                     'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)')

        if o:
            raise ConfigurationError("Unknown options: %s" % o.keys())

    def __getattr__(self, name):
        try:
            return self.__dict__['options'][name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        assert_config(name, self.options.keys(), "%r isn't a valid option. Expected one of: %s")
        self.options[name] = value

    def serialize(self, memo):
        return self.options

    @classmethod
    def deserialize(cls, data, memo):
        return cls(data)


# Options that can be passed to the Lark parser, even when it was loaded from cache/standalone.
# These options are only used outside of `load_grammar`.
_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class'}

_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None)
_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest')


_T = TypeVar('_T')


class Lark(Serialize):
    """Main interface for the library.

    It's mostly a thin wrapper for the many different parsers, and for the tree constructor.

    Parameters:
        grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax)
        options: a dictionary controlling various aspects of Lark.

    Example:
        >>> Lark(r'''start: "foo" ''')
        Lark(...)
    """

    source_path: str
    source_grammar: str
    grammar: 'Grammar'
    options: LarkOptions
    lexer: Lexer
    terminals: List[TerminalDef]

    def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None:
        self.options = LarkOptions(options)

        # Set regex or re module
        use_regex = self.options.regex
        if use_regex:
            if regex:
                re_module = regex
            else:
                raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.')
        else:
            re_module = re

        # Some, but not all, file-like objects have a 'name' attribute
        if self.options.source_path is None:
            try:
                self.source_path = grammar.name
            except AttributeError:
                self.source_path = '<string>'
        else:
            self.source_path = self.options.source_path

        # Drain file-like objects to get their contents
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()

        cache_fn = None
        cache_md5 = None
        if isinstance(grammar, str):
            self.source_grammar = grammar
            if self.options.use_bytes:
                if not isascii(grammar):
                    raise ConfigurationError("Grammar must be ascii only, when use_bytes=True")

            if self.options.cache:
                if self.options.parser != 'lalr':
                    raise ConfigurationError("cache only works with parser='lalr' for now")

                unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals')
                options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable)
                from . import __version__
                s = grammar + options_str + __version__ + str(sys.version_info[:2])
                cache_md5 = hashlib.md5(s.encode('utf8')).hexdigest()

                if isinstance(self.options.cache, str):
                    cache_fn = self.options.cache
                else:
                    if self.options.cache is not True:
                        raise ConfigurationError("cache argument must be bool or str")

                    # Python 2.7 doesn't support * syntax in tuples
                    cache_fn = tempfile.gettempdir() + '/.lark_cache_%s_%s_%s.tmp' % ((cache_md5,) + sys.version_info[:2])

                if FS.exists(cache_fn):
                    logger.debug('Loading grammar from cache: %s', cache_fn)
                    # Remove options that aren't relevant for loading from cache
                    for name in (set(options) - _LOAD_ALLOWED_OPTIONS):
                        del options[name]
                    with FS.open(cache_fn, 'rb') as f:
                        old_options = self.options
                        try:
                            file_md5 = f.readline().rstrip(b'\n')
                            cached_used_files = pickle.load(f)
                            if file_md5 == cache_md5.encode('utf8') and verify_used_files(cached_used_files):
                                cached_parser_data = pickle.load(f)
                                self._load(cached_parser_data, **options)
                                return
                        except Exception:  # We should probably narrow down which errors we catch here.
                            logger.exception("Failed to load Lark from cache: %r. We will try to carry on." % cache_fn)

                            # In theory, the Lark instance might have been messed up by the call to `_load`.
                            # In practice, the only relevant thing that might have been overridden should be `options`
                            self.options = old_options

            # Parse the grammar file and compose the grammars
            self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens)
        else:
            assert isinstance(grammar, Grammar)
            self.grammar = grammar

        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'contextual'
            elif self.options.parser == 'earley':
                if self.options.postlex is not None:
                    logger.info("postlex can't be used with the dynamic lexer, so we use standard instead. "
                                "Consider using lalr with contextual instead of earley")
                    self.options.lexer = 'standard'
                else:
                    self.options.lexer = 'dynamic'
            elif self.options.parser == 'cyk':
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        if isinstance(lexer, type):
            assert issubclass(lexer, Lexer)     # XXX Is this really important? Maybe just ensure interface compliance
        else:
            assert_config(lexer, ('standard', 'contextual', 'dynamic', 'dynamic_complete'))
            if self.options.postlex is not None and 'dynamic' in lexer:
                raise ConfigurationError("Can't use postlex with a dynamic lexer. Use standard or contextual instead")

        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s")

        if self.options.priority == 'auto':
            self.options.priority = 'normal'

        if self.options.priority not in _VALID_PRIORITY_OPTIONS:
            raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS))
        assert self.options.ambiguity not in ('resolve__antiscore_sum',), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS:
            raise ConfigurationError("invalid ambiguity option: %r. Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS))

        if self.options.postlex is not None:
            terminals_to_keep = set(self.options.postlex.always_accept)
        else:
            terminals_to_keep = set()

        # Compile the EBNF grammar into BNF
        self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep)

        if self.options.edit_terminals:
            for t in self.terminals:
                self.options.edit_terminals(t)

        self._terminals_dict = {t.name: t for t in self.terminals}

        # If the user asked to invert the priorities, negate them all here.
        # This replaces the old 'resolve__antiscore_sum' option.
        if self.options.priority == 'invert':
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = -rule.options.priority
        # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
        elif self.options.priority is None:
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = None

        # TODO Deprecate lexer_callbacks?
        self.lexer_conf = LexerConf(
            self.terminals, re_module, self.ignore_tokens, self.options.postlex,
            self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes
        )

        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()

        if cache_fn:
            logger.debug('Saving grammar to cache: %s', cache_fn)
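            # Cache file layout (mirrored by the loading code above): one md5 line,
            # then the pickled record of used grammar files, then the serialized
            # parser as written by ``Lark.save``.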
            with FS.open(cache_fn, 'wb') as f:
                assert cache_md5 is not None
                f.write(cache_md5.encode('utf8') + b'\n')
                pickle.dump(used_files, f)
                self.save(f)

    if __doc__:
        __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC

    __serialize_fields__ = 'parser', 'rules', 'options'

    def _build_lexer(self, dont_ignore=False):
        lexer_conf = self.lexer_conf
        if dont_ignore:
            from copy import copy
            lexer_conf = copy(lexer_conf)
            lexer_conf.ignore = ()
        return TraditionalLexer(lexer_conf)

    def _prepare_callbacks(self):
        self._callbacks = {}
        # we don't need these callbacks if we aren't building a tree
        if self.options.ambiguity != 'forest':
            self._parse_tree_builder = ParseTreeBuilder(
                self.rules,
                self.options.tree_class or Tree,
                self.options.propagate_positions,
                self.options.parser != 'lalr' and self.options.ambiguity == 'explicit',
                self.options.maybe_placeholders
            )
            self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)
        self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals))

    def _build_parser(self):
        self._prepare_callbacks()
        parser_class = get_frontend(self.options.parser, self.options.lexer)
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return parser_class(self.lexer_conf, parser_conf, options=self.options)

    def save(self, f):
        """Saves the instance into the given file object

        Useful for caching and multiprocessing.
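
        Example (an illustrative sketch; the file name is arbitrary):

            >>> parser = Lark(r'''start: "a"''', parser="lalr")
            >>> with open("parser.bin", "wb") as f:
            ...     parser.save(f)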
  381. """
  382. data, m = self.memo_serialize([TerminalDef, Rule])
  383. pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)
  384. @classmethod
  385. def load(cls, f):
  386. """Loads an instance from the given file object
  387. Useful for caching and multiprocessing.
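
        Example (a sketch mirroring ``save`` above; the path is arbitrary):

            >>> with open("parser.bin", "rb") as f:
            ...     parser = Lark.load(f)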
  388. """
  389. inst = cls.__new__(cls)
  390. return inst._load(f)
  391. def _deserialize_lexer_conf(self, data, memo, options):
  392. lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo)
  393. lexer_conf.callbacks = options.lexer_callbacks or {}
  394. lexer_conf.re_module = regex if options.regex else re
  395. lexer_conf.use_bytes = options.use_bytes
  396. lexer_conf.g_regex_flags = options.g_regex_flags
  397. lexer_conf.skip_validation = True
  398. lexer_conf.postlex = options.postlex
  399. return lexer_conf
  400. def _load(self, f, **kwargs):
  401. if isinstance(f, dict):
  402. d = f
  403. else:
  404. d = pickle.load(f)
  405. memo_json = d['memo']
  406. data = d['data']
  407. assert memo_json
  408. memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
  409. options = dict(data['options'])
  410. if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults):
  411. raise ConfigurationError("Some options are not allowed when loading a Parser: {}"
  412. .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS))
  413. options.update(kwargs)
  414. self.options = LarkOptions.deserialize(options, memo)
  415. self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
  416. self.source_path = '<deserialized>'
  417. parser_class = get_frontend(self.options.parser, self.options.lexer)
  418. self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options)
  419. self.terminals = self.lexer_conf.terminals
  420. self._prepare_callbacks()
  421. self._terminals_dict = {t.name: t for t in self.terminals}
  422. self.parser = parser_class.deserialize(
  423. data['parser'],
  424. memo,
  425. self.lexer_conf,
  426. self._callbacks,
  427. self.options, # Not all, but multiple attributes are used
  428. )
  429. return self
  430. @classmethod
  431. def _load_from_dict(cls, data, memo, **kwargs):
  432. inst = cls.__new__(cls)
  433. return inst._load({'data': data, 'memo': memo}, **kwargs)
  434. @classmethod
  435. def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T:
  436. """Create an instance of Lark with the grammar given by its filename
  437. If ``rel_to`` is provided, the function will find the grammar filename in relation to it.
  438. Example:
  439. >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
  440. Lark(...)
  441. """
  442. if rel_to:
  443. basepath = os.path.dirname(rel_to)
  444. grammar_filename = os.path.join(basepath, grammar_filename)
  445. with open(grammar_filename, encoding='utf8') as f:
  446. return cls(f, **options)
  447. @classmethod
  448. def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: Tuple[str, ...]=("",), **options) -> _T:
  449. """Create an instance of Lark with the grammar loaded from within the package `package`.
  450. This allows grammar loading from zipapps.
  451. Imports in the grammar will use the `package` and `search_paths` provided, through `FromPackageLoader`
  452. Example:
  453. Lark.open_from_package(__name__, "example.lark", ("grammars",), parser=...)
  454. """
  455. package_loader = FromPackageLoader(package, search_paths)
  456. full_path, text = package_loader(None, grammar_path)
  457. options.setdefault('source_path', full_path)
  458. options.setdefault('import_paths', [])
  459. options['import_paths'].append(package_loader)
  460. return cls(text, **options)
  461. def __repr__(self):
  462. return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer)
  463. def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]:
  464. """Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'
  465. When dont_ignore=True, the lexer will return all tokens, even those marked for %ignore.
  466. :raises UnexpectedCharacters: In case the lexer cannot find a suitable match.
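
        Example (an illustrative sketch; grammar and input are arbitrary):

            >>> p = Lark(r'''
            ...     start: WORD+
            ...     %import common.WORD
            ...     %ignore " "
            ... ''')
            >>> [t.value for t in p.lex("hello world")]   # expected: ['hello', 'world']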
  467. """
  468. if not hasattr(self, 'lexer') or dont_ignore:
  469. lexer = self._build_lexer(dont_ignore)
  470. else:
  471. lexer = self.lexer
  472. lexer_thread = LexerThread(lexer, text)
  473. stream = lexer_thread.lex(None)
  474. if self.options.postlex:
  475. return self.options.postlex.process(stream)
  476. return stream
  477. def get_terminal(self, name: str) -> TerminalDef:
  478. """Get information about a terminal"""
  479. return self._terminals_dict[name]
  480. def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser':
  481. """Start an interactive parsing session.
  482. Parameters:
  483. text (str, optional): Text to be parsed. Required for ``resume_parse()``.
  484. start (str, optional): Start symbol
  485. Returns:
  486. A new InteractiveParser instance.
  487. See Also: ``Lark.parse()``
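
        Example (an illustrative sketch; only supported by the LALR parser):

            >>> parser = Lark(r'''start: "a" "b"''', parser="lalr")
            >>> ip = parser.parse_interactive("ab")
            >>> ip.exhaust_lexer()      # feed the remaining input to the parser
            >>> ip.resume_parse()       # expected: Tree('start', [])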
  488. """
  489. return self.parser.parse_interactive(text, start=start)
  490. def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> Tree:
  491. """Parse the given text, according to the options provided.
  492. Parameters:
  493. text (str): Text to be parsed.
  494. start (str, optional): Required if Lark was given multiple possible start symbols (using the start option).
  495. on_error (function, optional): if provided, will be called on UnexpectedToken error. Return true to resume parsing.
  496. LALR only. See examples/advanced/error_handling.py for an example of how to use on_error.
  497. Returns:
  498. If a transformer is supplied to ``__init__``, returns whatever is the
  499. result of the transformation. Otherwise, returns a Tree instance.
  500. :raises UnexpectedInput: On a parse error, one of these sub-exceptions will rise:
  501. ``UnexpectedCharacters``, ``UnexpectedToken``, or ``UnexpectedEOF``.
  502. For convenience, these sub-exceptions also inherit from ``ParserError`` and ``LexerError``.
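
        Example (an illustrative sketch; grammar and input are arbitrary):

            >>> parser = Lark(r'''
            ...     start: "hello" NAME
            ...     %import common.CNAME -> NAME
            ...     %ignore " "
            ... ''')
            >>> parser.parse("hello world")   # expected: Tree('start', [Token('NAME', 'world')])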
  503. """
  504. return self.parser.parse(text, start=start, on_error=on_error)
  505. ###}