from __future__ import absolute_import

import sys, os, pickle, hashlib
from io import open

from .utils import STRING_TYPE, Serialize, SerializeMemoizer, FS, isascii, logger
from .load_grammar import load_grammar
from .tree import Tree
from .common import LexerConf, ParserConf
from .lexer import Lexer, TraditionalLexer, TerminalDef, UnexpectedToken
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import get_frontend, _get_lexer_callbacks
from .grammar import Rule

import re
try:
    import regex
except ImportError:
    regex = None

###{standalone
class LarkOptions(Serialize):
    """Specifies the options for Lark
    """
    OPTIONS_DOC = """
    **General Options**

    start
            The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start")
    debug
            Display debug information, such as warnings (default: False)
    transformer
            Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster)
    propagate_positions
            Propagates (line, column, end_line, end_column) attributes into all tree branches.
    maybe_placeholders
            When True, the ``[]`` operator returns ``None`` when not matched.
            When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all.
            (default= ``False``. Recommended to set to ``True``)
    regex
            When True, uses the ``regex`` module instead of the stdlib ``re``.
    cache
            Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now.

            - When ``False``, does nothing (default)
            - When ``True``, caches to a temporary file in the local directory
            - When given a string, caches to the path pointed by the string
    g_regex_flags
            Flags that are applied to all terminals (both regex and strings)
    keep_all_tokens
            Prevent the tree builder from automagically removing "punctuation" tokens (default: False)

    **Algorithm Options**

    parser
            Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley")
            (there is also a "cyk" option for legacy)
    lexer
            Decides whether or not to use a lexer stage

            - "auto" (default): Choose for me based on the parser
            - "standard": Use a standard lexer
            - "contextual": Stronger lexer (only works with parser="lalr")
            - "dynamic": Flexible and powerful (only with parser="earley")
            - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible.
    ambiguity
            Decides how to handle ambiguity in the parse. Only relevant if parser="earley"

            - "resolve": The parser will automatically choose the simplest derivation
              (it chooses consistently: greedy for tokens, non-greedy for rules)
            - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).

    **Domain Specific Options**

    postlex
            Lexer post-processing (Default: None). Only works with the standard and contextual lexers.
    priority
            How priorities should be evaluated - auto, none, normal, invert (Default: auto)
    lexer_callbacks
            Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.
    use_bytes
            Accept an input of type ``bytes`` instead of ``str`` (Python 3 only).
    edit_terminals
            A callback for editing the terminals before parse.
    """
    if __doc__:
        __doc__ += OPTIONS_DOC

    _defaults = {
        'debug': False,
        'keep_all_tokens': False,
        'tree_class': None,
        'cache': False,
        'postlex': None,
        'parser': 'earley',
        'lexer': 'auto',
        'transformer': None,
        'start': 'start',
        'priority': 'auto',
        'ambiguity': 'auto',
        'regex': False,
        'propagate_positions': False,
        'lexer_callbacks': {},
        'maybe_placeholders': False,
        'edit_terminals': None,
        'g_regex_flags': 0,
        'use_bytes': False,
    }

    def __init__(self, options_dict):
        o = dict(options_dict)

        options = {}
        for name, default in self._defaults.items():
            if name in o:
                value = o.pop(name)
                if isinstance(default, bool) and name not in ('cache', 'use_bytes'):
                    value = bool(value)
            else:
                value = default

            options[name] = value

        if isinstance(options['start'], STRING_TYPE):
            options['start'] = [options['start']]

        self.__dict__['options'] = options

        assert self.parser in ('earley', 'lalr', 'cyk', None)

        if self.parser == 'earley' and self.transformer:
            raise ValueError('Cannot specify an embedded transformer when using the Earley algorithm. '
                             'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)')

        if o:
            raise ValueError("Unknown options: %s" % o.keys())

    def __getattr__(self, name):
        try:
            return self.options[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        assert name in self.options
        self.options[name] = value

    def serialize(self, memo):
        return self.options

    @classmethod
    def deserialize(cls, data, memo):
        return cls(data)
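

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how an options dict is
# normalized by LarkOptions. The option values below are assumptions chosen only
# to demonstrate the defaults and validation behaviour documented in OPTIONS_DOC.
def _demo_lark_options():
    opts = LarkOptions({'parser': 'lalr', 'start': 'start', 'maybe_placeholders': True})
    assert opts.parser == 'lalr'
    assert opts.start == ['start']       # a single start symbol is wrapped in a list
    assert opts.maybe_placeholders is True
    try:
        LarkOptions({'no_such_option': 1})   # unknown keys raise ValueError
    except ValueError:
        pass
    return opts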

class Lark(Serialize):
    """Main interface for the library.

    It's mostly a thin wrapper for the many different parsers, and for the tree constructor.

    Args:
        grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax)
        options: a dictionary controlling various aspects of Lark.

    Example:
        >>> Lark(r'''start: "foo" ''')
        Lark(...)
    """
    def __init__(self, grammar, **options):
        self.options = LarkOptions(options)

        # Set regex or re module
        use_regex = self.options.regex
        if use_regex:
            if regex:
                re_module = regex
            else:
                raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.')
        else:
            re_module = re

        # Some, but not all file-like objects have a 'name' attribute
        try:
            self.source = grammar.name
        except AttributeError:
            self.source = '<string>'

        # Drain file-like objects to get their contents
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()

        assert isinstance(grammar, STRING_TYPE)
        self.grammar_source = grammar
        if self.options.use_bytes:
            if not isascii(grammar):
                raise ValueError("Grammar must be ascii only, when use_bytes=True")
            if sys.version_info[0] == 2 and self.options.use_bytes != 'force':
                raise NotImplementedError("`use_bytes=True` may have issues on python2. "
                                          "Use `use_bytes='force'` to use it at your own risk.")

        cache_fn = None
        if self.options.cache:
            if self.options.parser != 'lalr':
                raise NotImplementedError("cache only works with parser='lalr' for now")
            if isinstance(self.options.cache, STRING_TYPE):
                cache_fn = self.options.cache
            else:
                if self.options.cache is not True:
                    raise ValueError("cache argument must be bool or str")
                unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals')
                from . import __version__
                options_str = ''.join(k + str(v) for k, v in options.items() if k not in unhashable)
                s = grammar + options_str + __version__
                md5 = hashlib.md5(s.encode()).hexdigest()
                cache_fn = '.lark_cache_%s.tmp' % md5

            if FS.exists(cache_fn):
                logger.debug('Loading grammar from cache: %s', cache_fn)
                with FS.open(cache_fn, 'rb') as f:
                    self._load(f, self.options.transformer, self.options.postlex)
                return

        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'contextual'
            elif self.options.parser == 'earley':
                self.options.lexer = 'dynamic'
            elif self.options.parser == 'cyk':
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        assert lexer in ('standard', 'contextual', 'dynamic', 'dynamic_complete') or issubclass(lexer, Lexer)

        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            disambig_parsers = ['earley', 'cyk']
            assert self.options.parser in disambig_parsers, (
                'Only %s supports disambiguation right now') % ', '.join(disambig_parsers)

        if self.options.priority == 'auto':
            if self.options.parser in ('earley', 'cyk', ):
                self.options.priority = 'normal'
            elif self.options.parser in ('lalr', ):
                self.options.priority = None
        elif self.options.priority in ('invert', 'normal'):
            assert self.options.parser in ('earley', 'cyk'), "priorities are not supported for LALR at this time"

        assert self.options.priority in ('auto', None, 'normal', 'invert'), 'invalid priority option specified: {}. options are auto, none, normal, invert.'.format(self.options.priority)
        assert self.options.ambiguity not in ('resolve__antiscore_sum', ), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        assert self.options.ambiguity in ('resolve', 'explicit', 'auto', )

        # Parse the grammar file and compose the grammars (TODO)
        self.grammar = load_grammar(grammar, self.source, re_module)

        # Compile the EBNF grammar into BNF
        self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start)

        if self.options.edit_terminals:
            for t in self.terminals:
                self.options.edit_terminals(t)

        self._terminals_dict = {t.name: t for t in self.terminals}

        # If the user asked to invert the priorities, negate them all here.
        # This replaces the old 'resolve__antiscore_sum' option.
        if self.options.priority == 'invert':
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = -rule.options.priority
        # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
        elif self.options.priority == None:
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = None

        # TODO Deprecate lexer_callbacks?
        lexer_callbacks = (_get_lexer_callbacks(self.options.transformer, self.terminals)
                           if self.options.transformer
                           else {})
        lexer_callbacks.update(self.options.lexer_callbacks)

        self.lexer_conf = LexerConf(self.terminals, re_module, self.ignore_tokens, self.options.postlex, lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes)

        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()

        if cache_fn:
            logger.debug('Saving grammar to cache: %s', cache_fn)
            with FS.open(cache_fn, 'wb') as f:
                self.save(f)
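
    # ----------------------------------------------------------------------
    # Illustrative note (not part of the original module): with cache=True and
    # parser='lalr', __init__ above hashes the grammar, the hashable options and
    # the installed lark version, and stores the analysed grammar in a file named
    # '.lark_cache_<md5>.tmp' in the working directory. A hedged usage sketch
    # ('grammar_text' is an assumed variable holding the grammar source):
    #
    #     parser = Lark(grammar_text, parser='lalr', cache=True)  # first call: analyse and save
    #     parser = Lark(grammar_text, parser='lalr', cache=True)  # later calls: load from the cache file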

    # TODO: merge with above
    if __doc__:
        __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC

    __serialize_fields__ = 'parser', 'rules', 'options'

    def _build_lexer(self):
        return TraditionalLexer(self.lexer_conf)

    def _prepare_callbacks(self):
        self.parser_class = get_frontend(self.options.parser, self.options.lexer)
        self._parse_tree_builder = ParseTreeBuilder(self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.keep_all_tokens, self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', self.options.maybe_placeholders)
        self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)

    def _build_parser(self):
        self._prepare_callbacks()
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return self.parser_class(self.lexer_conf, parser_conf, options=self.options)

    def save(self, f):
        """Saves the instance into the given file object

        Useful for caching and multiprocessing.
        """
        data, m = self.memo_serialize([TerminalDef, Rule])
        pickle.dump({'data': data, 'memo': m}, f)

    @classmethod
    def load(cls, f):
        """Loads an instance from the given file object

        Useful for caching and multiprocessing.
        """
        inst = cls.__new__(cls)
        return inst._load(f)
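
    # ----------------------------------------------------------------------
    # Illustrative sketch (not part of the original module): a save()/load()
    # round-trip, e.g. for caching or handing a parser to another process.
    # The file name 'parser.bin' is an assumption made for the example.
    #
    #     parser = Lark(r'''start: "foo" ''', parser='lalr')
    #     with open('parser.bin', 'wb') as f:
    #         parser.save(f)
    #     with open('parser.bin', 'rb') as f:
    #         restored = Lark.load(f)
    #     restored.parse("foo")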

    def _load(self, f, transformer=None, postlex=None):
        if isinstance(f, dict):
            d = f
        else:
            d = pickle.load(f)
        memo = d['memo']
        data = d['data']

        assert memo
        memo = SerializeMemoizer.deserialize(memo, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
        options = dict(data['options'])
        if transformer is not None:
            options['transformer'] = transformer
        if postlex is not None:
            options['postlex'] = postlex
        self.options = LarkOptions.deserialize(options, memo)
        re_module = regex if self.options.regex else re
        self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
        self.source = '<deserialized>'
        self._prepare_callbacks()
        self.parser = self.parser_class.deserialize(
            data['parser'],
            memo,
            self._callbacks,
            self.options.postlex,
            self.options.transformer,
            re_module
        )
        return self

    @classmethod
    def _load_from_dict(cls, data, memo, transformer=None, postlex=None):
        inst = cls.__new__(cls)
        return inst._load({'data': data, 'memo': memo}, transformer, postlex)

    @classmethod
    def open(cls, grammar_filename, rel_to=None, **options):
        """Create an instance of Lark with the grammar given by its filename

        If ``rel_to`` is provided, the function will find the grammar filename in relation to it.

        Example:
            >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
            Lark(...)
        """
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    def __repr__(self):
        return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source, self.options.parser, self.options.lexer)

    def lex(self, text):
        "Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'"
        if not hasattr(self, 'lexer'):
            self.lexer = self._build_lexer()
        stream = self.lexer.lex(text)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        return stream
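
    # ----------------------------------------------------------------------
    # Illustrative sketch (not part of the original module): standalone lexing
    # with lexer='standard'. The grammar below is a made-up assumption.
    #
    #     parser = Lark(r'''
    #         start: WORD+
    #         %import common.WORD
    #         %ignore " "
    #     ''', parser='lalr', lexer='standard')
    #     for tok in parser.lex("hello world"):
    #         print(tok.type, tok.value)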

    def get_terminal(self, name):
        "Get information about a terminal"
        return self._terminals_dict[name]

    def parse(self, text, start=None, on_error=None):
        """Parse the given text, according to the options provided.

        If a transformer is supplied to ``__init__``, returns whatever is the
        result of the transformation.

        Args:
            text (str): Text to be parsed.
            start (str, optional): Required if Lark was given multiple possible start
                symbols (using the start option).
            on_error (function, optional): if provided, will be called on UnexpectedToken
                error. Return true to resume parsing. LALR only.
                See examples/error_puppet.py for an example of how to use on_error.
        """
        try:
            return self.parser.parse(text, start=start)
        except UnexpectedToken as e:
            if on_error is None:
                raise

            while True:
                if not on_error(e):
                    raise e
                try:
                    return e.puppet.resume_parse()
                except UnexpectedToken as e2:
                    e = e2

###}
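

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The grammar and
# input below are assumptions, chosen only to show the typical entry points of
# this module: construction, parse() and get_terminal().
if __name__ == '__main__':
    _demo_grammar = r"""
        start: greeting ","? name "!"
        greeting: WORD
        name: WORD
        %import common.WORD
        %ignore " "
    """
    _parser = Lark(_demo_grammar, parser='lalr', maybe_placeholders=True)
    _tree = _parser.parse("Hello, World!")
    print(_tree.pretty())
    print(_parser.get_terminal('WORD'))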