This repo contains code to mirror other repos. It also contains the code that is getting mirrored.

from __future__ import absolute_import

import sys, os, pickle, hashlib
from io import open

from .utils import STRING_TYPE, Serialize, SerializeMemoizer, FS, isascii, logger
from .load_grammar import load_grammar
from .tree import Tree
from .common import LexerConf, ParserConf
from .lexer import Lexer, TraditionalLexer, TerminalDef, UnexpectedToken
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import get_frontend, _get_lexer_callbacks
from .grammar import Rule

import re
try:
    import regex
except ImportError:
    regex = None

###{standalone


class LarkOptions(Serialize):
    """Specifies the options for Lark
    """
    OPTIONS_DOC = """
    **=== General Options ===**

    start
            The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start")
    debug
            Display debug information, such as warnings (default: False)
    transformer
            Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster)
    propagate_positions
            Propagates (line, column, end_line, end_column) attributes into all tree branches.
    maybe_placeholders
            When True, the ``[]`` operator returns ``None`` when not matched.
            When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all.
            (default= ``False``. Recommended to set to ``True``)
    regex
            When True, uses the ``regex`` module instead of the stdlib ``re``.
    cache
            Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now.

            - When ``False``, does nothing (default)
            - When ``True``, caches to a temporary file in the local directory
            - When given a string, caches to the path pointed by the string
    g_regex_flags
            Flags that are applied to all terminals (both regex and strings)
    keep_all_tokens
            Prevent the tree builder from automagically removing "punctuation" tokens (default: False)

    **=== Algorithm Options ===**

    parser
            Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley")
            (there is also a "cyk" option for legacy)
    lexer
            Decides whether or not to use a lexer stage

            - "auto" (default): Choose for me based on the parser
            - "standard": Use a standard lexer
            - "contextual": Stronger lexer (only works with parser="lalr")
            - "dynamic": Flexible and powerful (only with parser="earley")
            - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible.
    ambiguity
            Decides how to handle ambiguity in the parse. Only relevant if parser="earley"

            - "resolve": The parser will automatically choose the simplest derivation
              (it chooses consistently: greedy for tokens, non-greedy for rules)
            - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).

    **=== Misc. / Domain Specific Options ===**

    postlex
            Lexer post-processing (Default: None). Only works with the standard and contextual lexers.
    priority
            How priorities should be evaluated - auto, none, normal, invert (Default: auto)
    lexer_callbacks
            Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.
    use_bytes
            Accept an input of type ``bytes`` instead of ``str`` (Python 3 only).
    edit_terminals
            A callback for editing the terminals before parse.
    """
    if __doc__:
        __doc__ += OPTIONS_DOC

    _defaults = {
        'debug': False,
        'keep_all_tokens': False,
        'tree_class': None,
        'cache': False,
        'postlex': None,
        'parser': 'earley',
        'lexer': 'auto',
        'transformer': None,
        'start': 'start',
        'priority': 'auto',
        'ambiguity': 'auto',
        'regex': False,
        'propagate_positions': False,
        'lexer_callbacks': {},
        'maybe_placeholders': False,
        'edit_terminals': None,
        'g_regex_flags': 0,
        'use_bytes': False,
    }

    def __init__(self, options_dict):
        o = dict(options_dict)

        options = {}
        for name, default in self._defaults.items():
            if name in o:
                value = o.pop(name)
                if isinstance(default, bool) and name not in ('cache', 'use_bytes'):
                    value = bool(value)
            else:
                value = default

            options[name] = value

        if isinstance(options['start'], STRING_TYPE):
            options['start'] = [options['start']]

        self.__dict__['options'] = options

        assert self.parser in ('earley', 'lalr', 'cyk', None)

        if self.parser == 'earley' and self.transformer:
            raise ValueError('Cannot specify an embedded transformer when using the Earley algorithm.'
                             'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)')

        if o:
            raise ValueError("Unknown options: %s" % o.keys())

    def __getattr__(self, name):
        try:
            return self.options[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        assert name in self.options
        self.options[name] = value

    def serialize(self, memo):
        return self.options

    @classmethod
    def deserialize(cls, data, memo):
        return cls(data)

class Lark(Serialize):
    """Main interface for the library.

    It's mostly a thin wrapper for the many different parsers, and for the tree constructor.

    Parameters:
        grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax)
        options: a dictionary controlling various aspects of Lark.

    Example:
        >>> Lark(r'''start: "foo" ''')
        Lark(...)
    """
    def __init__(self, grammar, **options):
        self.options = LarkOptions(options)

        # Set regex or re module
        use_regex = self.options.regex
        if use_regex:
            if regex:
                re_module = regex
            else:
                raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.')
        else:
            re_module = re

        # Some, but not all file-like objects have a 'name' attribute
        try:
            self.source = grammar.name
        except AttributeError:
            self.source = '<string>'

        # Drain file-like objects to get their contents
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()

        assert isinstance(grammar, STRING_TYPE)
        self.grammar_source = grammar

        if self.options.use_bytes:
            if not isascii(grammar):
                raise ValueError("Grammar must be ascii only, when use_bytes=True")
            if sys.version_info[0] == 2 and self.options.use_bytes != 'force':
                raise NotImplementedError("`use_bytes=True` may have issues on python2."
                                          "Use `use_bytes='force'` to use it at your own risk.")

        cache_fn = None
        if self.options.cache:
            if self.options.parser != 'lalr':
                raise NotImplementedError("cache only works with parser='lalr' for now")
            if isinstance(self.options.cache, STRING_TYPE):
                cache_fn = self.options.cache
            else:
                if self.options.cache is not True:
                    raise ValueError("cache argument must be bool or str")
                unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals')
                from . import __version__
                options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable)
                s = grammar + options_str + __version__
                md5 = hashlib.md5(s.encode()).hexdigest()
                cache_fn = '.lark_cache_%s.tmp' % md5

            if FS.exists(cache_fn):
                logger.debug('Loading grammar from cache: %s', cache_fn)
                with FS.open(cache_fn, 'rb') as f:
                    self._load(f, self.options.transformer, self.options.postlex)
                return

        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'contextual'
            elif self.options.parser == 'earley':
                self.options.lexer = 'dynamic'
            elif self.options.parser == 'cyk':
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        assert lexer in ('standard', 'contextual', 'dynamic', 'dynamic_complete') or issubclass(lexer, Lexer)

        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            disambig_parsers = ['earley', 'cyk']
            assert self.options.parser in disambig_parsers, (
                'Only %s supports disambiguation right now') % ', '.join(disambig_parsers)

        if self.options.priority == 'auto':
            if self.options.parser in ('earley', 'cyk', ):
                self.options.priority = 'normal'
            elif self.options.parser in ('lalr', ):
                self.options.priority = None
        elif self.options.priority in ('invert', 'normal'):
            assert self.options.parser in ('earley', 'cyk'), "priorities are not supported for LALR at this time"

        assert self.options.priority in ('auto', None, 'normal', 'invert'), 'invalid priority option specified: {}. options are auto, none, normal, invert.'.format(self.options.priority)
        assert self.options.ambiguity not in ('resolve__antiscore_sum', ), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        assert self.options.ambiguity in ('resolve', 'explicit', 'auto', )

        # Parse the grammar file and compose the grammars (TODO)
        self.grammar = load_grammar(grammar, self.source, re_module)

        # Compile the EBNF grammar into BNF
        self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start)

        if self.options.edit_terminals:
            for t in self.terminals:
                self.options.edit_terminals(t)

        self._terminals_dict = {t.name: t for t in self.terminals}

        # If the user asked to invert the priorities, negate them all here.
        # This replaces the old 'resolve__antiscore_sum' option.
        if self.options.priority == 'invert':
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = -rule.options.priority
        # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
        elif self.options.priority == None:
            for rule in self.rules:
                if rule.options.priority is not None:
                    rule.options.priority = None

        # TODO Deprecate lexer_callbacks?
        lexer_callbacks = (_get_lexer_callbacks(self.options.transformer, self.terminals)
                           if self.options.transformer
                           else {})
        lexer_callbacks.update(self.options.lexer_callbacks)

        self.lexer_conf = LexerConf(self.terminals, re_module, self.ignore_tokens, self.options.postlex, lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes)

        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()

        if cache_fn:
            logger.debug('Saving grammar to cache: %s', cache_fn)
            with FS.open(cache_fn, 'wb') as f:
                self.save(f)

    # TODO: merge with above
    __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC

    __serialize_fields__ = 'parser', 'rules', 'options'
    def _build_lexer(self):
        return TraditionalLexer(self.lexer_conf)

    def _prepare_callbacks(self):
        self.parser_class = get_frontend(self.options.parser, self.options.lexer)
        self._parse_tree_builder = ParseTreeBuilder(self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.keep_all_tokens, self.options.parser!='lalr' and self.options.ambiguity=='explicit', self.options.maybe_placeholders)
        self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)

    def _build_parser(self):
        self._prepare_callbacks()
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return self.parser_class(self.lexer_conf, parser_conf, options=self.options)

    def save(self, f):
        """Saves the instance into the given file object

        Useful for caching and multiprocessing.
        """
        data, m = self.memo_serialize([TerminalDef, Rule])
        pickle.dump({'data': data, 'memo': m}, f)

    @classmethod
    def load(cls, f):
        """Loads an instance from the given file object

        Useful for caching and multiprocessing.
        """
        inst = cls.__new__(cls)
        return inst._load(f)

    def _load(self, f, transformer=None, postlex=None):
        if isinstance(f, dict):
            d = f
        else:
            d = pickle.load(f)
        memo = d['memo']
        data = d['data']

        assert memo
        memo = SerializeMemoizer.deserialize(memo, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
        options = dict(data['options'])
        if transformer is not None:
            options['transformer'] = transformer
        if postlex is not None:
            options['postlex'] = postlex
        self.options = LarkOptions.deserialize(options, memo)
        re_module = regex if self.options.regex else re
        self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
        self.source = '<deserialized>'
        self._prepare_callbacks()
        self.parser = self.parser_class.deserialize(
            data['parser'],
            memo,
            self._callbacks,
            self.options.postlex,
            self.options.transformer,
            re_module
        )
        return self

    @classmethod
    def _load_from_dict(cls, data, memo, transformer=None, postlex=None):
        inst = cls.__new__(cls)
        return inst._load({'data': data, 'memo': memo}, transformer, postlex)

    @classmethod
    def open(cls, grammar_filename, rel_to=None, **options):
        """Create an instance of Lark with the grammar given by its filename

        If ``rel_to`` is provided, the function will find the grammar filename in relation to it.

        Example:
            >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
            Lark(...)
        """
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    def __repr__(self):
        return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source, self.options.parser, self.options.lexer)

    def lex(self, text):
        "Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'"
        if not hasattr(self, 'lexer'):
            self.lexer = self._build_lexer()
        stream = self.lexer.lex(text)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        return stream

    def get_terminal(self, name):
        "Get information about a terminal"
        return self._terminals_dict[name]

    def parse(self, text, start=None, on_error=None):
        """Parse the given text, according to the options provided.

        Parameters:
            text (str): Text to be parsed.
            start (str, optional): Required if Lark was given multiple possible start symbols (using the start option).
            on_error (function, optional): if provided, will be called on UnexpectedToken error. Return true to resume parsing.
                LALR only. See examples/error_puppet.py for an example of how to use on_error.

        Returns:
            If a transformer is supplied to ``__init__``, returns whatever is the
            result of the transformation. Otherwise, returns a Tree instance.
        """
        try:
            return self.parser.parse(text, start=start)
        except UnexpectedToken as e:
            if on_error is None:
                raise

            while True:
                if not on_error(e):
                    raise e
                try:
                    return e.puppet.resume_parse()
                except UnexpectedToken as e2:
                    if e.token.type == e2.token.type == '$END' and e.puppet == e2.puppet:
                        # Prevent infinite loop
                        raise e2
                    e = e2

###}
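
A minimal usage sketch (not part of lark.py) of the options documented in OPTIONS_DOC above: it constructs a parser with parser='lalr', maybe_placeholders=True and cache=True, then prints the resulting tree. The toy grammar and input are invented for illustration.

from lark import Lark

# Invented toy grammar for illustration only.
grammar = r"""
    start: pair+
    pair: NAME "=" NUMBER
    NAME: /[a-z]+/
    NUMBER: /[0-9]+/
    %import common.WS
    %ignore WS
"""

parser = Lark(
    grammar,
    parser='lalr',             # LALR(1) engine instead of the default Earley
    maybe_placeholders=True,   # recommended above: [] returns None when not matched
    cache=True,                # LALR only: writes a .lark_cache_<md5>.tmp file in the local directory
)

tree = parser.parse("a=1 b=22")
print(tree.pretty())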
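A sketch of the save()/load() round-trip described in the docstrings above, useful for caching and multiprocessing. The grammar and the pickle file name are illustrative.

from lark import Lark

parser = Lark(r'''start: "hello" "world"''', parser='lalr')

with open('my_parser.pickle', 'wb') as f:   # file name is illustrative
    parser.save(f)                          # memo-serializes rules/terminals and pickles them

with open('my_parser.pickle', 'rb') as f:
    restored = Lark.load(f)                 # rebuilds the callbacks and the parser frontend

print(restored.parse("helloworld"))         # the restored parser parses as before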
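A sketch of the on_error hook accepted by parse() above (LALR only): the handler receives the UnexpectedToken error; returning a true value resumes parsing via e.puppet.resume_parse(), returning a false value re-raises. The grammar and the recovery policy (skipping stray commas) are invented for illustration; the repository's examples/error_puppet.py shows a richer recovery that feeds tokens with e.puppet.feed_token().

from lark import Lark

parser = Lark(r'''
    start: NAME ("," NAME)*
    NAME: /[a-z]+/
    %import common.WS
    %ignore WS
''', parser='lalr')

def skip_extra_commas(e):
    # e is the UnexpectedToken; e.puppet could also be used to feed replacement tokens.
    # Returning True tells parse() to resume; returning False re-raises the error.
    return e.token.type == 'COMMA'

tree = parser.parse("a, , b,, c", on_error=skip_extra_commas)
print(tree.children)   # the three NAME tokens survive the stray commas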
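A sketch of lex(), which tokenizes (and postlexes) without parsing and, per its docstring, is only relevant with lexer='standard'. The grammar is illustrative.

from lark import Lark

parser = Lark(r'''
    start: WORD+
    WORD: /[a-z]+/
    %import common.WS
    %ignore WS
''', parser='lalr', lexer='standard')

for token in parser.lex("hello world"):
    print(token.type, token)   # WORD hello / WORD world; ignored whitespace is dropped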