from __future__ import absolute_import

import os
import time
from collections import defaultdict
from io import open

from .utils import STRING_TYPE
from .load_grammar import load_grammar
from .tree import Tree
from .common import LexerConf, ParserConf
from .lexer import Lexer, TraditionalLexer
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import get_frontend

class LarkOptions(object):
    """Specifies the options for Lark
    """
    OPTIONS_DOC = """
        parser - Decides which parser engine to use, "earley" or "lalr". (Default: "earley")
                 Note: "lalr" requires a lexer
        lexer - Decides whether or not to use a lexer stage
            "standard": Use a standard lexer
            "contextual": Stronger lexer (only works with parser="lalr")
            "dynamic": Flexible and powerful (only with parser="earley")
            "dynamic_complete": Same as dynamic, but tries *every* variation
                                of tokenizing possible. (only with parser="earley")
            "auto" (default): Choose for me based on grammar and parser
        ambiguity - Decides how to handle ambiguity in the parse. Only relevant if parser="earley"
            "resolve": The parser will automatically choose the simplest derivation
                       (it chooses consistently: greedy for tokens, non-greedy for rules)
            "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).
        transformer - Applies the transformer to every parse tree
        debug - Affects verbosity (default: False)
        keep_all_tokens - Don't automagically remove "punctuation" tokens (default: False)
        cache_grammar - Cache the Lark grammar (Default: False)
        postlex - Lexer post-processing (Default: None) Only works with the standard and contextual lexers.
        start - The start symbol (Default: start)
        profile - Measure run-time usage in Lark. Read results from the profiler property (Default: False)
        priority - How priorities should be evaluated - auto, none, normal, invert (Default: auto)
        propagate_positions - Propagates [line, column, end_line, end_column] attributes into all tree branches.
        lexer_callbacks - Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.
        maybe_placeholders - Experimental feature. Instead of omitting optional rules (i.e. rule?), replace them with None
    """
    if __doc__:
        __doc__ += OPTIONS_DOC
    _defaults = {
        'debug': False,
        'keep_all_tokens': False,
        'tree_class': None,
        'cache_grammar': False,
        'postlex': None,
        'parser': 'earley',
        'lexer': 'auto',
        'transformer': None,
        'start': 'start',
        'profile': False,
        'priority': 'auto',
        'ambiguity': 'auto',
        'propagate_positions': False,
        'lexer_callbacks': {},
        'maybe_placeholders': False,
    }
    def __init__(self, options_dict):
        o = dict(options_dict)

        options = {}
        for name, default in self._defaults.items():
            if name in o:
                value = o.pop(name)
                if isinstance(default, bool):
                    value = bool(value)
            else:
                value = default

            options[name] = value

        self.__dict__['options'] = options
        assert self.parser in ('earley', 'lalr', 'cyk', None)

        if self.parser == 'earley' and self.transformer:
            raise ValueError('Cannot specify an embedded transformer when using the Earley algorithm. '
                             'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)')

        if o:
            raise ValueError("Unknown options: %s" % o.keys())
    def __getattr__(self, name):
        return self.options[name]

    def __setattr__(self, name, value):
        assert name in self.options
        self.options[name] = value

    def serialize(self):
        return self.options

    @classmethod
    def deserialize(cls, data):
        return cls(data)

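# Illustrative sketch (not part of the original module): how LarkOptions
# normalizes its input. Omitted keys fall back to _defaults, boolean options
# are coerced with bool(), and unknown keys raise ValueError. The function
# below exists only as a demonstration and is never called by the library.
def _lark_options_demo():
    opts = LarkOptions({'parser': 'lalr', 'debug': 1})
    assert opts.parser == 'lalr'
    assert opts.debug is True       # coerced to bool
    assert opts.lexer == 'auto'     # default applied
    try:
        LarkOptions({'no_such_option': 42})
    except ValueError:
        pass                        # unknown keys are rejected
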
class Profiler:
    def __init__(self):
        self.total_time = defaultdict(float)
        self.cur_section = '__init__'
        self.last_enter_time = time.time()

    def enter_section(self, name):
        cur_time = time.time()
        self.total_time[self.cur_section] += cur_time - self.last_enter_time
        self.last_enter_time = cur_time
        self.cur_section = name

    def make_wrapper(self, name, f):
        def wrapper(*args, **kwargs):
            last_section = self.cur_section
            self.enter_section(name)
            try:
                return f(*args, **kwargs)
            finally:
                self.enter_section(last_section)

        return wrapper

class Lark:
    def __init__(self, grammar, **options):
        """
            grammar : a string or file-object containing the grammar spec (using Lark's ebnf syntax)
            options : a dictionary controlling various aspects of Lark.
        """
        self.options = LarkOptions(options)

        # Some, but not all file-like objects have a 'name' attribute
        try:
            self.source = grammar.name
        except AttributeError:
            self.source = '<string>'

        # Drain file-like objects to get their contents
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()

        assert isinstance(grammar, STRING_TYPE)

        if self.options.cache_grammar:
            raise NotImplementedError("Not available yet")

        assert not self.options.profile, "Feature temporarily disabled"
        # self.profiler = Profiler() if self.options.profile else None

        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'contextual'
            elif self.options.parser == 'earley':
                self.options.lexer = 'dynamic'
            elif self.options.parser == 'cyk':
                self.options.lexer = 'standard'
            else:
                assert False, self.options.parser
        lexer = self.options.lexer
        assert lexer in ('standard', 'contextual', 'dynamic', 'dynamic_complete') or issubclass(lexer, Lexer)

        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            disambig_parsers = ['earley', 'cyk']
            assert self.options.parser in disambig_parsers, (
                'Only %s supports disambiguation right now') % ', '.join(disambig_parsers)

        if self.options.priority == 'auto':
            if self.options.parser in ('earley', 'cyk', ):
                self.options.priority = 'normal'
            elif self.options.parser in ('lalr', ):
                self.options.priority = None
        elif self.options.priority in ('invert', 'normal'):
            assert self.options.parser in ('earley', 'cyk'), "priorities are not supported for LALR at this time"

        assert self.options.priority in ('auto', None, 'normal', 'invert'), 'invalid priority option specified: {}. options are auto, none, normal, invert.'.format(self.options.priority)
        assert self.options.ambiguity not in ('resolve__antiscore_sum', ), 'resolve__antiscore_sum has been replaced with the option priority="invert"'
        assert self.options.ambiguity in ('resolve', 'explicit', 'auto', )
        # Parse the grammar file and compose the grammars (TODO)
        self.grammar = load_grammar(grammar, self.source)

        # Compile the EBNF grammar into BNF
        self.terminals, self.rules, self.ignore_tokens = self.grammar.compile()

        # If the user asked to invert the priorities, negate them all here.
        # This replaces the old 'resolve__antiscore_sum' option.
        if self.options.priority == 'invert':
            for rule in self.rules:
                if rule.options and rule.options.priority is not None:
                    rule.options.priority = -rule.options.priority
        # Else, if the user asked to disable priorities, strip them from the
        # rules. This allows the Earley parsers to skip an extra forest walk
        # for improved performance, if you don't need them (or didn't specify any).
        elif self.options.priority is None:
            for rule in self.rules:
                if rule.options and rule.options.priority is not None:
                    rule.options.priority = None

        self.lexer_conf = LexerConf(self.terminals, self.ignore_tokens, self.options.postlex, self.options.lexer_callbacks)

        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()

    if __init__.__doc__:
        __init__.__doc__ += "\nOPTIONS:" + LarkOptions.OPTIONS_DOC
    def _build_lexer(self):
        return TraditionalLexer(self.lexer_conf.tokens, ignore=self.lexer_conf.ignore, user_callbacks=self.lexer_conf.callbacks)

    def _prepare_callbacks(self):
        self.parser_class = get_frontend(self.options.parser, self.options.lexer)
        self._parse_tree_builder = ParseTreeBuilder(self.rules, self.options.tree_class or Tree, self.options.propagate_positions, self.options.keep_all_tokens, self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', self.options.maybe_placeholders)
        self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)

    def _build_parser(self):
        self._prepare_callbacks()
        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
        return self.parser_class(self.lexer_conf, parser_conf, options=self.options)
    def serialize(self):
        return {
            'parser': self.parser.serialize(),
            'rules': [r.serialize() for r in self.rules],
            'options': self.options.serialize(),
        }

    @classmethod
    def deserialize(cls, data):
        from .grammar import Rule
        inst = cls.__new__(cls)
        inst.options = LarkOptions.deserialize(data['options'])
        inst.rules = [Rule.deserialize(r) for r in data['rules']]
        inst._prepare_callbacks()
        inst.parser = inst.parser_class.deserialize(data['parser'], inst._callbacks)
        return inst
    @classmethod
    def open(cls, grammar_filename, rel_to=None, **options):
        """Create an instance of Lark with the grammar given by its filename

        If rel_to is provided, the function will find the grammar filename in relation to it.

        Example:

            >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
            Lark(...)

        """
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    def __repr__(self):
        return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source, self.options.parser, self.options.lexer)
    def lex(self, text):
        "Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'"
        if not hasattr(self, 'lexer'):
            self.lexer = self._build_lexer()
        stream = self.lexer.lex(text)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        return stream

    def parse(self, text):
        "Parse the given text, according to the options provided. Returns a tree, unless specified otherwise."
        return self.parser.parse(text)

        # if self.profiler:
        #     self.profiler.enter_section('lex')
        #     l = list(self.lex(text))
        #     self.profiler.enter_section('parse')
        #     try:
        #         return self.parser.parse(l)
        #     finally:
        #         self.profiler.enter_section('outside_lark')
        # else:
        #     l = list(self.lex(text))
        #     return self.parser.parse(l)
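

# Illustrative usage sketch (not part of the original module). The grammar and
# input text below are assumptions chosen for demonstration only. Because this
# file uses relative imports, it must be run as part of the package
# (e.g. `python -m lark.lark`) rather than as a standalone script.
if __name__ == '__main__':
    demo_parser = Lark(r"""
        start: WORD "," WORD "!"

        %import common.WORD
        %ignore " "
    """, parser='lalr')

    # Prints an indented rendering of the resulting parse tree.
    print(demo_parser.parse("Hello, World!").pretty())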