This repo contains the code used to mirror other repos, along with the code being mirrored.


from __future__ import absolute_import

import os
import time
from collections import defaultdict

from .utils import STRING_TYPE
from .load_grammar import load_grammar
from .tree import Tree
from .common import LexerConf, ParserConf
from .lexer import Lexer
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import get_frontend


class LarkOptions(object):
    """Specifies the options for Lark
    """
    OPTIONS_DOC = """
        parser - Decides which parser engine to use, "earley" or "lalr". (Default: "earley")
                 Note: "lalr" requires a lexer
        lexer - Decides whether or not to use a lexer stage
            None: Don't use a lexer (scanless, only works with parser="earley")
            "standard": Use a standard lexer
            "contextual": Stronger lexer (only works with parser="lalr")
            "auto" (default): Choose for me based on grammar and parser
        ambiguity - Decides how to handle ambiguity in the parse. Only relevant if parser="earley"
            "resolve": The parser will automatically choose the simplest derivation
                       (it chooses consistently: greedy for tokens, non-greedy for rules)
            "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest).
        transformer - Applies the transformer to every parse tree
        debug - Affects verbosity (default: False)
        keep_all_tokens - Don't automagically remove "punctuation" tokens (default: False)
        cache_grammar - Cache the Lark grammar (Default: False)
        postlex - Lexer post-processing (Default: None)
        start - The start symbol (Default: start)
        profile - Measure run-time usage in Lark. Read results from the profiler property (Default: False)
        propagate_positions - Experimental. Don't use yet.
        """
    __doc__ += OPTIONS_DOC
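
    # Illustrative sketch (not part of this file): options arrive as the
    # keyword arguments of Lark() and are collected into this class, which
    # validates them in __init__ below, e.g.
    #
    #     Lark(my_grammar, parser='lalr', lexer='contextual')
    #
    # where `my_grammar` stands in for any grammar string.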

    def __init__(self, options_dict):
        o = dict(options_dict)

        self.debug = bool(o.pop('debug', False))
        self.keep_all_tokens = bool(o.pop('keep_all_tokens', False))
        self.tree_class = o.pop('tree_class', Tree)
        self.cache_grammar = o.pop('cache_grammar', False)
        self.postlex = o.pop('postlex', None)
        self.parser = o.pop('parser', 'earley')
        self.lexer = o.pop('lexer', 'auto')
        self.transformer = o.pop('transformer', None)
        self.start = o.pop('start', 'start')
        self.profile = o.pop('profile', False)
        self.ambiguity = o.pop('ambiguity', 'auto')
        self.propagate_positions = o.pop('propagate_positions', False)

        assert self.parser in ('earley', 'lalr', None)

        if self.parser == 'earley' and self.transformer:
            raise ValueError('Cannot specify an auto-transformer when using the Earley algorithm. '
                             'Please use your transformer on the resulting parse tree, or use a different algorithm (e.g. lalr)')
        if self.keep_all_tokens:
            raise NotImplementedError("keep_all_tokens: Not implemented yet!")

        # Any key not popped above is unrecognized
        if o:
            raise ValueError("Unknown options: %s" % o.keys())


class Profiler:
    def __init__(self):
        self.total_time = defaultdict(float)
        self.cur_section = '__init__'
        self.last_enter_time = time.time()

    def enter_section(self, name):
        cur_time = time.time()
        self.total_time[self.cur_section] += cur_time - self.last_enter_time
        self.last_enter_time = cur_time
        self.cur_section = name

    def make_wrapper(self, name, f):
        def wrapper(*args, **kwargs):
            last_section = self.cur_section
            self.enter_section(name)
            try:
                return f(*args, **kwargs)
            finally:
                self.enter_section(last_section)

        return wrapper
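
# Usage sketch for Profiler (illustrative, not from this file): wrapping a
# callable charges its run time to the named section, then restores the
# previous section on exit:
#
#     profiler = Profiler()
#     timed = profiler.make_wrapper('transformer', some_callback)
#     timed()                               # runs some_callback()
#     profiler.total_time['transformer']    # seconds accumulated so far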


class Lark:
    def __init__(self, grammar, **options):
        """
        grammar : a string or file-object containing the grammar spec (using Lark's ebnf syntax)
        options : a dictionary controlling various aspects of Lark.
        """
        self.options = LarkOptions(options)

        # Some, but not all file-like objects have a 'name' attribute
        try:
            source = grammar.name
        except AttributeError:
            source = '<string>'
            cache_file = "larkcache_%s" % str(hash(grammar) % (2**32))
        else:
            cache_file = "larkcache_%s" % os.path.basename(source)

        # Drain file-like objects to get their contents
        try:
            read = grammar.read
        except AttributeError:
            pass
        else:
            grammar = read()

        assert isinstance(grammar, STRING_TYPE)

        if self.options.cache_grammar or self.options.keep_all_tokens:
            raise NotImplementedError("Not available yet")

        assert not self.options.profile, "Feature temporarily disabled"
        self.profiler = Profiler() if self.options.profile else None

        if self.options.lexer == 'auto':
            if self.options.parser == 'lalr':
                self.options.lexer = 'standard'
            elif self.options.parser == 'earley':
                self.options.lexer = None
        lexer = self.options.lexer
        assert lexer in ('standard', 'contextual', None)

        if self.options.ambiguity == 'auto':
            if self.options.parser == 'earley':
                self.options.ambiguity = 'resolve'
        else:
            assert self.options.parser == 'earley'
        assert self.options.ambiguity in ('resolve', 'explicit', 'auto')

        self.grammar = load_grammar(grammar, source)
        tokens, self.rules, self.grammar_extra = self.grammar.compile(lexer=bool(lexer), start=self.options.start)
        self.ignore_tokens = self.grammar.extra['ignore']

        self.lexer_conf = LexerConf(tokens, self.ignore_tokens, self.options.postlex)

        if self.options.parser:
            self.parser = self._build_parser()
        elif lexer:
            self.lexer = self._build_lexer()

        if self.profiler: self.profiler.enter_section('outside_lark')

    __init__.__doc__ += "\nOPTIONS:" + LarkOptions.OPTIONS_DOC
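
    # Selection note (derived from __init__ above): with the default
    # lexer='auto', parser='lalr' resolves to the 'standard' lexer and
    # parser='earley' resolves to no lexer (scanless), so e.g.
    #
    #     Lark(my_grammar, parser='lalr')   # same as lexer='standard'
    #
    # `my_grammar` is a placeholder, not a name from this file.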

    def _build_lexer(self):
        return Lexer(self.lexer_conf.tokens, ignore=self.lexer_conf.ignore)

    def _build_parser(self):
        self.parser_class = get_frontend(self.options.parser, self.options.lexer)

        self.parse_tree_builder = ParseTreeBuilder(self.options.tree_class, self.options.propagate_positions)
        rules, callback = self.parse_tree_builder.create_tree_builder(self.rules, self.options.transformer)
        if self.profiler:
            for f in dir(callback):
                if not (f.startswith('__') and f.endswith('__')):
                    setattr(callback, f, self.profiler.make_wrapper('transformer', getattr(callback, f)))

        parser_conf = ParserConf(rules, callback, self.options.start)

        return self.parser_class(self.lexer_conf, parser_conf, options=self.options)

    def lex(self, text):
        if not hasattr(self, 'lexer'):
            self.lexer = self._build_lexer()
        stream = self.lexer.lex(text)
        if self.options.postlex:
            return self.options.postlex.process(stream)
        else:
            return stream
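
    # Lexing-only sketch (an assumption based on the constructor logic above,
    # not a documented mode): with parser=None and an explicit lexer, only the
    # lexer stage is built, and lex() can be used on its own:
    #
    #     l = Lark(my_grammar, parser=None, lexer='standard')
    #     tokens = list(l.lex('some input'))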

    def parse(self, text):
        return self.parser.parse(text)

        # if self.profiler:
        #     self.profiler.enter_section('lex')
        #     l = list(self.lex(text))
        #     self.profiler.enter_section('parse')
        #     try:
        #         return self.parser.parse(l)
        #     finally:
        #         self.profiler.enter_section('outside_lark')
        # else:
        #     l = list(self.lex(text))
        #     return self.parser.parse(l)
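

# End-to-end sketch (illustrative; this grammar is an assumption in the
# spirit of Lark's examples, not taken from this file):
#
#     from lark import Lark
#
#     parser = Lark('''start: WORD "," WORD "!"
#                      WORD: /\w+/
#                      %ignore " "
#                   ''')
#     tree = parser.parse('hello, world!')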