This repo contains code to mirror other repos. It also contains the code that is getting mirrored.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

268 lines
9.8 KiB

  1. from warnings import warn
  2. from .utils import STRING_TYPE, logger, NO_VALUE
  3. ###{standalone
  4. class LarkError(Exception):
  5. pass
  6. class ConfigurationError(LarkError, ValueError):
  7. pass
  8. def assert_config(value, options, msg='Got %r, expected one of %s'):
  9. if value not in options:
  10. raise ConfigurationError(msg % (value, options))
  11. class GrammarError(LarkError):
  12. pass
  13. class ParseError(LarkError):
  14. pass
  15. class LexError(LarkError):
  16. pass
  17. class UnexpectedInput(LarkError):
  18. """UnexpectedInput Error.
  19. Used as a base class for the following exceptions:
  20. - ``UnexpectedCharacters``: The lexer encountered an unexpected string
  21. - ``UnexpectedToken``: The parser received an unexpected token
  22. - ``UnexpectedEOF``: The parser expected a token, but the input ended
  23. After catching one of these exceptions, you may call the following helper methods to create a nicer error message.
  24. """
  25. pos_in_stream = None
  26. _terminals_by_name = None
  27. def get_context(self, text, span=40):
  28. """Returns a pretty string pinpointing the error in the text,
  29. with span amount of context characters around it.
  30. Note:
  31. The parser doesn't hold a copy of the text it has to parse,
  32. so you have to provide it again
  33. """
  34. assert self.pos_in_stream is not None, self
  35. pos = self.pos_in_stream
  36. start = max(pos - span, 0)
  37. end = pos + span
  38. if not isinstance(text, bytes):
  39. before = text[start:pos].rsplit('\n', 1)[-1]
  40. after = text[pos:end].split('\n', 1)[0]
  41. return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n'
  42. else:
  43. before = text[start:pos].rsplit(b'\n', 1)[-1]
  44. after = text[pos:end].split(b'\n', 1)[0]
  45. return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace")
  46. def match_examples(self, parse_fn, examples, token_type_match_fallback=False, use_accepts=False):
  47. """Allows you to detect what's wrong in the input text by matching
  48. against example errors.
  49. Given a parser instance and a dictionary mapping some label with
  50. some malformed syntax examples, it'll return the label for the
  51. example that bests matches the current error. The function will
  52. iterate the dictionary until it finds a matching error, and
  53. return the corresponding value.
  54. For an example usage, see `examples/error_reporting_lalr.py`
  55. Parameters:
  56. parse_fn: parse function (usually ``lark_instance.parse``)
  57. examples: dictionary of ``{'example_string': value}``.
  58. use_accepts: Recommended to call this with ``use_accepts=True``.
  59. The default is ``False`` for backwards compatibility.
  60. """
  61. assert self.state is not None, "Not supported for this exception"
  62. if isinstance(examples, dict):
  63. examples = examples.items()
  64. candidate = (None, False)
  65. for i, (label, example) in enumerate(examples):
  66. assert not isinstance(example, STRING_TYPE)
  67. for j, malformed in enumerate(example):
  68. try:
  69. parse_fn(malformed)
  70. except UnexpectedInput as ut:
  71. if ut.state == self.state:
  72. if use_accepts and hasattr(self, 'accepts') and ut.accepts != self.accepts:
  73. logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" %
  74. (self.state, self.accepts, ut.accepts, i, j))
  75. continue
  76. try:
  77. if ut.token == self.token: # Try exact match first
  78. logger.debug("Exact Match at example [%s][%s]" % (i, j))
  79. return label
  80. if token_type_match_fallback:
  81. # Fallback to token types match
  82. if (ut.token.type == self.token.type) and not candidate[-1]:
  83. logger.debug("Token Type Fallback at example [%s][%s]" % (i, j))
  84. candidate = label, True
  85. except AttributeError:
  86. pass
  87. if candidate[0] is None:
  88. logger.debug("Same State match at example [%s][%s]" % (i, j))
  89. candidate = label, False
  90. return candidate[0]
  91. def _format_expected(self, expected):
  92. if self._terminals_by_name:
  93. d = self._terminals_by_name
  94. expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected]
  95. return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected)
  96. class UnexpectedEOF(ParseError, UnexpectedInput):
  97. """An exception that is raised by the parser, when the input ends while it still expects a token.
  98. """
  99. def __init__(self, expected, state=None, terminals_by_name=None):
  100. super(UnexpectedEOF, self).__init__()
  101. self.expected = expected
  102. self.state = state
  103. from .lexer import Token
  104. self.token = Token("<EOF>", "") # , line=-1, column=-1, pos_in_stream=-1)
  105. self.pos_in_stream = -1
  106. self.line = -1
  107. self.column = -1
  108. self._terminals_by_name = terminals_by_name
  109. def __str__(self):
  110. message = "Unexpected end-of-input. "
  111. message += self._format_expected(self.expected)
  112. return message
  113. class UnexpectedCharacters(LexError, UnexpectedInput):
  114. """An exception that is raised by the lexer, when it cannot match the next
  115. string of characters to any of its terminals.
  116. """
  117. def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None,
  118. terminals_by_name=None, considered_rules=None):
  119. super(UnexpectedCharacters, self).__init__()
  120. # TODO considered_tokens and allowed can be figured out using state
  121. self.line = line
  122. self.column = column
  123. self.pos_in_stream = lex_pos
  124. self.state = state
  125. self._terminals_by_name = terminals_by_name
  126. self.allowed = allowed
  127. self.considered_tokens = considered_tokens
  128. self.considered_rules = considered_rules
  129. self.token_history = token_history
  130. if isinstance(seq, bytes):
  131. self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace")
  132. else:
  133. self.char = seq[lex_pos]
  134. self._context = self.get_context(seq)
  135. def __str__(self):
  136. message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column)
  137. message += '\n\n' + self._context
  138. if self.allowed:
  139. message += self._format_expected(self.allowed)
  140. if self.token_history:
  141. message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history)
  142. return message
  143. class UnexpectedToken(ParseError, UnexpectedInput):
  144. """An exception that is raised by the parser, when the token it received
  145. doesn't match any valid step forward.
  146. Parameters:
  147. token: The mismatched token
  148. expected: The set of expected tokens
  149. considered_rules: Which rules were considered, to deduce the expected tokens
  150. state: A value representing the parser state. Do not rely on its value or type.
  151. interactive_parser: An instance of ``InteractiveParser``, that is initialized to the point of failture,
  152. and can be used for debugging and error handling.
  153. Note: These parameters are available as attributes of the instance.
  154. """
  155. def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None):
  156. super(UnexpectedToken, self).__init__()
  157. # TODO considered_rules and expected can be figured out using state
  158. self.line = getattr(token, 'line', '?')
  159. self.column = getattr(token, 'column', '?')
  160. self.pos_in_stream = getattr(token, 'pos_in_stream', None)
  161. self.state = state
  162. self.token = token
  163. self.expected = expected # XXX deprecate? `accepts` is better
  164. self._accepts = NO_VALUE
  165. self.considered_rules = considered_rules
  166. self.interactive_parser = interactive_parser
  167. self._terminals_by_name = terminals_by_name
  168. self.token_history = token_history
  169. @property
  170. def accepts(self):
  171. if self._accepts is NO_VALUE:
  172. self._accepts = self.interactive_parser and self.interactive_parser.accepts()
  173. return self._accepts
  174. def __str__(self):
  175. message = ("Unexpected token %r at line %s, column %s.\n%s"
  176. % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected)))
  177. if self.token_history:
  178. message += "Previous tokens: %r\n" % self.token_history
  179. return message
  180. @property
  181. def puppet(self):
  182. warn("UnexpectedToken.puppet attribute has been renamed to interactive_parser", DeprecationWarning)
  183. return self.interactive_parser
  184. class VisitError(LarkError):
  185. """VisitError is raised when visitors are interrupted by an exception
  186. It provides the following attributes for inspection:
  187. Parameters:
  188. rule: the name of the visit rule that failed
  189. obj: the tree-node or token that was being processed
  190. orig_exc: the exception that cause it to fail
  191. Note: These parameters are available as attributes
  192. """
  193. def __init__(self, rule, obj, orig_exc):
  194. message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc)
  195. super(VisitError, self).__init__(message)
  196. self.rule = rule
  197. self.obj = obj
  198. self.orig_exc = orig_exc
  199. ###}