  1. "Parses and creates Grammar objects"
  2. import os.path
  3. import sys
  4. from itertools import chain
  5. import re
  6. from ast import literal_eval
  7. from copy import deepcopy
  8. from .lexer import Token
  9. from .parse_tree_builder import ParseTreeBuilder
  10. from .parser_frontends import LALR
  11. from .common import LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
  12. from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
  13. from .utils import classify, suppress
  14. from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken
  15. from .tree import Tree, SlottedTree as ST
  16. from .visitors import Transformer, Visitor, v_args
  17. inline_args = v_args(inline=True)
  18. __path__ = os.path.dirname(__file__)
  19. IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
  20. _RE_FLAGS = 'imslux'
  21. def is_terminal(sym):
  22. return sym.isupper()
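
# Names used by PrepareAnonTerminals to auto-name anonymous string terminals;
# e.g. an inline "+" in a rule becomes the terminal PLUS.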
_TERMINAL_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}

# Grammar Parser
TERMINALS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_COMMA': ',',
    '_OR': r'\|',
    '_DOT': r'\.',
    'TILDE': '~',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TERMINAL': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_DECLARE': r'%declare',
    '_IMPORT': r'%import',
    '_FROM': r'%from',
    'NUMBER': r'\d+',
}

RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP',
              'atom TILDE NUMBER',
              'atom TILDE NUMBER _DOT _DOT NUMBER',
              ],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'value'],

    'value': ['terminal',
              'nonterminal',
              'literal',
              'range'],

    'terminal': ['TERMINAL'],
    'nonterminal': ['RULE'],

    '?name': ['RULE', 'TERMINAL'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TERMINAL _COLON expansions _NL',
              'TERMINAL _DOT NUMBER _COLON expansions _NL'],

    'statement': ['ignore', 'import', 'rel_import', 'from', 'rel_from', 'declare'],
    'ignore': ['_IGNORE expansions _NL'],
    'declare': ['_DECLARE _declare_args _NL'],
    'from': ['_FROM import_args _IMPORT list_name _NL'],
    'rel_from': ['_FROM _DOT import_args _IMPORT list_name _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TERMINAL _NL'],
    'rel_import': ['_IMPORT _DOT import_args _NL',
                   '_IMPORT _DOT import_args _TO TERMINAL _NL'],
    'import_args': ['_import_args'],
    'list_name': ['_list_name'],
    '_import_args': ['name', '_import_args _DOT name'],
    '_list_name': ['name', '_list_name _COMMA name'],
    '_declare_args': ['name', '_declare_args name'],
    'literal': ['REGEXP', 'STRING'],
}
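
# For reference, a small grammar in the syntax that TERMINALS/RULES describe:
#
#   start: value ("," value)*       // rule definition
#   ?value: NUMBER -> number        // alias
#         | ESCAPED_STRING
#   NUMBER: /[0-9]+/                // token definition
#   %import common.ESCAPED_STRING   // statement
#   %ignore /[ \t]+/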

@inline_args
class EBNF_to_BNF(Transformer):
    def __init__(self):
        self.new_rules = []
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = NonTerminal(Token('RULE', new_name, -1))
        tree = ST('expansions', [ST('expansion', [expr]), ST('expansion', [t, expr])])
        self.new_rules.append((new_name, tree, self.rule_options))
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op, *args):
        if op.value == '?':
            return ST('expansions', [rule, ST('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            # -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            # -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return ST('expansions', [new_name, ST('expansion', [])])
        elif op.value == '~':
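            # a : b ~ 2..3
            # -->
            # a : b b | b b b    (each allowed repetition count becomes one alternative)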
            if len(args) == 1:
                mn = mx = int(args[0])
            else:
                mn, mx = map(int, args)
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx+1)])
        assert False, op

class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, Tree) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        # -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        # -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )

        self._flatten(tree)

        for i, child in enumerate(tree.children):
            if isinstance(child, Tree) and child.data == 'expansions':
                tree.data = 'expansions'
                tree.children = [self.visit(ST('expansion', [option if i==j else other
                                                             for j, other in enumerate(tree.children)]))
                                 for option in set(child.children)]
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(ST('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    def expansions(self, tree):
        self._flatten(tree)
        tree.children = list(set(tree.children))


class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x
    def expansion(self, symbols):
        return symbols, None
    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)  # Double alias not allowed
        return expansion, alias.value
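
# '[x]' is effectively shorthand for '(x)?'; CanonizeTree rewrites 'maybe'
# nodes into an equivalent 'expr' node so later passes see one optional form.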
@inline_args
class CanonizeTree(Transformer):
    def maybe(self, expr):
        return ST('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]

class PrepareAnonTerminals(Transformer):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    @inline_args
    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        token_name = None

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name
                try:
                    token_name = _TERMINAL_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and value.upper() not in self.token_set:
                        with suppress(UnicodeEncodeError):
                            value.upper().encode('ascii')  # Make sure we don't have unicode in our token names
                            token_name = value.upper()
        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # Kind of a weird placement
                token_name = self.token_reverse[p].name
        else:
            assert False, p

        if token_name is None:
            token_name = '__ANON_%d' % self.i
            self.i += 1

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        return Terminal(Token('TERMINAL', token_name, -1), filter_out=isinstance(p, PatternStr))

def _rfind(s, choices):
    return max(s.rfind(c) for c in choices)


def _fix_escaping(s):
    w = ''
    i = iter(s)
    for n in i:
        w += n
        if n == '\\':
            n2 = next(i)
            if n2 == '\\':
                w += '\\\\'
            elif n2 not in 'unftr':
                w += '\\'
            w += n2
    w = w.replace('\\"', '"').replace("'", "\\'")

    to_eval = "u'''%s'''" % w
    try:
        s = literal_eval(to_eval)
    except SyntaxError as e:
        raise ValueError(s, e)

    return s
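
# Convert a STRING or REGEXP token into a Pattern object; for example:
#   '"foo"i' -> PatternStr('foo', flags='i')
#   '/a+/im' -> PatternRE('a+', flags='im')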
def _literal_to_pattern(literal):
    v = literal.value
    flag_start = _rfind(v, '/"')+1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1]

    s = _fix_escaping(x)

    if literal.type == 'STRING':
        s = s.replace('\\\\', '\\')

    return { 'STRING': PatternStr,
             'REGEXP': PatternRE }[literal.type](s, flags)


@inline_args
class PrepareLiterals(Transformer):
    def literal(self, literal):
        return ST('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1, (start, end, len(start), len(end))
        regexp = '[%s-%s]' % (start, end)
        return ST('pattern', [PatternRE(regexp)])
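
# Collapse a terminal's parse-tree into a single Pattern: sequences are
# concatenated and alternatives joined with '|', so e.g.  A: "a" | "b"
# ends up as the regexp (?:a|b).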
class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        assert items
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags if items else ())

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        if len({i.flags for i in exps}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)), exps[0].flags)

    def expr(self, args):
        inner, op = args[:2]
        if op == '~':
            if len(args) == 3:
                op = "{%d}" % int(args[2])
            else:
                mn, mx = map(int, args[2:])
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx))
                op = "{%d,%d}" % (mn, mx)
        else:
            assert len(args) == 2
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)

    def alias(self, t):
        raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)")

    def value(self, v):
        return v[0]

class PrepareSymbols(Transformer):
    def value(self, v):
        v ,= v
        if isinstance(v, Tree):
            return v
        elif v.type == 'RULE':
            return NonTerminal(v.value)
        elif v.type == 'TERMINAL':
            return Terminal(v.value, filter_out=v.startswith('_'))
        assert False

def _choice_of_rules(rules):
    return ST('expansions', [ST('expansion', [Token('RULE', name)]) for name in rules])

class Grammar:
    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def compile(self):
        token_defs = list(self.token_defs)
        rule_defs = self.rule_defs

        # =================
        #  Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        for name, (token_tree, priority) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            expansions = list(token_tree.find_data('expansion'))
            if len(expansions) == 1 and not expansions[0].children:
                raise GrammarError("Terminals cannot be empty (%s)" % name)

        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs if token_tree]

        # =================
        #  Compile Rules
        # =================

        # 1. Pre-process terminals
        transformer = PrepareLiterals() * PrepareSymbols() * PrepareAnonTerminals(tokens)   # Adds to tokens

        # 2. Convert EBNF to BNF (and apply step 1)
        ebnf_to_bnf = EBNF_to_BNF()
        rules = []
        for name, rule_tree, options in rule_defs:
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules.append((name, ebnf_to_bnf.transform(tree), options))
        rules += ebnf_to_bnf.new_rules

        assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision"

        # 3. Compile tree to Rule objects
        rule_tree_to_text = RuleTreeToText()
        simplify_rule = SimplifyRule_Visitor()

        compiled_rules = []
        for name, tree, options in rules:
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)

            for expansion, alias in expansions:
                if alias and name.startswith('_'):
                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (name, alias))

                assert all(isinstance(x, Symbol) for x in expansion), expansion

                rule = Rule(NonTerminal(name), expansion, alias, options)
                compiled_rules.append(rule)

        return tokens, compiled_rules, self.ignore


_imported_grammars = {}
def import_grammar(grammar_path, base_path=None):
    if grammar_path not in _imported_grammars:
        if base_path is None:
            import_paths = IMPORT_PATHS
        else:
            import_paths = [base_path] + IMPORT_PATHS
        found = False
        for import_path in import_paths:
            try:
                with open(os.path.join(import_path, grammar_path)) as f:
                    text = f.read()
                grammar = load_grammar(text, grammar_path)
                _imported_grammars[grammar_path] = grammar
                found = True
                break
            except FileNotFoundError:
                pass
        if not found:
            raise FileNotFoundError(grammar_path)

    return _imported_grammars[grammar_path]
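
# Inline terminal-to-terminal references; e.g. given
#   A: "a"
#   B: A "b"
# the reference to A inside B's tree is replaced by A's own definition tree.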
def resolve_token_references(token_defs):
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k:t for k, (t,_p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            if token_tree is None:  # Terminal added through %declare
                continue
            for exp in token_tree.find_data('value'):
                item ,= exp.children
                if isinstance(item, Token):
                    if item.type == 'RULE':
                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
                    if item.type == 'TERMINAL':
                        exp.children[0] = token_dict[item]
                        changed = True
        if not changed:
            break


def options_from_rule(name, *x):
    if len(x) > 1:
        priority, expansions = x
        priority = int(priority)
    else:
        expansions ,= x
        priority = None

    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority)
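# e.g. options_from_rule('!?rule', tree) strips the prefix modifiers and returns
#   ('rule', tree, RuleOptions(keep_all_tokens=True, expand1=True, priority=None))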

def symbols_from_strcase(expansion):
    return [Terminal(x, filter_out=x.startswith('_')) if is_terminal(x) else NonTerminal(x) for x in expansion]

@inline_args
class PrepareGrammar(Transformer):
    def terminal(self, name):
        return name
    def nonterminal(self, name):
        return name


class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TERMINALS.items()]

        rules = [options_from_rule(name, x) for name, x in RULES.items()]
        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), None, o) for r, xs, o in rules for x in xs]
        callback = ParseTreeBuilder(rules, ST).create_callback()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])

        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, grammar_name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."

        try:
            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedCharacters as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" %
                               (e.context, e.line, e.column, grammar_name))
        except UnexpectedToken as e:
            context = e.get_context(grammar_text)
            error = e.match_examples(self.parser.parse, {
                'Unclosed parenthesis': ['a: (\n'],
                'Unmatched closing parenthesis': ['a: )\n', 'a: [)\n', 'a: (]\n'],
                'Expecting rule or token definition (missing colon)': ['a\n', 'a->\n', 'A->\n', 'a A\n'],
                'Alias expects lowercase name': ['a: -> "a"\n'],
                'Unexpected colon': ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n'],
                'Misplaced operator': ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n'],
                'Expecting option ("|") or a new rule or token definition': ['a:a\n()\n'],
                '%import expects a name': ['%import "a"\n'],
                '%ignore expects a value': ['%ignore %import\n'],
            })
            if error:
                raise GrammarError("%s at line %s column %s\n\n%s" % (error, e.line, e.column, context))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s\n\n%s" % (e.line, e.column, context))
            raise

        tree = PrepareGrammar().transform(tree)

        # Extract grammar items
        defs = classify(tree.children, lambda c: c.data, lambda c: c.children)
        token_defs = defs.pop('token', [])
        rule_defs = defs.pop('rule', [])
        statements = defs.pop('statement', [])
        assert not defs

        token_defs = [td if len(td)==3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        declared = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t ,= stmt.children
                ignore.append(t)
            elif stmt.data in ['import', 'rel_import']:
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children)>1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.lark'
                if stmt.data == 'import':
                    g = import_grammar(grammar_path)
                else:
                    if grammar_name == '<string>':
                        base_file = os.path.abspath(sys.modules['__main__'].__file__)
                    else:
                        base_file = grammar_name
                    base_path = os.path.split(base_file)[0]
                    g = import_grammar(grammar_path, base_path=base_path)
                token_options = dict(g.token_defs)[dotted_path[-1]]
                assert isinstance(token_options, tuple) and len(token_options)==2
                token_defs.append([name.value, token_options])
            elif stmt.data in ['from', 'rel_from']:
                dotted_path = stmt.children[0].children
                names = stmt.children[1].children
                grammar_path = os.path.join(*dotted_path) + '.lark'
                if stmt.data == 'from':
                    g = import_grammar(grammar_path)
                else:
                    if grammar_name == '<string>':
                        base_file = os.path.abspath(sys.modules['__main__'].__file__)
                    else:
                        base_file = grammar_name
                    base_path = os.path.split(base_file)[0]
                    g = import_grammar(grammar_path, base_path=base_path)
                for name in names:
                    token_options = dict(g.token_defs)[name]
                    assert isinstance(token_options, tuple) and len(token_options) == 2
                    token_defs.append([name.value, token_options])
            elif stmt.data == 'declare':
                for t in stmt.children:
                    token_defs.append([t.value, (None, None)])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TERMINAL as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            if t.data=='expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data=='expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if item.data == 'value':
                        item ,= item.children
                        if isinstance(item, Token) and item.type == 'TERMINAL':
                            ignore_names.append(item.value)
                            continue

            name = '__IGNORE_%d' % len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)
        if not set(ignore_names) <= token_names:
            raise GrammarError("Tokens %s were marked to ignore but were not defined!" % (set(ignore_names) - token_names))
        # Resolve token references
        resolve_token_references(token_defs)

        rules = [options_from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)


load_grammar = GrammarLoader().load_grammar
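
# Typical usage (a minimal sketch; in practice the Lark() constructor drives this):
#   grammar = load_grammar('start: "a"+\n', '<string>')
#   tokens, rules, ignore = grammar.compile()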