This repo contains the code used to mirror other repos, as well as the code that is being mirrored.


  1. "Parses and creates Grammar objects"
  2. import os.path
  3. from itertools import chain
  4. import re
  5. from ast import literal_eval
  6. from copy import deepcopy
  7. from .lexer import Token, UnexpectedInput
  8. from .parse_tree_builder import ParseTreeBuilder
  9. from .parser_frontends import LALR
  10. from .parsers.lalr_parser import UnexpectedToken
  11. from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
  12. from .tree import Tree as T, Transformer, InlineTransformer, Visitor
  13. __path__ = os.path.dirname(__file__)
  14. IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
  15. _RE_FLAGS = 'imslux'

_TOKEN_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}

# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_IMPORT': r'%import',
    'NUMBER': r'\d+',
}

RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'name',
              'literal',
              'range'],

    '?name': ['RULE', 'TOKEN'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL',
              'TOKEN _DOT NUMBER _COLON expansions _NL'],

    'statement': ['ignore', 'import'],
    'ignore': ['_IGNORE expansions _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TOKEN'],
    'import_args': ['_import_args'],
    '_import_args': ['name', '_import_args _DOT name'],
    'literal': ['REGEXP', 'STRING'],
}
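
The TOKENS and RULES tables above are the meta-grammar that Lark uses to parse grammar definitions themselves. As a rough, hypothetical illustration (not a grammar shipped in this repo), the following is the kind of text this meta-grammar accepts: lowercase names are rules, uppercase names are tokens, '->' introduces an alias, and '%ignore'/'%import' are statements.

# Illustrative only: a made-up grammar text the meta-grammar above should accept.
EXAMPLE_GRAMMAR = r'''
start: pair+
pair: NAME "=" NUMBER _NL -> assignment

NAME: /[a-z_]+/
NUMBER: /[0-9]+/
_NL: /\r?\n/
WS: / +/

%ignore WS
'''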

class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op
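
Following the comments in expr() above, here is a standalone sketch of the same rewrite expressed on grammar text rather than on trees. The function below is illustrative only and is not part of this module; the helper name mirrors the '__%s_%s_%d' naming scheme used by _add_recurse_rule.

# Illustrative only: textual version of the EBNF -> BNF rewrite done above.
def rewrite_repeat(sym, op, prefix='anon', i=0):
    "Return (replacement symbol, helper rule text) for 'sym+' or 'sym*'."
    helper = '__%s_%s_%d' % (prefix, 'plus' if op == '+' else 'star', i)
    rule = '%s : %s %s | %s' % (helper, helper, sym, sym)   # left-recursive repetition
    # '+' uses the helper directly; '*' additionally allows the empty match
    return (helper if op == '+' else helper + '?'), rule

assert rewrite_repeat('c', '+') == ('__anon_plus_0', '__anon_plus_0 : __anon_plus_0 c | c')
assert rewrite_repeat('c', '*') == ('__anon_star_0?', '__anon_star_0 : __anon_star_0 c | c')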

class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )

        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i==j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten

class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x
    def expansion(self, symbols):
        return [sym.value for sym in symbols], None
    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value

class CanonizeTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]

class ExtractAnonTokens(InlineTransformer):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__'+value.upper()) not in self.token_set:
                        token_name = '%s%d' % (value.upper(), self.i)
                        try:
                            # Make sure we don't have unicode in our token names
                            token_name.encode('ascii')
                        except UnicodeEncodeError:
                            token_name = 'ANONSTR_%d' % self.i
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                    self.i += 1
                token_name = '__' + token_name

        elif isinstance(p, PatternRE):
            if p in self.token_reverse:   # Kind of a weird placement
                token_name = self.token_reverse[p].name
            else:
                token_name = 'ANONRE_%d' % self.i
                self.i += 1
        else:
            assert False, p

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        return Token('TOKEN', token_name, -1)

def _rfind(s, choices):
    return max(s.rfind(c) for c in choices)

def _fix_escaping(s):
    w = ''
    i = iter(s)
    for n in i:
        w += n
        if n == '\\':
            n2 = next(i)
            if n2 == '\\':
                w += '\\\\'
            elif n2 not in 'unftr':
                w += '\\'
            w += n2
    w = w.replace('\\"', '"').replace("'", "\\'")

    to_eval = "u'''%s'''" % w
    try:
        s = literal_eval(to_eval)
    except SyntaxError as e:
        raise ValueError(s, e)

    return s

def _literal_to_pattern(literal):
    v = literal.value
    flag_start = _rfind(v, '/"')+1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1]

    s = _fix_escaping(x)

    if v[0] == '"':
        s = s.replace('\\\\', '\\')

    return { 'STRING': PatternStr,
             'REGEXP': PatternRE }[literal.type](s, flags)

class PrepareLiterals(InlineTransformer):
    def literal(self, literal):
        return T('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1
        regexp = '[%s-%s]' % (start, end)
        return T('pattern', [PatternRE(regexp)])

class SplitLiterals(InlineTransformer):
    def pattern(self, p):
        if isinstance(p, PatternStr) and len(p.value)>1:
            return T('expansion', [T('pattern', [PatternStr(ch, flags=p.flags)]) for ch in p.value])
        return T('pattern', [p])

class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags)

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        if len({i.flags for i in exps}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)), exps[0].flags)

    def expr(self, args):
        inner, op = args
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)
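
The format strings in expansion, expansions and expr above compose token definitions into one regexp. A quick standalone check of that composition, using Python's re module directly on made-up patterns:

import re

# Illustrative only: how alternation and repetition compose in the class above.
alt = '(?:%s)' % '|'.join(['foo', 'bar'])   # expansions -> '(?:foo|bar)'
rep = '(?:%s)%s' % (alt, '+')               # expr with OP '+' -> '(?:(?:foo|bar))+'

assert re.fullmatch(rep, 'foobarfoo')
assert not re.fullmatch(rep, 'foox')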

def _interleave(l, item):
    for e in l:
        yield e
        if isinstance(e, T):
            if e.data in ('literal', 'range'):
                yield item
        elif is_terminal(e):
            yield item

def _choice_of_rules(rules):
    return T('expansions', [T('expansion', [Token('RULE', name)]) for name in rules])

def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v

class Grammar:
    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def _prepare_scanless_grammar(self, start):
        # XXX Pretty hacky! There should be a better way to write this method..

        rule_defs = deepcopy(self.rule_defs)
        term_defs = self.token_defs

        # Implement the "%ignore" feature without a lexer..
        terms_to_ignore = {name:'__'+name for name in self.ignore}
        if terms_to_ignore:
            assert set(terms_to_ignore) <= {name for name, _t in term_defs}
            term_defs = [(terms_to_ignore.get(name,name),t) for name,t in term_defs]

            expr = Token('RULE', '__ignore')
            for r, tree, _o in rule_defs:
                for exp in tree.find_data('expansion'):
                    exp.children = list(_interleave(exp.children, expr))
                    if r == start:
                        exp.children = [expr] + exp.children
                for exp in tree.find_data('expr'):
                    exp.children[0] = T('expansion', list(_interleave(exp.children[:1], expr)))

            _ignore_tree = T('expr', [_choice_of_rules(terms_to_ignore.values()), Token('OP', '?')])
            rule_defs.append(('__ignore', _ignore_tree, None))

        # Convert all tokens to rules
        new_terminal_names = {name: '__token_'+name for name, _t in term_defs}

        for name, tree, options in rule_defs:
            for exp in chain( tree.find_data('expansion'), tree.find_data('expr') ):
                for i, sym in enumerate(exp.children):
                    if sym in new_terminal_names:
                        exp.children[i] = Token(sym.type, new_terminal_names[sym])

        for name, (tree, priority) in term_defs:   # TODO transfer priority to rule?
            if name.startswith('_'):
                options = RuleOptions(filter_out=True, priority=-priority)
            else:
                options = RuleOptions(keep_all_tokens=True, create_token=name, priority=-priority)

            name = new_terminal_names[name]
            inner_name = name + '_inner'
            rule_defs.append((name, _choice_of_rules([inner_name]), None))
            rule_defs.append((inner_name, tree, options))

        return [], rule_defs

    def compile(self, lexer=False, start=None):
        if not lexer:
            token_defs, rule_defs = self._prepare_scanless_grammar(start)
        else:
            token_defs = list(self.token_defs)
            rule_defs = self.rule_defs

        # =================
        #  Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs]

        # =================
        #  Compile Rules
        # =================
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()

        transformer = PrepareLiterals()
        if not lexer:
            transformer *= SplitLiterals()
        transformer *= ExtractAnonTokens(tokens)   # Adds to tokens

        rules = {}
        for name, rule_tree, options in rule_defs:
            assert name not in rules, name
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules[name] = ebnf_to_bnf.transform(tree), options

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        for tree, _o in rules.values():
            simplify_rule.visit(tree)

        rule_tree_to_text = RuleTreeToText()
        rules = {origin: (rule_tree_to_text.transform(tree), options) for origin, (tree, options) in rules.items()}

        return tokens, rules, self.ignore

class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False, priority=None):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.create_token = create_token  # used for scanless postprocessing
        self.priority = priority

        self.filter_out = filter_out  # remove this rule from the tree
                                      # used for "token"-rules in scanless

    @classmethod
    def from_rule(cls, name, *x):
        if len(x) > 1:
            priority, expansions = x
            priority = int(priority)
        else:
            expansions ,= x
            priority = None

        keep_all_tokens = name.startswith('!')
        name = name.lstrip('!')
        expand1 = name.startswith('?')
        name = name.lstrip('?')

        return name, expansions, cls(keep_all_tokens, expand1, priority=priority)
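
A standalone sketch of the name-prefix handling in from_rule (the rule name below is hypothetical): a leading '!' sets keep_all_tokens, a leading '?' sets expand1, and both are stripped from the stored name.

# Illustrative only: mirrors the prefix handling in RuleOptions.from_rule.
name = '!?value'
keep_all_tokens = name.startswith('!')
name = name.lstrip('!')
expand1 = name.startswith('?')
name = name.lstrip('?')
assert (name, keep_all_tokens, expand1) == ('value', True, True)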

_imported_grammars = {}
def import_grammar(grammar_path):
    if grammar_path not in _imported_grammars:
        for import_path in IMPORT_PATHS:
            with open(os.path.join(import_path, grammar_path)) as f:
                text = f.read()
            grammar = load_grammar(text, grammar_path)
            _imported_grammars[grammar_path] = grammar

    return _imported_grammars[grammar_path]

def resolve_token_references(token_defs):
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k:t for k, (t,_p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                for i, item in enumerate(exp.children):
                    if isinstance(item, Token):
                        if item.type == 'RULE':
                            raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name))
                        if item.type == 'TOKEN':
                            exp.children[i] = token_dict[item]
                            changed = True

        if not changed:
            break

class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()]

        rules = [RuleOptions.from_rule(name, x) for name, x in RULES.items()]
        d = {r: ([(x.split(), None) for x in xs], o) for r, xs, o in rules}
        rules, callback = ParseTreeBuilder(d, T).apply()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])
        parser_conf = ParserConf(rules, callback, 'start')

        self.parser = LALR(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."

        try:
            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name))
        except UnexpectedToken as e:
            if e.expected == ['_COLON']:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif e.expected == ['RULE']:
                raise GrammarError("Missing alias at line %s column %s" % (e.line, e.column))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise

        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data=='token']
        rule_defs = [c.children for c in tree.children if c.data=='rule']
        statements = [c.children for c in tree.children if c.data=='statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        token_defs = [td if len(td)==3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t ,= stmt.children
                ignore.append(t)
            elif stmt.data == 'import':
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children)>1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.g'
                g = import_grammar(grammar_path)
                token_options = dict(g.token_defs)[dotted_path[-1]]
                assert isinstance(token_options, tuple) and len(token_options)==2
                token_defs.append([name.value, token_options])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TOKEN as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            if t.data=='expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data=='expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if isinstance(item, Token) and item.type == 'TOKEN':
                        ignore_names.append(item.value)
                        continue

            name = '__IGNORE_%d'% len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        # Resolve token references
        resolve_token_references(token_defs)

        rules = [RuleOptions.from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                              for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)

load_grammar = GrammarLoader().load_grammar
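
Finally, a minimal usage sketch of the entry point bound on the last line, assuming this file is importable as lark.load_grammar and that this historical API behaves as read above; the grammar text is a made-up example.

# Illustrative only: load and compile a tiny grammar with the standard lexer.
from lark.load_grammar import load_grammar

grammar_text = r'''
start: NAME "=" NUMBER _NL
NAME: /[a-z]+/
NUMBER: /[0-9]+/
_NL: /\r?\n/
%ignore " "
'''

g = load_grammar(grammar_text, '<example>')            # returns a Grammar object
tokens, rules, ignore = g.compile(lexer=True, start='start')
print([t.name for t in tokens])                        # token definitions, including anonymous ones
print(sorted(rules))                                   # BNF rule names after EBNF expansion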