This repo contains the code used to mirror other repos, as well as the code being mirrored.

  1. "Parses and creates Grammar objects"
  2. import os.path
  3. from itertools import chain
  4. import re
  5. from ast import literal_eval
  6. from copy import deepcopy
  7. from .lexer import Token, UnexpectedInput
  8. from .parse_tree_builder import ParseTreeBuilder
  9. from .parser_frontends import LALR
  10. from .parsers.lalr_parser import UnexpectedToken
  11. from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
  12. from .grammar import RuleOptions, Rule
  13. from .tree import Tree as T, Transformer, InlineTransformer, Visitor
  14. __path__ = os.path.dirname(__file__)
  15. IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
  16. _RE_FLAGS = 'imslux'

_TOKEN_NAMES = {
    '.': 'DOT',
    ',': 'COMMA',
    ':': 'COLON',
    ';': 'SEMICOLON',
    '+': 'PLUS',
    '-': 'MINUS',
    '*': 'STAR',
    '/': 'SLASH',
    '\\': 'BACKSLASH',
    '|': 'VBAR',
    '?': 'QMARK',
    '!': 'BANG',
    '@': 'AT',
    '#': 'HASH',
    '$': 'DOLLAR',
    '%': 'PERCENT',
    '^': 'CIRCUMFLEX',
    '&': 'AMPERSAND',
    '_': 'UNDERSCORE',
    '<': 'LESSTHAN',
    '>': 'MORETHAN',
    '=': 'EQUAL',
    '"': 'DBLQUOTE',
    '\'': 'QUOTE',
    '`': 'BACKQUOTE',
    '~': 'TILDE',
    '(': 'LPAR',
    ')': 'RPAR',
    '{': 'LBRACE',
    '}': 'RBRACE',
    '[': 'LSQB',
    ']': 'RSQB',
    '\n': 'NEWLINE',
    '\r\n': 'CRLF',
    '\t': 'TAB',
    ' ': 'SPACE',
}

# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/[%s]*' % _RE_FLAGS,
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_IMPORT': r'%import',
    'NUMBER': r'\d+',
}

RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'name',
              'literal',
              'range'],

    '?name': ['RULE', 'TOKEN'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL',
              'TOKEN _DOT NUMBER _COLON expansions _NL'],

    'statement': ['ignore', 'import'],
    'ignore': ['_IGNORE expansions _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TOKEN'],
    'import_args': ['_import_args'],
    '_import_args': ['name', '_import_args _DOT name'],

    'literal': ['REGEXP', 'STRING'],
}
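
# Illustrative only (hypothetical, not from this file): a small grammar in the
# syntax that TOKENS/RULES above define, exercising a rule priority ("expr.2"),
# an alias ("-> add"), string/regexp literals, and a %ignore statement:
#
#     expr.2 : atom "+" atom -> add
#            | atom
#     atom   : NUMBER
#     NUMBER : /[0-9]+/
#     %ignore " "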

class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op

class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #   -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )

        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i == j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten

class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x

    def expansion(self, symbols):
        return [sym.value for sym in symbols], None

    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value


class CanonizeTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]

class ExtractAnonTokens(InlineTransformer):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__' + value.upper()) not in self.token_set:
                        token_name = '%s%d' % (value.upper(), self.i)
                        try:
                            # Make sure we don't have unicode in our token names
                            token_name.encode('ascii')
                        except UnicodeEncodeError:
                            token_name = 'ANONSTR_%d' % self.i
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                    self.i += 1
                token_name = '__' + token_name

        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # Kind of a weird placement
                token_name = self.token_reverse[p].name
            else:
                token_name = 'ANONRE_%d' % self.i
                self.i += 1
        else:
            assert False, p

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        return Token('TOKEN', token_name, -1)

def _rfind(s, choices):
    return max(s.rfind(c) for c in choices)


def _fix_escaping(s):
    w = ''
    i = iter(s)
    for n in i:
        w += n
        if n == '\\':
            n2 = next(i)
            if n2 == '\\':
                w += '\\\\'
            elif n2 not in 'unftr':
                w += '\\'
            w += n2
    w = w.replace('\\"', '"').replace("'", "\\'")

    to_eval = "u'''%s'''" % w
    try:
        s = literal_eval(to_eval)
    except SyntaxError as e:
        raise ValueError(s, e)

    return s
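
# Hedged examples of the behavior above (hand-traced, worth double-checking):
#   _fix_escaping(r'\n') -> '\n'    # escapes Python knows ('unftr') survive literal_eval
#   _fix_escaping(r'\d') -> '\\d'   # unknown escapes keep their backslash for the re engine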

def _literal_to_pattern(literal):
    v = literal.value
    flag_start = _rfind(v, '/"') + 1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1]

    s = _fix_escaping(x)

    if v[0] == '"':
        s = s.replace('\\\\', '\\')

    return {'STRING': PatternStr,
            'REGEXP': PatternRE}[literal.type](s, flags)

class PrepareLiterals(InlineTransformer):
    def literal(self, literal):
        return T('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1
        regexp = '[%s-%s]' % (start, end)
        return T('pattern', [PatternRE(regexp)])


class SplitLiterals(InlineTransformer):
    def pattern(self, p):
        if isinstance(p, PatternStr) and len(p.value) > 1:
            return T('expansion', [T('pattern', [PatternStr(ch, flags=p.flags)]) for ch in p.value])
        return T('pattern', [p])


class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p, = ps
        return p

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags)

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        if len({i.flags for i in exps}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)), exps[0].flags)

    def expr(self, args):
        inner, op = args
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)

def _interleave(l, item):
    for e in l:
        yield e
        if isinstance(e, T):
            if e.data in ('literal', 'range'):
                yield item
        elif is_terminal(e):
            yield item


def _choice_of_rules(rules):
    return T('expansions', [T('expansion', [Token('RULE', name)]) for name in rules])


def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v

class Grammar:
    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def _prepare_scanless_grammar(self, start):
        # XXX Pretty hacky! There should be a better way to write this method..

        rule_defs = deepcopy(self.rule_defs)
        term_defs = self.token_defs

        # Implement the "%ignore" feature without a lexer..
        terms_to_ignore = {name: '__' + name for name in self.ignore}
        if terms_to_ignore:
            assert set(terms_to_ignore) <= {name for name, _t in term_defs}
            term_defs = [(terms_to_ignore.get(name, name), t) for name, t in term_defs]

            expr = Token('RULE', '__ignore')
            for r, tree, _o in rule_defs:
                for exp in tree.find_data('expansion'):
                    exp.children = list(_interleave(exp.children, expr))
                    if r == start:
                        exp.children = [expr] + exp.children
                for exp in tree.find_data('expr'):
                    exp.children[0] = T('expansion', list(_interleave(exp.children[:1], expr)))

            _ignore_tree = T('expr', [_choice_of_rules(terms_to_ignore.values()), Token('OP', '?')])
            rule_defs.append(('__ignore', _ignore_tree, None))

        # Convert all tokens to rules
        new_terminal_names = {name: '__token_' + name for name, _t in term_defs}

        for name, tree, options in rule_defs:
            for exp in chain(tree.find_data('expansion'), tree.find_data('expr')):
                for i, sym in enumerate(exp.children):
                    if sym in new_terminal_names:
                        exp.children[i] = Token(sym.type, new_terminal_names[sym])

        for name, (tree, priority) in term_defs:    # TODO transfer priority to rule?
            if name.startswith('_'):
                options = RuleOptions(filter_out=True, priority=-priority)
            else:
                options = RuleOptions(keep_all_tokens=True, create_token=name, priority=-priority)

            name = new_terminal_names[name]
            inner_name = name + '_inner'
            rule_defs.append((name, _choice_of_rules([inner_name]), None))
            rule_defs.append((inner_name, tree, options))

        return [], rule_defs

    def compile(self, lexer=False, start=None):
        if not lexer:
            token_defs, rule_defs = self._prepare_scanless_grammar(start)
        else:
            token_defs = list(self.token_defs)
            rule_defs = self.rule_defs

        # =================
        #  Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs]

        # =================
        #  Compile Rules
        # =================
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()

        transformer = PrepareLiterals()
        if not lexer:
            transformer *= SplitLiterals()
        transformer *= ExtractAnonTokens(tokens)    # Adds to tokens

        rules = {}
        for name, rule_tree, options in rule_defs:
            assert name not in rules, name
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules[name] = ebnf_to_bnf.transform(tree), options

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        rule_tree_to_text = RuleTreeToText()

        new_rules = []
        for origin, (tree, options) in rules.items():
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)

            for expansion, alias in expansions:
                if alias and origin.startswith('_'):
                    raise Exception("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)" % (origin, alias))

                rule = Rule(origin, expansion, alias, options)
                new_rules.append(rule)

        return tokens, new_rules, self.ignore

_imported_grammars = {}
def import_grammar(grammar_path):
    if grammar_path not in _imported_grammars:
        for import_path in IMPORT_PATHS:
            with open(os.path.join(import_path, grammar_path)) as f:
                text = f.read()
            grammar = load_grammar(text, grammar_path)
            _imported_grammars[grammar_path] = grammar

    return _imported_grammars[grammar_path]

def resolve_token_references(token_defs):
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k: t for k, (t, _p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                for i, item in enumerate(exp.children):
                    if isinstance(item, Token):
                        if item.type == 'RULE':
                            raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name))
                        if item.type == 'TOKEN':
                            exp.children[i] = token_dict[item]
                            changed = True
        if not changed:
            break

def options_from_rule(name, *x):
    if len(x) > 1:
        priority, expansions = x
        priority = int(priority)
    else:
        expansions, = x
        priority = None

    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority)

class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()]

        rules = [options_from_rule(name, x) for name, x in RULES.items()]
        rules = [Rule(r, x.split(), None, o) for r, xs, o in rules for x in xs]
        callback = ParseTreeBuilder(rules, T).apply()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])

        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."

        try:
            tree = self.canonize_tree.transform(self.parser.parse(grammar_text + '\n'))
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name))
        except UnexpectedToken as e:
            if e.expected == ['_COLON']:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif e.expected == ['RULE']:
                raise GrammarError("Missing alias at line %s column %s" % (e.line, e.column))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise

        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data == 'token']
        rule_defs = [c.children for c in tree.children if c.data == 'rule']
        statements = [c.children for c in tree.children if c.data == 'statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        token_defs = [td if len(td) == 3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t, = stmt.children
                ignore.append(t)
            elif stmt.data == 'import':
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children) > 1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.g'
                g = import_grammar(grammar_path)
                token_options = dict(g.token_defs)[dotted_path[-1]]
                assert isinstance(token_options, tuple) and len(token_options) == 2
                token_defs.append([name.value, token_options])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TOKEN as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            if t.data == 'expansions' and len(t.children) == 1:
                t2, = t.children
                if t2.data == 'expansion' and len(t2.children) == 1:
                    item, = t2.children
                    if isinstance(item, Token) and item.type == 'TOKEN':
                        ignore_names.append(item.value)
                        continue

            name = '__IGNORE_%d' % len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        # Every name marked %ignore must refer to a defined token
        if not set(ignore_names) <= token_names:
            raise GrammarError("Tokens %s were marked to ignore but were not defined!" % (set(ignore_names) - token_names))

        # Resolve token references
        resolve_token_references(token_defs)

        rules = [options_from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                              for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)


load_grammar = GrammarLoader().load_grammar
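
For orientation, here is a minimal usage sketch of the entry point this file exports. It is not part of the file above: it assumes the module lives in the lark package (hence importable as lark.load_grammar), and that Rule objects expose origin and expansion attributes, as the constructor call in Grammar.compile suggests.

# Hedged usage sketch -- the grammar text and variable names are made up.
from lark.load_grammar import load_grammar

demo_grammar = '''
start: "hello" NAME
NAME: /[a-z]+/
%ignore " "
'''

grammar = load_grammar(demo_grammar, '<demo>')         # parse and verify the grammar text
tokens, rules, ignore = grammar.compile(lexer=True)    # standard (lexer-based) compilation
for rule in rules:
    print(rule.origin, rule.expansion)                 # BNF rules, ready for a parser frontend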