This repo contains the code used to mirror other repos, as well as the code that is being mirrored.
  1. "Parses and creates Grammar objects"
  2. import os.path
  3. from itertools import chain
  4. import re
  5. from ast import literal_eval
  6. from copy import deepcopy
  7. from .lexer import Token, UnexpectedInput
  8. from .parse_tree_builder import ParseTreeBuilder
  9. from .parser_frontends import LALR
  10. from .parsers.lalr_parser import UnexpectedToken
  11. from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
  12. from .tree import Tree as T, Transformer, InlineTransformer, Visitor
  13. __path__ = os.path.dirname(__file__)
  14. IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
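
# Human-readable names for punctuation and whitespace characters. ExtractAnonTokens
# (further down) consults this table when it has to invent a terminal name for an
# anonymous string that appears inline in a rule, e.g. an inline "(" becomes __LPAR
# instead of a numbered __ANONSTR_<n> name.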
_TOKEN_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}

# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/i?',
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_IMPORT': r'%import',
    'NUMBER': r'\d+',
}
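
# The meta-grammar for Lark's grammar files, written as plain data: each key is a rule
# name and each right-hand side is a whitespace-separated string of symbols.
# GrammarLoader.__init__ (near the bottom of this module) splits these strings, wraps
# them with RuleOptions, and builds an LALR parser from them.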
RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL',
             'RULE _DOT NUMBER _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'name',
              'literal',
              'range'],

    '?name': ['RULE', 'TOKEN'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL',
              'TOKEN _DOT NUMBER _COLON expansions _NL'],

    'statement': ['ignore', 'import'],
    'ignore': ['_IGNORE expansions _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TOKEN'],
    'import_args': ['_import_args'],
    '_import_args': ['name', '_import_args _DOT name'],

    'literal': ['REGEXP', 'STRING'],
}
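
# For orientation, a small grammar in the syntax described by TOKENS/RULES above could
# look roughly like this (an illustrative sketch, not taken from the repo's tests):
#
#     start: greeting+
#     greeting: "hello" NAME
#     NAME: /[a-z]+/
#     WS: " "
#     %ignore WS
#
# The classes below progressively lower such a grammar: EBNF_to_BNF rewrites the
# ?/*/+ repetition operators into plain recursive rules, and later passes simplify
# and flatten the resulting trees.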
class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op
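
# SimplifyRule_Visitor normalizes the trees produced by EBNF_to_BNF: it flattens nested
# nodes of the same kind and distributes parenthesized alternatives, so every rule ends
# up as a flat choice of plain expansions.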
class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )

        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i == j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten
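
# RuleTreeToText converts the fully simplified rule trees into plain Python data: each
# expansion becomes a (list of symbol names, alias) pair. CanonizeTree normalizes a few
# shapes coming straight out of the bootstrap parser, e.g. a [maybe] group becomes the
# equivalent "expr ?" form.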
class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x

    def expansion(self, symbols):
        return [sym.value for sym in symbols], None

    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value


class CanonizeTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]
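
# ExtractAnonTokens pulls anonymous terminals out of rules and registers them as named
# tokens. Following the naming logic in pattern() below: an inline "(" becomes __LPAR
# via _TOKEN_NAMES, an alphanumeric string like "hello" becomes __HELLO0, and anything
# else falls back to a numbered __ANONSTR_<n> or ANONRE_<n> name.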
class ExtractAnonTokens(InlineTransformer):
    "Create a unique list of anonymous tokens. Attempt to give meaningful names to them when we add them"

    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__'+value.upper()) not in self.token_set:
                        token_name = '%s%d' % (value.upper(), self.i)
                        try:
                            # Make sure we don't have unicode in our token names
                            token_name.encode('ascii')
                        except UnicodeEncodeError:
                            token_name = 'ANONSTR_%d' % self.i
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                    self.i += 1
                token_name = '__' + token_name
        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # Kind of a weird placement
                token_name = self.token_reverse[p].name
            else:
                token_name = 'ANONRE_%d' % self.i
                self.i += 1
        else:
            assert False, p

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        return Token('TOKEN', token_name, -1)
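
# The helpers below turn literal tokens into Pattern objects: _literal_to_pattern parses
# a quoted string or /regexp/ (with an optional trailing 'i' flag), PrepareLiterals
# applies it across a tree, SplitLiterals breaks multi-character strings into
# per-character patterns for scanless mode, and TokenTreeToPattern collapses a whole
# token tree into a single combined regexp.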
def _literal_to_pattern(literal):
    v = literal.value
    if v[-1] in 'i':
        flags = v[-1]
        v = v[:-1]
    else:
        flags = None
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1].replace("'", r"\'")
    s = literal_eval("u'''%s'''" % x)
    return { 'STRING': PatternStr,
             'REGEXP': PatternRE }[literal.type](s, flags)


class PrepareLiterals(InlineTransformer):
    def literal(self, literal):
        return T('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1
        regexp = '[%s-%s]' % (start, end)
        return T('pattern', [PatternRE(regexp)])


class SplitLiterals(InlineTransformer):
    def pattern(self, p):
        if isinstance(p, PatternStr) and len(p.value) > 1:
            return T('expansion', [T('pattern', [PatternStr(ch, flags=p.flags)]) for ch in p.value])
        return T('pattern', [p])


class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags)

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        assert all(i.flags is None for i in exps)
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)))

    def expr(self, args):
        inner, op = args
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)


def _interleave(l, item):
    for e in l:
        yield e
        if isinstance(e, T):
            if e.data in ('literal', 'range'):
                yield item
        elif is_terminal(e):
            yield item


def _choice_of_rules(rules):
    return T('expansions', [T('expansion', [Token('RULE', name)]) for name in rules])


def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v
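
# Grammar is the loader's output object: it holds the raw rule and token definitions
# plus the %ignore list, and compile() lowers them into (tokens, rules, ignore) for a
# parser frontend. In scanless mode (no lexer), _prepare_scanless_grammar first turns
# every token into an ordinary rule and interleaves an optional __ignore rule between
# symbols.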
class Grammar:
    def __init__(self, rule_defs, token_defs, ignore):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def _prepare_scanless_grammar(self, start):
        # XXX Pretty hacky! There should be a better way to write this method..
        rule_defs = deepcopy(self.rule_defs)
        term_defs = self.token_defs

        # Implement the "%ignore" feature without a lexer..
        terms_to_ignore = {name: '__'+name for name in self.ignore}
        if terms_to_ignore:
            assert set(terms_to_ignore) <= {name for name, _t in term_defs}
            term_defs = [(terms_to_ignore.get(name, name), t) for name, t in term_defs]
            expr = Token('RULE', '__ignore')
            for r, tree, _o in rule_defs:
                for exp in tree.find_data('expansion'):
                    exp.children = list(_interleave(exp.children, expr))
                    if r == start:
                        exp.children = [expr] + exp.children
                for exp in tree.find_data('expr'):
                    exp.children[0] = T('expansion', list(_interleave(exp.children[:1], expr)))

            _ignore_tree = T('expr', [_choice_of_rules(terms_to_ignore.values()), Token('OP', '?')])
            rule_defs.append(('__ignore', _ignore_tree, None))

        # Convert all tokens to rules
        new_terminal_names = {name: '__token_'+name for name, _t in term_defs}

        for name, tree, options in rule_defs:
            for exp in chain( tree.find_data('expansion'), tree.find_data('expr') ):
                for i, sym in enumerate(exp.children):
                    if sym in new_terminal_names:
                        exp.children[i] = Token(sym.type, new_terminal_names[sym])

        for name, (tree, priority) in term_defs:    # TODO transfer priority to rule?
            if name.startswith('_'):
                options = RuleOptions(filter_out=True, priority=priority)
            else:
                options = RuleOptions(keep_all_tokens=True, create_token=name, priority=priority)

            name = new_terminal_names[name]
            inner_name = name + '_inner'
            rule_defs.append((name, _choice_of_rules([inner_name]), None))
            rule_defs.append((inner_name, tree, options))

        return [], rule_defs

    def compile(self, lexer=False, start=None):
        if not lexer:
            token_defs, rule_defs = self._prepare_scanless_grammar(start)
        else:
            token_defs = list(self.token_defs)
            rule_defs = self.rule_defs

        # =================
        # Compile Tokens
        # =================

        # Convert token-trees to strings/regexps
        transformer = PrepareLiterals() * TokenTreeToPattern()
        tokens = [TokenDef(name, transformer.transform(token_tree), priority)
                  for name, (token_tree, priority) in token_defs]

        # =================
        # Compile Rules
        # =================
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()

        transformer = PrepareLiterals()
        if not lexer:
            transformer *= SplitLiterals()
        transformer *= ExtractAnonTokens(tokens)    # Adds to tokens

        rules = {}
        for name, rule_tree, options in rule_defs:
            assert name not in rules, name
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            tree = transformer.transform(rule_tree)
            rules[name] = ebnf_to_bnf.transform(tree), options

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        for tree, _o in rules.values():
            simplify_rule.visit(tree)

        rule_tree_to_text = RuleTreeToText()
        rules = {origin: (rule_tree_to_text.transform(tree), options) for origin, (tree, options) in rules.items()}

        return tokens, rules, self.ignore

class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False, priority=None):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.create_token = create_token  # used for scanless postprocessing
        self.priority = priority
        self.filter_out = filter_out  # remove this rule from the tree
                                      # used for "token"-rules in scanless

    @classmethod
    def from_rule(cls, name, *x):
        if len(x) > 1:
            priority, expansions = x
            priority = int(priority)
        else:
            expansions ,= x
            priority = None

        keep_all_tokens = name.startswith('!')
        name = name.lstrip('!')
        expand1 = name.startswith('?')
        name = name.lstrip('?')

        return name, expansions, cls(keep_all_tokens, expand1, priority=priority)
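
# %import support: imported grammars are parsed once and cached by path in
# _imported_grammars; GrammarLoader.load_grammar then copies individual token
# definitions out of the imported Grammar object.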
_imported_grammars = {}

def import_grammar(grammar_path):
    if grammar_path not in _imported_grammars:
        for import_path in IMPORT_PATHS:
            with open(os.path.join(import_path, grammar_path)) as f:
                text = f.read()
            grammar = load_grammar(text, grammar_path)
            _imported_grammars[grammar_path] = grammar

    return _imported_grammars[grammar_path]


def resolve_token_references(token_defs):
    # TODO Cycles detection
    # TODO Solve with transitive closure (maybe)

    token_dict = {k: t for k, (t, _p) in token_defs}
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, (token_tree, _p) in token_defs:
            for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                for i, item in enumerate(exp.children):
                    if isinstance(item, Token):
                        if item.type == 'RULE':
                            raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name))
                        if item.type == 'TOKEN':
                            exp.children[i] = token_dict[item]
                            changed = True
        if not changed:
            break
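
# GrammarLoader bootstraps everything: it builds an LALR parser for the meta-grammar
# from the hand-written TOKENS/RULES tables, parses a user grammar into a tree, runs
# the statement handling and validation below, and returns a Grammar object.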
class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()]

        rules = [RuleOptions.from_rule(name, x) for name, x in RULES.items()]
        d = {r: ([(x.split(), None) for x in xs], o) for r, xs, o in rules}
        rules, callback = ParseTreeBuilder(d, T).apply()
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'])
        parser_conf = ParserConf(rules, callback, 'start')

        self.parser = LALR(lexer_conf, parser_conf)

        self.canonize_tree = CanonizeTree()

    def load_grammar(self, grammar_text, name='<?>'):
        "Parse grammar_text, verify, and create Grammar object. Display nice messages on error."

        try:
            tree = self.canonize_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name))
        except UnexpectedToken as e:
            if e.expected == ['_COLON']:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif e.expected == ['RULE']:
                raise GrammarError("Missing alias at line %s column %s" % (e.line, e.column))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise

        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data == 'token']
        rule_defs = [c.children for c in tree.children if c.data == 'rule']
        statements = [c.children for c in tree.children if c.data == 'statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        token_defs = [td if len(td) == 3 else (td[0], 1, td[1]) for td in token_defs]
        token_defs = [(name.value, (t, int(p))) for name, p, t in token_defs]

        # Execute statements
        ignore = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                t ,= stmt.children
                ignore.append(t)
            elif stmt.data == 'import':
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children) > 1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.g'
                g = import_grammar(grammar_path)
                token_options = dict(g.token_defs)[dotted_path[-1]]
                assert isinstance(token_options, tuple) and len(token_options) == 2
                token_defs.append([name.value, token_options])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        # XXX A slightly hacky solution. Recognition of %ignore TOKEN as separate comes from the lexer's
        #     inability to handle duplicate tokens (two names, one value)
        ignore_names = []
        for t in ignore:
            if t.data == 'expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data == 'expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if isinstance(item, Token) and item.type == 'TOKEN':
                        ignore_names.append(item.value)
                        continue

            name = '__IGNORE_%d' % len(ignore_names)
            ignore_names.append(name)
            token_defs.append((name, (t, 0)))

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        # Resolve token references
        resolve_token_references(token_defs)

        rules = [RuleOptions.from_rule(*x) for x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                              for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, ignore_names)


load_grammar = GrammarLoader().load_grammar
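
# A minimal usage sketch, assuming this module is importable as lark.load_grammar (its
# relative imports require it to live inside the lark package); the snippet is
# illustrative and has not been verified against this exact revision:
#
#     from lark.load_grammar import load_grammar
#
#     grammar = load_grammar(grammar_text, '<example>')   # grammar_text: a string like
#                                                         # the sketch above EBNF_to_BNF
#     tokens, rules, ignore_names = grammar.compile(lexer=True, start='start')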