This repo contains code for mirroring other repos, as well as the code that is being mirrored.

import os.path
from itertools import chain
import re
from ast import literal_eval
from copy import deepcopy

from .lexer import Token, UnexpectedInput
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import LALR
from .parsers.lalr_parser import UnexpectedToken
from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
from .tree import Tree as T, Transformer, InlineTransformer, Visitor

__path__ = os.path.dirname(__file__)
IMPORT_PATHS = [os.path.join(__path__, 'grammars')]
_TOKEN_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}
# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/\n])*?/i?',
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_IMPORT': r'%import',
}
RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],
    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],
    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'name',
              'literal',
              'range'],
    '?name': ['RULE', 'TOKEN'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL'],

    'statement': ['ignore', 'import'],
    'ignore': ['_IGNORE expansions _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TOKEN'],
    'import_args': ['_import_args'],
    '_import_args': ['name', '_import_args _DOT name'],

    'literal': ['REGEXP', 'STRING'],
}
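
# Illustration only (not part of the original module): a small grammar in the
# format that the TOKENS/RULES meta-grammar above accepts might look like the
# sketch below. Rule names are lower-case, token names upper-case; "..." is a
# string literal, /.../ is a regexp, '->' names an alias, and '%ignore' marks
# a token to be skipped:
#
#   start: sum
#   ?sum: product
#       | sum "+" product -> add
#   product: NUMBER
#   NUMBER: /[0-9]+/
#   WS: / +/
#   %ignore WS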
class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op
class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )
        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i==j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten
def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v
class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x

    def expansion(self, symbols):
        return [sym.value for sym in symbols], None

    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value
class SimplifyTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]
class ExtractAnonTokens(InlineTransformer):
    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.token_reverse = {td.pattern: td for td in tokens}
        self.i = 0

    def pattern(self, p):
        value = p.value
        if p in self.token_reverse and p.flags != self.token_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__'+value.upper()) not in self.token_set:
                        token_name = '%s%d' % (value.upper(), self.i)
                        try:
                            # Make sure we don't have unicode in our token names
                            token_name.encode('ascii')
                        except UnicodeEncodeError:
                            token_name = 'ANONSTR_%d' % self.i
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                    self.i += 1
                token_name = '__' + token_name
        elif isinstance(p, PatternRE):
            if p in self.token_reverse:  # Kind of a weird placement
                token_name = self.token_reverse[p].name
            else:
                token_name = 'ANONRE_%d' % self.i
                self.i += 1
        else:
            assert False, p

        if token_name not in self.token_set:
            assert p not in self.token_reverse
            self.token_set.add(token_name)
            tokendef = TokenDef(token_name, p)
            self.token_reverse[p] = tokendef
            self.tokens.append(tokendef)

        return Token('TOKEN', token_name, -1)
def _literal_to_pattern(literal):
    v = literal.value
    if v[-1] in 'i':
        flags = v[-1]
        v = v[:-1]
    else:
        flags = None

    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1].replace("'", r"\'")
    s = literal_eval("u'''%s'''" % x)
    return { 'STRING': PatternStr,
             'REGEXP': PatternRE }[literal.type](s, flags)
class PrepareLiterals(InlineTransformer):
    def literal(self, literal):
        return T('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1
        regexp = '[%s-%s]' % (start, end)
        return T('pattern', [PatternRE(regexp)])
class SplitLiterals(InlineTransformer):
    def pattern(self, p):
        if isinstance(p, PatternStr) and len(p.value)>1:
            return T('expansion', [T('pattern', [PatternStr(ch, flags=p.flags)]) for ch in p.value])
        return T('pattern', [p])
class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        if len({i.flags for i in items}) > 1:
            raise GrammarError("Lark doesn't support joining tokens with conflicting flags!")
        return PatternRE(''.join(i.to_regexp() for i in items), items[0].flags)

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        assert all(i.flags is None for i in exps)
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)))

    def expr(self, args):
        inner, op = args
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)
def _interleave(l, item):
    for e in l:
        yield e
        if isinstance(e, T):
            if e.data in ('literal', 'range'):
                yield item
        elif is_terminal(e):
            yield item

def _choice_of_rules(rules):
    return T('expansions', [T('expansion', [Token('RULE', name)]) for name in rules])
class Grammar:
    def __init__(self, rule_defs, token_defs, extra):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.extra = extra

    def _prepare_scanless_grammar(self, start):
        # XXX Pretty hacky! There should be a better way to write this method..

        rule_defs = deepcopy(self.rule_defs)
        term_defs = self.token_defs

        # Implement the "%ignore" feature without a lexer..
        terms_to_ignore = {name:'__'+name for name in self.extra['ignore']}
        if terms_to_ignore:
            assert set(terms_to_ignore) <= {name for name, t in term_defs}
            term_defs = [(terms_to_ignore.get(name,name),t) for name,t in term_defs]
            expr = Token('RULE', '__ignore')
            for r, tree, _o in rule_defs:
                for exp in tree.find_data('expansion'):
                    exp.children = list(_interleave(exp.children, expr))
                    if r == start:
                        exp.children = [expr] + exp.children
                for exp in tree.find_data('expr'):
                    exp.children[0] = T('expansion', list(_interleave(exp.children[:1], expr)))

            _ignore_tree = T('expr', [_choice_of_rules(terms_to_ignore.values()), Token('OP', '?')])
            rule_defs.append(('__ignore', _ignore_tree, None))

        # Convert all tokens to rules
        new_terminal_names = {name: '__token_'+name for name, tree in term_defs}

        for name, tree, options in rule_defs:
            for exp in chain( tree.find_data('expansion'), tree.find_data('expr') ):
                for i, sym in enumerate(exp.children):
                    if sym in new_terminal_names:
                        exp.children[i] = Token(sym.type, new_terminal_names[sym])

        for name, tree in term_defs:
            if name.startswith('_'):
                options = RuleOptions(filter_out=True)
            else:
                options = RuleOptions(keep_all_tokens=True, create_token=name)

            name = new_terminal_names[name]
            inner_name = name + '_inner'
            rule_defs.append((name, _choice_of_rules([inner_name]), None))
            rule_defs.append((inner_name, tree, options))

        return [], rule_defs
    def compile(self, lexer=False, start=None):
        if not lexer:
            token_defs, rule_defs = self._prepare_scanless_grammar(start)
        else:
            token_defs = list(self.token_defs)
            rule_defs = self.rule_defs

        # =================
        #  Compile Tokens
        # =================
        token_tree_to_pattern = TokenTreeToPattern()

        # Convert tokens to strings/regexps
        tokens = []
        for name, token_tree in token_defs:
            token_tree = PrepareLiterals().transform(token_tree)
            pattern = token_tree_to_pattern.transform(token_tree)
            tokens.append(TokenDef(name, pattern) )

        # Resolve regexp assignments of the form /..${X}../
        # XXX This is deprecated, since you can express most regexps with EBNF
        # XXX Also, since this happens after import, it can be a source of bugs
        token_dict = {td.name: td.pattern.to_regexp() for td in tokens}
        while True:
            changed = False
            for t in tokens:
                if isinstance(t.pattern, PatternRE):
                    sp = re.split(r'(\$\{%s})' % TOKENS['TOKEN'], t.pattern.value)
                    if sp:
                        value = ''.join(token_dict[x[2:-1]] if x.startswith('${') and x.endswith('}') else x
                                        for x in sp)
                        if value != t.pattern.value:
                            t.pattern.value = value
                            changed = True
            if not changed:
                break

        # =================
        #  Compile Rules
        # =================
        extract_anon = ExtractAnonTokens(tokens)
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()
        rule_tree_to_text = RuleTreeToText()

        rules = {}
        for name, rule_tree, options in rule_defs:
            assert name not in rules, name
            rule_tree = PrepareLiterals().transform(rule_tree)
            if not lexer:
                rule_tree = SplitLiterals().transform(rule_tree)
            tree = extract_anon.transform(rule_tree)  # Adds to tokens
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            rules[name] = ebnf_to_bnf.transform(tree), options

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        for tree, _o in rules.values():
            simplify_rule.visit(tree)

        rules = {origin: (rule_tree_to_text.transform(tree), options) for origin, (tree, options) in rules.items()}

        return tokens, rules, self.extra
class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False, create_token=None, filter_out=False):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.create_token = create_token    # used for scanless postprocessing
        self.filter_out = filter_out        # remove this rule from the tree
                                            # used for "token"-rules in scanless

    @classmethod
    def from_rule(cls, name, expansions):
        keep_all_tokens = name.startswith('!')
        name = name.lstrip('!')
        expand1 = name.startswith('?')
        name = name.lstrip('?')

        return name, expansions, cls(keep_all_tokens, expand1)
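
# Illustration only (not in the original source): a leading '!' in a rule name
# sets keep_all_tokens and a leading '?' sets expand1, and both prefixes are
# stripped from the returned name, e.g.
#   RuleOptions.from_rule('?expr', exps)  -> ('expr', exps, RuleOptions(False, True))
#   RuleOptions.from_rule('!stmt', exps)  -> ('stmt', exps, RuleOptions(True, False))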
_imported_grammars = {}
def import_grammar(grammar_path):
    if grammar_path not in _imported_grammars:
        for import_path in IMPORT_PATHS:
            with open(os.path.join(import_path, grammar_path)) as f:
                text = f.read()
            grammar = load_grammar(text, grammar_path)
            _imported_grammars[grammar_path] = grammar

    return _imported_grammars[grammar_path]
def resolve_token_references(token_defs):
    token_dict = dict(token_defs)
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False
        for name, token_tree in token_defs:
            for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                for i, item in enumerate(exp.children):
                    if isinstance(item, Token):
                        if item.type == 'RULE':
                            raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name))
                        if item.type == 'TOKEN':
                            exp.children[i] = token_dict[item]
                            changed = True
        if not changed:
            break
class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()]

        rules = [RuleOptions.from_rule(name, x) for name, x in RULES.items()]
        d = {r: ([(x.split(), None) for x in xs], o) for r, xs, o in rules}
        rules, callback = ParseTreeBuilder(T).create_tree_builder(d, None)
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'], None)
        parser_conf = ParserConf(rules, callback, 'start')

        self.parser = LALR(lexer_conf, parser_conf)

        self.simplify_tree = SimplifyTree()

    def load_grammar(self, grammar_text, name='<?>'):
        try:
            tree = self.simplify_tree.transform( self.parser.parse(grammar_text+'\n') )
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name))
        except UnexpectedToken as e:
            if e.expected == ['_COLON']:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif e.expected == ['RULE']:
                raise GrammarError("Missing alias at line %s column %s" % (e.line, e.column))
            elif 'STRING' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise
        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data=='token']
        rule_defs = [c.children for c in tree.children if c.data=='rule']
        statements = [c.children for c in tree.children if c.data=='statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        token_defs = [(name.value, t) for name, t in token_defs]

        # Execute statements
        ignore = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                expansions ,= stmt.children
                ignore.append(expansions)
            elif stmt.data == 'import':
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children)>1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.g'
                g = import_grammar(grammar_path)
                token_tree = dict(g.token_defs)[dotted_path[-1]]
                token_defs.append([name.value, token_tree])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        ignore_names = []
        for i, t in enumerate(ignore):
            if t.data == 'expansions' and len(t.children) == 1:
                x ,= t.children
                if x.data == 'expansion' and len(x.children) == 1:
                    item ,= x.children
                    if isinstance(item, Token) and item.type == 'TOKEN':
                        # XXX is this really a wise solution? -- Erez
                        ignore_names.append(item.value)
                        continue

            name = '__IGNORE_%d'%i
            token_defs.append((name, t))
            ignore_names.append(name)

        # Resolve token references
        resolve_token_references(token_defs)

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        rules = [RuleOptions.from_rule(name, x) for name, x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                              for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, {'ignore': ignore_names})


load_grammar = GrammarLoader().load_grammar
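
# A minimal usage sketch (illustration only; the grammar text and the
# '<example>' name below are made up, not taken from the original source).
# GrammarLoader.load_grammar parses a grammar definition into a Grammar object,
# and Grammar.compile() returns the token definitions, the BNF rules, and extra
# info such as the names of the ignored tokens:
#
#   g = load_grammar('''
#   start: "hello" NAME
#   NAME: /[a-z]+/
#   WS: / +/
#   %ignore WS
#   ''', '<example>')
#
#   tokens, rules, extra = g.compile(lexer=True, start='start')
#   print(extra)   # e.g. {'ignore': ['WS']}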