import os.path
from itertools import chain
import re
from ast import literal_eval

from .lexer import Lexer, Token, UnexpectedInput
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import LALR
from .parsers.lalr_parser import UnexpectedToken
from .common import is_terminal, GrammarError, LexerConf, ParserConf, PatternStr, PatternRE, TokenDef
from .tree import Tree as T, Transformer, InlineTransformer, Visitor

__path__ = os.path.dirname(__file__)
IMPORT_PATHS = [os.path.join(__path__, 'grammars')]

_TOKEN_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}

# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*][?]?|[?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'"(\\"|\\\\|[^"])*?"',
    'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/',
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
    '_IGNORE': r'%ignore',
    '_IMPORT': r'%import',
}

RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],
    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'name',
              'literal',
              'range'],
    '?name': ['RULE', 'TOKEN'],

    'maybe': ['_LBRA expansions _RBRA'],
    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL'],

    'statement': ['ignore', 'import'],
    'ignore': ['_IGNORE expansions _NL'],
    'import': ['_IMPORT import_args _NL',
               '_IMPORT import_args _TO TOKEN'],
    'import_args': ['_import_args'],
    '_import_args': ['name', '_import_args _DOT name'],

    'literal': ['REGEXP', 'STRING'],
}

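# Illustration (not part of the original module): TOKENS and RULES above define the
# meta-grammar that GrammarLoader uses to parse user grammars, i.e. a grammar text such as
#
#     start: item+
#     item: NUMBER | WORD
#     NUMBER: /[0-9]+/
#     WORD: /[a-z]+/
#     %ignore SPACES
#     SPACES: / +/
#
# is lexed with the TOKENS regexps (RULE, TOKEN, STRING, REGEXP, ...) and parsed with
# the RULES productions into the tree that load_grammar() walks further below.
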
class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])]), self.rule_options
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op

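# Note (added for orientation; inferred from expr() above and SimplifyRule_Visitor
# below): the '?' operator needs no helper rule. It returns the alternatives
# (rule | <empty expansion>), which SimplifyRule_Visitor later distributes, so that
#   a : b c? d
# becomes
#   a : b c d | b d
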
class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )
        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i==j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten

def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v

class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x

    def expansion(self, symbols):
        return [sym.value for sym in symbols], None

    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value

class SimplifyTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]

class ExtractAnonTokens(InlineTransformer):
    def __init__(self, tokens):
        self.tokens = tokens
        self.token_set = {td.name for td in self.tokens}
        self.str_reverse = {td.pattern.value: td.name for td in tokens if isinstance(td.pattern, PatternStr)}
        self.re_reverse = {td.pattern.value: td.name for td in tokens if isinstance(td.pattern, PatternRE)}
        self.i = 0

    def pattern(self, p):
        value = p.value
        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined token name
                token_name = self.str_reverse[value]
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__'+value.upper()) not in self.token_set:
                        token_name = '%s%d' % (value.upper(), self.i)
                        try:
                            # Make sure we don't have unicode in our token names
                            token_name.encode('ascii')
                        except UnicodeEncodeError:
                            token_name = 'ANONSTR_%d' % self.i
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                    self.i += 1
                token_name = '__' + token_name
        elif isinstance(p, PatternRE):
            if value in self.re_reverse:   # Kind of a weird placement
                token_name = self.re_reverse[value]
            else:
                token_name = 'ANONRE_%d' % self.i
                self.i += 1
        else:
            assert False, p

        if token_name not in self.token_set:
            self.token_set.add(token_name)

            if isinstance(p, PatternStr):
                assert value not in self.str_reverse
                self.str_reverse[value] = token_name
            else:
                assert value not in self.re_reverse
                self.re_reverse[value] = token_name

            self.tokens.append(TokenDef(token_name, p))

        return Token('TOKEN', token_name, -1)

def _literal_to_pattern(literal):
    v = literal.value
    assert v[0] == v[-1] and v[0] in '"/'
    s = literal_eval("u'''%s'''" % v[1:-1])
    return {'STRING': PatternStr,
            'REGEXP': PatternRE}[literal.type](s)

class PrepareLiterals(InlineTransformer):
    def literal(self, literal):
        return T('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        assert len(start) == len(end) == 1
        regexp = '[%s-%s]' % (start, end)
        return T('pattern', [PatternRE(regexp)])

class SplitLiterals(InlineTransformer):
    def pattern(self, p):
        if isinstance(p, PatternStr) and len(p.value) > 1:
            return T('expansion', [T('pattern', [PatternStr(ch)]) for ch in p.value])
        return T('pattern', [p])

class TokenTreeToPattern(Transformer):
    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        return PatternRE(''.join(i.to_regexp() for i in items))

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        return PatternRE('(?:%s)' % ('|'.join(i.to_regexp() for i in exps)))

    def expr(self, args):
        inner, op = args
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op))

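# Example of the intent (illustrative token, not from this module): a token tree for
#   SEP: ("," | ";")+
# collapses bottom-up into a single PatternRE roughly equivalent to  (?:(?:,|;))+
# (up to escaping of the literal characters), which the lexer can compile as one regexp.
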
def interleave(l, item):
    for e in l:
        yield e
        if isinstance(e, T):
            if e.data == 'literal':
                yield item
        elif is_terminal(e):
            yield item

class Grammar:
    def __init__(self, rule_defs, token_defs, extra):
        self.token_defs = token_defs
        self.rule_defs = rule_defs
        self.extra = extra

    def compile(self, lexer=False, start=None):
        if not lexer:
            # XXX VERY HACKY!! There must be a better way..
            ignore_tokens = [('_'+name, t) for name, t in self.token_defs if name in self.extra['ignore']]
            if ignore_tokens:
                self.token_defs = [('_'+name if name in self.extra['ignore'] else name, t) for name, t in self.token_defs]
                ignore_names = [t[0] for t in ignore_tokens]
                expr = Token('RULE', '__ignore')
                for r, tree, _o in self.rule_defs:
                    for exp in tree.find_data('expansion'):
                        exp.children = list(interleave(exp.children, expr))
                        if r == start:   # TODO use GrammarRule or similar (RuleOptions?)
                            exp.children = [expr] + exp.children

                x = [T('expansion', [Token('RULE', x)]) for x in ignore_names]
                _ignore_tree = T('expr', [T('expansions', x), Token('OP', '?')])
                self.rule_defs.append(('__ignore', _ignore_tree, None))

            for name, tree in self.token_defs:
                self.rule_defs.append((name, tree, RuleOptions(keep_all_tokens=True)))
            token_defs = []
        else:
            token_defs = list(self.token_defs)

        # =================
        #  Compile Tokens
        # =================
        token_tree_to_pattern = TokenTreeToPattern()

        # Convert tokens to strings/regexps
        tokens = []
        for name, token_tree in token_defs:
            token_tree = PrepareLiterals().transform(token_tree)
            pattern = token_tree_to_pattern.transform(token_tree)
            tokens.append(TokenDef(name, pattern))

        # Resolve regexp assignments of the form /..${X}../
        # XXX This is deprecated, since you can express most regexps with EBNF
        # XXX Also, since this happens after import, it can be a source of bugs
        token_dict = {td.name: td.pattern.to_regexp() for td in tokens}
        while True:
            changed = False
            for t in tokens:
                if isinstance(t.pattern, PatternRE):
                    sp = re.split(r'(\$\{%s})' % TOKENS['TOKEN'], t.pattern.value)
                    if sp:
                        value = ''.join(token_dict[x[2:-1]] if x.startswith('${') and x.endswith('}') else x
                                        for x in sp)
                        if value != t.pattern.value:
                            t.pattern.value = value
                            changed = True
            if not changed:
                break

        # =================
        #  Compile Rules
        # =================
        extract_anon = ExtractAnonTokens(tokens)
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()
        rule_tree_to_text = RuleTreeToText()

        rules = {}
        for name, rule_tree, options in self.rule_defs:
            assert name not in rules, name
            rule_tree = PrepareLiterals().transform(rule_tree)
            if not lexer:
                rule_tree = SplitLiterals().transform(rule_tree)
            tree = extract_anon.transform(rule_tree)   # Adds to tokens
            ebnf_to_bnf.rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            rules[name] = ebnf_to_bnf.transform(tree), options

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        for tree, _o in rules.values():
            simplify_rule.visit(tree)

        rules = {origin: (rule_tree_to_text.transform(tree), options) for origin, (tree, options) in rules.items()}

        return tokens, rules, self.extra

class RuleOptions:
    def __init__(self, keep_all_tokens=False, expand1=False):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1

def _extract_options_for_rule(name, expansions):
    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, expansions, RuleOptions(keep_all_tokens, expand1)

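# For example (values illustrative only): a rule named '?expr' comes back as
# ('expr', expansions, RuleOptions(keep_all_tokens=False, expand1=True)), while
# '!mylist' yields keep_all_tokens=True and expand1=False.
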
_imported_grammars = {}

def import_grammar(grammar_path):
    if grammar_path not in _imported_grammars:
        for import_path in IMPORT_PATHS:
            with open(os.path.join(import_path, grammar_path)) as f:
                text = f.read()
            grammar = load_grammar(text, grammar_path)
            _imported_grammars[grammar_path] = grammar

    return _imported_grammars[grammar_path]

def resolve_token_references(token_defs):
    token_dict = dict(token_defs)
    assert len(token_dict) == len(token_defs), "Same name defined twice?"

    while True:
        changed = False

        for name, token_tree in token_defs:
            for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                for i, item in enumerate(exp.children):
                    if isinstance(item, Token):
                        if item.type == 'RULE':
                            raise GrammarError("Rules aren't allowed inside tokens (%s in %s)" % (item, name))
                        if item.type == 'TOKEN':
                            exp.children[i] = token_dict[item]
                            changed = True

        if not changed:
            break

class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef(name, PatternRE(value)) for name, value in TOKENS.items()]

        rules = [_extract_options_for_rule(name, x) for name, x in RULES.items()]
        d = {r: ([(x.split(), None) for x in xs], o) for r, xs, o in rules}
        rules, callback = ParseTreeBuilder(T).create_tree_builder(d, None)
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'], None)
        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR(lexer_conf, parser_conf)

        self.simplify_tree = SimplifyTree()

    def load_grammar(self, grammar_text, name='<?>'):
        try:
            tree = self.simplify_tree.transform(self.parser.parse(grammar_text+'\n'))
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d in %s" % (e.context, e.line, e.column, name))
        except UnexpectedToken as e:
            if '_COLON' in e.expected:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif 'literal' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise

        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data == 'token']
        rule_defs = [c.children for c in tree.children if c.data == 'rule']
        statements = [c.children for c in tree.children if c.data == 'statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        token_defs = [(name.value, t) for name, t in token_defs]

        # Execute statements
        ignore = []
        for (stmt,) in statements:
            if stmt.data == 'ignore':
                expansions ,= stmt.children
                ignore.append(expansions)
            elif stmt.data == 'import':
                dotted_path = stmt.children[0].children
                name = stmt.children[1] if len(stmt.children) > 1 else dotted_path[-1]
                grammar_path = os.path.join(*dotted_path[:-1]) + '.g'
                g = import_grammar(grammar_path)
                token_tree = dict(g.token_defs)[dotted_path[-1]]
                token_defs.append([name.value, token_tree])
            else:
                assert False, stmt

        # Verify correctness 1
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)

        # Handle ignore tokens
        ignore_names = []
        for i, t in enumerate(ignore):
            if t.data == 'expansions' and len(t.children) == 1:
                x ,= t.children
                if x.data == 'expansion' and len(x.children) == 1:
                    item ,= x.children
                    if isinstance(item, Token) and item.type == 'TOKEN':
                        # XXX is this really a wise solution? -- Erez
                        ignore_names.append(item.value)
                        continue

            name = '__IGNORE_%d' % i
            token_defs.append((name, t))
            ignore_names.append(name)

        # Resolve token references
        resolve_token_references(token_defs)

        # Verify correctness 2
        token_names = set()
        for name, _ in token_defs:
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        rules = [_extract_options_for_rule(name, x) for name, x in rule_defs]

        rule_names = set()
        for name, _x, _o in rules:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % name)
            rule_names.add(name)

        for name, expansions, _o in rules:
            used_symbols = {t for x in expansions.find_data('expansion')
                            for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, name))

        # TODO don't include unused tokens, they can only cause trouble!

        return Grammar(rules, token_defs, {'ignore': ignore_names})

load_grammar = GrammarLoader().load_grammar
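
# Minimal usage sketch (an assumption about how the surrounding lark package drives
# this module; the grammar text and variable names are illustrative only):
#
#   grammar = load_grammar(r'''
#       start: WORD ("," WORD)*
#       WORD: /[a-z]+/
#       %ignore SPACES
#       SPACES: / +/
#   ''', '<example>')
#   tokens, rules, extra = grammar.compile(lexer=True, start='start')
#
# 'tokens' is a list of TokenDef, 'rules' maps each rule name to its expansions plus
# RuleOptions, and extra['ignore'] lists the token names the lexer should skip.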