This repo contains code to mirror other repos, as well as the code being mirrored.


from itertools import chain
import re
import codecs

from .lexer import Lexer, Token, UnexpectedInput, TokenDef__Str, TokenDef__Regexp
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import LALR
from .parsers.lalr_parser import UnexpectedToken
from .common import is_terminal, GrammarError, LexerConf, ParserConf
from .tree import Tree as T, Transformer, InlineTransformer, Visitor

unicode_escape = codecs.getdecoder('unicode_escape')
_TOKEN_NAMES = {
    '.' : 'DOT',
    ',' : 'COMMA',
    ':' : 'COLON',
    ';' : 'SEMICOLON',
    '+' : 'PLUS',
    '-' : 'MINUS',
    '*' : 'STAR',
    '/' : 'SLASH',
    '\\' : 'BACKSLASH',
    '|' : 'VBAR',
    '?' : 'QMARK',
    '!' : 'BANG',
    '@' : 'AT',
    '#' : 'HASH',
    '$' : 'DOLLAR',
    '%' : 'PERCENT',
    '^' : 'CIRCUMFLEX',
    '&' : 'AMPERSAND',
    '_' : 'UNDERSCORE',
    '<' : 'LESSTHAN',
    '>' : 'MORETHAN',
    '=' : 'EQUAL',
    '"' : 'DBLQUOTE',
    '\'' : 'QUOTE',
    '`' : 'BACKQUOTE',
    '~' : 'TILDE',
    '(' : 'LPAR',
    ')' : 'RPAR',
    '{' : 'LBRACE',
    '}' : 'RBRACE',
    '[' : 'LSQB',
    ']' : 'RSQB',
    '\n' : 'NEWLINE',
    '\r\n' : 'CRLF',
    '\t' : 'TAB',
    ' ' : 'SPACE',
}
# Grammar Parser
TOKENS = {
    '_LPAR': r'\(',
    '_RPAR': r'\)',
    '_LBRA': r'\[',
    '_RBRA': r'\]',
    'OP': '[+*?](?![a-z])',
    '_COLON': ':',
    '_OR': r'\|',
    '_DOT': r'\.',
    '_PERCENT': r'%',
    'RULE': '!?[_?]?[a-z][_a-z0-9]*',
    'TOKEN': '_?[A-Z][_A-Z0-9]*',
    'STRING': r'".*?[^\\]"',
    'REGEXP': r"/(?!/).*?[^\\]/",
    '_NL': r'(\r?\n)+\s*',
    'WS': r'[ \t]+',
    'COMMENT': r'//[^\n]*',
    '_TO': '->',
}
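
# A note on the less obvious patterns above (my reading, not original
# commentary): OP's negative lookahead keeps a '?' that starts a rule name
# (as in '?alias') from being lexed as a postfix operator; STRING and REGEXP
# end on a delimiter preceded by a non-backslash character, so escaped
# quotes/slashes stay inside the token; and REGEXP's (?!/) keeps the comment
# marker '//' from lexing as an empty regexp.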
RULES = {
    'start': ['_list'],
    '_list': ['_item', '_list _item'],
    '_item': ['rule', 'token', 'statement', '_NL'],

    'rule': ['RULE _COLON expansions _NL'],
    'expansions': ['alias',
                   'expansions _OR alias',
                   'expansions _NL _OR alias'],

    '?alias': ['expansion _TO RULE', 'expansion'],
    'expansion': ['_expansion'],

    '_expansion': ['', '_expansion expr'],

    '?expr': ['atom',
              'atom OP'],

    '?atom': ['_LPAR expansions _RPAR',
              'maybe',
              'RULE',
              'TOKEN',
              'tokenvalue',
              'range'],

    'maybe': ['_LBRA expansions _RBRA'],

    'range': ['STRING _DOT _DOT STRING'],

    'token': ['TOKEN _COLON expansions _NL'],
    'statement': ['_PERCENT RULE expansions _NL'],

    'tokenvalue': ['REGEXP', 'STRING'],
}
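
# For reference, a small grammar in the syntax described by TOKENS and RULES
# above (an illustrative example, not part of the module):
#
#     start: word ("," word)*  -> wordlist
#     word: LETTER+
#     LETTER: /[a-z]/
#     WS: / +/
#     %ignore WS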
class EBNF_to_BNF(InlineTransformer):
    def __init__(self):
        self.new_rules = {}
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0

    def _add_recurse_rule(self, type_, expr):
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = Token('RULE', new_name, -1)
        self.new_rules[new_name] = T('expansions', [T('expansion', [expr]), T('expansion', [t, expr])])
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op):
        if op.value == '?':
            return T('expansions', [rule, T('expansion', [])])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return T('expansions', [new_name, T('expansion', [])])
        assert False, op
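
# A worked example of the transform above (hypothetical rule names): given
# 'a: b c* d', the 'c*' expr becomes a reference to a generated rule
#
#     __anon_star_0: c | __anon_star_0 c
#
# stored in new_rules, and the reference itself is wrapped so it is optional;
# 'c+' works the same way, minus the optional wrapping.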
class SimplifyRule_Visitor(Visitor):

    @staticmethod
    def _flatten(tree):
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, T) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )

        while True:
            self._flatten(tree)

            for i, child in enumerate(tree.children):
                if isinstance(child, T) and child.data == 'expansions':
                    tree.data = 'expansions'
                    tree.children = [self.visit(T('expansion', [option if i == j else other
                                                                for j, other in enumerate(tree.children)]))
                                     for option in child.children]
                    break
            else:
                break

    def alias(self, tree):
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(T('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    expansions = _flatten
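
# e.g. for an aliased group, alias() above distributes the alias over the
# unpacked options: roughly, alias(expansions(b, c), x) becomes
# expansions(alias(b, x), alias(c, x)).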
def dict_update_safe(d1, d2):
    for k, v in d2.items():
        assert k not in d1
        d1[k] = v


class RuleTreeToText(Transformer):
    def expansions(self, x):
        return x
    def expansion(self, symbols):
        return [sym.value for sym in symbols], None
    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)
        return expansion, alias.value
class SimplifyTree(InlineTransformer):
    def maybe(self, expr):
        return T('expr', [expr, Token('OP', '?', -1)])

    def tokenmods(self, *args):
        if len(args) == 1:
            return list(args)
        tokenmods, value = args
        return tokenmods + [value]
class ExtractAnonTokens(InlineTransformer):
    def __init__(self, tokens, token_set):
        self.tokens = tokens
        self.token_set = token_set
        self.token_reverse = {td.value: td.name for td in tokens}
        self.i = 0

    def tokenvalue(self, token):
        if token.type == 'STRING':
            value = token.value[1:-1]
            try:
                # If already defined, use the user-defined token name
                token_name = self.token_reverse[value]
            except KeyError:
                # Try to assign an indicative anon-token name, otherwise use a numbered name
                try:
                    token_name = _TOKEN_NAMES[value]
                except KeyError:
                    if value.isalnum() and value[0].isalpha() and ('__' + value.upper()) not in self.token_set:
                        token_name = value.upper()  # This can create name duplications for non-identical tokens
                    else:
                        token_name = 'ANONSTR_%d' % self.i
                        self.i += 1
                token_name = '__' + token_name
        elif token.type == 'REGEXP':
            token_name = 'ANONRE_%d' % self.i
            value = token.value
            self.i += 1
        else:
            assert False, token

        if value in self.token_reverse:  # Kind of a weird placement
            token_name = self.token_reverse[value]

        if token_name not in self.token_set:
            self.token_set.add(token_name)
            if token.type == 'STRING':
                self.tokens.append(TokenDef__Str(token_name, token[1:-1]))
            else:
                self.tokens.append(TokenDef__Regexp(token_name, token[1:-1]))
            assert value not in self.token_reverse, value
            self.token_reverse[value] = token_name

        return Token('TOKEN', token_name, -1)
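
# Naming behaviour of the transformer above, by example: the string "(" gets
# the name __LPAR via _TOKEN_NAMES; an alphanumeric string like "if" becomes
# __IF via .upper(); other strings fall back to __ANONSTR_<n>, and anonymous
# regexps to ANONRE_<n> (note the regexp names carry no double-underscore
# prefix).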
class TokenValue(object):
    def __init__(self, value):
        self.value = value

class TokenValue__Str(TokenValue):
    def to_regexp(self):
        return re.escape(self.value)

class TokenValue__Regexp(TokenValue):
    def to_regexp(self):
        return self.value
class TokenTreeToRegexp(Transformer):
    def tokenvalue(self, tv):
        tv, = tv
        value = tv.value[1:-1]

        if r'\u' in value:
            # XXX for now, you can't mix unicode escaping and unicode characters in the same token
            value = unicode_escape(value)[0]

        if tv.type == 'REGEXP':
            return TokenValue__Regexp(value)
        elif tv.type == 'STRING':
            return TokenValue__Str(value)

        assert False

    def expansion(self, items):
        if len(items) == 1:
            return items[0]
        return TokenValue__Regexp(''.join(i.to_regexp() for i in items))

    def expansions(self, exps):
        if len(exps) == 1:
            return exps[0]
        return TokenValue__Regexp('|'.join(i.to_regexp() for i in exps))

    def range(self, items):
        assert all(i.type == 'STRING' for i in items)
        items = [i[1:-1] for i in items]
        start, end = items
        assert len(start) == len(end) == 1, (start, end)
        return TokenValue__Regexp('[%s-%s]' % (start, end))

    def expr(self, args):
        inner, op = args
        return TokenValue__Regexp('(?:%s)%s' % (inner.to_regexp(), op))
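
# By way of example (illustrative, following the methods above): a token body
# '"a" | "b".."z"' compiles to the regexp a|[b-z]; sequence items are simply
# concatenated, and expr() wraps its operand in (?:...) so a postfix operator
# applies to the whole group rather than just its last character.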
class Grammar:
    def __init__(self, ruledefs, tokendefs, extra):
        self.tokendefs = tokendefs
        self.ruledefs = ruledefs
        self.extra = extra

    def compile(self, lexer=False):
        assert lexer

        tokendefs = [(name.value, t) for name, t in self.tokendefs]

        ignore = []
        for i, t in enumerate(self.extra['ignore']):
            name = '__IGNORE_%d' % i
            tokendefs.append((name, t))
            ignore.append(name)
        self.extra['ignore'] = ignore

        # =================
        #  Compile Tokens
        # =================
        token_to_regexp = TokenTreeToRegexp()
        token_dict = dict(tokendefs)
        assert len(token_dict) == len(tokendefs), "Same name defined twice?"

        # Resolve token assignments
        while True:
            changed = False
            for name, token_tree in tokendefs:
                for exp in chain(token_tree.find_data('expansion'), token_tree.find_data('expr')):
                    for i, item in enumerate(exp.children):
                        if isinstance(item, Token):
                            assert item.type != 'RULE', "Rules aren't allowed inside tokens"
                            if item.type == 'TOKEN':
                                exp.children[i] = token_dict[item]
                                changed = True
            if not changed:
                break

        # Convert tokens to strings/regexps
        tokens = []
        for name, token_tree in tokendefs:
            regexp = token_to_regexp.transform(token_tree)
            if isinstance(regexp, TokenValue__Str):
                tokendef = TokenDef__Str(name, regexp.value)
            else:
                tokendef = TokenDef__Regexp(name, regexp.to_regexp())
            tokens.append(tokendef)

        # Resolve regexp assignments of the form /..${X}../
        # Not sure this is even important, since you can express most regexps with EBNF
        # TODO a nicer implementation of this
        token_dict = {td.name: td.to_regexp() for td in tokens}
        while True:
            changed = False
            for t in tokens:
                if isinstance(t, TokenDef__Regexp):
                    sp = re.split(r'(\$\{%s})' % TOKENS['TOKEN'], t.value)
                    if sp:
                        value = ''.join(token_dict[x[2:-1]] if x.startswith('${') and x.endswith('}') else x
                                        for x in sp)
                        if value != t.value:
                            t.value = value
                            changed = True
            if not changed:
                break

        # =================
        #  Compile Rules
        # =================
        extract_anon = ExtractAnonTokens(tokens, set(token_dict))
        ebnf_to_bnf = EBNF_to_BNF()
        simplify_rule = SimplifyRule_Visitor()
        rule_tree_to_text = RuleTreeToText()

        rules = {}
        for name, rule_tree in self.ruledefs:
            assert name not in rules
            tree = extract_anon.transform(rule_tree)  # Adds to tokens
            rules[name] = ebnf_to_bnf.transform(tree)

        dict_update_safe(rules, ebnf_to_bnf.new_rules)

        for r in rules.values():
            simplify_rule.visit(r)

        rules = {origin: rule_tree_to_text.transform(tree) for origin, tree in rules.items()}

        return tokens, rules, self.extra
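
# The ${X} resolution in compile() means a pair of definitions like the
# following (an illustrative example) works as expected:
#
#     NUM: /[0-9]+/
#     RANGE: /${NUM}-${NUM}/
#
# RANGE's regexp is rewritten to [0-9]+-[0-9]+ before the lexer is built.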
class GrammarRule:
    def __init__(self, name, expansions):
        self.keep_all_tokens = name.startswith('!')
        name = name.lstrip('!')
        self.expand1 = name.startswith('?')
        name = name.lstrip('?')

        self.name = name
        self.expansions = expansions
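
# e.g. a rule written as '!?expr: ...' yields keep_all_tokens=True,
# expand1=True, and the stored name 'expr'.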
class GrammarLoader:
    def __init__(self):
        tokens = [TokenDef__Regexp(name, value) for name, value in TOKENS.items()]

        d = {r: [(x.split(), None) for x in xs] for r, xs in RULES.items()}
        rules, callback = ParseTreeBuilder(T).create_tree_builder(d, None)
        lexer_conf = LexerConf(tokens, ['WS', 'COMMENT'], None)
        parser_conf = ParserConf(rules, callback, 'start')
        self.parser = LALR(lexer_conf, parser_conf)

        self.simplify_tree = SimplifyTree()

    def load_grammar(self, grammar_text):
        try:
            tree = self.simplify_tree.transform(self.parser.parse(grammar_text + '\n'))
        except UnexpectedInput as e:
            raise GrammarError("Unexpected input %r at line %d column %d" % (e.context, e.line, e.column))
        except UnexpectedToken as e:
            if '_COLON' in e.expected:
                raise GrammarError("Missing colon at line %s column %s" % (e.line, e.column))
            elif 'tokenvalue' in e.expected:
                raise GrammarError("Expecting a value at line %s column %s" % (e.line, e.column))
            elif e.expected == ['_OR']:
                raise GrammarError("Newline without starting a new option (Expecting '|') at line %s column %s" % (e.line, e.column))
            raise

        # Extract grammar items
        token_defs = [c.children for c in tree.children if c.data == 'token']
        rule_defs = [c.children for c in tree.children if c.data == 'rule']
        statements = [c.children for c in tree.children if c.data == 'statement']
        assert len(token_defs) + len(rule_defs) + len(statements) == len(tree.children)

        # Verify correctness
        token_names = set()
        for name, _ in token_defs:
            if name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % name)
            if name in token_names:
                raise GrammarError("Token '%s' defined more than once" % name)
            token_names.add(name)

        rules = [GrammarRule(name, x) for name, x in rule_defs]

        rule_names = set()
        for r in rules:
            if r.name.startswith('__'):
                raise GrammarError('Names starting with double-underscore are reserved (Error at %s)' % r.name)
            if r.name in rule_names:
                raise GrammarError("Rule '%s' defined more than once" % r.name)
            rule_names.add(r.name)

        for r in rules:
            used_symbols = {t for x in r.expansions.find_data('expansion')
                            for t in x.scan_values(lambda t: t.type in ('RULE', 'TOKEN'))}
            for sym in used_symbols:
                if is_terminal(sym):
                    if sym not in token_names:
                        raise GrammarError("Token '%s' used but not defined (in rule %s)" % (sym, r.name))
                else:
                    if sym not in rule_names:
                        raise GrammarError("Rule '%s' used but not defined (in rule %s)" % (sym, r.name))

        ignore = []
        for command, expansions in statements:
            if command == 'ignore':
                ignore.append(expansions)
            else:
                assert False, command

        return Grammar(rule_defs, token_defs, {'ignore': ignore})


load_grammar = GrammarLoader().load_grammar
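
# A minimal usage sketch (the grammar text is illustrative):
#
#     g = load_grammar('''
#         start: greeting
#         greeting: HELLO WORLD
#         HELLO: "hello"
#         WORLD: "world"
#         WS: / +/
#         %ignore WS
#     ''')
#     tokens, rules, extra = g.compile(lexer=True)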