###{standalone
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Generates a stand-alone LALR(1) parser with a standard lexer
#
# Git:    https://github.com/erezsh/lark
# Author: Erez Shinan (erezshin@gmail.com)
#
#
#    >>> LICENSE
#
#    This tool and its generated code use a separate license from Lark.
#
#    It is licensed under GPLv2 or above.
#
#    If you wish to purchase a commercial license for this tool and its
#    generated code, contact me via email.
#
#    If GPL is incompatible with your free or open-source project,
#    contact me and we'll work it out (for free).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 2 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    See <http://www.gnu.org/licenses/>.
#
#
###}

import codecs
import sys
import os
from pprint import pprint
from os import path
from collections import defaultdict

import lark
from lark import Lark
from lark.parsers.lalr_analysis import Shift, Reduce

from ..grammar import Rule

_dir = path.dirname(__file__)
_larkdir = path.join(_dir, path.pardir)


EXTRACT_STANDALONE_FILES = [
    'tools/standalone.py',
    'exceptions.py',
    'utils.py',
    'tree.py',
    'visitors.py',
    'indenter.py',
    'lexer.py',
    'parse_tree_builder.py',
    'parsers/lalr_parser.py',
]


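# Split a source file into the named sections delimited by '###{<name>' and
# '###}' marker comments, returning {section_name: section_text}.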
def extract_sections(lines):
    section = None
    text = []
    sections = defaultdict(list)
    for l in lines:
        if l.startswith('###'):
            if l[3] == '{':
                section = l[4:].strip()
            elif l[3] == '}':
                sections[section] += text
                section = None
                text = []
            else:
                raise ValueError(l)
        elif section:
            text.append(l)

    return {name: ''.join(text) for name, text in sections.items()}


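# Turn compiled (regexp, token-map) pairs into plain (pattern-string, dict)
# pairs that can be embedded literally in the generated source.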
def _prepare_mres(mres):
    return [(p.pattern, {i: t for i, t in d.items()}) for p, d in mres]


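# Captures the data of a standard (single-state) lexer so it can be emitted
# as Python source by print_python().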
class TraditionalLexerAtoms:
    def __init__(self, lexer):
        self.mres = _prepare_mres(lexer.mres)
        self.newline_types = lexer.newline_types
        self.ignore_types = lexer.ignore_types
        self.callback = {name: _prepare_mres(c.mres)
                         for name, c in lexer.callback.items()}

    def print_python(self):
        print('import re')
        print('class LexerRegexps: pass')
        print('NEWLINE_TYPES = %s' % self.newline_types)
        print('IGNORE_TYPES = %s' % self.ignore_types)
        self._print_python('lexer')

    def _print_python(self, var_name):
        print('MRES = (')
        pprint(self.mres)
        print(')')
        print('LEXER_CALLBACK = (')
        pprint(self.callback)
        print(')')
        print('lexer_regexps = LexerRegexps()')
        print('lexer_regexps.mres = [(re.compile(p), d) for p, d in MRES]')
        print('lexer_regexps.callback = {n: UnlessCallback([(re.compile(p), d) for p, d in mres])')
        print('                          for n, mres in LEXER_CALLBACK.items()}')
        print('%s = (lexer_regexps)' % var_name)


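# Captures one TraditionalLexerAtoms per parser state, plus the root lexer,
# and emits a self-contained ContextualLexer for the generated module.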
class ContextualLexerAtoms:
    def __init__(self, lexer):
        self.lexer_atoms = {state: TraditionalLexerAtoms(lexer) for state, lexer in lexer.lexers.items()}
        self.root_lexer_atoms = TraditionalLexerAtoms(lexer.root_lexer)

    def print_python(self):
        print('import re')
        print('class LexerRegexps: pass')
        print('NEWLINE_TYPES = %s' % self.root_lexer_atoms.newline_types)
        print('IGNORE_TYPES = %s' % self.root_lexer_atoms.ignore_types)

        print('LEXERS = {}')
        for state, lexer_atoms in self.lexer_atoms.items():
            lexer_atoms._print_python('LEXERS[%d]' % state)

        print('class ContextualLexer:')
        print('    def __init__(self):')
        print('        self.lexers = LEXERS')
        print('        self.set_parser_state(None)')
        print('    def set_parser_state(self, state):')
        print('        self.parser_state = state')
        print('    def lex(self, stream):')
        print('        newline_types = NEWLINE_TYPES')
        print('        ignore_types = IGNORE_TYPES')
        print('        lexers = LEXERS')
        print('        l = _Lex(lexers[self.parser_state], self.parser_state)')
        print('        for x in l.lex(stream, newline_types, ignore_types):')
        print('            yield x')
        print('            l.lexer = lexers[self.parser_state]')
        print('            l.state = self.parser_state')

        print('CON_LEXER = ContextualLexer()')
        print('def lex(stream):')
        print('    return CON_LEXER.lex(stream)')


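# Helpers for referring to rules and token types by numeric id in the
# generated tables.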
class GetRule:
    def __init__(self, rule_id):
        self.rule_id = rule_id

    def __repr__(self):
        return 'RULES[%d]' % self.rule_id


rule_ids = {}
token_types = {}


def _get_token_type(token_type):
    if token_type not in token_types:
        token_types[token_type] = len(token_types)
    return token_types[token_type]


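# Emits the LALR(1) parse table (states, start/end state) together with the
# Lark_StandAlone entry-point class of the generated module.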
class ParserAtoms:
    def __init__(self, parser):
        self.parse_table = parser._parse_table

    def print_python(self):
        print('class ParseTable: pass')
        print('parse_table = ParseTable()')
        print('STATES = {')
        for state, actions in self.parse_table.states.items():
            print('  %r: %r,' % (state, {_get_token_type(token): ((1, rule_ids[arg]) if action is Reduce else (0, arg))
                                         for token, (action, arg) in actions.items()}))
        print('}')
        print('TOKEN_TYPES = (')
        pprint({v: k for k, v in token_types.items()})
        print(')')
        print('parse_table.states = {s: {TOKEN_TYPES[t]: (a, RULES[x] if a is Reduce else x) for t, (a, x) in acts.items()}')
        print('                      for s, acts in STATES.items()}')
        print('parse_table.start_state = %s' % self.parse_table.start_state)
        print('parse_table.end_state = %s' % self.parse_table.end_state)
        print('class Lark_StandAlone:')
        print('    def __init__(self, transformer=None, postlex=None):')
        print('        callback = parse_tree_builder.create_callback(transformer=transformer)')
        print('        callbacks = {rule: getattr(callback, rule.alias or rule.origin, None) for rule in RULES.values()}')
        print('        self.parser = _Parser(parse_table, callbacks)')
        print('        self.postlex = postlex')
        print('    def parse(self, stream):')
        print('        tokens = lex(stream)')
        print('        sps = CON_LEXER.set_parser_state')
        print('        if self.postlex: tokens = self.postlex.process(tokens)')
        print('        return self.parser.parse(tokens, sps)')


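# Emits the grammar rules and the parse-tree builder used to turn reductions
# into Tree instances.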
class TreeBuilderAtoms:
    def __init__(self, lark):
        self.rules = lark.rules
        self.ptb = lark._parse_tree_builder

    def print_python(self):
        # print('class InlineTransformer: pass')
        print('RULES = {')
        for i, r in enumerate(self.rules):
            rule_ids[r] = i
            print('  %d: Rule(%r, [%s], alias=%r, options=%r),' % (i, r.origin, ', '.join(s.fullrepr for s in r.expansion), self.ptb.user_aliases[r], r.options))
        print('}')
        print('parse_tree_builder = ParseTreeBuilder(RULES.values(), Tree)')


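# Build a contextual LALR(1) Lark instance from the grammar file and write the
# complete standalone parser module to stdout.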
def main(fobj, start):
    lark_inst = Lark(fobj, parser="lalr", lexer="contextual", start=start)

    lexer_atoms = ContextualLexerAtoms(lark_inst.parser.lexer)
    parser_atoms = ParserAtoms(lark_inst.parser.parser)
    tree_builder_atoms = TreeBuilderAtoms(lark_inst)

    print('# The file was automatically generated by Lark v%s' % lark.__version__)

    for pyfile in EXTRACT_STANDALONE_FILES:
        with open(os.path.join(_larkdir, pyfile)) as f:
            print(extract_sections(f)['standalone'])

    with open(os.path.join(_larkdir, 'grammar.py')) as grammar_py:
        print(grammar_py.read())

    print('Shift = 0')
    print('Reduce = 1')
    lexer_atoms.print_python()
    tree_builder_atoms.print_python()
    parser_atoms.print_python()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Lark Stand-alone Generator Tool")
        print("Usage: python -m lark.tools.standalone <grammar-file> [<start>]")
        sys.exit(1)

    if len(sys.argv) == 3:
        fn, start = sys.argv[1:]
    elif len(sys.argv) == 2:
        fn, start = sys.argv[1], 'start'
    else:
        assert False, sys.argv

    with codecs.open(fn, encoding='utf8') as f:
        main(f, start)
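
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the tool): the grammar file
# `my_grammar.lark` and the generated module name `my_parser.py` below are
# hypothetical names chosen for this example.
#
#   $ python -m lark.tools.standalone my_grammar.lark > my_parser.py
#
#   # In the project that ships the generated module:
#   from my_parser import Lark_StandAlone
#
#   parser = Lark_StandAlone()          # optionally: transformer=..., postlex=...
#   tree = parser.parse('input text to parse')
# ---------------------------------------------------------------------------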