###{standalone
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Git: https://github.com/erezsh/lark
# Author: Erez Shinan (erezshin@gmail.com)
#
#
# >>> LICENSE
#
# This tool and its generated code use a separate license from Lark.
#
# It is licensed under GPLv2 or above.
#
# If you wish to purchase a commercial license for this tool and its
# generated code, contact me via email.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/>.
#
#
###}
import codecs
import sys
import os
from pprint import pprint
from os import path
from collections import defaultdict

import lark
from lark import Lark

from ..grammar import Rule

__dir__ = path.dirname(__file__)
__larkdir__ = path.join(__dir__, path.pardir)

EXTRACT_STANDALONE_FILES = [
    'tools/standalone.py',
    'utils.py',
    'common.py',
    'tree.py',
    'lexer.py',
    'parse_tree_builder.py',
    'parsers/lalr_parser.py',
]

def extract_sections(lines):
    """Collect the text between '###{name' and '###}' markers, keyed by section name."""
    section = None
    text = []
    sections = defaultdict(list)
    for l in lines:
        if l.startswith('###'):
            if l[3] == '{':
                section = l[4:].strip()
            elif l[3] == '}':
                sections[section] += text
                section = None
                text = []
            else:
                raise ValueError(l)
        elif section:
            text.append(l)
    return {name: ''.join(text) for name, text in sections.items()}
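
# For illustration, here is the marker syntax extract_sections() parses; this
# file's own license header uses it. The input below (the helper function is
# hypothetical) yields {'standalone': 'def helper():\n    return 1\n'}:
#
#   ###{standalone
#   def helper():
#       return 1
#   ###}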

class LexerAtoms:
    """Snapshot of the lexer's regexps and token-type lists, printable into
    the generated module."""
    def __init__(self, lexer):
        assert not lexer.callback
        self.mres = [(p.pattern, d) for p, d in lexer.mres]
        self.newline_types = lexer.newline_types
        self.ignore_types = lexer.ignore_types

    def print_python(self):
        print('import re')
        print('MRES = (')
        pprint(self.mres)
        print(')')
        print('NEWLINE_TYPES = %s' % self.newline_types)
        print('IGNORE_TYPES = %s' % self.ignore_types)
        print('class LexerRegexps: pass')
        print('lexer_regexps = LexerRegexps()')
        print('lexer_regexps.mres = [(re.compile(p), d) for p, d in MRES]')
        print('lexer_regexps.callback = {}')
        print('lexer = _Lex(lexer_regexps)')
        print('def lex(stream):')
        print('    return lexer.lex(stream, NEWLINE_TYPES, IGNORE_TYPES)')
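
# A rough sketch of the snippet the method above writes to stdout; the MRES
# value shown is hypothetical (real entries come from the grammar's
# terminals), and intermediate lines are elided:
#
#   import re
#   MRES = (
#   [('(?P<NUMBER>\\d+)', {'NUMBER': 'NUMBER'})]
#   )
#   ...
#   def lex(stream):
#       return lexer.lex(stream, NEWLINE_TYPES, IGNORE_TYPES)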

class GetRule:
    """Placeholder whose repr prints as a lookup into the generated RULE_ID table."""
    def __init__(self, rule_id):
        self.rule_id = rule_id

    def __repr__(self):
        return 'RULE_ID[%d]' % self.rule_id


def get_rule_ids(x):
    # Recursively replace Rule instances with GetRule placeholders, so the
    # pprinted parse table references rules by id().
    if isinstance(x, (tuple, list)):
        return type(x)(map(get_rule_ids, x))
    elif isinstance(x, dict):
        return {get_rule_ids(k): get_rule_ids(v) for k, v in x.items()}
    elif isinstance(x, Rule):
        return GetRule(id(x))
    return x
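
# Example of the substitution (the table shape is simplified and the id value
# is hypothetical): a parse-table entry such as
#
#   {('start', '$END'): (Reduce, <Rule object>)}
#
# pprints as
#
#   {('start', '$END'): (Reduce, RULE_ID[140251738])}
#
# and the matching RULE_ID table is emitted by TreeBuilderAtoms below.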

class ParserAtoms:
    def __init__(self, parser):
        self.parse_table = parser.analysis.parse_table

    def print_python(self):
        print('class ParseTable: pass')
        print('parse_table = ParseTable()')
        print('parse_table.states = (')
        pprint(get_rule_ids(self.parse_table.states))
        print(')')
        print('parse_table.start_state = %s' % self.parse_table.start_state)
        print('parse_table.end_state = %s' % self.parse_table.end_state)
        print('class Lark_StandAlone:')
        print('    def __init__(self, transformer=None):')
        print('        callback = parse_tree_builder.create_callback(transformer=transformer)')
        print('        callbacks = {rule: getattr(callback, rule.alias or rule.origin, None) for rule in RULES}')
        print('        self.parser = _Parser(parse_table, callbacks)')
        print('    def parse(self, stream):')
        print('        return self.parser.parse(lex(stream))')

class TreeBuilderAtoms:
    def __init__(self, lark):
        self.rules = lark.rules
        self.ptb = lark._parse_tree_builder

    def print_python(self):
        print('RULE_ID = {')
        for r in self.rules:
            print('    %d: Rule(%r, %r, %r, %r),' % (id(r), r.origin, r.expansion, self.ptb.user_aliases[r], r.options))
        print('}')
        print('RULES = list(RULE_ID.values())')
        print('parse_tree_builder = ParseTreeBuilder(RULES, Tree)')

def main(fn):
    with codecs.open(fn, encoding='utf8') as f:
        lark_inst = Lark(f, parser="lalr")

    lexer_atoms = LexerAtoms(lark_inst.parser.lexer)
    parser_atoms = ParserAtoms(lark_inst.parser.parser)
    tree_builder_atoms = TreeBuilderAtoms(lark_inst)

    print('# Generated by Lark v%s' % lark.__version__)
    for pyfile in EXTRACT_STANDALONE_FILES:
        print(extract_sections(open(os.path.join(__larkdir__, pyfile)))['standalone'])
    print(open(os.path.join(__larkdir__, 'grammar.py')).read())
    print('Shift = 0')
    print('Reduce = 1')
    lexer_atoms.print_python()
    tree_builder_atoms.print_python()
    parser_atoms.print_python()
    # print('print(parser.parse(lex("1+2")).pretty())')

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Generates a stand-alone LALR(1) parser")
        print("Usage: %s <grammar_path>" % sys.argv[0])
        sys.exit(1)

    fn, = sys.argv[1:]
    main(fn)
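
# Example invocation (file names are hypothetical; the tool must be run as a
# module so the relative import of ..grammar resolves, and the generated
# parser is written to stdout):
#
#   python -m lark.tools.standalone my_grammar.lark > my_parser.py
#
# The generated my_parser.py is then usable without lark installed:
#
#   from my_parser import Lark_StandAlone
#   parser = Lark_StandAlone()
#   tree = parser.parse('some input text')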