###{standalone
#
#
# Lark Stand-alone Generator Tool
# ----------------------------------
# Generates a stand-alone LALR(1) parser with a standard lexer
#
# Git:    https://github.com/erezsh/lark
# Author: Erez Shinan (erezshin@gmail.com)
#
#
#    >>> LICENSE
#
#    This tool and its generated code use a separate license from Lark,
#    and are subject to the terms of the Mozilla Public License, v. 2.0.
#    If a copy of the MPL was not distributed with this
#    file, You can obtain one at https://mozilla.org/MPL/2.0/.
#
#    If you wish to purchase a commercial license for this tool and its
#    generated code, you may contact me via email or otherwise.
#
#    If MPL2 is incompatible with your free or open-source project,
#    contact me and we'll work it out.
#
#

from abc import ABC, abstractmethod
###}
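
# Example invocation (the grammar file name here is hypothetical; run
# `python -m lark.tools.standalone --help` for the full option list):
#
#     python -m lark.tools.standalone my_grammar.lark > my_parser.py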

import sys
import token, tokenize
import os
from os import path
from collections import defaultdict
from functools import partial
from argparse import ArgumentParser

import lark
from lark.tools import lalr_argparser, build_lalr, make_warnings_comments

from lark.grammar import Rule
from lark.lexer import TerminalDef

# Paths are resolved relative to this file, so _larkdir is the root of the
# installed lark package.
_dir = path.dirname(__file__)
_larkdir = path.join(_dir, path.pardir)

# Source files whose '###{standalone ... ###}' sections are extracted and
# concatenated, in order, to form the generated stand-alone parser.
EXTRACT_STANDALONE_FILES = [
    'tools/standalone.py',
    'exceptions.py',
    'utils.py',
    'tree.py',
    'visitors.py',
    'grammar.py',
    'lexer.py',
    'common.py',
    'parse_tree_builder.py',
    'parsers/lalr_parser.py',
    'parsers/lalr_analysis.py',
    'parser_frontends.py',
    'lark.py',
    'indenter.py',
]

def extract_sections(lines):
    section = None
    text = []
    sections = defaultdict(list)
    for l in lines:
        if l.startswith('###'):
            if l[3] == '{':
                # '###{name' opens a section
                section = l[4:].strip()
            elif l[3] == '}':
                # '###}' closes the current section
                sections[section] += text
                section = None
                text = []
            else:
                raise ValueError(l)
        elif section:
            text.append(l)

    return {name: ''.join(text) for name, text in sections.items()}
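
# A minimal sketch (never called by the tool) of what extract_sections
# produces; the input lines below are hypothetical:
def _example_extract_sections():
    src = [
        "###{standalone\n",
        "x = 1\n",
        "###}\n",
        "y = 2\n",   # outside any section, so it is discarded
    ]
    assert extract_sections(src) == {'standalone': 'x = 1\n'}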

def strip_docstrings(line_gen):
    """ Strip comments and docstrings from a file.
    Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings
    """
    res = []

    prev_toktype = token.INDENT
    last_lineno = -1
    last_col = 0

    tokgen = tokenize.generate_tokens(line_gen)
    for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
        if slineno > last_lineno:
            last_col = 0
        if scol > last_col:
            res.append(" " * (scol - last_col))
        if toktype == token.STRING and prev_toktype == token.INDENT:
            # Docstring
            res.append("#--")
        elif toktype == tokenize.COMMENT:
            # Comment
            res.append("##\n")
        else:
            res.append(ttext)
        prev_toktype = toktype
        last_col = ecol
        last_lineno = elineno

    return ''.join(res)
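
# A minimal sketch (never called by the tool) of strip_docstrings: docstrings
# become '#--' and comments become '##'; the source string is hypothetical.
# The readline-style callable mirrors how gen_standalone invokes it below:
def _example_strip_docstrings():
    src = '"""module docstring"""\nx = 1  # comment\n'
    stripped = strip_docstrings(partial(next, iter(src.splitlines(True))))
    assert 'x = 1' in stripped and '"""' not in stripped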

def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False):
    if output is None:
        output = partial(print, file=out)

    import pickle, zlib, base64
    def compressed_output(obj):
        # Pickle, compress and base64-encode the object for embedding
        s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        c = zlib.compress(s)
        output(repr(base64.b64encode(c)))

    def output_decompress(name):
        # Emit the matching decode statement into the generated file
        output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals())

    output('# The file was automatically generated by Lark v%s' % lark.__version__)
    output('__version__ = "%s"' % lark.__version__)
    output()

    for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES):
        with open(os.path.join(_larkdir, pyfile)) as f:
            code = extract_sections(f)['standalone']
            if i:   # strip every file except this one, which is first in the list
                code = strip_docstrings(partial(next, iter(code.splitlines(True))))
            output(code)

    data, m = lark_inst.memo_serialize([TerminalDef, Rule])
    output('import pickle, zlib, base64')
    if compress:
        output('DATA = (')
        compressed_output(data)
        output(')')
        output_decompress('DATA')
        output('MEMO = (')
        compressed_output(m)
        output(')')
        output_decompress('MEMO')
    else:
        output('DATA = (')
        output(data)
        output(')')
        output('MEMO = (')
        output(m)
        output(')')

    output('Shift = 0')
    output('Reduce = 1')
    output("def Lark_StandAlone(**kwargs):")
    output("  return Lark._load_from_dict(DATA, MEMO, **kwargs)")

def main():
    make_warnings_comments()
    parser = ArgumentParser(prog='python -m lark.tools.standalone', description="Lark Stand-alone Generator Tool",
                            parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options')
    parser.add_argument('-c', '--compress', action='store_true', default=False, help="Enable compression")
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    ns = parser.parse_args()

    lark_inst, out = build_lalr(ns)
    gen_standalone(lark_inst, out=out, compress=ns.compress)


if __name__ == '__main__':
    main()