This repo contains code to mirror other repos. It also contains the code that is getting mirrored.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

190 lines
5.3 KiB

  1. ###{standalone
  2. #
  3. #
  4. # Lark Stand-alone Generator Tool
  5. # ----------------------------------
  6. # Generates a stand-alone LALR(1) parser
  7. #
  8. # Git: https://github.com/erezsh/lark
  9. # Author: Erez Shinan (erezshin@gmail.com)
  10. #
  11. #
  12. # >>> LICENSE
  13. #
  14. # This tool and its generated code use a separate license from Lark,
  15. # and are subject to the terms of the Mozilla Public License, v. 2.0.
  16. # If a copy of the MPL was not distributed with this
  17. # file, You can obtain one at https://mozilla.org/MPL/2.0/.
  18. #
  19. # If you wish to purchase a commercial license for this tool and its
  20. # generated code, you may contact me via email or otherwise.
  21. #
  22. # If MPL2 is incompatible with your free or open-source project,
  23. # contact me and we'll work it out.
  24. #
  25. #
  26. from abc import ABC, abstractmethod
  27. from collections.abc import Sequence
  28. from types import ModuleType
  29. from typing import (
  30. TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any,
  31. Union, Iterable, IO, TYPE_CHECKING,
  32. Pattern as REPattern, ClassVar, Set,
  33. )
  34. ###}
  35. import sys
  36. import token, tokenize
  37. import os
  38. from os import path
  39. from collections import defaultdict
  40. from functools import partial
  41. from argparse import ArgumentParser
  42. import lark
  43. from lark.tools import lalr_argparser, build_lalr, make_warnings_comments
  44. from lark.grammar import Rule
  45. from lark.lexer import TerminalDef
# Directory containing this tool, and the lark package root one level up.
_dir = path.dirname(__file__)
_larkdir = path.join(_dir, path.pardir)

# Lark source files (paths relative to the package root) whose
# '###{standalone' ... '###}' sections are concatenated into the generated
# stand-alone module. Order matters: later files use names defined earlier.
EXTRACT_STANDALONE_FILES = [
    'tools/standalone.py',
    'exceptions.py',
    'utils.py',
    'tree.py',
    'visitors.py',
    'grammar.py',
    'lexer.py',
    'common.py',
    'parse_tree_builder.py',
    'parsers/lalr_parser.py',
    'parsers/lalr_analysis.py',
    'parser_frontends.py',
    'lark.py',
    'indenter.py',
]
  64. def extract_sections(lines):
  65. section = None
  66. text = []
  67. sections = defaultdict(list)
  68. for l in lines:
  69. if l.startswith('###'):
  70. if l[3] == '{':
  71. section = l[4:].strip()
  72. elif l[3] == '}':
  73. sections[section] += text
  74. section = None
  75. text = []
  76. else:
  77. raise ValueError(l)
  78. elif section:
  79. text.append(l)
  80. return {name:''.join(text) for name, text in sections.items()}
  81. def strip_docstrings(line_gen):
  82. """ Strip comments and docstrings from a file.
  83. Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings
  84. """
  85. res = []
  86. prev_toktype = token.INDENT
  87. last_lineno = -1
  88. last_col = 0
  89. tokgen = tokenize.generate_tokens(line_gen)
  90. for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
  91. if slineno > last_lineno:
  92. last_col = 0
  93. if scol > last_col:
  94. res.append(" " * (scol - last_col))
  95. if toktype == token.STRING and prev_toktype == token.INDENT:
  96. # Docstring
  97. res.append("#--")
  98. elif toktype == tokenize.COMMENT:
  99. # Comment
  100. res.append("##\n")
  101. else:
  102. res.append(ttext)
  103. prev_toktype = toktype
  104. last_col = ecol
  105. last_lineno = elineno
  106. return ''.join(res)
def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False):
    """Emit a self-contained stand-alone parser module.

    Concatenates the '###{standalone' sections of EXTRACT_STANDALONE_FILES
    (docstrings/comments stripped for all but the first file), then appends
    the serialized parser tables (DATA/MEMO) from *lark_inst* and a small
    ``Lark_StandAlone`` constructor.  Each generated line is written via
    *output*; if *output* is None, lines are printed to *out*.  With
    ``compress=True`` the tables are pickled, zlib-compressed and
    base64-encoded, and decompression code is emitted alongside them.
    """
    if output is None:
        output = partial(print, file=out)

    import pickle, zlib, base64
    def compressed_output(obj):
        # Emit obj as a base64 literal of its zlib-compressed pickle.
        s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        c = zlib.compress(s)
        output(repr(base64.b64encode(c)))
    def output_decompress(name):
        # Emit the matching decode line for a compressed_output() literal.
        output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals())

    output('# The file was automatically generated by Lark v%s' % lark.__version__)
    output('__version__ = "%s"' % lark.__version__)
    output()

    for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES):
        with open(os.path.join(_larkdir, pyfile)) as f:
            code = extract_sections(f)['standalone']
            if i:   # if not this file
                code = strip_docstrings(partial(next, iter(code.splitlines(True))))
            output(code)

    data, m = lark_inst.memo_serialize([TerminalDef, Rule])
    output('import pickle, zlib, base64')
    if compress:
        output('DATA = (')
        compressed_output(data)
        output(')')
        output_decompress('DATA')
        output('MEMO = (')
        compressed_output(m)
        output(')')
        output_decompress('MEMO')
    else:
        output('DATA = (')
        output(data)
        output(')')
        output('MEMO = (')
        output(m)
        output(')')

    output('Shift = 0')
    output('Reduce = 1')
    output("def Lark_StandAlone(**kwargs):")
    output("  return Lark._load_from_dict(DATA, MEMO, **kwargs)")
  148. def main():
  149. make_warnings_comments()
  150. parser = ArgumentParser(prog="prog='python -m lark.tools.standalone'", description="Lark Stand-alone Generator Tool",
  151. parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options')
  152. parser.add_argument('-c', '--compress', action='store_true', default=0, help="Enable compression")
  153. if len(sys.argv)==1:
  154. parser.print_help(sys.stderr)
  155. sys.exit(1)
  156. ns = parser.parse_args()
  157. lark_inst, out = build_lalr(ns)
  158. gen_standalone(lark_inst, out=out, compress=ns.compress)
# Run the generator only when executed as a script, not on import.
if __name__ == '__main__':
    main()