# -*- coding: utf-8 -*-
from __future__ import absolute_import

import re
import unittest
import logging
import os
import sys
from copy import copy, deepcopy

from lark.utils import Py36, isascii
from lark import Token
from lark.load_grammar import FromPackageLoader

try:
    from cStringIO import StringIO as cStringIO
except ImportError:
    # Available only in Python 2.x; 3.x only has io.StringIO, imported below
    cStringIO = None

from io import (
    StringIO as uStringIO,
    BytesIO,
    open,
)

try:
    import regex
except ImportError:
    regex = None

import lark
from lark import logger
from lark.lark import Lark
from lark.exceptions import GrammarError, ParseError, UnexpectedToken, UnexpectedInput, UnexpectedCharacters
from lark.tree import Tree
from lark.visitors import Transformer, Transformer_InPlace, v_args
from lark.grammar import Rule
from lark.lexer import TerminalDef, Lexer, TraditionalLexer
from lark.indenter import Indenter

__all__ = ['TestParsers']

__path__ = os.path.dirname(__file__)


def _read(n, *args):
    with open(os.path.join(__path__, n), *args) as f:
        return f.read()
class TestParsers(unittest.TestCase):
    def test_big_list(self):
        Lark(r"""
            start: {}
        """.format(
            "|".join(['"%s"' % i for i in range(250)])
        ))

    def test_same_ast(self):
        "Tests that Earley and LALR parsers produce equal trees"
        g = Lark(r"""start: "(" name_list ("," "*" NAME)? ")"
                     name_list: NAME | name_list "," NAME
                     NAME: /\w+/ """, parser='lalr')
        l = g.parse('(a,b,c,*x)')

        g = Lark(r"""start: "(" name_list ("," "*" NAME)? ")"
                     name_list: NAME | name_list "," NAME
                     NAME: /\w/+ """)
        l2 = g.parse('(a,b,c,*x)')
        assert l == l2, '%s != %s' % (l.pretty(), l2.pretty())

    def test_infinite_recurse(self):
        g = """start: a
               a: a | "a"
            """

        self.assertRaises(GrammarError, Lark, g, parser='lalr')

        # TODO: should it? shouldn't it?
        # l = Lark(g, parser='earley', lexer='dynamic')
        # self.assertRaises(ParseError, l.parse, 'a')

    def test_propagate_positions(self):
        g = Lark("""start: a
                    a: "a"
                 """, propagate_positions=True)

        r = g.parse('a')
        self.assertEqual( r.children[0].meta.line, 1 )

        g = Lark("""start: x
                    x: a
                    a: "a"
                 """, propagate_positions=True)

        r = g.parse('a')
        self.assertEqual( r.children[0].meta.line, 1 )

    def test_expand1(self):
        g = Lark("""start: a
                    ?a: b
                    b: "x"
                 """)

        r = g.parse('x')
        self.assertEqual( r.children[0].data, "b" )

        g = Lark("""start: a
                    ?a: b -> c
                    b: "x"
                 """)

        r = g.parse('x')
        self.assertEqual( r.children[0].data, "c" )

        g = Lark("""start: a
                    ?a: B -> c
                    B: "x"
                 """)
        r = g.parse('x')
        self.assertEqual( r.children[0].data, "c" )

        g = Lark("""start: a
                    ?a: b b -> c
                    b: "x"
                 """)

        r = g.parse('xx')
        self.assertEqual( r.children[0].data, "c" )
    def test_comment_in_rule_definition(self):
        g = Lark("""start: a
               a: "a"
                // A comment
                // Another comment
                | "b"
                // Still more

               c: "unrelated"
            """)
        r = g.parse('b')
        self.assertEqual( r.children[0].data, "a" )

    def test_visit_tokens(self):
        class T(Transformer):
            def a(self, children):
                return children[0] + "!"
            def A(self, tok):
                return tok.update(value=tok.upper())

        # Test regular
        g = """start: a
               a : A
               A: "x"
            """
        p = Lark(g, parser='lalr')
        r = T(False).transform(p.parse("x"))
        self.assertEqual( r.children, ["x!"] )
        r = T().transform(p.parse("x"))
        self.assertEqual( r.children, ["X!"] )

        # Test internal transformer
        p = Lark(g, parser='lalr', transformer=T())
        r = p.parse("x")
        self.assertEqual( r.children, ["X!"] )

    def test_vargs_meta(self):
        @v_args(meta=True)
        class T1(Transformer):
            def a(self, children, meta):
                assert not children
                return meta.line

            def start(self, children, meta):
                return children

        @v_args(meta=True, inline=True)
        class T2(Transformer):
            def a(self, meta):
                return meta.line

            def start(self, meta, *res):
                return list(res)

        for T in (T1, T2):
            for internal in [False, True]:
                try:
                    g = Lark(r"""start: a+
                                 a : "x" _NL?
                                 _NL: /\n/+
                              """, parser='lalr', transformer=T() if internal else None, propagate_positions=True)
                except NotImplementedError:
                    assert internal
                    continue

                res = g.parse("xx\nx\nxxx\n\n\nxx")
                assert not internal
                res = T().transform(res)

                self.assertEqual(res, [1, 1, 2, 3, 3, 3, 6, 6])
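                # (Each expected entry is the source line of one "x" in
                # "xx\nx\nxxx\n\n\nxx": two on line 1, one on line 2,
                # three on line 3, and two on line 6.)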
    def test_vargs_tree(self):
        tree = Lark('''
            start: a a a
            !a: "A"
        ''').parse('AAA')
        tree_copy = deepcopy(tree)

        @v_args(tree=True)
        class T(Transformer):
            def a(self, tree):
                return 1
            def start(self, tree):
                return tree.children

        res = T().transform(tree)
        self.assertEqual(res, [1, 1, 1])
        self.assertEqual(tree, tree_copy)

    def test_embedded_transformer(self):
        class T(Transformer):
            def a(self, children):
                return "<a>"
            def b(self, children):
                return "<b>"
            def c(self, children):
                return "<c>"

        # Test regular
        g = Lark("""start: a
                    a : "x"
                 """, parser='lalr')
        r = T().transform(g.parse("x"))
        self.assertEqual( r.children, ["<a>"] )

        g = Lark("""start: a
                    a : "x"
                 """, parser='lalr', transformer=T())
        r = g.parse("x")
        self.assertEqual( r.children, ["<a>"] )

        # Test Expand1
        g = Lark("""start: a
                    ?a : b
                    b : "x"
                 """, parser='lalr')
        r = T().transform(g.parse("x"))
        self.assertEqual( r.children, ["<b>"] )

        g = Lark("""start: a
                    ?a : b
                    b : "x"
                 """, parser='lalr', transformer=T())
        r = g.parse("x")
        self.assertEqual( r.children, ["<b>"] )

        # Test Expand1 -> Alias
        g = Lark("""start: a
                    ?a : b b -> c
                    b : "x"
                 """, parser='lalr')
        r = T().transform(g.parse("xx"))
        self.assertEqual( r.children, ["<c>"] )

        g = Lark("""start: a
                    ?a : b b -> c
                    b : "x"
                 """, parser='lalr', transformer=T())
        r = g.parse("xx")
        self.assertEqual( r.children, ["<c>"] )

    def test_embedded_transformer_inplace(self):
        @v_args(tree=True)
        class T1(Transformer_InPlace):
            def a(self, tree):
                assert isinstance(tree, Tree), tree
                tree.children.append("tested")
                return tree

            def b(self, tree):
                return Tree(tree.data, tree.children + ['tested2'])

        @v_args(tree=True)
        class T2(Transformer):
            def a(self, tree):
                assert isinstance(tree, Tree), tree
                tree.children.append("tested")
                return tree

            def b(self, tree):
                return Tree(tree.data, tree.children + ['tested2'])

        class T3(Transformer):
            @v_args(tree=True)
            def a(self, tree):
                assert isinstance(tree, Tree)
                tree.children.append("tested")
                return tree

            @v_args(tree=True)
            def b(self, tree):
                return Tree(tree.data, tree.children + ['tested2'])

        for t in [T1(), T2(), T3()]:
            for internal in [False, True]:
                g = Lark("""start: a b
                            a : "x"
                            b : "y"
                         """, parser='lalr', transformer=t if internal else None)
                r = g.parse("xy")
                if not internal:
                    r = t.transform(r)

                a, b = r.children
                self.assertEqual(a.children, ["tested"])
                self.assertEqual(b.children, ["tested2"])

    def test_alias(self):
        Lark("""start: ["a"] "b" ["c"] "e" ["f"] ["g"] ["h"] "x" -> d """)

    def test_backwards_custom_lexer(self):
        class OldCustomLexer(Lexer):
            def __init__(self, lexer_conf):
                pass

            def lex(self, text):
                yield Token('A', 'A')

        p = Lark("""
        start: A
        %declare A
        """, parser='lalr', lexer=OldCustomLexer)

        r = p.parse('')
        self.assertEqual(r, Tree('start', [Token('A', 'A')]))
def _make_full_earley_test(LEXER):
    def _Lark(grammar, **kwargs):
        return Lark(grammar, lexer=LEXER, parser='earley', propagate_positions=True, **kwargs)

    class _TestFullEarley(unittest.TestCase):
        def test_anon(self):
            # Fails an Earley implementation without special handling for empty rules,
            # or re-processing of already completed rules.
            g = Lark(r"""start: B
                         B: ("ab"|/[^b]/)+
                      """, lexer=LEXER)

            self.assertEqual( g.parse('abc').children[0], 'abc')

        def test_earley(self):
            g = Lark("""start: A "b" c
                        A: "a"+
                        c: "abc"
                     """, parser="earley", lexer=LEXER)
            x = g.parse('aaaababc')

        def test_earley2(self):
            grammar = """
            start: statement+
            statement: "r"
                     | "c" /[a-z]/+
            %ignore " "
            """
            program = """c b r"""

            l = Lark(grammar, parser='earley', lexer=LEXER)
            l.parse(program)

        @unittest.skipIf(LEXER=='dynamic', "Only relevant for the dynamic_complete parser")
        def test_earley3(self):
            """Tests prioritization and disambiguation for pseudo-terminals (there should be only one result)

            By default, `+` should imitate regexp greedy-matching
            """
            grammar = """
            start: A A
            A: "a"+
            """

            l = Lark(grammar, parser='earley', lexer=LEXER)
            res = l.parse("aaa")
            self.assertEqual(set(res.children), {'aa', 'a'})
            # XXX TODO fix Earley to maintain correct order
            # i.e. it should imitate greedy search for terminals, but lazy search for rules
            # self.assertEqual(res.children, ['aa', 'a'])
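            # In other words: the intended split of "aaa" is for the first A to
            # greedily take "aa", leaving "a" for the second, just as /(a+)(a+)/
            # would match it. Only the order isn't guaranteed yet, hence the
            # set comparison above.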
        def test_earley4(self):
            grammar = """
            start: A A?
            A: "a"+
            """

            l = Lark(grammar, parser='earley', lexer=LEXER)
            res = l.parse("aaa")
            assert set(res.children) == {'aa', 'a'} or res.children == ['aaa']
            # XXX TODO fix Earley to maintain correct order
            # i.e. it should imitate greedy search for terminals, but lazy search for rules
            # self.assertEqual(res.children, ['aaa'])

        def test_earley_repeating_empty(self):
            # This was a sneaky bug!
            grammar = """
            !start: "a" empty empty "b"
            empty: empty2
            empty2:
            """

            parser = Lark(grammar, parser='earley', lexer=LEXER)
            res = parser.parse('ab')

            empty_tree = Tree('empty', [Tree('empty2', [])])
            self.assertSequenceEqual(res.children, ['a', empty_tree, empty_tree, 'b'])

        @unittest.skipIf(LEXER=='standard', "Requires dynamic lexer")
        def test_earley_explicit_ambiguity(self):
            # This was a sneaky bug!
            grammar = """
            start: a b | ab
            a: "a"
            b: "b"
            ab: "ab"
            """

            parser = Lark(grammar, parser='earley', lexer=LEXER, ambiguity='explicit')
            ambig_tree = parser.parse('ab')
            self.assertEqual( ambig_tree.data, '_ambig')
            self.assertEqual( len(ambig_tree.children), 2)

        @unittest.skipIf(LEXER=='standard', "Requires dynamic lexer")
        def test_ambiguity1(self):
            grammar = """
            start: cd+ "e"
            !cd: "c"
               | "d"
               | "cd"
            """
            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse('cde')

            assert ambig_tree.data == '_ambig', ambig_tree
            assert len(ambig_tree.children) == 2

        @unittest.skipIf(LEXER=='standard', "Requires dynamic lexer")
        def test_ambiguity2(self):
            grammar = """
            ANY: /[a-zA-Z0-9 ]+/
            a.2: "A" b+
            b.2: "B"
            c: ANY

            start: (a|c)*
            """
            l = Lark(grammar, parser='earley', lexer=LEXER)
            res = l.parse('ABX')
            expected = Tree('start', [
                Tree('a', [
                    Tree('b', [])
                ]),
                Tree('c', [
                    'X'
                ])
            ])
            self.assertEqual(res, expected)

        def test_ambiguous_intermediate_node(self):
            grammar = """
            start: ab bc d?
            !ab: "A" "B"?
            !bc: "B"? "C"
            !d: "D"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCD")
            expected = {
                Tree('start', [Tree('ab', ['A']), Tree('bc', ['B', 'C']), Tree('d', ['D'])]),
                Tree('start', [Tree('ab', ['A', 'B']), Tree('bc', ['C']), Tree('d', ['D'])])
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_ambiguous_symbol_and_intermediate_nodes(self):
            grammar = """
            start: ab bc cd
            !ab: "A" "B"?
            !bc: "B"? "C"?
            !cd: "C"? "D"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCD")
            expected = {
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', ['C']),
                    Tree('cd', ['D'])
                ]),
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', []),
                    Tree('cd', ['C', 'D'])
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B', 'C']),
                    Tree('cd', ['D'])
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B']),
                    Tree('cd', ['C', 'D'])
                ]),
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_nested_ambiguous_intermediate_nodes(self):
            grammar = """
            start: ab bc cd e?
            !ab: "A" "B"?
            !bc: "B"? "C"?
            !cd: "C"? "D"
            !e: "E"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCDE")
            expected = {
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', ['C']),
                    Tree('cd', ['D']),
                    Tree('e', ['E'])
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B', 'C']),
                    Tree('cd', ['D']),
                    Tree('e', ['E'])
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B']),
                    Tree('cd', ['C', 'D']),
                    Tree('e', ['E'])
                ]),
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', []),
                    Tree('cd', ['C', 'D']),
                    Tree('e', ['E'])
                ]),
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_nested_ambiguous_intermediate_nodes2(self):
            grammar = """
            start: ab bc cd de f
            !ab: "A" "B"?
            !bc: "B"? "C"?
            !cd: "C"? "D"?
            !de: "D"? "E"
            !f: "F"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCDEF")
            expected = {
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', ['C']),
                    Tree('cd', ['D']),
                    Tree('de', ['E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B', 'C']),
                    Tree('cd', ['D']),
                    Tree('de', ['E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B']),
                    Tree('cd', ['C', 'D']),
                    Tree('de', ['E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B']),
                    Tree('cd', ['C']),
                    Tree('de', ['D', 'E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A', "B"]),
                    Tree('bc', []),
                    Tree('cd', ['C']),
                    Tree('de', ['D', 'E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A']),
                    Tree('bc', ['B', 'C']),
                    Tree('cd', []),
                    Tree('de', ['D', 'E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', []),
                    Tree('cd', ['C', 'D']),
                    Tree('de', ['E']),
                    Tree('f', ['F']),
                ]),
                Tree('start', [
                    Tree('ab', ['A', 'B']),
                    Tree('bc', ['C']),
                    Tree('cd', []),
                    Tree('de', ['D', 'E']),
                    Tree('f', ['F']),
                ]),
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_ambiguous_intermediate_node_unnamed_token(self):
            grammar = """
            start: ab bc "D"
            !ab: "A" "B"?
            !bc: "B"? "C"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCD")
            expected = {
                Tree('start', [Tree('ab', ['A']), Tree('bc', ['B', 'C'])]),
                Tree('start', [Tree('ab', ['A', 'B']), Tree('bc', ['C'])])
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_ambiguous_intermediate_node_inlined_rule(self):
            grammar = """
            start: ab _bc d?
            !ab: "A" "B"?
            _bc: "B"? "C"
            !d: "D"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCD")
            expected = {
                Tree('start', [Tree('ab', ['A']), Tree('d', ['D'])]),
                Tree('start', [Tree('ab', ['A', 'B']), Tree('d', ['D'])])
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_ambiguous_intermediate_node_conditionally_inlined_rule(self):
            grammar = """
            start: ab bc d?
            !ab: "A" "B"?
            !?bc: "B"? "C"
            !d: "D"
            """

            l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER)
            ambig_tree = l.parse("ABCD")
            expected = {
                Tree('start', [Tree('ab', ['A']), Tree('bc', ['B', 'C']), Tree('d', ['D'])]),
                Tree('start', [Tree('ab', ['A', 'B']), 'C', Tree('d', ['D'])])
            }
            self.assertEqual(ambig_tree.data, '_ambig')
            self.assertEqual(set(ambig_tree.children), expected)

        def test_fruitflies_ambig(self):
            grammar = """
            start: noun verb noun        -> simple
                 | noun verb "like" noun -> comparative

            noun: adj? NOUN
            verb: VERB
            adj: ADJ

            NOUN: "flies" | "bananas" | "fruit"
            VERB: "like" | "flies"
            ADJ: "fruit"

            %import common.WS
            %ignore WS
            """
            parser = Lark(grammar, ambiguity='explicit', lexer=LEXER)
            tree = parser.parse('fruit flies like bananas')

            expected = Tree('_ambig', [
                Tree('comparative', [
                    Tree('noun', ['fruit']),
                    Tree('verb', ['flies']),
                    Tree('noun', ['bananas'])
                ]),
                Tree('simple', [
                    Tree('noun', [Tree('adj', ['fruit']), 'flies']),
                    Tree('verb', ['like']),
                    Tree('noun', ['bananas'])
                ])
            ])

            # self.assertEqual(tree, expected)
            self.assertEqual(tree.data, expected.data)
            self.assertEqual(set(tree.children), set(expected.children))

        @unittest.skipIf(LEXER!='dynamic_complete', "Only relevant for the dynamic_complete parser")
        def test_explicit_ambiguity2(self):
            grammar = r"""
            start: NAME+
            NAME: /\w+/
            %ignore " "
            """
            text = """cat"""

            parser = _Lark(grammar, start='start', ambiguity='explicit')
            tree = parser.parse(text)
            self.assertEqual(tree.data, '_ambig')

            combinations = {tuple(str(s) for s in t.children) for t in tree.children}
            self.assertEqual(combinations, {
                ('cat',),
                ('ca', 't'),
                ('c', 'at'),
                ('c', 'a', 't')
            })

        def test_term_ambig_resolve(self):
            grammar = r"""
            !start: NAME+
            NAME: /\w+/
            %ignore " "
            """
            text = """foo bar"""

            parser = Lark(grammar)
            tree = parser.parse(text)
            self.assertEqual(tree.children, ['foo', 'bar'])

        def test_cycle(self):
            grammar = """
            start: start?
            """

            l = Lark(grammar, ambiguity='resolve', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('start', []))

            l = Lark(grammar, ambiguity='explicit', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('start', []))

        def test_cycles(self):
            grammar = """
            a: b
            b: c*
            c: a
            """

            l = Lark(grammar, start='a', ambiguity='resolve', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('b', [])]))

            l = Lark(grammar, start='a', ambiguity='explicit', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('b', [])]))

        def test_many_cycles(self):
            grammar = """
            start: a? | start start
            !a: "a"
            """

            l = Lark(grammar, ambiguity='resolve', lexer=LEXER)
            tree = l.parse('a')
            self.assertEqual(tree, Tree('start', [Tree('a', ['a'])]))

            l = Lark(grammar, ambiguity='explicit', lexer=LEXER)
            tree = l.parse('a')
            self.assertEqual(tree, Tree('start', [Tree('a', ['a'])]))

        def test_cycles_with_child_filter(self):
            grammar = """
            a: _x
            _x: _x? b
            b:
            """

            grammar2 = """
            a: x
            x: x? b
            b:
            """

            l = Lark(grammar, start='a', ambiguity='resolve', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('b', [])]))

            l = Lark(grammar, start='a', ambiguity='explicit', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('b', [])]))

            l = Lark(grammar2, start='a', ambiguity='resolve', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('x', [Tree('b', [])])]))

            l = Lark(grammar2, start='a', ambiguity='explicit', lexer=LEXER)
            tree = l.parse('')
            self.assertEqual(tree, Tree('a', [Tree('x', [Tree('b', [])])]))

        # @unittest.skipIf(LEXER=='dynamic', "Not implemented in Dynamic Earley yet")    # TODO
        # def test_not_all_derivations(self):
        #     grammar = """
        #     start: cd+ "e"
        #     !cd: "c"
        #        | "d"
        #        | "cd"
        #     """
        #     l = Lark(grammar, parser='earley', ambiguity='explicit', lexer=LEXER, earley__all_derivations=False)
        #     x = l.parse('cde')
        #     assert x.data != '_ambig', x
        #     assert len(x.children) == 1

    _NAME = "TestFullEarley" + LEXER.capitalize()
    _TestFullEarley.__name__ = _NAME
    globals()[_NAME] = _TestFullEarley
    __all__.append(_NAME)


class CustomLexerNew(Lexer):
    """
    The purpose of this custom lexer is to test the integration,
    so it uses the TraditionalLexer as its implementation, without custom lexing behaviour.
    """
    def __init__(self, lexer_conf):
        self.lexer = TraditionalLexer(copy(lexer_conf))

    def lex(self, lexer_state, parser_state):
        return self.lexer.lex(lexer_state, parser_state)

    __future_interface__ = True


class CustomLexerOld(Lexer):
    """
    The purpose of this custom lexer is to test the integration,
    so it uses the TraditionalLexer as its implementation, without custom lexing behaviour.
    """
    def __init__(self, lexer_conf):
        self.lexer = TraditionalLexer(copy(lexer_conf))

    def lex(self, text):
        ls = self.lexer.make_lexer_state(text)
        return self.lexer.lex(ls, None)

    __future_interface__ = False
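# `__future_interface__` tells Lark which `lex` signature a custom lexer
# implements: True selects the newer lex(lexer_state, parser_state) interface
# (as in CustomLexerNew), False the legacy lex(text) one (as in CustomLexerOld).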
def _tree_structure_check(a, b):
    """
    Checks that both Tree objects have the same structure, without checking their values.
    """
    assert a.data == b.data and len(a.children) == len(b.children)
    for ca, cb in zip(a.children, b.children):
        assert type(ca) == type(cb)
        if isinstance(ca, Tree):
            _tree_structure_check(ca, cb)
        elif isinstance(ca, Token):
            assert ca.type == cb.type
        else:
            assert ca == cb


class DualBytesLark:
    """
    A helper class that wraps both a normal parser and a parser for bytes.
    It automatically dispatches `.parse` calls to both lexers, returning the value from the text lexer.
    It always checks that both produce the same output/error.

    NOTE: Not currently used, but left here for future debugging.
    """

    def __init__(self, g, *args, **kwargs):
        self.text_lexer = Lark(g, *args, use_bytes=False, **kwargs)
        g = self.text_lexer.grammar_source.lower()
        if '\\u' in g or not isascii(g):
            # The bytes regex engine can't deal with unicode escapes
            self.bytes_lark = None
        else:
            # Everything here should work, so use `use_bytes='force'`
            self.bytes_lark = Lark(self.text_lexer.grammar_source, *args, use_bytes='force', **kwargs)

    def parse(self, text, start=None):
        # TODO: Easy workaround, more complex checks would be beneficial
        if not isascii(text) or self.bytes_lark is None:
            return self.text_lexer.parse(text, start)
        try:
            rv = self.text_lexer.parse(text, start)
        except Exception as e:
            try:
                self.bytes_lark.parse(text.encode(), start)
            except Exception as be:
                assert type(e) == type(be), "Parsers with and without `use_bytes` raise different exceptions"
                raise e
            assert False, "Parser without `use_bytes` raises an exception, but with it doesn't"
        try:
            bv = self.bytes_lark.parse(text.encode(), start)
        except Exception as be:
            assert False, "Parser without `use_bytes` doesn't raise an exception, but with it does"
        _tree_structure_check(rv, bv)
        return rv

    @classmethod
    def open(cls, grammar_filename, rel_to=None, **options):
        if rel_to:
            basepath = os.path.dirname(rel_to)
            grammar_filename = os.path.join(basepath, grammar_filename)
        with open(grammar_filename, encoding='utf8') as f:
            return cls(f, **options)

    def save(self, f):
        self.text_lexer.save(f)
        if self.bytes_lark is not None:
            self.bytes_lark.save(f)

    def load(self, f):
        self.text_lexer = self.text_lexer.load(f)
        if self.bytes_lark is not None:
            self.bytes_lark.load(f)
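# Illustrative usage sketch (not part of the suite): DualBytesLark mirrors the
# Lark API, so cross-checking a grammar against both lexers would look like:
#
#     p = DualBytesLark('start: "a"+', parser='lalr')
#     p.parse('aaa')   # parses as text and as bytes, asserting equal structure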
def _make_parser_test(LEXER, PARSER):
    lexer_class_or_name = {
        'custom_new': CustomLexerNew,
        'custom_old': CustomLexerOld,
    }.get(LEXER, LEXER)

    def _Lark(grammar, **kwargs):
        return Lark(grammar, lexer=lexer_class_or_name, parser=PARSER, propagate_positions=True, **kwargs)

    def _Lark_open(gfilename, **kwargs):
        return Lark.open(gfilename, lexer=lexer_class_or_name, parser=PARSER, propagate_positions=True, **kwargs)

    if (LEXER, PARSER) == ('standard', 'earley'):
        # Check that the grammar in `lark.lark` can parse every example used in these tests.
        # Standard-Earley was an arbitrary choice, to make sure this only runs once.
        lalr_parser = Lark.open(os.path.join(os.path.dirname(lark.__file__), 'grammars/lark.lark'), parser='lalr')

        def wrap_with_test_grammar(f):
            def _f(x, **kwargs):
                inst = f(x, **kwargs)
                lalr_parser.parse(inst.source_grammar)  # Test after instance creation. When the grammar should fail, don't test it.
                return inst
            return _f

        _Lark = wrap_with_test_grammar(_Lark)
        _Lark_open = wrap_with_test_grammar(_Lark_open)

    class _TestParser(unittest.TestCase):
        def test_basic1(self):
            g = _Lark("""start: a+ b a* "b" a*
                         b: "b"
                         a: "a"
                      """)

            r = g.parse('aaabaab')
            self.assertEqual( ''.join(x.data for x in r.children), 'aaabaa' )
            r = g.parse('aaabaaba')
            self.assertEqual( ''.join(x.data for x in r.children), 'aaabaaa' )

            self.assertRaises(ParseError, g.parse, 'aaabaa')

        def test_basic2(self):
            # Multiple parsers and colliding tokens
            g = _Lark("""start: B A
                         B: "12"
                         A: "1" """)
            g2 = _Lark("""start: B A
                          B: "12"
                          A: "2" """)
            x = g.parse('121')
            assert x.data == 'start' and x.children == ['12', '1'], x
            x = g2.parse('122')
            assert x.data == 'start' and x.children == ['12', '2'], x

        @unittest.skipIf(cStringIO is None, "cStringIO not available")
        def test_stringio_bytes(self):
            """Verify that a Lark can be created from file-like objects other than Python's standard 'file' object"""
            _Lark(cStringIO(b'start: a+ b a* "b" a*\n b: "b"\n a: "a" '))

        def test_stringio_unicode(self):
            """Verify that a Lark can be created from file-like objects other than Python's standard 'file' object"""
            _Lark(uStringIO(u'start: a+ b a* "b" a*\n b: "b"\n a: "a" '))

        def test_unicode(self):
            g = _Lark(u"""start: UNIA UNIB UNIA
                          UNIA: /\xa3/
                          UNIB: /\u0101/
                       """)
            g.parse(u'\xa3\u0101\u00a3')

        def test_unicode2(self):
            g = _Lark(r"""start: UNIA UNIB UNIA UNIC
                          UNIA: /\xa3/
                          UNIB: "a\u0101b\ "
                          UNIC: /a?\u0101c\n/
                       """)
            g.parse(u'\xa3a\u0101b\\ \u00a3\u0101c\n')

        def test_unicode3(self):
            g = _Lark(r"""start: UNIA UNIB UNIA UNIC
                          UNIA: /\xa3/
                          UNIB: "\u0101"
                          UNIC: /\u0203/ /\n/
                       """)
            g.parse(u'\xa3\u0101\u00a3\u0203\n')

        def test_hex_escape(self):
            g = _Lark(r"""start: A B C
                          A: "\x01"
                          B: /\x02/
                          C: "\xABCD"
                       """)
            g.parse('\x01\x02\xABCD')

        def test_unicode_literal_range_escape(self):
            g = _Lark(r"""start: A+
                          A: "\u0061".."\u0063"
                       """)
            g.parse('abc')

        def test_hex_literal_range_escape(self):
            g = _Lark(r"""start: A+
                          A: "\x01".."\x03"
                       """)
            g.parse('\x01\x02\x03')

        @unittest.skipIf(sys.version_info[0] == 2 or sys.version_info[:2] == (3, 4),
                         "bytes parser isn't perfect in Python2, exceptions don't work correctly")
        def test_bytes_utf8(self):
            g = r"""
            start: BOM? char+
            BOM: "\xef\xbb\xbf"
            char: CHAR1 | CHAR2 | CHAR3 | CHAR4
            CONTINUATION_BYTE: "\x80" .. "\xbf"
            CHAR1: "\x00" .. "\x7f"
            CHAR2: "\xc0" .. "\xdf" CONTINUATION_BYTE
            CHAR3: "\xe0" .. "\xef" CONTINUATION_BYTE CONTINUATION_BYTE
            CHAR4: "\xf0" .. "\xf7" CONTINUATION_BYTE CONTINUATION_BYTE CONTINUATION_BYTE
            """
            g = _Lark(g, use_bytes=True)
            s = u"🔣 地? gurīn".encode('utf-8')
            self.assertEqual(len(g.parse(s).children), 10)

            for enc, j in [("sjis", u"地球の絵はグリーンでグッド? Chikyuu no e wa guriin de guddo"),
                           ("sjis", u"売春婦"),
                           ("euc-jp", u"乂鵬鵠")]:
                s = j.encode(enc)
                self.assertRaises(UnexpectedCharacters, g.parse, s)
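            # The terminals above mirror UTF-8's encoding structure: CHAR1 is a
            # single ASCII byte, while CHAR2-CHAR4 are the 2- to 4-byte sequences,
            # each a lead byte followed by the matching number of continuation
            # bytes (0x80-0xbf). Non-UTF-8 encodings therefore fail to lex.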
        @unittest.skipIf(PARSER == 'cyk', "Takes forever")
        def test_stack_for_ebnf(self):
            """Verify that stack depth isn't an issue for EBNF grammars"""
            g = _Lark(r"""start: a+
                          a : "a" """)
            g.parse("a" * (sys.getrecursionlimit() * 2))

        def test_expand1_lists_with_one_item(self):
            g = _Lark(r"""start: list
                          ?list: item+
                          item : A
                          A: "a"
                       """)
            r = g.parse("a")

            # because 'list' is an expand-if-contains-one rule and we only provided one element it should have expanded to 'item'
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('item',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

        def test_expand1_lists_with_one_item_2(self):
            g = _Lark(r"""start: list
                          ?list: item+ "!"
                          item : A
                          A: "a"
                       """)
            r = g.parse("a!")

            # because 'list' is an expand-if-contains-one rule and we only provided one element it should have expanded to 'item'
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('item',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

        def test_dont_expand1_lists_with_multiple_items(self):
            g = _Lark(r"""start: list
                          ?list: item+
                          item : A
                          A: "a"
                       """)
            r = g.parse("aa")

            # because 'list' is an expand-if-contains-one rule and we've provided more than one element it should *not* have expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

            # Sanity check: verify that 'list' contains the two 'item's we've given it
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ('item', 'item'))

        def test_dont_expand1_lists_with_multiple_items_2(self):
            g = _Lark(r"""start: list
                          ?list: item+ "!"
                          item : A
                          A: "a"
                       """)
            r = g.parse("aa!")

            # because 'list' is an expand-if-contains-one rule and we've provided more than one element it should *not* have expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

            # Sanity check: verify that 'list' contains the two 'item's we've given it
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ('item', 'item'))

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_empty_expand1_list(self):
            g = _Lark(r"""start: list
                          ?list: item*
                          item : A
                          A: "a"
                       """)
            r = g.parse("")

            # because 'list' is an expand-if-contains-one rule and we've provided less than one element (i.e. none) it should *not* have expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

            # Sanity check: verify that 'list' contains no 'item's as we've given it none
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ())

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_empty_expand1_list_2(self):
            g = _Lark(r"""start: list
                          ?list: item* "!"?
                          item : A
                          A: "a"
                       """)
            r = g.parse("")

            # because 'list' is an expand-if-contains-one rule and we've provided less than one element (i.e. none) it should *not* have expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # regardless of the amount of items: there should be only *one* child in 'start' because 'list' isn't an expand-all rule
            self.assertEqual(len(r.children), 1)

            # Sanity check: verify that 'list' contains no 'item's as we've given it none
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ())

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_empty_flatten_list(self):
            g = _Lark(r"""start: list
                          list: | item "," list
                          item : A
                          A: "a"
                       """)
            r = g.parse("")

            # Because 'list' is a flatten rule, its top-level element should *never* be expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # Sanity check: verify that 'list' contains no 'item's as we've given it none
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ())

        @unittest.skipIf(True, "Flattening list isn't implemented (and may never be)")
        def test_single_item_flatten_list(self):
            g = _Lark(r"""start: list
                          list: | item "," list
                          item : A
                          A: "a"
                       """)
            r = g.parse("a,")

            # Because 'list' is a flatten rule, its top-level element should *never* be expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # Sanity check: verify that 'list' contains exactly the one 'item' we've given it
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ('item',))

        @unittest.skipIf(True, "Flattening list isn't implemented (and may never be)")
        def test_multiple_item_flatten_list(self):
            g = _Lark(r"""start: list
                          #list: | item "," list
                          item : A
                          A: "a"
                       """)
            r = g.parse("a,a,")

            # Because 'list' is a flatten rule, its top-level element should *never* be expanded
            self.assertSequenceEqual([subtree.data for subtree in r.children], ('list',))

            # Sanity check: verify that 'list' contains exactly the two 'item's we've given it
            [list] = r.children
            self.assertSequenceEqual([item.data for item in list.children], ('item', 'item'))

        @unittest.skipIf(True, "Flattening list isn't implemented (and may never be)")
        def test_recurse_flatten(self):
            """Verify that stack depth doesn't get exceeded on recursive rules marked for flattening."""
            g = _Lark(r"""start: a | start a
                          a : A
                          A : "a" """)

            # Force PLY to write to the debug log, but prevent writing it to the terminal (uses repr() on the half-built
            # STree data structures, which uses recursion).
            g.parse("a" * (sys.getrecursionlimit() // 4))

        def test_token_collision(self):
            g = _Lark(r"""start: "Hello" NAME
                          NAME: /\w/+
                          %ignore " "
                       """)
            x = g.parse('Hello World')
            self.assertSequenceEqual(x.children, ['World'])
            x = g.parse('Hello HelloWorld')
            self.assertSequenceEqual(x.children, ['HelloWorld'])

        def test_token_collision_WS(self):
            g = _Lark(r"""start: "Hello" NAME
                          NAME: /\w/+
                          %import common.WS
                          %ignore WS
                       """)
            x = g.parse('Hello World')
            self.assertSequenceEqual(x.children, ['World'])
            x = g.parse('Hello HelloWorld')
            self.assertSequenceEqual(x.children, ['HelloWorld'])

        def test_token_collision2(self):
            g = _Lark("""
                    !start: "starts"
                    %import common.LCASE_LETTER
                    """)
            x = g.parse("starts")
            self.assertSequenceEqual(x.children, ['starts'])

        def test_templates(self):
            g = _Lark(r"""
                       start: "[" sep{NUMBER, ","} "]"
                       sep{item, delim}: item (delim item)*
                       NUMBER: /\d+/
                       %ignore " "
                       """)
            x = g.parse("[1, 2, 3, 4]")
            self.assertSequenceEqual(x.children, [Tree('sep', ['1', '2', '3', '4'])])
            x = g.parse("[1]")
            self.assertSequenceEqual(x.children, [Tree('sep', ['1'])])

        def test_templates_recursion(self):
            g = _Lark(r"""
                       start: "[" _sep{NUMBER, ","} "]"
                       _sep{item, delim}: item | _sep{item, delim} delim item
                       NUMBER: /\d+/
                       %ignore " "
                       """)
            x = g.parse("[1, 2, 3, 4]")
            self.assertSequenceEqual(x.children, ['1', '2', '3', '4'])
            x = g.parse("[1]")
            self.assertSequenceEqual(x.children, ['1'])

        def test_templates_import(self):
            g = _Lark_open("test_templates_import.lark", rel_to=__file__)
            x = g.parse("[1, 2, 3, 4]")
            self.assertSequenceEqual(x.children, [Tree('sep', ['1', '2', '3', '4'])])
            x = g.parse("[1]")
            self.assertSequenceEqual(x.children, [Tree('sep', ['1'])])

        def test_templates_alias(self):
            g = _Lark(r"""
                       start: expr{"C"}
                       expr{t}: "A" t
                              | "B" t -> b
                       """)
            x = g.parse("AC")
            self.assertSequenceEqual(x.children, [Tree('expr', [])])
            x = g.parse("BC")
            self.assertSequenceEqual(x.children, [Tree('b', [])])

        def test_templates_modifiers(self):
            g = _Lark(r"""
                       start: expr{"B"}
                       !expr{t}: "A" t
                       """)
            x = g.parse("AB")
            self.assertSequenceEqual(x.children, [Tree('expr', ["A", "B"])])

            g = _Lark(r"""
                       start: _expr{"B"}
                       !_expr{t}: "A" t
                       """)
            x = g.parse("AB")
            self.assertSequenceEqual(x.children, ["A", "B"])

            g = _Lark(r"""
                       start: expr{b}
                       b: "B"
                       ?expr{t}: "A" t
                       """)
            x = g.parse("AB")
            self.assertSequenceEqual(x.children, [Tree('b', [])])

        def test_templates_templates(self):
            g = _Lark('''start: a{b}
                         a{t}: t{"a"}
                         b{x}: x''')
            x = g.parse('a')
            self.assertSequenceEqual(x.children, [Tree('a', [Tree('b', [])])])

        def test_g_regex_flags(self):
            g = _Lark("""
                    start: "a" /b+/ C
                    C: "C" | D
                    D: "D" E
                    E: "e"
                    """, g_regex_flags=re.I)
            x1 = g.parse("ABBc")
            x2 = g.parse("abdE")

        # def test_string_priority(self):
        #     g = _Lark("""start: (A | /a?bb/)+
        #                  A: "a" """)
        #     x = g.parse('abb')
        #     self.assertEqual(len(x.children), 2)
        #
        #     # This parse raises an exception because the lexer will always try to consume
        #     # "a" first and will never match the regular expression
        #     # This behavior is subject to change!!
        #     # This won't happen with ambiguity handling.
        #     g = _Lark("""start: (A | /a?ab/)+
        #                  A: "a" """)
        #     self.assertRaises(LexError, g.parse, 'aab')

        def test_undefined_rule(self):
            self.assertRaises(GrammarError, _Lark, """start: a""")

        def test_undefined_token(self):
            self.assertRaises(GrammarError, _Lark, """start: A""")

        def test_rule_collision(self):
            g = _Lark("""start: "a"+ "b"
                              | "a"+ """)
            x = g.parse('aaaa')
            x = g.parse('aaaab')

        def test_rule_collision2(self):
            g = _Lark("""start: "a"* "b"
                              | "a"+ """)
            x = g.parse('aaaa')
            x = g.parse('aaaab')
            x = g.parse('b')

        def test_token_not_anon(self):
            """Tests that "a" is matched as an anonymous token, and not A.
            """
            g = _Lark("""start: "a"
                         A: "a" """)
            x = g.parse('a')
            self.assertEqual(len(x.children), 0, '"a" should be considered anonymous')

            g = _Lark("""start: "a" A
                         A: "a" """)
            x = g.parse('aa')
            self.assertEqual(len(x.children), 1, 'only "a" should be considered anonymous')
            self.assertEqual(x.children[0].type, "A")

            g = _Lark("""start: /a/
                         A: /a/ """)
            x = g.parse('a')
            self.assertEqual(len(x.children), 1)
            self.assertEqual(x.children[0].type, "A", "A isn't associated with /a/")

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_maybe(self):
            g = _Lark("""start: ["a"] """)
            x = g.parse('a')
            x = g.parse('')

        def test_start(self):
            g = _Lark("""a: "a" a? """, start='a')
            x = g.parse('a')
            x = g.parse('aa')
            x = g.parse('aaa')

        def test_alias(self):
            g = _Lark("""start: "a" -> b """)
            x = g.parse('a')
            self.assertEqual(x.data, "b")

        def test_token_ebnf(self):
            g = _Lark("""start: A
                         A: "a"* ("b"? "c".."e")+
                      """)
            x = g.parse('abcde')
            x = g.parse('dd')

        def test_backslash(self):
            g = _Lark(r"""start: "\\" "a"
                       """)
            x = g.parse(r'\a')

            g = _Lark(r"""start: /\\/ /a/
                       """)
            x = g.parse(r'\a')

        def test_backslash2(self):
            g = _Lark(r"""start: "\"" "-"
                       """)
            x = g.parse('"-')

            g = _Lark(r"""start: /\// /-/
                       """)
            x = g.parse('/-')

        def test_special_chars(self):
            g = _Lark(r"""start: "\n"
                       """)
            x = g.parse('\n')

            g = _Lark(r"""start: /\n/
                       """)
            x = g.parse('\n')

        # def test_token_recurse(self):
        #     g = _Lark("""start: A
        #                  A: B
        #                  B: A
        #               """)

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_empty(self):
            # Fails an Earley implementation without special handling for empty rules,
            # or re-processing of already completed rules.
            g = _Lark(r"""start: _empty a "B"
                          a: _empty "A"
                          _empty:
                       """)
            x = g.parse('AB')

        def test_regex_quote(self):
            g = r"""
            start: SINGLE_QUOTED_STRING | DOUBLE_QUOTED_STRING
            SINGLE_QUOTED_STRING : /'[^']*'/
            DOUBLE_QUOTED_STRING : /"[^"]*"/
            """
            g = _Lark(g)
            self.assertEqual( g.parse('"hello"').children, ['"hello"'])
            self.assertEqual( g.parse("'hello'").children, ["'hello'"])

        @unittest.skipIf(not Py36, "Required re syntax only exists in python3.6+")
        def test_join_regex_flags(self):
            g = r"""
                start: A
                A: B C
                B: /./s
                C: /./
            """
            g = _Lark(g)
            self.assertEqual(g.parse(" ").children, [" "])
            self.assertEqual(g.parse("\n ").children, ["\n "])
            self.assertRaises(UnexpectedCharacters, g.parse, "\n\n")

            g = r"""
                start: A
                A: B | C
                B: "b"i
                C: "c"
            """
            g = _Lark(g)
            self.assertEqual(g.parse("b").children, ["b"])
            self.assertEqual(g.parse("B").children, ["B"])
            self.assertEqual(g.parse("c").children, ["c"])
            self.assertRaises(UnexpectedCharacters, g.parse, "C")

        def test_lexer_token_limit(self):
            "Python has a stupid limit of 100 groups in a regular expression. Test that we handle this limitation"
            tokens = {'A%d' % i: '"%d"' % i for i in range(300)}
            g = _Lark("""start: %s
                      %s""" % (' '.join(tokens), '\n'.join("%s: %s" % x for x in tokens.items())))
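            # (To stay under that cap, Lark compiles the terminals into several
            # smaller regexes internally; 300 terminals exercises that path.)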
        def test_float_without_lexer(self):
            expected_error = UnexpectedCharacters if 'dynamic' in LEXER else UnexpectedToken
            if PARSER == 'cyk':
                expected_error = ParseError

            g = _Lark("""start: ["+"|"-"] float
                         float: digit* "." digit+ exp?
                              | digit+ exp
                         exp: ("e"|"E") ["+"|"-"] digit+
                         digit: "0"|"1"|"2"|"3"|"4"|"5"|"6"|"7"|"8"|"9"
                      """)
            g.parse("1.2")
            g.parse("-.2e9")
            g.parse("+2e-9")
            self.assertRaises( expected_error, g.parse, "+2e-9e")

        def test_keep_all_tokens(self):
            l = _Lark("""start: "a"+ """, keep_all_tokens=True)
            tree = l.parse('aaa')
            self.assertEqual(tree.children, ['a', 'a', 'a'])

        def test_token_flags(self):
            l = _Lark("""!start: "a"i+
                      """)
            tree = l.parse('aA')
            self.assertEqual(tree.children, ['a', 'A'])

            l = _Lark("""!start: /a/i+
                      """)
            tree = l.parse('aA')
            self.assertEqual(tree.children, ['a', 'A'])

            # g = """!start: "a"i "a"
            #     """
            # self.assertRaises(GrammarError, _Lark, g)

            # g = """!start: /a/i /a/
            #     """
            # self.assertRaises(GrammarError, _Lark, g)

            g = """start: NAME "," "a"
                   NAME: /[a-z_]/i /[a-z0-9_]/i*
                """
            l = _Lark(g)
            tree = l.parse('ab,a')
            self.assertEqual(tree.children, ['ab'])
            tree = l.parse('AB,a')
            self.assertEqual(tree.children, ['AB'])

        def test_token_flags3(self):
            l = _Lark("""!start: ABC+
                         ABC: "abc"i
                      """)
            tree = l.parse('aBcAbC')
            self.assertEqual(tree.children, ['aBc', 'AbC'])

        def test_token_flags2(self):
            g = """!start: ("a"i | /a/ /b/?)+
                """
            l = _Lark(g)
            tree = l.parse('aA')
            self.assertEqual(tree.children, ['a', 'A'])

        def test_token_flags_verbose(self):
            g = _Lark(r"""start: NL | ABC
                          ABC: / [a-z] /x
                          NL: /\n/
                       """)
            x = g.parse('a')
            self.assertEqual(x.children, ['a'])

        def test_token_flags_verbose_multiline(self):
            g = _Lark(r"""start: ABC
                          ABC: / a b c
                               d
                               e f
                             /x
                       """)
            x = g.parse('abcdef')
            self.assertEqual(x.children, ['abcdef'])

        def test_token_multiline_only_works_with_x_flag(self):
            g = r"""start: ABC
                    ABC: / a b c
                         d
                         e f
                       /i
                 """
            self.assertRaises( GrammarError, _Lark, g)

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_twice_empty(self):
            g = """!start: ("A"?)?
                """
            l = _Lark(g)
            tree = l.parse('A')
            self.assertEqual(tree.children, ['A'])

            tree = l.parse('')
            self.assertEqual(tree.children, [])

        def test_undefined_ignore(self):
            g = """!start: "A"
                %ignore B
                """
            self.assertRaises( GrammarError, _Lark, g)

        def test_alias_in_terminal(self):
            g = """start: TERM
                TERM: "a" -> alias
                """
            self.assertRaises( GrammarError, _Lark, g)

        def test_line_and_column(self):
            g = r"""!start: "A" bc "D"
                !bc: "B\nC"
                """
            l = _Lark(g)
            a, bc, d = l.parse("AB\nCD").children
            self.assertEqual(a.line, 1)
            self.assertEqual(a.column, 1)

            bc, = bc.children
            self.assertEqual(bc.line, 1)
            self.assertEqual(bc.column, 2)

            self.assertEqual(d.line, 2)
            self.assertEqual(d.column, 2)

            # if LEXER != 'dynamic':
            self.assertEqual(a.end_line, 1)
            self.assertEqual(a.end_column, 2)
            self.assertEqual(bc.end_line, 2)
            self.assertEqual(bc.end_column, 2)
            self.assertEqual(d.end_line, 2)
            self.assertEqual(d.end_column, 3)

        def test_reduce_cycle(self):
            """Tests an edge-condition in the LALR parser, in which a transition state looks exactly like the end state.
            It seems that the correct solution is to explicitly distinguish finalization in the reduce() function.
            """
            l = _Lark("""
            term: A
                | term term

            A: "a"
            """, start='term')

            tree = l.parse("aa")
            self.assertEqual(len(tree.children), 2)

        @unittest.skipIf(LEXER != 'standard', "Only standard lexers care about token priority")
        def test_lexer_prioritization(self):
            "Tests effect of priority on result"

            grammar = """
            start: A B | AB
            A.2: "a"
            B: "b"
            AB: "ab"
            """
            l = _Lark(grammar)
            res = l.parse("ab")

            self.assertEqual(res.children, ['a', 'b'])
            self.assertNotEqual(res.children, ['ab'])

            grammar = """
            start: A B | AB
            A: "a"
            B: "b"
            AB.3: "ab"
            """
            l = _Lark(grammar)
            res = l.parse("ab")

            self.assertNotEqual(res.children, ['a', 'b'])
            self.assertEqual(res.children, ['ab'])

            grammar = """
            start: A B | AB
            A: "a"
            B.-20: "b"
            AB.-10: "ab"
            """
            l = _Lark(grammar)
            res = l.parse("ab")
            self.assertEqual(res.children, ['a', 'b'])

            grammar = """
            start: A B | AB
            A.-99999999999999999999999: "a"
            B: "b"
            AB: "ab"
            """
            l = _Lark(grammar)
            res = l.parse("ab")

            self.assertEqual(res.children, ['ab'])
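            # Taken together: an explicit `.N` priority on a terminal overrides
            # the lexer's default preference for the longest match, and a
            # negative priority demotes a terminal below its competitors.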
  1446. def test_import(self):
  1447. grammar = """
  1448. start: NUMBER WORD
  1449. %import common.NUMBER
  1450. %import common.WORD
  1451. %import common.WS
  1452. %ignore WS
  1453. """
  1454. l = _Lark(grammar)
  1455. x = l.parse('12 elephants')
  1456. self.assertEqual(x.children, ['12', 'elephants'])
  1457. def test_import_rename(self):
  1458. grammar = """
  1459. start: N W
  1460. %import common.NUMBER -> N
  1461. %import common.WORD -> W
  1462. %import common.WS
  1463. %ignore WS
  1464. """
  1465. l = _Lark(grammar)
  1466. x = l.parse('12 elephants')
  1467. self.assertEqual(x.children, ['12', 'elephants'])
  1468. def test_relative_import(self):
  1469. l = _Lark_open('test_relative_import.lark', rel_to=__file__)
  1470. x = l.parse('12 lions')
  1471. self.assertEqual(x.children, ['12', 'lions'])
  1472. def test_relative_import_unicode(self):
  1473. l = _Lark_open('test_relative_import_unicode.lark', rel_to=__file__)
  1474. x = l.parse(u'Ø')
  1475. self.assertEqual(x.children, [u'Ø'])
  1476. def test_relative_import_rename(self):
  1477. l = _Lark_open('test_relative_import_rename.lark', rel_to=__file__)
  1478. x = l.parse('12 lions')
  1479. self.assertEqual(x.children, ['12', 'lions'])
  1480. def test_relative_rule_import(self):
  1481. l = _Lark_open('test_relative_rule_import.lark', rel_to=__file__)
  1482. x = l.parse('xaabby')
  1483. self.assertEqual(x.children, [
  1484. 'x',
  1485. Tree('expr', ['a', Tree('expr', ['a', 'b']), 'b']),
  1486. 'y'])
  1487. def test_relative_rule_import_drop_ignore(self):
  1488. # %ignore rules are dropped on import
  1489. l = _Lark_open('test_relative_rule_import_drop_ignore.lark',
  1490. rel_to=__file__)
  1491. self.assertRaises((ParseError, UnexpectedInput),
  1492. l.parse, 'xa abby')
  1493. def test_relative_rule_import_subrule(self):
  1494. l = _Lark_open('test_relative_rule_import_subrule.lark',
  1495. rel_to=__file__)
  1496. x = l.parse('xaabby')
  1497. self.assertEqual(x.children, [
  1498. 'x',
  1499. Tree('startab', [
  1500. Tree('grammars__ab__expr', [
  1501. 'a', Tree('grammars__ab__expr', ['a', 'b']), 'b',
  1502. ]),
  1503. ]),
  1504. 'y'])
  1505. def test_relative_rule_import_subrule_no_conflict(self):
  1506. l = _Lark_open(
  1507. 'test_relative_rule_import_subrule_no_conflict.lark',
  1508. rel_to=__file__)
  1509. x = l.parse('xaby')
  1510. self.assertEqual(x.children, [Tree('expr', [
  1511. 'x',
  1512. Tree('startab', [
  1513. Tree('grammars__ab__expr', ['a', 'b']),
  1514. ]),
  1515. 'y'])])
  1516. self.assertRaises((ParseError, UnexpectedInput),
  1517. l.parse, 'xaxabyby')
  1518. def test_relative_rule_import_rename(self):
  1519. l = _Lark_open('test_relative_rule_import_rename.lark',
  1520. rel_to=__file__)
  1521. x = l.parse('xaabby')
  1522. self.assertEqual(x.children, [
  1523. 'x',
  1524. Tree('ab', ['a', Tree('ab', ['a', 'b']), 'b']),
  1525. 'y'])
  1526. def test_multi_import(self):
  1527. grammar = """
  1528. start: NUMBER WORD
  1529. %import common (NUMBER, WORD, WS)
  1530. %ignore WS
  1531. """
  1532. l = _Lark(grammar)
  1533. x = l.parse('12 toucans')
  1534. self.assertEqual(x.children, ['12', 'toucans'])
  1535. def test_relative_multi_import(self):
  1536. l = _Lark_open("test_relative_multi_import.lark", rel_to=__file__)
  1537. x = l.parse('12 capybaras')
  1538. self.assertEqual(x.children, ['12', 'capybaras'])
  1539. def test_relative_import_preserves_leading_underscore(self):
  1540. l = _Lark_open("test_relative_import_preserves_leading_underscore.lark", rel_to=__file__)
  1541. x = l.parse('Ax')
  1542. self.assertEqual(next(x.find_data('c')).children, ['A'])
  1543. def test_relative_import_of_nested_grammar(self):
  1544. l = _Lark_open("grammars/test_relative_import_of_nested_grammar.lark", rel_to=__file__)
  1545. x = l.parse('N')
  1546. self.assertEqual(next(x.find_data('rule_to_import')).children, ['N'])
  1547. def test_relative_import_rules_dependencies_imported_only_once(self):
  1548. l = _Lark_open("test_relative_import_rules_dependencies_imported_only_once.lark", rel_to=__file__)
  1549. x = l.parse('AAA')
  1550. self.assertEqual(next(x.find_data('a')).children, ['A'])
  1551. self.assertEqual(next(x.find_data('b')).children, ['A'])
  1552. self.assertEqual(next(x.find_data('d')).children, ['A'])
  1553. def test_import_errors(self):
  1554. grammar = """
  1555. start: NUMBER WORD
  1556. %import .grammars.bad_test.NUMBER
  1557. """
  1558. self.assertRaises(IOError, _Lark, grammar)
  1559. grammar = """
  1560. start: NUMBER WORD
  1561. %import bad_test.NUMBER
  1562. """
  1563. self.assertRaises(IOError, _Lark, grammar)

        @unittest.skipIf('dynamic' in LEXER, "%declare/postlex doesn't work with dynamic")
        def test_postlex_declare(self):  # Note: this test does a lot. Maybe split it up?
            class TestPostLexer:
                def process(self, stream):
                    for t in stream:
                        # Retype A tokens to B; pass everything else through
                        if t.type == 'A':
                            t.type = 'B'
                        yield t

                always_accept = ('A',)

            parser = _Lark("""
            start: B
            A: "A"
            %declare B
            """, postlex=TestPostLexer())

            test_file = "A"
            tree = parser.parse(test_file)
            self.assertEqual(tree.children, [Token('B', 'A')])
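            # The postlexer contract, roughly: lark feeds the raw token stream
            # through postlex.process(), and `always_accept` names terminal types
            # the lexer should emit even when the parser state doesn't expect
            # them. %declare introduces B as a pattern-less terminal, so only the
            # postlexer can ever produce a B token.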

        @unittest.skipIf('dynamic' in LEXER, "%declare/postlex doesn't work with dynamic")
        def test_postlex_indenter(self):
            class CustomIndenter(Indenter):
                NL_type = 'NEWLINE'
                OPEN_PAREN_types = []
                CLOSE_PAREN_types = []
                INDENT_type = 'INDENT'
                DEDENT_type = 'DEDENT'
                tab_len = 8

            grammar = r"""
            start: "a" NEWLINE INDENT "b" NEWLINE DEDENT
            NEWLINE: ( /\r?\n */ )+
            %ignore " "+
            %declare INDENT DEDENT
            """

            parser = _Lark(grammar, postlex=CustomIndenter())
            parser.parse("a\n b\n")
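            # The Indenter postlexer measures the whitespace captured by each
            # NEWLINE token and synthesizes the INDENT/DEDENT tokens %declare'd
            # above; this is the usual way to parse Python-style blocks with lark.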

        def test_import_custom_sources(self):
            custom_loader = FromPackageLoader('tests', ('grammars', ))

            grammar = """
            start: startab
            %import ab.startab
            """
            p = _Lark(grammar, import_paths=[custom_loader])
            self.assertEqual(p.parse('ab'),
                             Tree('start', [Tree('startab', [Tree('ab__expr', [Token('ab__A', 'a'), Token('ab__B', 'b')])])]))

            grammar = """
            start: rule_to_import
            %import test_relative_import_of_nested_grammar__grammar_to_import.rule_to_import
            """
            p = _Lark(grammar, import_paths=[custom_loader])
            x = p.parse('N')
            self.assertEqual(next(x.find_data('rule_to_import')).children, ['N'])

            custom_loader2 = FromPackageLoader('tests')
            grammar = """
            %import .test_relative_import (start, WS)
            %ignore WS
            """
            p = _Lark(grammar, import_paths=[custom_loader2], source_path=__file__)  # import relative to current file
            x = p.parse('12 capybaras')
            self.assertEqual(x.children, ['12', 'capybaras'])
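            # Roughly: FromPackageLoader(package_name, search_paths) resolves
            # %import statements from package data rather than the plain
            # filesystem, and each entry in import_paths is consulted when
            # locating an imported grammar.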

        @unittest.skipIf(PARSER == 'cyk', "Doesn't work for CYK")
        def test_prioritization(self):
            "Tests effect of priority on result"

            grammar = """
            start: a | b
            a.1: "a"
            b.2: "a"
            """
            l = _Lark(grammar)
            res = l.parse("a")
            self.assertEqual(res.children[0].data, 'b')

            grammar = """
            start: a | b
            a.2: "a"
            b.1: "a"
            """
            l = _Lark(grammar)
            res = l.parse("a")
            self.assertEqual(res.children[0].data, 'a')

            grammar = """
            start: a | b
            a.2: "A"+
            b.1: "A"+ "B"?
            """
            l = _Lark(grammar)
            res = l.parse("AAAA")
            self.assertEqual(res.children[0].data, 'a')

            l = _Lark(grammar)
            res = l.parse("AAAB")
            self.assertEqual(res.children[0].data, 'b')

            l = _Lark(grammar, priority="invert")
            res = l.parse("AAAA")
            self.assertEqual(res.children[0].data, 'b')
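            # Rule priority ("a.2") picks a winner among ambiguous derivations:
            # the higher value wins by default, while priority="invert" negates
            # the values so the lowest wins, treating priority as a cost.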

        @unittest.skipIf(PARSER != 'earley' or 'dynamic' not in LEXER, "Currently only Earley supports priority sum in rules")
        def test_prioritization_sum(self):
            "Tests effect of priority on result"

            grammar = """
            start: ab_ b_ a_ | indirection
            indirection: a_ bb_ a_
            a_: "a"
            b_: "b"
            ab_: "ab"
            bb_.1: "bb"
            """
            l = _Lark(grammar, priority="invert")
            res = l.parse('abba')
            self.assertEqual(''.join(child.data for child in res.children), 'ab_b_a_')

            grammar = """
            start: ab_ b_ a_ | indirection
            indirection: a_ bb_ a_
            a_: "a"
            b_: "b"
            ab_.1: "ab"
            bb_: "bb"
            """
            l = _Lark(grammar, priority="invert")
            res = l.parse('abba')
            self.assertEqual(''.join(child.data for child in res.children), 'indirection')

            grammar = """
            start: ab_ b_ a_ | indirection
            indirection: a_ bb_ a_
            a_.2: "a"
            b_.1: "b"
            ab_.3: "ab"
            bb_.3: "bb"
            """
            l = _Lark(grammar, priority="invert")
            res = l.parse('abba')
            self.assertEqual(''.join(child.data for child in res.children), 'ab_b_a_')

            grammar = """
            start: ab_ b_ a_ | indirection
            indirection: a_ bb_ a_
            a_.1: "a"
            b_.1: "b"
            ab_.4: "ab"
            bb_.3: "bb"
            """
            l = _Lark(grammar, priority="invert")
            res = l.parse('abba')
            self.assertEqual(''.join(child.data for child in res.children), 'indirection')

        def test_utf8(self):
            g = u"""start: a
                    a: "±a"
                 """
            l = _Lark(g)
            self.assertEqual(l.parse(u'±a'), Tree('start', [Tree('a', [])]))

            g = u"""start: A
                    A: "±a"
                 """
            l = _Lark(g)
            self.assertEqual(l.parse(u'±a'), Tree('start', [u'\xb1a']))

        @unittest.skipIf(PARSER == 'cyk', "No empty rules")
        def test_ignore(self):
            grammar = r"""
            COMMENT: /(!|(\/\/))[^\n]*/
            %ignore COMMENT
            %import common.WS -> _WS
            %import common.INT
            start: "INT"i _WS+ INT _WS*
            """
            parser = _Lark(grammar)

            tree = parser.parse("int 1 ! This is a comment\n")
            self.assertEqual(tree.children, ['1'])

            tree = parser.parse("int 1 ! This is a comment")  # A trailing ignore token can be tricky!
            self.assertEqual(tree.children, ['1'])

            parser = _Lark(r"""
                start : "a"*
                %ignore "b"
            """)
            tree = parser.parse("bb")
            self.assertEqual(tree.children, [])

        def test_regex_escaping(self):
            g = _Lark("start: /[ab]/")
            g.parse('a')
            g.parse('b')
            self.assertRaises(UnexpectedInput, g.parse, 'c')

            _Lark(r'start: /\w/').parse('a')

            g = _Lark(r'start: /\\w/')
            self.assertRaises(UnexpectedInput, g.parse, 'a')
            g.parse(r'\w')

            _Lark(r'start: /\[/').parse('[')
            _Lark(r'start: /\//').parse('/')
            _Lark(r'start: /\\/').parse('\\')
            _Lark(r'start: /\[ab]/').parse('[ab]')
            _Lark(r'start: /\\[ab]/').parse('\\a')
            _Lark(r'start: /\t/').parse('\t')
            _Lark(r'start: /\\t/').parse('\\t')
            _Lark(r'start: /\\\t/').parse('\\\t')
            _Lark(r'start: "\t"').parse('\t')
            _Lark(r'start: "\\t"').parse('\\t')
            _Lark(r'start: "\\\t"').parse('\\\t')

        def test_ranged_repeat_rules(self):
            g = u"""!start: "A"~3
                """
            l = _Lark(g)
            self.assertEqual(l.parse(u'AAA'), Tree('start', ["A", "A", "A"]))
            self.assertRaises(ParseError, l.parse, u'AA')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAAA')

            g = u"""!start: "A"~0..2
                """
            if PARSER != 'cyk':  # XXX CYK currently doesn't support empty grammars
                l = _Lark(g)
                self.assertEqual(l.parse(u''), Tree('start', []))
                self.assertEqual(l.parse(u'A'), Tree('start', ['A']))
                self.assertEqual(l.parse(u'AA'), Tree('start', ['A', 'A']))
                self.assertRaises((UnexpectedToken, UnexpectedInput), l.parse, u'AAA')

            g = u"""!start: "A"~3..2
                """
            self.assertRaises(GrammarError, _Lark, g)

            g = u"""!start: "A"~2..3 "B"~2
                """
            l = _Lark(g)
            self.assertEqual(l.parse(u'AABB'), Tree('start', ['A', 'A', 'B', 'B']))
            self.assertEqual(l.parse(u'AAABB'), Tree('start', ['A', 'A', 'A', 'B', 'B']))
            self.assertRaises(ParseError, l.parse, u'AAAB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAABBB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'ABB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAAABB')
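            # ~ is lark's ranged-repeat operator: "A"~3 matches exactly three As,
            # "A"~0..2 between zero and two, and a reversed range like "A"~3..2 is
            # rejected with a GrammarError at load time. The next test applies the
            # same operator inside terminal definitions.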

        def test_ranged_repeat_terms(self):
            g = u"""!start: AAA
                    AAA: "A"~3
                """
            l = _Lark(g)
            self.assertEqual(l.parse(u'AAA'), Tree('start', ["AAA"]))
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AA')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAAA')

            g = u"""!start: AABB CC
                    AABB: "A"~0..2 "B"~2
                    CC: "C"~1..2
                """
            l = _Lark(g)
            self.assertEqual(l.parse(u'AABBCC'), Tree('start', ['AABB', 'CC']))
            self.assertEqual(l.parse(u'BBC'), Tree('start', ['BB', 'C']))
            self.assertEqual(l.parse(u'ABBCC'), Tree('start', ['ABB', 'CC']))
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAAB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAABBB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'ABB')
            self.assertRaises((ParseError, UnexpectedInput), l.parse, u'AAAABB')

        @unittest.skipIf(PARSER == 'earley', "Priority not handled correctly right now")  # TODO XXX
        def test_priority_vs_embedded(self):
            g = """
            A.2: "a"
            WORD: ("a".."z")+
            start: (A | WORD)+
            """
            l = _Lark(g)
            t = l.parse('abc')
            self.assertEqual(t.children, ['a', 'bc'])
            self.assertEqual(t.children[0].type, 'A')

        def test_line_counting(self):
            p = _Lark("start: /[^x]+/")

            text = 'hello\nworld'
            t = p.parse(text)
            tok = t.children[0]
            self.assertEqual(tok, text)
            self.assertEqual(tok.line, 1)
            self.assertEqual(tok.column, 1)
            # if _LEXER != 'dynamic':
            self.assertEqual(tok.end_line, 2)
            self.assertEqual(tok.end_column, 6)

        @unittest.skipIf(PARSER == 'cyk', "Empty rules")
        def test_empty_end(self):
            p = _Lark("""
                start: b c d
                b: "B"
                c: | "C"
                d: | "D"
            """)
            res = p.parse('B')
            self.assertEqual(len(res.children), 3)

        @unittest.skipIf(PARSER == 'cyk', "Empty rules")
        def test_maybe_placeholders(self):
            # Anonymous tokens shouldn't count
            p = _Lark("""start: ["a"] ["b"] ["c"] """, maybe_placeholders=True)
            self.assertEqual(p.parse("").children, [])

            # Unless keep_all_tokens=True
            p = _Lark("""start: ["a"] ["b"] ["c"] """, maybe_placeholders=True, keep_all_tokens=True)
            self.assertEqual(p.parse("").children, [None, None, None])

            # All invisible constructs shouldn't count
            p = _Lark("""start: [A] ["b"] [_c] ["e" "f" _c]
                         A: "a"
                         _c: "c" """, maybe_placeholders=True)
            self.assertEqual(p.parse("").children, [None])
            self.assertEqual(p.parse("c").children, [None])
            self.assertEqual(p.parse("aefc").children, ['a'])

            # ? shouldn't apply
            p = _Lark("""!start: ["a"] "b"? ["c"] """, maybe_placeholders=True)
            self.assertEqual(p.parse("").children, [None, None])
            self.assertEqual(p.parse("b").children, [None, 'b', None])

            p = _Lark("""!start: ["a"] ["b"] ["c"] """, maybe_placeholders=True)
            self.assertEqual(p.parse("").children, [None, None, None])
            self.assertEqual(p.parse("a").children, ['a', None, None])
            self.assertEqual(p.parse("b").children, [None, 'b', None])
            self.assertEqual(p.parse("c").children, [None, None, 'c'])
            self.assertEqual(p.parse("ab").children, ['a', 'b', None])
            self.assertEqual(p.parse("ac").children, ['a', None, 'c'])
            self.assertEqual(p.parse("bc").children, [None, 'b', 'c'])
            self.assertEqual(p.parse("abc").children, ['a', 'b', 'c'])

            p = _Lark("""!start: (["a"] "b" ["c"])+ """, maybe_placeholders=True)
            self.assertEqual(p.parse("b").children, [None, 'b', None])
            self.assertEqual(p.parse("bb").children, [None, 'b', None, None, 'b', None])
            self.assertEqual(p.parse("abbc").children, ['a', 'b', None, None, 'b', 'c'])
            self.assertEqual(p.parse("babbcabcb").children,
                             [None, 'b', None,
                              'a', 'b', None,
                              None, 'b', 'c',
                              'a', 'b', 'c',
                              None, 'b', None])

            p = _Lark("""!start: ["a"] ["c"] "b"+ ["a"] ["d"] """, maybe_placeholders=True)
            self.assertEqual(p.parse("bb").children, [None, None, 'b', 'b', None, None])
            self.assertEqual(p.parse("bd").children, [None, None, 'b', None, 'd'])
            self.assertEqual(p.parse("abba").children, ['a', None, 'b', 'b', 'a', None])
            self.assertEqual(p.parse("cbbbb").children, [None, 'c', 'b', 'b', 'b', 'b', None, None])

        def test_escaped_string(self):
            "Tests common.ESCAPED_STRING"
            grammar = r"""
            start: ESCAPED_STRING+
            %import common (WS_INLINE, ESCAPED_STRING)
            %ignore WS_INLINE
            """
            parser = _Lark(grammar)
            parser.parse(r'"\\" "b" "c"')
            parser.parse(r'"That" "And a \"b"')

        def test_meddling_unused(self):
            "Unless 'unused' is removed, LALR analysis will fail on reduce-reduce collision"
            grammar = """
            start: EKS* x
            x: EKS
            unused: x*
            EKS: "x"
            """
            parser = _Lark(grammar)

        @unittest.skipIf(PARSER != 'lalr' or 'custom' in LEXER, "Serialize currently only works for LALR parsers without custom lexers (though it should be easy to extend)")
        def test_serialize(self):
            grammar = """
            start: _ANY b "C"
            _ANY: /./
            b: "B"
            """
            parser = _Lark(grammar)
            s = BytesIO()
            parser.save(s)
            s.seek(0)
            parser2 = Lark.load(s)
            self.assertEqual(parser2.parse('ABC'), Tree('start', [Tree('b', [])]))
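            # save()/load() round-trip the fully analysed parser through a byte
            # stream, so an expensive LALR construction can be cached, e.g.
            # (sketch; 'parser.bin' is a hypothetical cache file):
            #
            #     with open('parser.bin', 'wb') as f:
            #         parser.save(f)
            #     with open('parser.bin', 'rb') as f:
            #         parser = Lark.load(f)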

        def test_multi_start(self):
            parser = _Lark('''
                a: "x" "a"?
                b: "x" "b"?
            ''', start=['a', 'b'])

            self.assertEqual(parser.parse('xa', 'a'), Tree('a', []))
            self.assertEqual(parser.parse('xb', 'b'), Tree('b', []))
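            # With start=[...], parse() takes the desired start symbol as its
            # second argument; there is no implicit default among multiple starts,
            # so it must be passed explicitly.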

        def test_lexer_detect_newline_tokens(self):
            # Detect newlines in regular tokens
            g = _Lark(r"""start: "go" tail*
            !tail : SA "@" | SB "@" | SC "@" | SD "@"
            SA : "a" /\n/
            SB : /b./s
            SC : "c" /[^a-z]/
            SD : "d" /\s/
            """)
            a, b, c, d = [x.children[1] for x in g.parse('goa\n@b\n@c\n@d\n@').children]
            self.assertEqual(a.line, 2)
            self.assertEqual(b.line, 3)
            self.assertEqual(c.line, 4)
            self.assertEqual(d.line, 5)

            # Detect newlines in ignored tokens
            for regexp in ['/\\n/', '/[^a-z]/', '/\\s/']:
                g = _Lark('''!start: "a" "a"
                             %ignore {}'''.format(regexp))
                a, b = g.parse('a\na').children
                self.assertEqual(a.line, 1)
                self.assertEqual(b.line, 2)

        @unittest.skipIf(PARSER == 'cyk' or LEXER == 'custom_old', "match_examples() not supported for CYK/old custom lexer")
        def test_match_examples(self):
            p = _Lark(r"""
                start: "a" "b" "c"
            """)

            def match_error(s):
                try:
                    _ = p.parse(s)
                except UnexpectedInput as u:
                    return u.match_examples(p.parse, {
                        0: ['abe'],
                        1: ['ab'],
                        2: ['cbc', 'dbc'],
                    })
                assert False

            assert match_error("abe") == 0
            assert match_error("ab") == 1
            assert match_error("bbc") == 2
            assert match_error("cbc") == 2
            self.assertEqual(match_error("dbc"), 2)
            self.assertEqual(match_error("ebc"), 2)
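            # Roughly, match_examples() replays each candidate string through the
            # given parse callable and returns the label of the example whose
            # failure matches this exception's parser state, which is how several
            # different bad inputs above map onto one error label.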

        @unittest.skipIf(not regex or sys.version_info[0] == 2, 'Unicode and Python 2 do not play nicely together.')
        def test_unicode_class(self):
            "Tests that character classes from the `regex` module work correctly."
            g = _Lark(r"""?start: NAME
                          NAME: ID_START ID_CONTINUE*
                          ID_START: /[\p{Lu}\p{Ll}\p{Lt}\p{Lm}\p{Lo}\p{Nl}_]+/
                          ID_CONTINUE: ID_START | /[\p{Mn}\p{Mc}\p{Nd}\p{Pc}]+/""", regex=True)
            self.assertEqual(g.parse('வணக்கம்'), 'வணக்கம்')

        @unittest.skipIf(not regex or sys.version_info[0] == 2, 'Unicode and Python 2 do not play nicely together.')
        def test_unicode_word(self):
            "Tests that a persistent bug in the `re` module is avoided when `regex` is enabled."
            g = _Lark(r"""?start: NAME
                          NAME: /[\w]+/
                       """, regex=True)
            self.assertEqual(g.parse('வணக்கம்'), 'வணக்கம்')

        @unittest.skipIf(PARSER != 'lalr', "Puppet error handling only works with LALR for now")
        def test_error_with_puppet(self):
            def ignore_errors(e):
                if isinstance(e, UnexpectedCharacters):
                    # Skip bad character
                    return True

                # Must be UnexpectedToken
                if e.token.type == 'COMMA':
                    # Skip comma
                    return True
                elif e.token.type == 'SIGNED_NUMBER':
                    # Try to feed a comma and retry the number
                    e.puppet.feed_token(Token('COMMA', ','))
                    e.puppet.feed_token(e.token)
                    return True

                # Unhandled error. Will stop parse and raise exception
                return False

            g = _Lark(r'''
                start: "[" num ("," num)* "]"
                ?num: SIGNED_NUMBER
                %import common.SIGNED_NUMBER
                %ignore " "
            ''')
            s = "[0 1, 2,, 3,,, 4, 5 6 ]"
            tree = g.parse(s, on_error=ignore_errors)
            res = [int(x) for x in tree.children]
            assert res == list(range(7))

            s = "[0 1, 2,@, 3,,, 4, 5 6 ]$"
            tree = g.parse(s, on_error=ignore_errors)
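            # The on_error callback receives the UnexpectedInput exception and
            # returns True to resume parsing or False to re-raise; e.puppet
            # exposes the live parser state, so feed_token() can splice in a
            # repair token (here, the missing comma) before re-feeding the token
            # that failed.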

    _NAME = "Test" + PARSER.capitalize() + LEXER.capitalize()
    _TestParser.__name__ = _NAME
    _TestParser.__qualname__ = "tests.test_parser." + _NAME
    globals()[_NAME] = _TestParser
    __all__.append(_NAME)

_TO_TEST = [
    ('standard', 'earley'),
    ('standard', 'cyk'),
    ('standard', 'lalr'),

    ('dynamic', 'earley'),
    ('dynamic_complete', 'earley'),

    ('contextual', 'lalr'),

    ('custom_new', 'lalr'),
    ('custom_new', 'cyk'),
    ('custom_old', 'earley'),
]
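
# Each (lexer, parser) pair in _TO_TEST generates a concrete TestCase subclass
# named "Test" + parser + lexer (e.g. TestEarleyStandard, TestLalrContextual),
# so the whole suite above runs once per configuration.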
for _LEXER, _PARSER in _TO_TEST:
    _make_parser_test(_LEXER, _PARSER)

for _LEXER in ('dynamic', 'dynamic_complete'):
    _make_full_earley_test(_LEXER)


if __name__ == '__main__':
    unittest.main()