Source code for autowisp.fit_expression.FitTermsLexer
# pylint: skip-file
# Generated from /home/kpenev/projects/git/PhotometryPipeline/scripts/FitTermsLexer.g4 by ANTLR 4.13.0
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
    from typing import TextIO
else:
    from typing.io import TextIO
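# The list returned below is the serialized ATN (augmented transition network)
# produced by the ANTLR tool; it is decoded at import time by ATNDeserializer
# in FitTermsLexer and is not meant to be edited by hand.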
def serializedATN():
    return [
        4, 0, 9, 54, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1,
        2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5,
        2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9,
        1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2,
        1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 4, 5, 36, 8, 5, 11,
        5, 12, 5, 37, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6,
        1, 7, 1, 7, 1, 8, 4, 8, 49, 8, 8, 11, 8, 12, 8, 50,
        1, 9, 1, 9, 0, 0, 10, 2, 1, 4, 2, 6, 3, 8, 4, 10,
        5, 12, 6, 14, 7, 16, 8, 18, 9, 20, 0, 2, 0, 1, 2, 3,
        0, 9, 10, 13, 13, 32, 32, 3, 0, 44, 44, 123, 123, 125, 125, 53,
        0, 2, 1, 0, 0, 0, 0, 4, 1, 0, 0, 0, 0, 6, 1, 0,
        0, 0, 0, 8, 1, 0, 0, 0, 0, 10, 1, 0, 0, 0, 0, 12,
        1, 0, 0, 0, 1, 14, 1, 0, 0, 0, 1, 16, 1, 0, 0, 0,
        1, 18, 1, 0, 0, 0, 2, 22, 1, 0, 0, 0, 4, 26, 1, 0,
        0, 0, 6, 28, 1, 0, 0, 0, 8, 30, 1, 0, 0, 0, 10, 32,
        1, 0, 0, 0, 12, 35, 1, 0, 0, 0, 14, 41, 1, 0, 0, 0,
        16, 45, 1, 0, 0, 0, 18, 48, 1, 0, 0, 0, 20, 52, 1, 0,
        0, 0, 22, 23, 5, 123, 0, 0, 23, 24, 1, 0, 0, 0, 24, 25,
        6, 0, 0, 0, 25, 3, 1, 0, 0, 0, 26, 27, 2, 48, 57, 0,
        27, 5, 1, 0, 0, 0, 28, 29, 5, 79, 0, 0, 29, 7, 1, 0,
        0, 0, 30, 31, 5, 42, 0, 0, 31, 9, 1, 0, 0, 0, 32, 33,
        5, 43, 0, 0, 33, 11, 1, 0, 0, 0, 34, 36, 7, 0, 0, 0,
        35, 34, 1, 0, 0, 0, 36, 37, 1, 0, 0, 0, 37, 35, 1, 0,
        0, 0, 37, 38, 1, 0, 0, 0, 38, 39, 1, 0, 0, 0, 39, 40,
        6, 5, 1, 0, 40, 13, 1, 0, 0, 0, 41, 42, 5, 125, 0, 0,
        42, 43, 1, 0, 0, 0, 43, 44, 6, 6, 2, 0, 44, 15, 1, 0,
        0, 0, 45, 46, 5, 44, 0, 0, 46, 17, 1, 0, 0, 0, 47, 49,
        3, 20, 9, 0, 48, 47, 1, 0, 0, 0, 49, 50, 1, 0, 0, 0,
        50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 19, 1, 0,
        0, 0, 52, 53, 8, 1, 0, 0, 53, 21, 1, 0, 0, 0, 4, 0,
        1, 37, 50, 3, 5, 1, 0, 6, 0, 0, 4, 0, 0,
    ]
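# ANTLR-generated lexer for the fit-terms expression grammar (FitTermsLexer.g4).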
class FitTermsLexer(Lexer):

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]

    TERM_LIST = 1

    TERM_LIST_START = 1
    UINT = 2
    POLY_START = 3
    CROSSPRODUCT = 4
    UNION = 5
    WS = 6
    TERM_LIST_END = 7
    TERM_SEP = 8
    TERM = 9

    channelNames = ["DEFAULT_TOKEN_CHANNEL", "HIDDEN"]

    modeNames = ["DEFAULT_MODE", "TERM_LIST"]

    literalNames = ["<INVALID>", "'{'", "'O'", "'*'", "'+'", "'}'", "','"]

    symbolicNames = [
        "<INVALID>",
        "TERM_LIST_START",
        "UINT",
        "POLY_START",
        "CROSSPRODUCT",
        "UNION",
        "WS",
        "TERM_LIST_END",
        "TERM_SEP",
        "TERM",
    ]

    ruleNames = [
        "TERM_LIST_START",
        "UINT",
        "POLY_START",
        "CROSSPRODUCT",
        "UNION",
        "WS",
        "TERM_LIST_END",
        "TERM_SEP",
        "TERM",
        "TERMCHAR",
    ]

    grammarFileName = "FitTermsLexer.g4"

    def __init__(self, input=None, output: TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.13.0")
        self._interp = LexerATNSimulator(
            self, self.atn, self.decisionsToDFA, PredictionContextCache()
        )
        self._actions = None
        self._predicates = None
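

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the ANTLR-generated module): a
# minimal example of driving this lexer directly through the antlr4 runtime
# and printing the resulting token stream.  The sample input string is only
# an assumed illustration of the '{'-delimited term-list syntax suggested by
# the token names above; see FitTermsLexer.g4 for the authoritative grammar.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from antlr4 import InputStream, Token

    # Tokenize a hypothetical fit-terms expression and show each token.
    _lexer = FitTermsLexer(InputStream("O2{x, y}"))
    _token = _lexer.nextToken()
    while _token.type != Token.EOF:
        # Map the numeric token type back to its symbolic name for display.
        print(FitTermsLexer.symbolicNames[_token.type], repr(_token.text))
        _token = _lexer.nextToken()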