# Token record emitted by the tokenizer. From later usage (L29, and the
# tests below) its fields include: type (token class name), match (the
# matched text), index (character offset), and line (line number).
# NOTE(review): the namedtuple(...) call is truncated in this view — the
# field list is not visible; confirm against the full file.
6 Token = collections.namedtuple(
# Build a matcher closure for one (name, regex) token definition.
# The closure tries the regex at position `index` of `source` and returns a
# (success, new_index, token_or_None) triple consumed by the tokenize loop.
# NOTE(review): interior lines are missing here (numbering jumps 18→20,
# 21→24, 24→28) — the success-branch header and `return True, ...` opener
# are not visible; the comments below describe only what is shown.
16 def _make_token_matcher(definition):
17 name, regex = definition
# Compile once per definition so the per-character tokenize loop does not
# recompile patterns.
18 regex_matcher = re.compile(regex)
20 def token_matcher(index, source, line):
# `.match` anchors at the start of the slice; a miss yields None.
21 match = regex_matcher.match(source[index:])
# No match at this position: report failure without advancing the index.
24 return False, index, None
# Success path (opening line not visible): advance the index past the
# matched text and build the Token carrying type, text, index, and line.
28 index + len(match.group()),
29 Token(type=name, match=match.group(), index=index, line=line),
# Ordered (name, regex) token definitions. Order matters: the tokenize loop
# (below) tries matchers in sequence and takes the first that succeeds.
# NOTE(review): the opening `_TOKEN_MATCHERS = [` line and any entries at
# the elided line numbers (e.g. 38, 44-45) are not visible in this view.
36 ('open_parenthese', r'\('),
37 ('close_parenthese', r'\)'),
39 ('integer_literal', r'\d+'),
# Symbols are lowercase-ASCII-only per this pattern.
40 ('symbol', r'[a-z]+'),
# Non-greedy, so "'a' 'b'" yields two separate string tokens; no escape
# handling is visible in the pattern.
41 ('single_quoted_string_literal', r"'.*?'"),
42 ('addition_level_operator', r'(\+|-)'),
# `//` is listed as the division operator alongside `*` and `%`.
43 ('multiplication_level_operator', r'(\*|//|%)'),
# Replace the raw (name, regex) pairs in place with compiled matcher
# closures produced by _make_token_matcher.
46 _TOKEN_MATCHERS = list(map(_make_token_matcher, _TOKEN_MATCHERS))
# Tokenizer entry point (the `def tokenize(source)` line itself falls in an
# elided region — numbering jumps 48→53). The decorator presumably forces
# the generator's output into a tuple; confirm against util's definition.
48 @util.force_generator(tuple)
# Main scan loop: walk `source` by character index until exhausted.
53 while index < len(source):
# Spaces are skipped (the skip/continue body is not visible here).
54 if source[index] == ' ':
# Try each compiled matcher in definition order; the first success
# advances `index` and yields `token` (yield not visible in this view).
60 for matcher in _TOKEN_MATCHERS:
61 success, index, token = matcher(index, source, line)
# No matcher accepted the character: fail loudly.
# NOTE(review): a more specific exception type than bare Exception would
# let callers distinguish tokenizer errors — worth considering.
68 raise Exception('Unexpected character "{}"'.format(source[index]))
# Consume newline runs; presumably the (elided) body increments `line`
# so later tokens carry correct line numbers — see the line-number test.
70 while index < len(source) and source[index] in set(['\n']):
# Self-test harness: running the module directly executes the unit tests
# (the unittest.main() call is not visible in this view).
74 if __name__ == '__main__':
# Each test presumably calls tokenize(...) and asserts equality with an
# expected tuple of Token instances; most assertion bodies are elided here.
77     class TokenizeTests(unittest.TestCase):
78         def test_tokenizes_open_parenthese(self):
82                     type='open_parenthese',
89         def test_tokenizes_close_parenthese(self):
93                     type='close_parenthese',
100         def test_tokenizes_symbol(self):
111         def test_tokenizes_single_quoted_string_literal(self):
113                 tokenize("'Hello, world'"),
115                     type='single_quoted_string_literal',
# The quotes are part of the match text — the tokenizer does not strip them.
116                     match="'Hello, world'",
# Both + and - map to the same addition-level token type.
122         def test_tokenizes_plus(self):
126                     type='addition_level_operator',
133         def test_tokenizes_minus(self):
137                     type='addition_level_operator',
# *, //, and % all map to the multiplication-level token type.
144         def test_tokenizes_times(self):
148                     type='multiplication_level_operator',
155         def test_tokenizes_integer_divide(self):
159                     type='multiplication_level_operator',
166         def test_tokenizes_modular_divide(self):
170                     type='multiplication_level_operator',
# NOTE(review): a comma pattern is not among the visible _TOKEN_MATCHERS
# entries above — it presumably lives in one of the elided lines (e.g. 38).
177         def test_tokenizes_comma(self):
189         def test_handles_trailing_newline(self):
200         def test_handles_leading_space(self):
# Verifies the `line` counter advances across the '\n' in the input.
211         def test_tokenizes_with_proper_line_numbers(self):
213                 tokenize('print\n('),
222                     type='open_parenthese',