X-Git-Url: https://code.kerkeslager.com/?p=fur;a=blobdiff_plain;f=tokenization.py;h=3c4dc6fb561a2fe08fbed90f23d945bc98873adf;hp=a736912603e0832efc974b07252cf03ebe6fdc38;hb=3da330f045ed7fcb66ee9d9447de320680263699;hpb=ec8ce6f417bad0e61e82462787fbb7d7dbe25ea0

diff --git a/tokenization.py b/tokenization.py
index a736912..3c4dc6f 100644
--- a/tokenization.py
+++ b/tokenization.py
@@ -31,13 +31,13 @@ def _make_token_matcher(definition):
 
   return token_matcher
 
-
 _TOKEN_MATCHERS = [
+  ('keyword', r'(def|end)(?![a-z_])'),
   ('open_parenthese', r'\('),
   ('close_parenthese', r'\)'),
   ('comma', r','),
   ('integer_literal', r'\d+'),
-  ('symbol', r'[a-z]+'),
+  ('symbol', r'[a-z_]+'),
   ('single_quoted_string_literal', r"'.*?'"),
   ('comparison_level_operator', r'(<=|>=|==|!=|<|>)'),
   ('assignment_operator', r'='),
@@ -58,6 +58,12 @@ def tokenize(source):
       index += 1
       continue
 
+    if source[index] == '#':
+      while index < len(source) and source[index] != '\n':
+        index += 1
+
+      continue
+
     success = False
 
     for matcher in _TOKEN_MATCHERS:
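
Note on the changes above (a sketch, not part of the patch): the new keyword
pattern r'(def|end)(?![a-z_])' uses a negative lookahead so 'def' and 'end'
only tokenize as keywords when they are not a prefix of a longer symbol, and
the new '#' branch consumes a comment up to, but not including, the newline,
leaving the '\n' for the tokenizer's other branches. The snippet below
demonstrates both behaviors; the regexes and the loop body are taken from the
diff, while the names KEYWORD, SYMBOL, and skip_comment are hypothetical and
exist only for this illustration.

import re

# Patterns copied from the patched _TOKEN_MATCHERS table.
KEYWORD = re.compile(r'(def|end)(?![a-z_])')
SYMBOL = re.compile(r'[a-z_]+')

assert KEYWORD.match('def f()')             # 'def' followed by non-symbol char
assert KEYWORD.match('definition') is None  # lookahead rejects symbol prefixes
assert SYMBOL.match('my_symbol').group() == 'my_symbol'  # '_' now allowed

def skip_comment(source, index):
  # Mirrors the new '#' branch in tokenize(): advance to end of line,
  # leaving the '\n' in place for the rest of the tokenizer.
  while index < len(source) and source[index] != '\n':
    index += 1
  return index

assert skip_comment('x = 1 # note\ny', 6) == 12  # stops at the '\n'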