import re
class Lexer:
    """Regex-based tokenizer for a small Lisp-like language.

    Splits source text into raw token strings, then tags each one with a
    dict describing its type: parens/brackets, booleans (#t/#f), strings,
    chars, :keywords, numbers, and bare symbols.
    """

    # Fixed-text tokens that map directly to a token dict template.
    _FIXED = {
        '(': {'type': 'LP'},
        ')': {'type': 'RP'},
        '[': {'type': 'left'},
        ']': {'type': 'right'},
        '#t': {'type': 'bool', 'value': True},
        # BUG FIX: '#f' previously emitted type 'right' (copy-paste error
        # from the ']' branch); it is a boolean literal like '#t'.
        '#f': {'type': 'bool', 'value': False},
    }

    def __init__(self):
        # Alternation order matters: more specific patterns (parens, #t/#f,
        # strings, chars, keywords, floats) must come before the integer and
        # catch-all symbol patterns.
        self.lex = re.compile(r'''\(|\)|\[|\]|#t|#f|"[^"]*"|'[\\]?[\d\w]+'|:[\w\d_\?\.!\-]+|\d+\.\d+|\d+|[^\s\[\]\(\)]+''')

    def tokenize(self, code):
        """Return the list of typed token dicts for *code*."""
        return self.detect_type(self.lex.findall(code))

    def detect_type(self, tokens):
        """Map raw token strings to dicts with a 'type' (and usually 'value').

        Numeric values are kept as strings; string/char/keyword tokens have
        their delimiters stripped.
        """
        ret = []
        for token in tokens:
            fixed = self._FIXED.get(token)
            if fixed is not None:
                # Copy the template so callers can safely mutate the result.
                ret.append(dict(fixed))
            elif token[0] == ':':
                ret.append({'type': 'keyword', 'value': token[1:]})
            elif token[0] == '"':
                ret.append({'type': 'string', 'value': token[1:-1]})
            elif token[0] == "'":
                ret.append({'type': 'char', 'value': token[1:-1]})
            elif '0' <= token[0] <= '9':
                ret.append({'type': 'number', 'value': token})
            else:
                ret.append({'type': 'symbol', 'value': token})
        return ret
