import time
from contextlib import contextmanager
from sklearn import preprocessing

def smi_tokenizer(smi):
    """
    Tokenize a SMILES molecule or reaction string.

    :param smi: SMILES string to tokenize
    :return: the recognized tokens joined by single spaces
    :raises ValueError: if the tokens do not reassemble into the input,
        i.e. ``smi`` contains characters the pattern cannot tokenize
    """
    import re
    # Raw string: the original non-raw pattern relied on Python passing
    # unknown escapes (\[, \(, \%, ...) through unchanged, which raises
    # SyntaxWarning on Python 3.12+.  Note the original "\\\\" denoted a
    # single-backslash match, hence r"\\" here.
    pattern = r"(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
    tokens = re.findall(pattern, smi)
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if smi != ''.join(tokens):
        raise ValueError(f'SMILES string could not be fully tokenized: {smi!r}')
    return ' '.join(tokens)

@contextmanager
def timer(name):
    """
    Context manager that prints the wall-clock time spent in its body.

    :param name: label included in the printed message
    :return: yields nothing; on normal exit prints '[name] done in X.XX s'
    """
    t0 = time.time()
    yield
    elapsed = time.time() - t0
    print(f'[{name}] done in {elapsed:.2f} s')


def processes(data, type):
    """
    Scale ``data`` with the sklearn preprocessor selected by ``type``.

    :param data: array-like of shape (n_samples, n_features)
    :param type: one of 'standard_scaler', 'min_max_scaler',
        'max_abs_scaler', 'normalizer'; any other value returns
        ``data`` unchanged
    :return: the fit-transformed array, or ``data`` itself when
        ``type`` is not recognized
    """
    scalers = {
        # zero mean, unit variance per feature
        'standard_scaler': preprocessing.StandardScaler,
        # rescale each feature to the [0, 1] range
        'min_max_scaler': preprocessing.MinMaxScaler,
        # rescale each feature to the [-1, 1] range by its max absolute value
        'max_abs_scaler': preprocessing.MaxAbsScaler,
        # rescale each sample (row) to unit norm
        'normalizer': preprocessing.Normalizer,
    }
    scaler_cls = scalers.get(type)
    if scaler_cls is None:
        return data
    return scaler_cls().fit_transform(data)

