import os
import random

import numpy as np
import torch



def set_seed(seed):
    """Seed every RNG source and force deterministic cuDNN behaviour.

    Args:
        seed: Integer seed applied to Python's ``random``, NumPy, and
            PyTorch (CPU and all CUDA devices).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU RNG
    torch.cuda.manual_seed(seed)      # current GPU (deferred no-op without CUDA)
    torch.cuda.manual_seed_all(seed)  # all GPUs
    os.environ['PYTHONHASHSEED'] = str(seed)  # disable hash randomisation
    # Make cuDNN pick deterministic convolution algorithms; benchmark mode
    # auto-tunes for speed at the cost of reproducibility, so turn it off.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def init_dl_program(device_name, seed=None, use_cudnn=True, deterministic=False,
                    benchmark=False, use_tf32=False, max_threads=None):
    """Initialise devices, RNG seeds and backend flags for a training run.

    Args:
        device_name: A device spec (str or int) or a list of specs.
        seed: Base seed; each RNG source gets a distinct offset seed so
            their streams differ. ``None`` leaves RNG state untouched.
        use_cudnn: Enable/disable the cuDNN backend.
        deterministic: Force deterministic cuDNN algorithms.
        benchmark: Enable cuDNN auto-tuning (non-reproducible).
        use_tf32: Allow TF32 matmul/conv on Ampere+ GPUs (if supported).
        max_threads: If given, cap the number of intra-op CPU threads.

    Returns:
        A single ``torch.device`` when one device was requested, otherwise
        a list of devices in the order given.
    """
    if max_threads is not None:
        # Original accepted this parameter but ignored it; honour it here.
        torch.set_num_threads(max_threads)

    if seed is not None:
        # Offset the seed per RNG source so their streams are decorrelated.
        random.seed(seed)
        seed += 1
        np.random.seed(seed)
        seed += 1
        torch.manual_seed(seed)

    if isinstance(device_name, (str, int)):
        device_name = [device_name]

    devices = []
    # Iterate in reverse so the *first* requested CUDA device ends up as the
    # current device after the loop (set_device is called last for it).
    for t in reversed(device_name):
        t_device = torch.device(t)
        devices.append(t_device)
        if t_device.type == 'cuda':
            torch.cuda.set_device(t_device)
            if seed is not None:
                seed += 1
                torch.cuda.manual_seed(seed)
    devices.reverse()

    torch.backends.cudnn.enabled = use_cudnn
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = benchmark

    # allow_tf32 only exists on newer PyTorch builds; guard for compatibility.
    if hasattr(torch.backends.cudnn, 'allow_tf32'):
        torch.backends.cudnn.allow_tf32 = use_tf32
        torch.backends.cuda.matmul.allow_tf32 = use_tf32

    return devices if len(devices) > 1 else devices[0]

def one_hot_encoding(X):
    """Return a one-hot matrix for integer labels X (width inferred as max+1)."""
    labels = [int(v) for v in X]
    num_classes = np.max(labels) + 1
    return np.eye(num_classes)[labels]

def jitter(x, sigma=0.8):
    """Additive Gaussian-noise augmentation (https://arxiv.org/pdf/1706.00527.pdf)."""
    noise = np.random.normal(loc=0.0, scale=sigma, size=x.shape)
    return x + noise

def DataTransform_TD(sample, jitter_ratio):
    """Time-domain augmentation: jitter all rows, then randomly mask rows.

    Each row (sample) is independently assigned one of 4 augmentation ids;
    only rows with id 0 keep their jittered values, the rest are zeroed —
    i.e. noise injection combined with a random row mask.

    Args:
        sample: Array of shape (n_samples, ...); first axis is masked.
        jitter_ratio: Standard deviation of the additive Gaussian noise.

    Returns:
        The augmented array, same shape as ``sample``.
    """
    aug_1 = jitter(sample, jitter_ratio)
    # Draw one of 4 augmentation ids per row.
    li = np.random.randint(0, 4, size=[sample.shape[0]])
    li_onehot = one_hot_encoding(li)
    # Zero rows not selected for augmentation 0. The original indexed with the
    # float array `1 - li_onehot[:, 0]`, which modern NumPy rejects; a boolean
    # row mask implements the stated intent correctly.
    aug_1[li_onehot[:, 0] == 0] = 0
    return aug_1

def DataTransform_FD(sample):
    """Frequency-domain augmentation: sum of removal and addition perturbations.

    Per the paper, frequency augmentations consider: (1) low vs. high
    frequencies, (2) single vs. multiple components, (3) random vs.
    distribution-based masking.
    """
    removed = remove_frequency(sample, pertub_ratio=0.1)
    added = add_frequency(sample, pertub_ratio=0.1)
    return removed + added

def remove_frequency(x, pertub_ratio=0.0):
    """Zero out a random fraction (~pertub_ratio) of the entries of x.

    Args:
        x: Input tensor (CPU or CUDA).
        pertub_ratio: Approximate fraction of entries to zero.

    Returns:
        ``x`` with roughly ``pertub_ratio`` of its entries masked to zero.
    """
    # The original used torch.cuda.FloatTensor, which requires a CUDA build and
    # crashes on CPU; build the mask directly on x's device instead.
    mask = torch.rand(x.shape, device=x.device) > pertub_ratio  # ~pertub_ratio entries are False
    return x * mask

def add_frequency(x, pertub_ratio=0.0):
    """Add random-amplitude perturbations to ~pertub_ratio of the entries of x.

    Perturbation amplitudes are uniform in [0, 0.1 * x.max()).

    Args:
        x: Input tensor (CPU or CUDA).
        pertub_ratio: Approximate fraction of entries to perturb.

    Returns:
        ``x`` plus a sparse random perturbation matrix.
    """
    # The original built the mask with torch.cuda.FloatTensor (fails on CPU)
    # and the amplitudes with torch.rand on the default device (device-mismatch
    # for CUDA inputs); create both on x's device.
    mask = torch.rand(x.shape, device=x.device) > (1 - pertub_ratio)  # only ~pertub_ratio True
    max_amplitude = x.max()
    random_am = torch.rand(mask.shape, device=x.device) * (max_amplitude * 0.1)
    pertub_matrix = mask * random_am
    return x + pertub_matrix

