import os
import torch
import shutil
import numpy as np
import random
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn as nn
import copy

# Current working directory at import time.
# NOTE(review): unused within this file's visible code — presumably read by
# callers elsewhere; confirm before removing.
current_directory = os.getcwd()

def RBN_weight(bn_mean_list, bn_var_list, args, epoch, T=0.01):
    """Compute per-client aggregation weights from BN-statistic similarity.

    Each client's per-layer BN mean/var is compared (via ``cos``) against the
    saved global statistics in ``{args.root}/rbn.t7``; the per-client average
    similarity is turned into weights with a temperature-``T`` softmax.

    Args:
        bn_mean_list: per-client list of per-layer BN mean tensors.
        bn_var_list:  per-client list of per-layer BN variance tensors.
        args:         needs ``num_users`` and ``root``.
        epoch:        current round; round 0 has no global stats yet.
        T:            softmax temperature (lower = sharper weighting).

    Returns:
        List of floats summing to 1.0 (one weight per client).
    """
    if epoch == 0:
        # No global statistics saved yet: fall back to uniform weights.
        return [1.0 / args.num_users for _ in range(args.num_users)]
    # Load the checkpoint once (previously the file was loaded twice).
    stats = torch.load(f"{args.root}/rbn.t7")
    global_mean = stats['global_mean']
    global_var = stats['global_var']
    causal_weight = []
    for i in range(len(bn_mean_list)):
        # Average the mean- and var-similarity over all BN layers.
        layer_sims = [
            (cos(bn_mean_list[i][j], global_mean[j])
             + cos(bn_var_list[i][j], global_var[j])) / 2
            for j in range(len(bn_mean_list[0]))
        ]
        causal_weight.append(sum(layer_sims) / len(layer_sims))
    # Temperature softmax; subtracting the max is mathematically identical
    # but avoids overflow in exp for small T.
    peak = max(causal_weight)
    exps = [np.exp((c - peak) / T) for c in causal_weight]
    total = sum(exps)
    return [e / total for e in exps]

def made_causal_stat(causal_bn_mean, causal_bn_var, args):
    """Aggregate per-client BN statistics into global stats and save them.

    For every BN layer, stacks that layer's statistic across all clients and
    takes the element-wise mean, then saves ``{'global_mean', 'global_var'}``
    to ``{args.root}/rbn.t7`` (consumed by ``RBN_weight``).

    Args:
        causal_bn_mean: per-client list of per-layer BN mean tensors.
        causal_bn_var:  per-client list of per-layer BN variance tensors.
        args:           needs ``root`` (checkpoint directory).
    """
    def _layerwise_mean(per_client_stats):
        # Element-wise mean of the i-th layer statistic across clients.
        out = []
        for i in range(len(per_client_stats[0])):
            stacked = torch.stack([item[i].cuda() for item in per_client_stats])
            out.append(torch.mean(stacked, dim=0))
        return out

    global_mean = _layerwise_mean(causal_bn_mean)
    # BUG FIX: the variance aggregate previously iterated causal_bn_mean
    # (copy-paste), so global_var was a duplicate of global_mean.
    global_var = _layerwise_mean(causal_bn_var)

    temp_rbn = {'global_mean': global_mean, 'global_var': global_var}
    torch.save(temp_rbn, f"{args.root}/rbn.t7")

def cos(x, y):
    """Return <x, y> / (||x||_1 * ||y||_1) as a Python float.

    NOTE(review): the norms here are L1 (``torch.norm(., 1)``); a true cosine
    similarity would use L2 norms — confirm the L1 normalization is intended.
    Both tensors are moved to the default CUDA device first.
    """
    x = x.cuda()
    y = y.cuda()
    inner = (x * y).sum()
    score = inner / (torch.norm(x, 1) * torch.norm(y, 1))
    return score.item()


def find_batchnorm_layers(model):
    """Collect every SyncBatchNorm / BatchNorm2d / BatchNorm3d module in *model*.

    Note: ``nn.BatchNorm1d`` is not matched here.
    """
    bn_types = (nn.SyncBatchNorm, nn.BatchNorm2d, nn.BatchNorm3d)
    return [module for _, module in model.named_modules()
            if isinstance(module, bn_types)]


def average_weights_alpha(w, lw, idx, p, global_rank):
    """
    Returns the weighted average of the weights.

    w           : list of client state_dicts (dict[str, Tensor]).
    lw          : per-client scalar compared against idx — presumably a local
                  data/label statistic; TODO confirm semantics with callers.
    idx         : threshold: clients with lw[i] >= idx get down-weighted by p
                  (for non-BN parameters only).
    p           : down-weight factor in (0, 1].
    global_rank : CUDA device index the averaging runs on.
    """
    device = f"cuda:{global_rank}"
    w_avg = copy.deepcopy(w[0])
    for key in w_avg.keys():
        cou = 0  # clients (beyond the first) that contributed with weight 1
        w_avg[key] = w_avg[key].to(device)
        # Client 0: scaled by p when its lw passes the threshold.
        # NOTE(review): unlike the loop below, this branch does not exempt
        # 'bn' keys, and client 0 is never counted in cou — confirm intended.
        if (lw[0] >= idx):
            w_avg[key] = w_avg[key] * p
        for i in range(1, len(w)):
            # NB: moves the caller's tensors to `device` in place.
            w[i][key] = w[i][key].to(device)
            if (lw[i] >= idx) and (('bn' not in key)):
                w_avg[key] = w_avg[key] + w[i][key] * p
            else:
                cou += 1 
                w_avg[key] = w_avg[key] + w[i][key]
        # Normalize: `cou` clients contributed with weight 1, the remaining
        # len(w) - cou with weight p.
        w_avg[key] = torch.div(w_avg[key], cou+(len(w)-cou)*p)
    return w_avg

def setup_seed(seed):
    """Seed every RNG used here (random, numpy, torch CPU/CUDA) and make
    cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.deterministic = True


def checkmade_dir(file_path, delete=True):
    """Ensure *file_path* exists as a directory.

    If it does not exist it is created. If it already exists and *delete*
    is True it is wiped and recreated empty; with *delete* False an
    existing directory is left untouched.
    """
    if not os.path.exists(file_path):
        os.makedirs(file_path)
        return
    if delete:
        shutil.rmtree(file_path)
        os.makedirs(file_path)
        
def delete_file(file_path, delete=True):
    """Remove the file at *file_path* when it exists and *delete* is True.

    A missing path, or delete=False, is a silent no-op.
    """
    if delete and os.path.exists(file_path):
        os.remove(file_path)

def average_weights(w):
    """
    Returns the average of the weights.

    w is a list of state_dicts; the result averages each entry element-wise.
    'num_batches_tracked' counters use true_divide so integer tensors still
    average to a float.
    """
    n = len(w)
    w_avg = copy.deepcopy(w[0])
    for key in w_avg:
        for other in w[1:]:
            w_avg[key] += other[key]
        divide = (w_avg[key].true_divide if 'num_batches_tracked' in key
                  else lambda d: torch.div(w_avg[key], d))
        w_avg[key] = divide(n)
    return w_avg

def setup_seed(seed):
    """Setting the seed of random numbers.

    Seeds torch (CPU and all CUDA devices), numpy and random, and enables
    deterministic cuDNN kernels.

    NOTE(review): this redefinition shadows the earlier setup_seed in this
    file; this is the one callers actually get.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # BUG FIX: was `cudnn.deterministuc = True` (typo), which silently set a
    # bogus module attribute and never enabled deterministic cuDNN.
    cudnn.deterministic = True
    

def record_net_data_stats(y_train, net_dataidx_map):
    """Return per-client class counts for the given index partition.

    Maps each client id in net_dataidx_map to a dict {class: count} over its
    samples. Classes a client never sees are recorded with count 1
    (presumably to keep downstream per-class statistics nonzero — confirm).
    """
    num_class = np.unique(y_train).shape[0]
    net_cls_counts = {}
    for net_i, dataidx in net_dataidx_map.items():
        labels, counts = np.unique(y_train[dataidx], return_counts=True)
        tmp = dict(zip(labels, counts))
        for cls in range(num_class):
            tmp.setdefault(cls, 1)
        net_cls_counts[net_i] = tmp
    return net_cls_counts

def get_cls_num_list(traindata_cls_counts, dataset):
    """Transfer the per-client class-count dicts into dense per-client lists.

    Class count is fixed by dataset name: 200 for 'tiny', 100 for 'cifar100',
    10 otherwise. Missing classes appear as 0.
    """
    if dataset == 'tiny':
        num_class = 200
    elif dataset == 'cifar100':
        num_class = 100
    else:
        num_class = 10
    cls_num_list = []
    for counts in traindata_cls_counts.values():
        row = [0] * num_class
        for cls, cnt in counts.items():
            row[cls] = cnt
        cls_num_list.append(row)
    return cls_num_list

def causal_loss(logits_adv, logits_inv):
    """Mean KL(softmax(logits_adv) || softmax(logits_inv)) over the batch.

    Both arguments are raw logits of shape (batch, classes).
    """
    p = logits_adv.softmax(dim=1)
    q = logits_inv.softmax(dim=1)
    kl_per_sample = (p * (p.log() - q.log())).sum(dim=1)
    return kl_per_sample.mean()

def rprint(str, rank):
    """Print *str* only when rank == 1 (other ranks stay silent)."""
    if rank != 1:
        return
    print(str)

def str2bool(v):
    """Parse a human-readable boolean string (case-insensitive).

    Accepts 'yes'/'true'/'t'/'y'/'1' as True and 'no'/'false'/'f'/'n'/'0'
    as False.

    Raises:
        ValueError: for any other input. (Was `assert False`, which is
        stripped under `python -O` and raised AssertionError otherwise;
        ValueError is also what argparse expects from a type callable.)
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ValueError(f"cannot interpret {v!r} as a boolean")


def do_freeze(net):
    """Disable gradient tracking for every parameter of *net*."""
    for param in net.parameters():
        param.requires_grad = False
        
def not_freeze(net):
    """Re-enable gradient tracking for every parameter of *net*."""
    for param in net.parameters():
        param.requires_grad = True


class MixAttack(object):
    """Alternate between a slow and a fast attack over a training cycle.

    While net.training is True, the slow attack is used for the first
    `ratio` fraction of each `train_iters`-long cycle and the fast attack
    for the rest; the internal counter wraps at the end of every cycle.
    In eval mode the fast attack is always used and the counter does not
    advance.
    """

    def __init__(self, net, slowattack, fastattack, train_iters):
        self.net = net
        self.slowattack = slowattack
        self.fastattack = fastattack
        self.train_iters = train_iters
        self.ratio = 0.3       # fraction of the cycle run with the slow attack
        self.current_iter = 0  # position within the current cycle

    def __call__(self, inputs, targets):
        if not self.net.training:
            # Evaluation: always fast, no counter bookkeeping.
            adv_inputs, weights, diff = self.fastattack(inputs, targets)
            return adv_inputs, weights, diff
        if self._iter < self.train_iters * self.ratio:
            adv_inputs, weights, diff = self.slowattack(inputs, targets)
        else:
            adv_inputs, weights, diff = self.fastattack(inputs, targets)
        self.iter()
        self.check()
        return adv_inputs, weights, diff

    def iter(self):
        """Advance the cycle counter by one step."""
        self.current_iter += 1

    def check(self):
        """Wrap the counter back to zero at the end of a cycle."""
        if self.current_iter == self.train_iters:
            self.current_iter = 0

    @property
    def _iter(self):
        return self.current_iter


def get_onehot(adv_output, targets):
    """Build a float one-hot tensor on the GPU with adv_output's shape.

    adv_output : (batch, classes) tensor used only for its shape.
    targets    : (batch,) integer class indices.
    """
    batch, num_classes = adv_output.shape
    onehot = torch.zeros(batch, num_classes).cuda()
    onehot.scatter_(1, targets.unsqueeze(-1), 1)
    return onehot

def inv_eps(dataset, network):
    """Look up the invariant-attack epsilon for a (dataset, network) pair.

    Values replicate the original if/elif chain (which also contained a
    duplicate dead `cifar10`/`cnn` branch with the same value).

    Raises:
        ValueError: for an unknown pair. (The original chain fell through
        and crashed with UnboundLocalError instead.)
    """
    eps_table = {
        ('cifar10', 'vgg'): 0.03,
        ('cifar10', 'cnn'): 4 / 255,
        ('cifar10', 'resnet'): 4 / 255,
        ('cifar10', 'wide'): 2 / 255,
        ('cifar100', 'cnn'): 4 / 255,
        ('cifar100', 'resnet'): 8 / 255,
        ('cifar100', 'vgg'): 8 / 255,
        ('cifar100', 'wide'): 8 / 255,
        ('svhn', 'cnn'): 4 / 255,
        ('svhn', 'vgg'): 4 / 255,
        ('svhn', 'resnet'): 1 / 255,
        ('svhn', 'wide'): 1 / 255,
        ('tiny', 'cnn'): 0.5 / 255,
        ('tiny', 'vgg'): 1 / 255,
        ('tiny', 'resnet'): 0.5 / 255,
        ('tiny', 'wide'): 0.5 / 255,
    }
    try:
        return eps_table[(dataset, network)]
    except KeyError:
        raise ValueError(
            f"no inv_eps configured for dataset={dataset!r}, network={network!r}"
        ) from None

def causal_loss_func(logits_adv, logits_inv):
    """Mean KL(softmax(logits_adv) || softmax(logits_inv)) over the batch.

    Duplicate of causal_loss above; kept for callers that use this name.
    """
    p = logits_adv.softmax(dim=1)
    q = logits_inv.softmax(dim=1)
    per_sample_kl = (p * (p.log() - q.log())).sum(dim=1)
    return per_sample_kl.mean()

def average_weights(w, global_rank):
    """
    Returns the average of the weights.

    Device-aware variant (shadows the earlier average_weights): every tensor
    is moved to cuda:<global_rank> first. NB: moving w[i][key] mutates the
    caller's state_dicts in place.
    """
    device = f"cuda:{global_rank}"
    n = len(w)
    w_avg = copy.deepcopy(w[0])
    for key in w_avg:
        w_avg[key] = w_avg[key].to(device)
        for client in w[1:]:
            client[key] = client[key].to(device)
            w_avg[key] += client[key]
        if 'num_batches_tracked' in key:
            w_avg[key] = w_avg[key].true_divide(n)
        else:
            w_avg[key] = torch.div(w_avg[key], n)
    return w_avg

def average_weights_bn(w,global_rank,user_id,rbn_weight):
    """
    Returns the average of the weights.

    NOTE(review): the rbn_weight-weighted sum accumulated in the loop is
    discarded by the final if/else — 'num_batches_tracked' keys keep the raw
    weighted sum (not an average), and every other key is overwritten with
    client user_id's own tensor. Confirm whether the final overwrite is
    intentional (personalized weights per client) or a bug that drops the
    aggregation entirely.
    """
    device = f"cuda:{global_rank}"
    w_avg = copy.deepcopy((w[0]))
    for key in w_avg.keys():
        w_avg[key] = w_avg[key].to(device)
        # Weighted accumulation across clients with per-client rbn_weight.
        w_avg[key] = w_avg[key] * rbn_weight[0]
        
        for i in range(1, len(w)):
            # NB: moves the caller's tensors to `device` in place.
            w[i][key] = w[i][key].to(device)
            w_avg[key] += w[i][key] * rbn_weight[i]
        if 'num_batches_tracked' in key:
            w_avg[key] = w_avg[key]  # no-op: keeps the weighted sum
        else:
            w_avg[key] = w[user_id][key]  # discards the sum computed above
    return w_avg


def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    """Persist *state* to *filename*, but only when it is the best so far."""
    if not is_best:
        return
    torch.save(state, filename)