import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable
import wandb
def KD(input_p, input_q, T=1):
    """Temperature-scaled KL distillation loss.

    Treats `input_q` as the teacher logits and `input_p` as the student
    logits; returns KL(softmax(q/T) || softmax(p/T)) averaged over the batch.
    """
    log_p = F.log_softmax(input_p / T, dim=1)
    q = F.softmax(input_q / T, dim=1)
    # F.kl_div(log-input, prob-target) with batchmean is exactly what
    # nn.KLDivLoss(reduction="batchmean") computes.
    return F.kl_div(log_p, q, reduction="batchmean")

def dkd_loss(logits_student, logits_teacher, target, alpha = 1.0, beta = 8.0, temperature = 1.0):
    """Decoupled Knowledge Distillation loss (Zhao et al., CVPR 2022).

    Splits KD into a target-class term (TCKD) and a non-target-class term
    (NCKD), weighted by `alpha` and `beta` respectively.

    Bug fix: the original applied `log_softmax` to BOTH student and teacher
    and summed masked *log-probabilities* into the target/non-target bins.
    `F.kl_div` expects a probability distribution as its target, and the
    binned values must be summed *probabilities*; summing log-probs produced
    negative "probabilities" whose log inside kl_div is NaN. This follows
    the official DKD implementation: softmax both, sum probabilities under
    the masks, take log of the student bins only.

    Args:
        logits_student: (B, C) raw student logits.
        logits_teacher: (B, C) raw teacher logits.
        target: (B,) integer class labels.
        alpha: weight of the target-class (TCKD) term.
        beta: weight of the non-target-class (NCKD) term.
        temperature: softmax temperature.

    Returns:
        Scalar tensor: alpha * TCKD + beta * NCKD.
    """
    target = target.reshape(-1)
    # gt_mask: True only at the ground-truth class; other_mask: complement.
    gt_mask = torch.zeros_like(logits_student).scatter_(1, target.unsqueeze(-1), 1).bool()
    other_mask = torch.ones_like(logits_student).scatter_(1, target.unsqueeze(-1), 0).bool()

    pred_student = F.softmax(logits_student / temperature, dim=1)
    pred_teacher = F.softmax(logits_teacher / temperature, dim=1)
    # Collapse each distribution into two bins: [p(target), p(non-target)].
    pred_student = torch.cat(
        [(pred_student * gt_mask).sum(dim=1, keepdim=True),
         (pred_student * other_mask).sum(dim=1, keepdim=True)], dim=1)
    pred_teacher = torch.cat(
        [(pred_teacher * gt_mask).sum(dim=1, keepdim=True),
         (pred_teacher * other_mask).sum(dim=1, keepdim=True)], dim=1)
    log_pred_student = torch.log(pred_student)
    tckd_loss = (
        F.kl_div(log_pred_student, pred_teacher, reduction='sum')
        * (temperature**2)
        / target.shape[0]
    )

    # NCKD: distributions over the non-target classes only — pushing the
    # ground-truth logit down by 1000 effectively removes it from the softmax.
    pred_teacher_part2 = F.softmax(
        logits_teacher / temperature - 1000.0 * gt_mask, dim=1
    )
    log_pred_student_part2 = F.log_softmax(
        logits_student / temperature - 1000.0 * gt_mask, dim=1
    )
    nckd_loss = (
        F.kl_div(log_pred_student_part2, pred_teacher_part2, reduction='sum')
        * (temperature**2)
        / target.shape[0]
    )
    return alpha * tckd_loss + beta * nckd_loss


def _get_gt_mask(logits, target):
    target = target.reshape(-1)
    # print("gt_target:",target.unsqueeze(-1))
    mask = torch.zeros_like(logits).scatter_(1, target.unsqueeze(-1), 1).bool()
    # print(mask)
    return mask


def _get_other_mask(logits, target):
    target = target.reshape(-1)
    mask = torch.ones_like(logits).scatter_(1, target.unsqueeze(-1), 0).bool()
    return mask


def cat_mask(t, mask1, mask2):
    """Collapse `t` (B, C) into two columns: row-sum under `mask1`, row-sum under `mask2`.

    Returns a (B, 2) tensor [sum(t*mask1), sum(t*mask2)] per row.
    """
    part1 = (t * mask1).sum(dim=1, keepdim=True)
    part2 = (t * mask2).sum(dim=1, keepdim=True)
    return torch.cat((part1, part2), dim=1)

class LocalUpdate_FedEn(object):
    """Client-side trainer for FedEn: plain supervised cross-entropy training
    on one client's local data shard.

    Fixes vs. the original: the loop variable `iter` shadowed the builtin;
    `epoch_loss`/`batch_loss` were accumulated but never used (dead work,
    plus a latent ZeroDivisionError when the loader is empty); `batch_idx`
    was unused. External behavior (returned state_dict) is unchanged.
    """

    def __init__(self, args, dataset=None, idxs=None):
        """
        Args:
            args: experiment config (expects local_bs, local_ep, device,
                optimizer, lr, lr_decay, momentum, weight_decay).
            dataset: full training dataset; `idxs` selects this client's shard.
            idxs: indices of `dataset` belonging to this client.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train `net` for args.local_ep epochs on the local shard.

        Args:
            round: current communication round; used for exponential lr decay
                under SGD.
            net: model to train in place (expects forward to return a dict
                with key 'output' — matches the rest of this file).

        Returns:
            The trained model's state_dict.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        for ep in range(self.args.local_ep):
            for images, labels in self.ldr_train:
                images = images.to(self.args.device)
                labels = labels.to(self.args.device)
                net.zero_grad()
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
        return net.state_dict()

class SeverUpdate_FedEn(object):
    """Server-side mutual distillation for FedEn.

    Each group model is trained to match the leave-one-out ensemble (the
    mean of the OTHER groups' logits) on a globally shared data split.
    (Class name "Sever" is kept as-is: callers elsewhere reference it.)
    """

    def __init__(self, args, net_list, round, dataset_global=None, dict_global=None):
        """
        Args:
            args: experiment config (expects device, optimizer, lr, lr_decay,
                momentum, weight_decay, sever_bs, sever_ep, alpha, num_classes).
            net_list: one model per group, pre-loaded with local weights.
                Assumes len(net_list) >= 2 so a leave-one-out teacher exists.
            round: current communication round (used for lr decay under SGD).
            dataset_global: dataset the server distills on.
            dict_global: indices of `dataset_global` available to the server.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.num_groups = len(net_list)

        self.models = []
        self.optimizer = []
        self.net_state = []

        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha
        for i in range(self.num_groups):
            model = net_list[i].to(self.args.device)
            self.models.append(model)
            if self.args.optimizer == 'sgd':
                optimizer = torch.optim.SGD(model.parameters(),
                                            lr=self.args.lr * (self.args.lr_decay ** round),
                                            momentum=self.args.momentum,
                                            weight_decay=self.args.weight_decay)
            elif self.args.optimizer == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr)
            self.optimizer.append(optimizer)

    def train(self):
        """Run args.sever_ep epochs of mutual KD; return the updated state_dicts.

        Bug fix vs. the original: the running `ensemble_logits` tensor was
        mutated in place inside the per-group loop (subtract this group's
        logits, then divide by num_groups-1, every iteration), so from the
        second group onward the "teacher" was no longer the leave-one-out
        mean of the raw summed logits. The summed logits are now kept
        immutable and each group's teacher is recomputed from them. The sum
        is also detached (the deprecated Variable() wrapper did NOT detach),
        so gradients flow only through the student being updated and each
        per-group backward() uses its own graph.

        Returns:
            List of deep-copied state_dicts, one per group model.
        """
        for model in self.models:
            model.train()
        epoch_loss = []
        for ep in range(self.args.sever_ep):
            batch_loss = []
            for images, labels in self.gdr_train:
                images = images.to(self.args.device)
                labels = labels.to(self.args.device)
                outputs = [model(images) for model in self.models]
                # Immutable, detached sum of every group's logits.
                logits_sum = torch.stack(
                    [out['output'].detach() for out in outputs]).sum(dim=0)
                for i in range(self.num_groups):
                    # Leave-one-out teacher: mean of the OTHER groups' logits.
                    teacher = (logits_sum - outputs[i]['output'].detach()) \
                              / float(self.num_groups - 1)
                    kl_loss = KD(outputs[i]['output'], teacher)
                    loss = self.alpha * kl_loss
                    self.optimizer[i].zero_grad()
                    loss.backward()
                    self.optimizer[i].step()
                    batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print("epoch_loss:", epoch_loss)
        for model in self.models:
            self.net_state.append(copy.deepcopy(model.state_dict()))
        return self.net_state


def FedEn(args, net_glob, dataset_train, dataset_test, dict_users, dict_global):
    """FedEn main loop.

    Each round: (1) local supervised training on sampled clients, one model
    slot per client; (2) server-side mutual distillation across the group
    models; (3) aggregation of the distilled weights, broadcast back to all
    slots; (4) evaluation of both the pre- and post-distillation aggregates.

    Fixes vs. the original: `turntable = int(args.frac * args.num_users)`
    could be 0 while `m = max(..., 1)` still sampled one client, crashing
    on `net_glob_arr[index]` — both now share one guarded count; the loop
    variable `iter` shadowed the builtin; the wandb key 'accuarcy2' was
    misspelled (now 'accuracy2').
    """
    net_glob.train()
    # Number of active model slots per round (at least 1).
    turntable = max(int(args.frac * args.num_users), 1)
    net_glob_arr = [copy.deepcopy(net_glob.state_dict()) for _ in range(turntable)]

    acc = []
    for rnd in range(args.epochs):
        print('*' * 80)
        print('Round {:3d}'.format(rnd))

        idxs_users = np.random.choice(range(args.num_users), turntable, replace=False)

        # 1) Local supervised training, one slot per sampled client.
        for index, idx in enumerate(idxs_users):
            local = LocalUpdate_FedEn(args=args, dataset=dataset_train, idxs=dict_users[idx])
            net_glob.load_state_dict(net_glob_arr[index])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_glob).to(args.device))
            net_glob_arr[index] = copy.deepcopy(w_local)

        # Pre-distillation aggregate, evaluated for reference only.
        w_global = AggregationNoData(net_glob_arr)

        # 2) Server-side mutual distillation across the group models.
        net_list = []
        for i in range(turntable):
            net_list.append(copy.deepcopy(net_glob))
            net_list[i].load_state_dict(net_glob_arr[i])
        sever = SeverUpdate_FedEn(args=args, net_list=copy.deepcopy(net_list), round=rnd,
                                  dataset_global=dataset_train, dict_global=dict_global)
        w_list = sever.train()
        for i in range(turntable):
            net_glob_arr[i] = copy.deepcopy(w_list[i])

        # 3) Aggregate post-distillation weights and broadcast to every slot.
        w_global_2 = AggregationNoData(net_glob_arr)
        for i in range(turntable):
            net_glob_arr[i] = copy.deepcopy(w_global_2)

        # 4) Evaluate both aggregates; only the distilled one is recorded.
        glb_net = copy.deepcopy(net_glob)
        glb_net.load_state_dict(w_global)
        acc_item_t = test_single(glb_net, dataset_test, args)
        glb_net.load_state_dict(w_global_2)
        acc_item = test_single(glb_net, dataset_test, args)
        acc.append(acc_item)
        wandb.log({'accuracy': acc_item, 'accuracy2': acc_item_t})
    save_result(acc, 'test_acc', args)
        
def test_single(net_glo, dataset_test, args):
    """Evaluate `net_glo` on the test set, print the metrics, and return
    the accuracy as a plain Python number."""
    acc, loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc, loss))
    return acc.item()