import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable

def KD(input_p, input_q, T=1):
    """Temperature-scaled KL-divergence distillation loss.

    input_p: student logits (batch, classes); input_q: teacher logits.
    T: softmax temperature. Uses 'batchmean' reduction; note no T**2
    rescaling is applied here (callers pass raw logits).
    """
    log_p = F.log_softmax(input_p / T, dim=1)
    q = F.softmax(input_q / T, dim=1)
    return F.kl_div(log_p, q, reduction="batchmean")

def dkd_loss(logits_student, logits_teacher, target, alpha = 1.0, beta = 8.0, temperature = 4.0):
    """Decoupled Knowledge Distillation loss.

    Splits classic KD into a target-class term (TCKD) weighted by `alpha`
    and a non-target-class term (NCKD) weighted by `beta`, each scaled by
    temperature**2 and averaged over the batch.
    """
    t = temperature
    batch = target.shape[0]
    gt_mask = _get_gt_mask(logits_student, target)
    other_mask = _get_other_mask(logits_student, target)

    # TCKD: KL between the binary (target vs. rest) probability masses.
    p_student = cat_mask(F.softmax(logits_student / t, dim=1), gt_mask, other_mask)
    p_teacher = cat_mask(F.softmax(logits_teacher / t, dim=1), gt_mask, other_mask)
    tckd = F.kl_div(torch.log(p_student), p_teacher, reduction='sum') * (t ** 2) / batch

    # NCKD: suppress the ground-truth logit (-1000 acts as -inf) so the
    # softmax is effectively over non-target classes only.
    log_p_student_nt = F.log_softmax(logits_student / t - 1000.0 * gt_mask, dim=1)
    p_teacher_nt = F.softmax(logits_teacher / t - 1000.0 * gt_mask, dim=1)
    nckd = F.kl_div(log_p_student_nt, p_teacher_nt, reduction='sum') * (t ** 2) / batch

    return alpha * tckd + beta * nckd


def _get_gt_mask(logits, target):
    target = target.reshape(-1)
    # print("gt_target:",target.unsqueeze(-1))
    mask = torch.zeros_like(logits).scatter_(1, target.unsqueeze(-1), 1).bool()
    # print(mask)
    return mask


def _get_other_mask(logits, target):
    target = target.reshape(-1)
    mask = torch.ones_like(logits).scatter_(1, target.unsqueeze(-1), 0).bool()
    return mask


def cat_mask(t, mask1, mask2):
    """Collapse per-class probabilities `t` into two bins.

    Returns a (batch, 2) tensor: column 0 is the probability mass under
    mask1 (target class), column 1 the mass under mask2 (all other classes).
    """
    # Use the canonical torch kwarg `keepdim` consistently; the original
    # mixed positional/keyword dims and the NumPy-compat alias `keepdims`.
    t1 = (t * mask1).sum(dim=1, keepdim=True)
    t2 = (t * mask2).sum(dim=1, keepdim=True)
    return torch.cat([t1, t2], dim=1)

class LocalUpdate_FedC(object):
    """Client-side local trainer for FedClass.

    Trains a copy of the global model on this client's shard of the
    training data and returns the resulting weights.
    """

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader over this client's subset (idxs) of the global dataset.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Run args.local_ep epochs of local training on `net`.

        round: global communication round index (drives SGD lr decay).
        Returns the trained model's state_dict.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # Geometric lr decay with the round index, matching the server side.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Fail loudly instead of the confusing NameError the fall-through caused.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        epoch_loss = []
        for ep in range(self.args.local_ep):  # `ep` avoids shadowing builtin `iter`
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                # The model returns a dict; 'output' holds the classification logits.
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            if batch_loss:  # guard the mean against an empty loader
                epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()

class SeverUpdate_FedClass(object):
    """Server-side mutual-distillation trainer for FedClass.

    Holds one model per client group and trains them jointly on a
    server-held shared dataset: each model minimizes its cross-entropy
    loss plus an alpha-weighted DKD term distilled from every other
    group's model.
    """

    def __init__(self, args, net_list, round, dataset_global=None, dict_global=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.num_groups = len(net_list)

        self.models = []
        self.optimizer = []   # one optimizer per group model
        self.net_state = []   # populated by train()

        # Loader over the globally shared data subset held on the server.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha  # weight of the distillation term
        for i in range(self.num_groups):
            model = net_list[i].to(self.args.device)
            self.models.append(model)
            if self.args.optimizer == 'sgd':
                optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                            momentum=self.args.momentum, weight_decay=self.args.weight_decay)
            elif self.args.optimizer == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr)
            else:
                # Fail loudly instead of the NameError the fall-through caused.
                raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))
            self.optimizer.append(optimizer)

    def train(self):
        """Run args.sever_ep epochs of mutual distillation.

        Returns a list of deep-copied state_dicts, one per group model,
        in the same order as the net_list passed to __init__.
        """
        for model in self.models:
            model.train()
        epoch_loss = []
        for ep in range(self.args.sever_ep):  # `ep` avoids shadowing builtin `iter`
            batch_loss = []
            for images, labels in self.gdr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                outputs = [model(images) for model in self.models]
                for i in range(self.num_groups):
                    ce_loss = self.loss_func(outputs[i]['output'], labels)
                    kl_loss = 0
                    for j in range(self.num_groups):
                        if i != j:
                            # Distil from every other group's model. The teacher
                            # logits are detached so gradients reach model i only
                            # (replaces the deprecated legacy Variable(...) wrapper,
                            # whose constructor produced a detached copy).
                            kl_loss += dkd_loss(outputs[i]['output'],
                                                outputs[j]['output'].detach(),
                                                labels)
                    loss = ce_loss + self.alpha * kl_loss

                    self.optimizer[i].zero_grad()
                    loss.backward()
                    self.optimizer[i].step()
                    # Refresh this model's outputs so later peers in this batch
                    # distil from its just-updated weights.
                    outputs[i] = self.models[i](images)
                    # kl_loss stays the int 0 when there is a single group;
                    # the old unconditional .item() would crash in that case.
                    batch_loss.append(kl_loss.item() if torch.is_tensor(kl_loss) else float(kl_loss))
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print("epoch_loss:", epoch_loss)
        for model in self.models:
            self.net_state.append(copy.deepcopy(model.state_dict()))
        return self.net_state


def FedClass(args, net_glob, dataset_train, dataset_test, dict_users, dict_global):
    """Top-level FedClass training loop.

    Keeps `turntable` parallel model copies. Each round: sampled clients
    train the copies locally; then either (every 5th round) all copies are
    hard-synchronized to the FedAvg model, or the server runs mutual
    distillation between the copies. Test accuracy per round is saved.
    """
    net_glob.train()
    # Clamp to at least one slot so it matches `m` below; the old
    # int(frac * num_users) could be 0 while m was 1, causing an
    # IndexError on net_glob_arr[index].
    turntable = max(int(args.frac * args.num_users), 1)
    net_glob_arr = [copy.deepcopy(net_glob.state_dict()) for _ in range(turntable)]

    acc = []
    for rnd in range(args.epochs):  # `rnd` avoids shadowing builtin `iter`
        print('*' * 80)
        print('Round {:3d}'.format(rnd))

        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Local phase: each sampled client updates one slot of the array.
        for index, idx in enumerate(idxs_users):
            local = LocalUpdate_FedC(args=args, dataset=dataset_train, idxs=dict_users[idx])
            net_glob.load_state_dict(net_glob_arr[index])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_glob).to(args.device))
            net_glob_arr[index] = copy.deepcopy(w_local)

        w_global = AggregationNoData(net_glob_arr)

        if rnd % 5 == 4:
            # Every fifth round: hard synchronization to the averaged weights.
            for i in range(turntable):
                net_glob_arr[i] = copy.deepcopy(w_global)
        else:
            # Otherwise: server-side mutual distillation between the copies.
            net_list = []
            for i in range(turntable):
                net_list.append(copy.deepcopy(net_glob))
                net_list[i].load_state_dict(net_glob_arr[i])
            sever = SeverUpdate_FedClass(args=args, net_list=copy.deepcopy(net_list), round=rnd,
                                         dataset_global=dataset_train, dict_global=dict_global)
            w_list = sever.train()
            for i in range(turntable):
                net_glob_arr[i] = copy.deepcopy(w_list[i])
            w_global = AggregationNoData(net_glob_arr)
        glb_net = copy.deepcopy(net_glob)
        glb_net.load_state_dict(w_global)
        acc.append(test_single(glb_net, dataset_test, args))
    save_result(acc, 'test_acc', args)
        
def test_single(net_glo, dataset_test, args):
    """Evaluate `net_glo` on the test set, print the metrics, and return accuracy as a Python float."""
    accuracy, avg_loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, avg_loss))
    return accuracy.item()