import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from models.test import test_img_resem
from torch.autograd import Variable
import wandb
def KD(input_p, input_q, T=1):
    """Temperature-scaled KL distillation loss.

    input_p: student logits (passed through log_softmax).
    input_q: teacher logits (passed through softmax).
    T: softening temperature (default 1).
    Returns the batch-mean KL divergence KL(softmax(q/T) || softmax(p/T)).
    """
    log_p = F.log_softmax(input_p / T, dim=1)
    soft_q = F.softmax(input_q / T, dim=1)
    return nn.KLDivLoss(reduction="batchmean")(log_p, soft_q)

def dkd_loss(logits_student, logits_teacher, target, alpha = 1.0, beta = 8.0, temperature = 4.0):
    """Decoupled Knowledge Distillation loss (TCKD + NCKD).

    logits_student / logits_teacher: (batch, num_classes) logits.
    target: ground-truth class indices, one per row.
    Returns alpha * target-class KD + beta * non-target-class KD.
    """
    mask_gt = _get_gt_mask(logits_student, target)
    mask_rest = _get_other_mask(logits_student, target)
    batch = target.shape[0]
    t_sq = temperature ** 2

    # TCKD: compare the 2-bin (target mass vs. non-target mass) distributions.
    p_student = cat_mask(F.softmax(logits_student / temperature, dim=1), mask_gt, mask_rest)
    p_teacher = cat_mask(F.softmax(logits_teacher / temperature, dim=1), mask_gt, mask_rest)
    tckd = F.kl_div(torch.log(p_student), p_teacher, reduction='sum') * t_sq / batch

    # NCKD: suppress the ground-truth logit (-1000 acts as -inf) so the softmax
    # runs over non-target classes only, then compare those distributions.
    teacher_rest = F.softmax(logits_teacher / temperature - 1000.0 * mask_gt, dim=1)
    student_rest = F.log_softmax(logits_student / temperature - 1000.0 * mask_gt, dim=1)
    nckd = F.kl_div(student_rest, teacher_rest, reduction='sum') * t_sq / batch

    return alpha * tckd + beta * nckd


def _get_gt_mask(logits, target):
    target = target.reshape(-1)
    # print("gt_target:",target.unsqueeze(-1))
    mask = torch.zeros_like(logits).scatter_(1, target.unsqueeze(-1), 1).bool()
    # print(mask)
    return mask


def _get_other_mask(logits, target):
    target = target.reshape(-1)
    mask = torch.ones_like(logits).scatter_(1, target.unsqueeze(-1), 0).bool()
    return mask


def cat_mask(t, mask1, mask2):
    """Collapse `t` row-wise into two columns: the mass under mask1 and under mask2."""
    part1 = (t * mask1).sum(dim=1, keepdim=True)
    part2 = (t * mask2).sum(dim=1, keepdim=True)
    return torch.cat((part1, part2), dim=1)

class LocalUpdate_FedCom(object):
    """Client-side trainer for FedCom: runs local epochs on one client's data shard."""

    def __init__(self, args, dataset=None, idxs=None):
        """
        args: experiment config (device, lr, local_bs, local_ep, optimizer, ...).
        dataset: the full training dataset; `idxs` selects this client's samples.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train `net` for args.local_ep epochs and return its state_dict.

        round: communication-round index, drives exponential lr decay (SGD only).
        Raises ValueError for an unknown args.optimizer (previously this fell
        through and crashed later with an opaque NameError on `optimizer`).
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # Learning rate decays geometrically with the communication round.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            raise ValueError("unsupported optimizer: {}".format(self.args.optimizer))

        epoch_loss = []
        for _epoch in range(self.args.local_ep):  # renamed from `iter` (shadowed the builtin)
            batch_loss = []
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                out_of_local = net(images)
                log_probs = out_of_local['output']  # model forward returns a dict; 'output' holds the logits
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()

                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()
#######
class SeverUpdate_FedCom(object):
    """Server-side ensemble distillation for FedCom.

    Distills the averaged logits of a list of group models (teachers) into the
    global model (student) on server-held data, using the KL loss from KD().
    """

    def __init__(self, args, net_list, net_glob, round, dataset_global=None, dict_global=None):
        """
        args: experiment config (device, lr, sever_bs, sever_ep, alpha, max_norm, ...).
        net_list: teacher models, one per group.
        net_glob: the student/global model, updated in place by train().
        round: communication-round index, drives exponential lr decay (SGD only).
        dataset_global / dict_global: server-side distillation dataset and its index set.
        Raises ValueError for an unknown args.optimizer (previously `self.optimizer`
        was silently left unset and train() crashed with AttributeError).
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.num_groups = len(net_list)

        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha  # weight on the distillation (KL) loss
        self.model = net_glob.to(self.args.device)
        self.m_list = [net.to(self.args.device) for net in net_list]
        self.num_blocks = 3  # NOTE(review): unused within this class — confirm external use

        if self.args.optimizer == 'sgd':
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             lr=self.args.lr * (self.args.lr_decay ** round),
                                             momentum=self.args.momentum,
                                             weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
        else:
            raise ValueError("unsupported optimizer: {}".format(self.args.optimizer))

    def train(self):
        """Run args.sever_ep distillation epochs; return the student's state_dict."""
        self.model.train()
        epoch_loss = []
        for _epoch in range(self.args.sever_ep):  # renamed from `iter` (shadowed the builtin)
            batch_loss = []
            for batch_idx, (images, labels) in enumerate(self.gdr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                out_g = self.model(images)
                # Teacher forward: take each group's detached mid-level features
                # ('representation3') and finish the forward pass through the
                # student's upper layers (start_layer_idx=3) — presumably to align
                # heterogeneous feature spaces; TODO confirm start_layer_idx semantics.
                outputs = []
                for m in self.m_list:
                    temp = m(images, start_layer_idx=0)['representation3'].detach()
                    outputs.append(self.model(temp, start_layer_idx=3))
                # Average the teachers' logits into a single ensemble target.
                ensemble_logits = []
                for i in range(self.num_groups):
                    if i == 0:
                        ensemble_logits = copy.deepcopy(Variable(outputs[0]['output']))
                    else:
                        ensemble_logits = torch.add(ensemble_logits, Variable(outputs[i]['output']))
                ensemble_logits = torch.div(ensemble_logits, float(self.num_groups))
                kl_loss = KD(out_g['output'], ensemble_logits)

                loss = self.alpha * kl_loss
                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_norm)
                self.optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print("epoch_loss:", epoch_loss)
        return self.model.state_dict()



def FedCom(args,net_zoo,dataset_train,dataset_test,dict_users,dict_global):
    """Main FedCom loop over args.epochs communication rounds.

    args: experiment config; net_zoo: list of global models (the code below
    hardcodes exactly 2 — see net_glob_arr_1/2); dataset_train/dataset_test:
    train and test datasets; dict_users: per-client sample-index sets;
    dict_global: server-side index set (unused while the server-distillation
    step below remains commented out).
    Logs per-round accuracy to wandb and saves the history via save_result.
    """
    num_model = len(net_zoo)
    for i in range(num_model):
        net_zoo[i].train()
    # Number of client slots assigned to each model per round.
    turntable = int(args.frac*args.num_users/num_model) 
    # One weight buffer per client slot, seeded from each model's initial weights.
    # NOTE(review): only two models are supported here although num_model is general.
    net_glob_arr_1 = []
    net_glob_arr_2 = []
    for index in range(turntable):
        net_glob_arr_1.append(copy.deepcopy(net_zoo[0].state_dict()))
        net_glob_arr_2.append(copy.deepcopy(net_zoo[1].state_dict()))
    
    acc = []  # acc[i] accumulates per-round test accuracy of model i
    for i in range(num_model):
        acc.append([])
    for iter in range(args.epochs):  # NOTE(review): local `iter` shadows the builtin
        print('*'*80)
        print('Round {:3d}'.format(iter))
        w_global = []
        # Sample this round's participating clients without replacement.
        m = max(int(args.frac*args.num_users),1)
        idxs_users = np.random.choice(range(args.num_users),m,replace=False)

        for index,idx in enumerate(idxs_users):
            local = LocalUpdate_FedCom(args=args,dataset=dataset_train,idxs=dict_users[idx])
            # Clients are split by index // turntable: the first `turntable`
            # clients train model 0's buffers, the remainder train model 1's.
            if int(index/turntable) <= 0:
                net_zoo[int(index/turntable)].load_state_dict(net_glob_arr_1[int(index%turntable)])
                w_local = local.train(round=iter,net=copy.deepcopy(net_zoo[int(index/turntable)]).to(args.device))
                net_glob_arr_1[int(index%turntable)] = copy.deepcopy(w_local) 
            else:
                net_zoo[int(index/turntable)].load_state_dict(net_glob_arr_2[int(index%turntable)])
                w_local = local.train(round=iter,net=copy.deepcopy(net_zoo[int(index/turntable)]).to(args.device))
                net_glob_arr_2[int(index%turntable)] = copy.deepcopy(w_local)  
        # Aggregate each model's per-slot client weights and load the result
        # back into the corresponding global model.
        w_global.append(AggregationNoData(net_glob_arr_1))
        w_global.append(AggregationNoData(net_glob_arr_2))
        net_zoo[0].load_state_dict(w_global[0])
        net_zoo[1].load_state_dict(w_global[1])

        # if iter > 150 :
        # glb_net1 = copy.deepcopy(net_zoo[0])
        # glb_net1.load_state_dict(w_global[0])
        # glb_net2 = copy.deepcopy(net_zoo[1])
        # glb_net2.load_state_dict(w_global[1])
        # net_list1 = []
        # net_list2 = []
        # for i in range(turntable):
        #     net_list1.append(copy.deepcopy(net_zoo[0]))
        #     net_list1[i].load_state_dict(net_glob_arr_1[i])
        #     net_list2.append(copy.deepcopy(net_zoo[1]))
        #     net_list2[i].load_state_dict(net_glob_arr_2[i])

        # sever = SeverUpdate_FedCom(args=args,net_list = copy.deepcopy(net_list2),net_glob=copy.deepcopy(glb_net1),round=iter,dataset_global = dataset_train,dict_global = dict_global)
        # w_list1 = sever.train()
        # net_zoo[0].load_state_dict(w_list1)
        # sever = SeverUpdate_FedCom(args=args,net_list = copy.deepcopy(net_list1),net_glob=copy.deepcopy(glb_net2),round=iter,dataset_global = dataset_train,dict_global = dict_global)
        # w_list2 = sever.train()
        
        # net_zoo[1].load_state_dict(w_list2)
        # Broadcast: reset every client slot to the freshly aggregated weights.
        # NOTE(review): these entries alias net_zoo[i]'s live state_dict (no
        # deepcopy); looks harmless in the current flow because each slot is
        # overwritten with a deepcopy right after local training — confirm.
        for index in range(turntable):
            net_glob_arr_1[index] = net_zoo[0].state_dict()
            net_glob_arr_2[index] = net_zoo[1].state_dict()

        # Evaluate every global model on the held-out test set and log to wandb.
        acc_dict = {}
        for i in range(num_model):
            a = test_single(net_zoo[i],dataset_test,args)
            acc_dict['accuracy_cnn{}'.format(i+1)] = a
            acc[i].append(a)
        # a1 = test_resem(net_zoo,dataset_test,args)
        # acc_dict['accuracy_cnn1'] = a1
        # net_zoo2 = []
        # net_zoo2.append(net_zoo[1])
        # net_zoo2.append(net_zoo[0])
        # a2 = test_resem(net_zoo2,dataset_test,args)
        # acc_dict['accuracy_cnn2'] = a2
        wandb.log(acc_dict)
    save_result(acc,'test_acc',args)     
        
def test_single(net_glo,dataset_test,args):
    """Evaluate one global model on the test set; print metrics, return accuracy as float."""
    accuracy, avg_loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, avg_loss))
    return accuracy.item()

def test_resem(net_zoo,dataset_test,args):
    """Evaluate the model ensemble on the test set; print metrics, return accuracy as float."""
    accuracy, avg_loss = test_img_resem(net_zoo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, avg_loss))
    return accuracy.item()