import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable
import wandb
def KD(input_p, input_q, T=1):
    """Knowledge-distillation loss between two sets of logits.

    `input_p` are the student logits and `input_q` the teacher logits.
    Returns KL(softmax(q/T) || softmax(p/T)) with 'batchmean' reduction,
    i.e. the sum over classes averaged over the batch dimension.
    """
    student_log_prob = F.log_softmax(input_p / T, dim=1)
    teacher_prob = F.softmax(input_q / T, dim=1)
    return F.kl_div(student_log_prob, teacher_prob, reduction="batchmean")


class LocalUpdate_FedDF(object):
    """Client-side trainer for FedDF: standard supervised local epochs.

    Parameters
    ----------
    args : namespace carrying local_bs, local_ep, optimizer, lr, lr_decay,
        momentum, weight_decay, device and dataset attributes.
    dataset : the full training dataset; `idxs` selects this client's shard.
    idxs : indices of `dataset` belonging to this client.
    """

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs, args),
                                    self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train `net` on this client's data and return its state_dict.

        `round` is the global communication round; it only drives the
        exponential learning-rate decay of the SGD optimizer.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(
                net.parameters(),
                lr=self.args.lr * (self.args.lr_decay ** round),
                momentum=self.args.momentum,
                weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Fail fast instead of hitting an UnboundLocalError below.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        epoch_loss = []  # collected for inspection only; not returned
        for ep in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                if self.args.dataset == 'widar':
                    labels = labels.long()
                # Skip degenerate batches (e.g. BatchNorm fails on batch size 1).
                if images.size(0) <= 1:
                    continue
                net.zero_grad()
                # Model returns a dict; 'output' holds the classification logits.
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            # Guard: every batch in the epoch may have been skipped above.
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()

class SeverUpdate_FedDF(object):
    """Server-side ensemble-distillation step of FedDF.

    Distills the averaged logits of the client models (`net_list`, the
    teachers) into the aggregated server model (`net_s`, the student) using
    the distillation dataset selected by `dict_global`.

    Parameters
    ----------
    args : namespace carrying sever_bs, sever_ep, alpha, optimizer, lr,
        lr_decay, momentum, weight_decay, num_classes and device attributes.
    net_list : list of trained client models forming the teacher ensemble.
    net_s : aggregated (FedAvg) model to be fine-tuned by distillation.
    round : global communication round; drives the SGD learning-rate decay.
    dataset_global / dict_global : dataset and index set for distillation.
    """

    def __init__(self, args, net_list, net_s, round, dataset_global=None, dict_global=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.num_groups = len(net_list)

        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha
        self.model = net_s.to(self.args.device)
        # Teacher models moved to the training device.
        self.m_list = [net.to(self.args.device) for net in net_list]

        if self.args.optimizer == 'sgd':
            self.optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=self.args.lr * (self.args.lr_decay ** round),
                momentum=self.args.momentum,
                weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
        else:
            # Fail fast instead of an AttributeError when train() runs.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

    def train(self):
        """Run `sever_ep` distillation epochs; return the student state_dict."""
        self.model.train()
        epoch_loss = []
        for ep in range(self.args.sever_ep):
            batch_loss = []
            for images, labels in self.gdr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                # Teacher forward passes need no autograd graph. Averaging the
                # raw logits yields the ensemble target; this replaces the
                # deprecated Variable()/deepcopy construction and is
                # numerically identical.
                with torch.no_grad():
                    ensemble_logits = torch.stack(
                        [m(images)['output'] for m in self.m_list]).mean(dim=0)

                out_g = self.model(images)
                kl_loss = KD(out_g['output'], ensemble_logits)
                loss = self.alpha * kl_loss
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                batch_loss.append(loss.item())
            # Guard against an empty distillation loader.
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print("epoch_loss:", epoch_loss)
        return self.model.state_dict()



def FedDF(args, net_zoo, dataset_train, dataset_test, dict_users, dict_global):
    """Run the FedDF federated-training loop.

    Each round: sample client groups via `select_clients_s`, train each
    group's model locally, average the local weights per group
    (`Aggregation`), then distill the ensemble of ALL trained client models
    into every group's averaged model (`SeverUpdate_FedDF`). Per-model test
    accuracy is logged to wandb every round and saved at the end.
    """
    num_model = len(net_zoo)
    acc = [[] for _ in range(num_model)]
    for net in net_zoo:
        net.train()

    for round_idx in range(args.epochs):
        print('*' * 80)
        print('Round {:3d}'.format(round_idx))

        # First return value (selection metadata) is unused here.
        _, idxs_users = select_clients_s(args)

        lens = []       # lens[g][k]: sample count of client k in group g
        para_list = []  # para_list[g][k]: state_dict trained by that client
        for index, client_list in enumerate(idxs_users):
            lens.append([])
            para_list.append([])
            for idx in client_list:
                local = LocalUpdate_FedDF(args=args, dataset=dataset_train,
                                          idxs=dict_users[idx])
                w_local = local.train(round=round_idx,
                                      net=copy.deepcopy(net_zoo[index]).to(args.device))
                para_list[index].append(copy.deepcopy(w_local))
                lens[index].append(len(dict_users[idx]))

        # Materialize every client model once; together they form the teacher
        # ensemble shared by all server-side distillation runs below.
        t_list = []
        for i in range(num_model):
            for item in para_list[i]:
                temp = copy.deepcopy(net_zoo[i])
                temp.load_state_dict(item)
                t_list.append(temp)

        w_global = []
        for i in range(num_model):
            w_global.append(Aggregation(para_list[i], lens[i]))
            net_s = copy.deepcopy(net_zoo[i])
            net_s.load_state_dict(w_global[i])
            sever = SeverUpdate_FedDF(args=args, net_list=t_list, net_s=net_s,
                                      round=round_idx,
                                      dataset_global=dataset_train,
                                      dict_global=dict_global)
            net_zoo[i].load_state_dict(sever.train())

        acc_dict = {}
        for i in range(num_model):
            test_a = test_single(net_zoo[i], dataset_test, args)
            acc_dict['accuracy_local{}'.format(7 + i)] = test_a
            acc[i].append(test_a)
        wandb.log(acc_dict)
    save_result(acc, 'test_acc', args)
        
def test_single(net_glo, dataset_test, args):
    """Evaluate one model on the test set, print both metrics, and return
    the accuracy as a plain Python number."""
    accuracy, test_loss = test_img_avg(net_glo, dataset_test, args)
    print(f"Testing accuracy: {accuracy:.2f}, Testing loss: {test_loss:.2f}")
    return accuracy.item()