import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from models.branchnet import BranchNet
from torch.autograd import Variable

def KD(input_p, input_q, T=1):
    """Knowledge-distillation loss: temperature-scaled KL(student || teacher).

    Args:
        input_p: student logits, shape (batch, classes).
        input_q: teacher logits, same shape.
        T: softmax temperature; higher values soften both distributions.

    Returns:
        Scalar batch-mean KL divergence between the student's log-softmax
        and the teacher's softmax, both computed at temperature T.
    """
    student_log_probs = F.log_softmax(input_p / T, dim=1)
    teacher_probs = F.softmax(input_q / T, dim=1)
    # KLDivLoss expects log-probabilities as input and probabilities as target.
    return nn.KLDivLoss(reduction="batchmean")(student_log_probs, teacher_probs)

class LocalUpdate_FedC(object):
    """Client-side trainer for FedC: runs local epochs on one user's data shard."""

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader over this client's subset (idxs) of the full training set.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train `net` for args.local_ep epochs and return its state_dict.

        Args:
            round: communication-round index; decays the SGD learning rate
                as lr * lr_decay ** round.
            net: model whose forward returns a dict with logits under 'output'.

        Returns:
            The trained model's state_dict.

        Raises:
            ValueError: if args.optimizer is neither 'sgd' nor 'adam'.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Previously an unknown optimizer left `optimizer` unbound and
            # crashed later at optimizer.step(); fail fast with a clear error.
            raise ValueError("unsupported optimizer: {!r}".format(self.args.optimizer))

        epoch_loss = []
        for _ in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                out_of_local = net(images)  # model returns a dict; 'output' holds the logits
                loss = self.loss_func(out_of_local['output'], labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            # Guard against an empty loader so the mean doesn't divide by zero.
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()

class SeverUpdate_FedC(object):
    """Server-side distillation step for FedC.

    Trains a student model (`net_stu`) on the server's shared data with a
    combined loss: cross-entropy on the labels plus alpha-weighted KD
    against a fixed teacher (`net_glob`).
    """

    def __init__(self, args, net_stu, net_glob, round, dataset_global=None, dict_global=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.num_groups = args.num_groups
        self.net_state = None  # filled with the trained state_dict by train()
        self.net_glob = net_glob.to(self.args.device)  # teacher network
        # DataLoader over the server-held shared dataset (dict_global indices).
        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha  # weight of the distillation term

        self.model = net_stu.to(self.args.device)  # student network
        if self.args.optimizer == 'sgd':
            # `round` decays the learning rate as lr * lr_decay ** round.
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             lr=self.args.lr * (self.args.lr_decay ** round),
                                             momentum=self.args.momentum,
                                             weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
        else:
            # Previously an unknown optimizer left the attribute unbound and
            # crashed later in train(); fail fast instead.
            raise ValueError("unsupported optimizer: {!r}".format(self.args.optimizer))

    def train(self):
        """Run args.sever_ep distillation epochs; return the student state_dict."""
        self.model.train()

        epoch_loss = []
        for _ in range(self.args.sever_ep):
            batch_loss = []
            for images, labels in self.gdr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                # Teacher forward pass needs no gradients. The old code wrapped
                # the output in the deprecated Variable(), which does NOT
                # detach it, so autograd tracked (and accumulated grads into)
                # the teacher for no reason.
                with torch.no_grad():
                    output_g = self.net_glob(images)
                output = self.model(images)
                ce_loss = self.loss_func(output['output'], labels)
                kl_loss = KD(output['output'], output_g['output'], self.args.temp)
                loss = ce_loss + self.alpha * kl_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                batch_loss.append(loss.item())
            # Guard against an empty loader so the mean doesn't divide by zero.
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))
        print(epoch_loss)
        self.net_state = copy.deepcopy(self.model.state_dict())
        return self.net_state
        


        

def FedC(args, net_glob, dataset_train, dataset_test, dict_users, dict_global):
    """FedC training loop: local client training, then server-side mutual
    distillation against the aggregated model, repeated for args.epochs rounds.

    Side effects: prints per-round progress and saves the accuracy history
    via save_result; the passed-in models are mutated via load_state_dict.
    """
    net_glob.train()
    # Number of client slots per round. Previously this was
    # int(args.frac * args.num_users) while the sampling size below used
    # max(..., 1); when frac * num_users < 1 the mismatch caused an
    # IndexError on net_glob_arr. Use the same clamped value everywhere.
    turntable = max(int(args.frac * args.num_users), 1)
    glb_net = copy.deepcopy(net_glob)

    # One model replica (as a state_dict) per sampled-client slot.
    net_glob_arr = [copy.deepcopy(net_glob.state_dict()) for _ in range(turntable)]

    acc = []
    for rnd in range(args.epochs):
        print('*' * 80)
        print('Round {:3d}'.format(rnd))

        idxs_users = np.random.choice(range(args.num_users), turntable, replace=False)

        # Local training: each sampled client refines its slot's weights.
        for index, idx in enumerate(idxs_users):
            local = LocalUpdate_FedC(args=args, dataset=dataset_train, idxs=dict_users[idx])
            net_glob.load_state_dict(net_glob_arr[index])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_glob).to(args.device))
            net_glob_arr[index] = copy.deepcopy(w_local)

        # Distillation teacher: average of all client models this round.
        net_glob_t = copy.deepcopy(net_glob)
        net_glob_t.load_state_dict(AggregationNoData(net_glob_arr))

        # Server-side distillation: each client model is the student, the
        # aggregated model is the (fixed) teacher.
        for i in range(turntable):
            net_glob.load_state_dict(net_glob_arr[i])
            sever = SeverUpdate_FedC(args=args,
                                     net_stu=copy.deepcopy(net_glob).to(args.device),
                                     net_glob=net_glob_t, round=rnd,
                                     dataset_global=dataset_train,
                                     dict_global=dict_global)
            net_glob_arr[i] = copy.deepcopy(sever.train())

        w_global = AggregationNoData(net_glob_arr)

        # Re-sync every slot to the new global weights. Previously this
        # iterated range(args.num_groups), which crashes (or leaves slots
        # stale) whenever num_groups differs from the number of slots.
        for i in range(turntable):
            net_glob_arr[i] = copy.deepcopy(w_global)

        glb_net.load_state_dict(copy.deepcopy(w_global))
        acc.append(test_single(glb_net, dataset_test, args))

    save_result(acc, 'test_acc', args)


def test(net_list, dataset_test, args):
    """Evaluate the given model(s) on dataset_test; print and return accuracy.

    Returns the accuracy as a plain Python float (via .item()).
    """
    acc_test, loss_test = test_img(net_list, dataset_test, args)
    summary = "Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_test, loss_test)
    print(summary)
    return acc_test.item()

def test_single(net_glo, dataset_test, args):
    """Evaluate a single global model on dataset_test; print and return accuracy.

    Returns the accuracy as a plain Python float (via .item()).
    """
    acc_test, loss_test = test_img_avg(net_glo, dataset_test, args)
    summary = "Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_test, loss_test)
    print(summary)
    return acc_test.item()


        

