import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import Aggregation
from models.test import test_img_avg
from models.branchnet import BranchNet
from torch.autograd import Variable
import copy

def KD(input_p, input_q, T=1):
    """Temperature-scaled KL-divergence distillation loss.

    input_p: student logits (gradients flow through this side).
    input_q: teacher logits, softened into a target distribution.
    T: softmax temperature; larger T gives softer distributions.
    Returns the batch-mean KL divergence KL(softmax(q/T) || softmax(p/T)).
    """
    student_log_probs = F.log_softmax(input_p / T, dim=1)
    teacher_probs = F.softmax(input_q / T, dim=1)
    return nn.KLDivLoss(reduction="batchmean")(student_log_probs, teacher_probs)

class LocalUpdate_FedHomo(object):
    """Client-side update for FedHomo.

    Runs ``args.local_ep`` epochs of supervised cross-entropy training on
    this client's data shard and returns the updated model weights.
    """

    def __init__(self, args, dataset=None, idxs=None, verbose=False):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.selected_clients = []
        # DataLoader restricted to this client's sample indices.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True)
        self.verbose = verbose

    def train(self, round, net):
        """Train ``net`` in place on the local shard and return its state_dict.

        round: global communication round index (used for lr decay with SGD).
        net: model already moved to ``args.device``.
        Raises ValueError for an unrecognised ``args.optimizer``.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # Exponential lr decay over communication rounds.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Fail fast instead of hitting an UnboundLocalError at optimizer.step().
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        predict_loss = 0.0
        for _epoch in range(self.args.local_ep):
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                # The model returns a dict; 'output' holds the classification logits.
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                predict_loss += loss.item()

        if self.verbose:
            info = '\nUser predict Loss={:.4f}'.format(predict_loss / (self.args.local_ep * len(self.ldr_train)))
            print(info)

        return net.state_dict()

class SeverUpdate_FedHomo(object):
    """Server-side mutual distillation across model groups (FedHomo).

    Each group model is trained on a shared server-held dataset with a
    cross-entropy loss plus an averaged KL term that pulls its internal
    'representation' toward those of the other groups.
    """

    def __init__(self, args, net_list, round, dataset_global=None, dict_global=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.net_state = []

        self.models = []
        self.optimizer = []

        # Shared (server-held) data used for the distillation phase.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global), batch_size=self.args.sever_bs, shuffle=True)
        self.alpha = args.alpha  # weight of the distillation term
        self.num_groups = len(net_list)
        for net in net_list:
            model = net.to(self.args.device)
            self.models.append(model)
            if self.args.optimizer == 'sgd':
                optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                            momentum=self.args.momentum, weight_decay=self.args.weight_decay)
            elif self.args.optimizer == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr)
            else:
                # Fail fast instead of an UnboundLocalError on the append below.
                raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))
            self.optimizer.append(optimizer)

    def train(self):
        """Run ``args.sever_ep`` epochs of mutual distillation and return the
        updated state_dicts of all group models."""
        for model in self.models:
            model.train()
        # Guard against division by zero when there is a single group (no peers;
        # kl_loss is 0 in that case, so the divisor value is irrelevant).
        num_peers = max(self.num_groups - 1, 1)
        epoch_loss = []
        for _epoch in range(self.args.sever_ep):
            batch_loss = []
            for images, labels in self.gdr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                outputs = [model(images) for model in self.models]
                for i in range(self.num_groups):
                    ce_loss = self.loss_func(outputs[i]['output'], labels)
                    kl_loss = 0
                    for j in range(self.num_groups):
                        if i != j:
                            # detach() stops gradients flowing into the teacher
                            # model and keeps its graph intact for its own turn
                            # as student. The previous Variable(...) wrapper is
                            # a no-op in modern PyTorch (it did NOT detach), so
                            # the second group's backward hit an already-freed
                            # graph.
                            kl_loss += KD(outputs[i]['representation'],
                                          outputs[j]['representation'].detach(),
                                          self.args.temp)
                    loss = ce_loss + self.alpha * kl_loss / num_peers

                    self.optimizer[i].zero_grad()
                    loss.backward()
                    self.optimizer[i].step()
                    batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))
        for model in self.models:
            self.net_state.append(model.state_dict())
        return self.net_state


def FedHomo(args, net, dataset_train, dataset_test, dict_users, dict_global):
    """Main FedHomo training loop.

    Each round: sample a fraction of clients, train a local copy of the
    global model per client, run server-side mutual distillation over the
    resulting models, aggregate them weighted by client data size, and
    evaluate the new global model on the test set. Accuracies for all
    rounds are saved via ``save_result``.
    """
    net.train()
    acc = []
    for rnd in range(args.epochs):
        print('*' * 80)
        print('Round {:3d}'.format(rnd))
        # At least one client participates per round.
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        net_list = []
        lens = []
        for idx in idxs_users:
            local = LocalUpdate_FedHomo(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w = local.train(round=rnd, net=copy.deepcopy(net).to(args.device))
            # Materialise the trained client model for the distillation phase.
            client_net = copy.deepcopy(net)
            client_net.load_state_dict(w)
            net_list.append(client_net)
            lens.append(len(dict_users[idx]))

        sever = SeverUpdate_FedHomo(args=args, net_list=net_list, round=rnd,
                                    dataset_global=dataset_train, dict_global=dict_global)
        w_list = sever.train()

        # Weighted (by client data size) average of the distilled models.
        w_glob = Aggregation(w_list, lens)
        net.load_state_dict(w_glob)

        acc.append(test(net, dataset_test, args))
    save_result(acc, 'test_acc', args)



def test(net, dataset_test, args):
    """Evaluate ``net`` on the test set, print the metrics, and return the
    accuracy as a plain Python number."""
    acc_test, loss_test = test_img_avg(net, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_test, loss_test))
    return acc_test.item()

  

        

