import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import Aggregation
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable

def KD(input_p, input_q, T=1):
    """Temperature-scaled KL-divergence distillation loss.

    Computes KL( log_softmax(input_p / T) || softmax(input_q / T) ) with
    'batchmean' reduction, i.e. the student logits are `input_p` and the
    teacher logits are `input_q`.
    """
    student_log_probs = F.log_softmax(input_p / T, dim=1)
    teacher_probs = F.softmax(input_q / T, dim=1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="batchmean")

class LocalUpdate_Fedtwo(object):
    """Client-side trainer: plain supervised training on one client's data shard."""

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader over this client's subset (idxs) of the global dataset.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Run `args.local_ep` epochs of supervised training on the client's data.

        Args:
            round: current communication round; used for exponential lr decay
                when the optimizer is SGD.
            net: model to train in place; its forward must return a dict with
                key 'output' holding the class logits.

        Returns:
            The trained model's state_dict.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # lr decays exponentially with the communication round.
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Fail fast: previously an unknown optimizer name fell through and
            # crashed later with NameError at optimizer.step().
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        epoch_loss = []
        for _ in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()

class SeverUpdate_Fedtwo(object):
    """Server-side trainer: distills each group model toward a fixed teacher.

    Each of the `num_groups` group models is trained on the server's shared
    data with cross-entropy plus a temperature-scaled KL term pulling its
    'representation5' output toward the (frozen) global teacher `net_glob`.
    """

    def __init__(self, args, net_list, net_glob, round, dataset_global=None, dict_global=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.net_state = []
        self.num_groups = args.num_groups
        self.models = []
        self.optimizer = []
        # Teacher network; it is only used for inference during train().
        self.net_glob = net_glob
        # DataLoader over the server's shared (global) data shard.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global, dict_global),
                                    batch_size=self.args.sever_bs, shuffle=True)
        # Weight of the distillation term in the total loss.
        self.alpha = args.alpha
        for i in range(self.num_groups):
            model = net_list[i].to(self.args.device)
            self.models.append(model)
            if self.args.optimizer == 'sgd':
                # Same exponential lr decay schedule as the client trainer.
                optimizer = torch.optim.SGD(model.parameters(),
                                            lr=self.args.lr * (self.args.lr_decay ** round),
                                            momentum=self.args.momentum,
                                            weight_decay=self.args.weight_decay)
            elif self.args.optimizer == 'adam':
                optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr)
            else:
                raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))
            self.optimizer.append(optimizer)

    def train(self):
        """Train every group model for `args.sever_ep` epochs on the shared data.

        Returns:
            A list of deep-copied state_dicts, one per group model.
        """
        for model in self.models:
            model.train()

        epoch_loss = []
        for _ in range(self.args.sever_ep):
            batch_loss = []
            for images, labels in self.gdr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)

                # Teacher forward under no_grad: the old code only wrapped the
                # teacher output in Variable(...), which does NOT detach it, so
                # gradients leaked into net_glob and the second group's
                # backward() hit the teacher's already-freed graph
                # (RuntimeError when num_groups > 1).
                with torch.no_grad():
                    teacher_rep = self.net_glob(images)['representation5']

                for i in range(self.num_groups):
                    student_out = self.models[i](images)
                    ce_loss = self.loss_func(student_out['output'], labels)
                    # Temperature-scaled KL distillation toward the teacher
                    # (same math as the module-level KD helper).
                    T = self.args.temp
                    kl_loss = F.kl_div(F.log_softmax(student_out['representation5'] / T, dim=1),
                                       F.softmax(teacher_rep / T, dim=1),
                                       reduction="batchmean")
                    loss = ce_loss + self.alpha * kl_loss

                    self.optimizer[i].zero_grad()
                    loss.backward()
                    self.optimizer[i].step()

                    batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))

        for model in self.models:
            self.net_state.append(copy.deepcopy(model.state_dict()))
        return self.net_state
        


        

def Fedtwo(args, net_list_1, net_list_2, dataset_train, dataset_test, dict_users, dict_global):
    """Two-population federated training with server-side mutual distillation.

    Clients are split into two populations (population 1 owns user ids
    [0, num_users*group1_frac), population 2 the next num_users*group2_frac
    ids). Each round, one sampled client per group trains locally; every 5th
    round all group models are hard-synced to their population average,
    otherwise each population's groups are distilled toward the OTHER
    population's averaged model on the server's shared data. Per-round test
    accuracy of the averaged models is saved via save_result.
    """
    num_net = len(net_list_1)
    if num_net != args.num_groups:
        exit("error num_groups")

    acc = []
    for rnd in range(args.epochs):

        print('*'*80)
        print('Round {:3d}'.format(rnd))

        # Sample participating clients independently for each population.
        m1 = max(int(args.frac * args.num_users * args.group1_frac), 1)
        group1_idxs_users = np.random.choice(range(0, int(args.num_users * args.group1_frac)),
                                             m1, replace=False)
        m2 = max(int(args.frac * args.num_users * args.group2_frac), 1)
        group2_idxs_users = np.random.choice(
            range(int(args.num_users * args.group1_frac),
                  int(args.num_users * (args.group1_frac + args.group2_frac))),
            m2, replace=False)

        w_globals_1 = []
        w_globals_2 = []

        # --- local training, population 1: one sampled client per group ---
        # NOTE(review): assumes m1 >= num_groups and m2 >= num_groups,
        # otherwise the indexing below runs past the sampled clients —
        # confirm against the run configuration.
        for gid in range(args.num_groups):
            idx = group1_idxs_users[gid]
            local = LocalUpdate_Fedtwo(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_list_1[gid]).to(args.device))
            # Exactly one client per group, so the in-group "aggregate" is
            # just that client's weights (the old Aggregation branch was dead).
            w_global = copy.deepcopy(w_local)
            net_list_1[gid].load_state_dict(w_global)
            w_globals_1.append(w_global)

        # --- local training, population 2: one sampled client per group ---
        for gid in range(args.num_groups):
            idx = group2_idxs_users[gid]
            local = LocalUpdate_Fedtwo(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_list_2[gid]).to(args.device))
            w_global = copy.deepcopy(w_local)
            net_list_2[gid].load_state_dict(w_global)
            w_globals_2.append(w_global)

        # Uniform (unweighted) aggregation across groups within a population.
        agg_weights = [1.0] * args.num_groups
        w_global_1 = Aggregation(w_globals_1, agg_weights)
        w_global_2 = Aggregation(w_globals_2, agg_weights)

        if rnd % 5 == 4:
            # Every 5th round: hard-sync every group model to its population's
            # averaged weights.
            for i in range(args.num_groups):
                net_list_1[i].load_state_dict(copy.deepcopy(w_global_1))
                net_list_2[i].load_state_dict(copy.deepcopy(w_global_2))
        else:
            # Mutual distillation: population 1's groups learn from population
            # 2's averaged model, and vice versa.
            glob_1 = copy.deepcopy(net_list_1[0])
            glob_2 = copy.deepcopy(net_list_2[0])
            glob_1.load_state_dict(w_global_1)
            glob_2.load_state_dict(w_global_2)
            net_list_glob_1 = [copy.deepcopy(net) for net in net_list_1]
            net_list_glob_2 = [copy.deepcopy(net) for net in net_list_2]
            sever = SeverUpdate_Fedtwo(args=args, net_list=net_list_glob_1,
                                       net_glob=glob_2.to(args.device), round=rnd,
                                       dataset_global=dataset_train, dict_global=dict_global)
            w_list_1 = sever.train()
            sever = SeverUpdate_Fedtwo(args=args, net_list=net_list_glob_2,
                                       net_glob=glob_1.to(args.device), round=rnd,
                                       dataset_global=dataset_train, dict_global=dict_global)
            w_list_2 = sever.train()
            for i in range(args.num_groups):
                net_list_1[i].load_state_dict(copy.deepcopy(w_list_1[i]))
                net_list_2[i].load_state_dict(copy.deepcopy(w_list_2[i]))

        # Evaluate this round's averaged model of each population.
        glob_1 = copy.deepcopy(net_list_1[0])
        glob_2 = copy.deepcopy(net_list_2[0])
        glob_1.load_state_dict(w_global_1)
        glob_2.load_state_dict(w_global_2)
        acc.append(test_single(glob_1, glob_2, dataset_test, args))

    save_result(acc, 'test_acc', args)


def test_two(net_list_1, net_list_2, dataset_test, args):
    """Evaluate all group models of both populations and return the accuracy."""
    acc_test, loss_test = test_img_two(net_list_1, net_list_2, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_test, loss_test))
    return acc_test.item()

def test_single(net_glo_1, net_glo_2, dataset_test, args):
    """Evaluate the two averaged (global) models and return the accuracy."""
    acc_test, loss_test = test_img_avg_two(net_glo_1, net_glo_2, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_test, loss_test))
    return acc_test.item()

def test_img_two(net_list_1,net_list_2,dataset,args):
    for i in range(args.num_groups):
        net_list_1[i].eval()
        net_list_2[i].eval()
    test_loss = 0
    correct = 0
    data_loader = DataLoader(dataset,batch_size=args.bs)
    
    l = len(data_loader)
    with torch.no_grad():
        for id in range(args.num_groups):
            for idx,(image,label) in enumerate(data_loader):
                image,label = image.to(args.device),label.to(args.device)
            
                log_probs = net_list_1[id](image)['output']
                test_loss += F.cross_entropy(log_probs,label,reduction='sum').item()
                y_pred = log_probs.data.max(1,keepdim=True)[1]
                correct += y_pred.eq(label.data.view_as(y_pred)).long().cpu().sum()

                log_probs = net_list_2[id](image)['output']
                test_loss += F.cross_entropy(log_probs,label,reduction='sum').item()
                y_pred = log_probs.data.max(1,keepdim=True)[1]
                correct += y_pred.eq(label.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)*args.num_groups*2
    accuracy = 100.00 * correct / (len(data_loader.dataset)*args.num_groups*2)
    return accuracy,test_loss


def test_img_avg_two(net_g_1, net_g_2,datatest, args):
    net_g_1.eval()
    net_g_2.eval()
    # testing
    test_loss = 0
    correct = 0
    data_loader = DataLoader(datatest, batch_size=args.bs)
    l = len(data_loader)
    with torch.no_grad():
        for idx, (data, target) in enumerate(data_loader):
            if args.device != -1:
                data, target = data.cuda(), target.cuda()
            log_probs = net_g_1(data)['output']
            # sum up batch loss
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()

            log_probs = net_g_2(data)['output']
            # sum up batch loss
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()

    test_loss /= len(data_loader.dataset)*2
    accuracy = 100.00 * correct / (len(data_loader.dataset)*2)
    return accuracy, test_loss
        

