import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import Aggregation
from models.test import test_img
from models.test import test_img_avg
from models.branchnet import BranchNet
from torch.autograd import Variable
# Parameter-name fragments used to select which parts of a ResNet-style
# network are trainable during server-side updates (see SeverUpdate_Fedloss).
layers0 = ['conv1.weight','bn1.weight','bn1.bias']  # stem parameters: always trainable
layers = ['layer1','layer2','layer3','layer4','fc']  # one entry per server block/optimizer index
# Cumulative variants; apparently unused in this half of the file.
layers1 = ['layer1']
layers2 = ['layer1','layer2']
layers3 = ['layer1','layer2','layer3']
layers4 = ['layer1','layer2','layer3','fc']  # NOTE(review): omits 'layer4' — confirm intentional
def KD(input_p,input_q,T = 1):
    """Temperature-scaled knowledge-distillation loss.

    Returns KLDivLoss(batchmean) between log_softmax(input_p / T) and
    softmax(input_q / T), both taken along dim=1.
    """
    criterion = nn.KLDivLoss(reduction="batchmean")
    student = F.log_softmax(input_p / T, dim=1)
    teacher = F.softmax(input_q / T, dim=1)
    return criterion(student, teacher)

class LocalUpdate_Fedloss(object):
    """Client-side trainer: runs local epochs on one user's data split."""
    def __init__(self,args,dataset=None,idxs = None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader restricted to this client's sample indices.
        self.ldr_train = DataLoader(DatasetSplit(dataset,idxs),self.args.local_bs,shuffle=True)
    def train(self,round,net):
        """Train `net` for args.local_ep epochs and return its state_dict.

        `round` is the communication round; it is only used for the
        exponential LR decay applied with the SGD optimizer.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # LR decays exponentially with the communication round.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr*(self.args.lr_decay**round),
                                        momentum=self.args.momentum,weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        epoch_loss = []
        for iter in range(self.args.local_ep):
            batch_loss = []
            for batch_idx,(images,labels) in enumerate(self.ldr_train):
                images,labels = images.to(self.args.device),labels.to(self.args.device)
                net.zero_grad()
                # The model returns a dict; 'output' holds the classification logits.
                out_of_local = net(images)
                log_probs = out_of_local['output']
                loss = self.loss_func(log_probs,labels)
                loss.backward()
                optimizer.step()

                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))

        return net.state_dict()

class SeverUpdate_Fedloss(object):
    """Server-side mutual-distillation trainer (single model per group).

    Holds one model per group and one optimizer per (group, block) pair.
    Each optimizer captures only the parameters that were trainable
    (stem + block j) at the moment it was constructed; train() then
    optimizes every group on the shared server data with cross-entropy
    plus blockwise KL terms against the other groups' representations.
    """
    def __init__(self,args,net_list,round ,dataset_global=None,dict_global = None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # self.net_s = net_s
        # self.net_list = net_list
        self.num_classes = args.num_classes
        self.net_state = []  # populated by train(): one state_dict per group
        self.num_groups = args.num_groups
        self.num_block = args.num_block
        self.models = []
        self.optimizer = []  # optimizer[i][j] -> group i, block j
        for i in range (self.num_groups):
            self.optimizer.append([])
        # 
        # 
        # DataLoader over the globally shared sample indices.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global,dict_global),batch_size=self.args.sever_bs,shuffle=True)
        self.alpha = args.alpha  # weight of the distillation terms
        for i in range (self.num_groups):

            model = net_list[i].to(self.args.device)
            self.models.append(model)
            for j in range (self.num_block):
                # Freeze every parameter, then re-enable the stem (layers0).
                for name,p in net_list[i].named_parameters():
                    p.requires_grad = False
                    for layer in layers0:
                        if layer == name:
                            p.requires_grad = True
                # Enable block j.  The filter() below is consumed when the
                # optimizer is constructed, so each optimizer captures just
                # the stem + block-j parameters trainable at this point.
                for name,p in net_list[i].named_parameters(): 
                    if layers[j] in name:
                        p.requires_grad = True
                if self.args.optimizer == 'sgd':
                    optimizer = torch.optim.SGD(filter(lambda p:p.requires_grad,model.parameters()), lr=self.args.lr*(self.args.lr_decay**round),
                                            momentum=self.args.momentum,weight_decay=self.args.weight_decay)
                elif self.args.optimizer == 'adam':
                    optimizer = torch.optim.Adam(filter(lambda p:p.requires_grad,model.parameters()), lr=self.args.lr)
                self.optimizer[i].append(optimizer)
    def train(self):
        """Run args.sever_ep epochs of server-side mutual learning.

        Returns a list with each group's final state_dict.
        """
        for i in range (self.num_groups):
            self.models[i].train()
        # net.train()
        # self.net_t.train()
        # self.net_t.eval()

        # if self.args.optimizer == 'sgd':
        #     optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr*(self.args.lr_decay**round),
        #                                 momentum=self.args.momentum,weight_decay=self.args.weight_decay)
        #     # optimizer = torch.optim.SGD([{'params':net.parameters()},{'params':self.branch_dict[64].parameters()},{'params':self.branch_dict[128].parameters()},
        #     #                             {'params':self.branch_dict[256].parameters()},{'params':self.branch_dict[512].parameters()}], 
        #     #                             lr=self.args.lr*(self.args.lr_decay**round),momentum=self.args.momentum,weight_decay=self.args.weight_decay)
        # elif self.args.optimizer == 'adam':
        #     optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        epoch_loss = []
        for iter in range(self.args.sever_ep):
            batch_loss = []
            for batch_idx,(images,labels) in enumerate(self.gdr_train):
                images,labels = images.to(self.args.device),labels.to(self.args.device)
                images,labels = Variable(images),Variable(labels)
                # net.zero_grad()
                outputs = []
                for model in self.models:
                    outputs.append(model(images))
                for i in range(self.num_groups):
                    ce_loss = self.loss_func(outputs[i]['output'],labels)
                    kl_loss = []
                    loss = []
                    kl_loss5 = 0
                    kl_loss1 = 0
                    kl_loss2 = 0
                    kl_loss3 = 0
                    kl_loss4 = 0
                    # Accumulate one KL term per representation depth against
                    # every other group.  NOTE(review): Variable() does not
                    # detach in modern PyTorch, so gradients may also flow into
                    # group j's model here — confirm intended.
                    for j in range (self.num_groups):
                        if i != j:
                            kl_loss1 += KD(outputs[i]['representation1'],Variable(outputs[j]['representation1']),self.args.temp)
                            kl_loss2 += KD(outputs[i]['representation2'],Variable(outputs[j]['representation2']),self.args.temp)
                            kl_loss3 += KD(outputs[i]['representation3'],Variable(outputs[j]['representation3']),self.args.temp)
                            kl_loss4 += KD(outputs[i]['representation4'],Variable(outputs[j]['representation4']),self.args.temp)
                            kl_loss5 += KD(outputs[i]['representation'],Variable(outputs[j]['representation']),self.args.temp)
                    
                    # loss = ce_loss + self.alpha * (kl_loss)/(self.num_groups-1)
                    # loss = ce_loss + self.alpha * (kl_loss1+kl_loss2+kl_loss3+kl_loss4+kl_loss5)/(self.num_groups-1)
                    kl_loss.append(kl_loss1)
                    kl_loss.append(kl_loss2)
                    kl_loss.append(kl_loss3)
                    kl_loss.append(kl_loss4)
                    kl_loss.append(kl_loss5)
                    # loss[k]: CE plus the depth-(k+1) distillation term averaged
                    # over peers.  Assumes num_block <= 5 (five depths available).
                    for k in range(self.num_block):
                        loss.append(ce_loss + self.alpha * (kl_loss[k])/(self.num_groups-1))
                    # print(len(self.optimizer[i]))
                    # Step the block optimizers from deepest to shallowest;
                    # retain_graph keeps the shared forward graph alive across
                    # the repeated backward calls.
                    for j in range(self.num_block-1,-1,-1):
                        self.optimizer[i][j].zero_grad()
                    # for j in range(self.num_block-1,-1,-1):
                        loss[j].backward(retain_graph = True)
                        self.optimizer[i][j].step()
                    # for j in range(self.num_block-1,-1,-1):
                    #     self.optimizer[i][j].step()
                    batch_loss.append(loss[self.num_block-1].item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))
        for model in self.models:
            self.net_state.append(model.state_dict())
        return self.net_state
        


        

def Fedloss(args,net_list,dataset_train,dataset_test,dict_users,dict_global):
    """Main federated training loop over grouped models.

    Each round: split the sampled clients evenly across the groups, train
    each group's model locally, aggregate within each group, then either
    average all groups into one global model or run server-side mutual
    distillation (SeverUpdate_Fedloss).  Records the test accuracy of the
    averaged global model every round and saves it at the end.
    """
    num_net = len(net_list)
    # for i in range(num_net):
    #     net_list[i].train()
    if num_net != args.num_groups:
        exit("error num_groups")
    glob_net = copy.deepcopy(net_list[0])
    acc = []
    for iter in range(args.epochs):

        print('*'*80)
        print('Round {:3d}'.format(iter))

        # Number of clients sampled this round.
        m = max(int(args.frac*args.num_users),1)

        idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        # Split the sampled clients into num_net equal contiguous groups;
        # any remainder clients (m % num_net) are dropped this round.
        group_idxs_users = []
        for i in range(num_net):
            group_users = []
            start = i*int(m/num_net)
            end = (i+1)*int(m/num_net)
            for j in range(start,end):
                group_users.append(idxs_users[j])
            group_idxs_users.append(group_users)
        # # idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        # list_users = [x for x in range(args.num_users)]
        # group_idxs_users = []
        # m1 = max(int(args.frac*args.num_users*args.group1_frac),1)
        # # group1_idxs_users = np.random.choice(range(0,int(args.num_users*args.group1_frac)),m1,replace=False)
        # group1_idxs_users = np.random.choice(list_users,m1,replace=False)
        # for x in group1_idxs_users:
        #     list_users.remove(x)
        # m2 = max(int(args.frac*args.num_users*args.group2_frac),1)
        # # group2_idxs_users = np.random.choice(range(int(args.num_users*args.group1_frac),int(args.num_users*(args.group1_frac+args.group2_frac))),m2,replace=False)
        # group2_idxs_users = np.random.choice(list_users,m2,replace=False)
        # # m3 = max(int(args.frac*args.num_users*args.group3_frac),1)
        # # group3_idxs_users = np.random.choice(range(int(args.num_users*(args.group1_frac+args.group2_frac)),args.num_users),m3,replace=False)
        # group_idxs_users.append(group1_idxs_users) 
        # group_idxs_users.append(group2_idxs_users)
        # # group_idxs_users.append(group3_idxs_users)
        w_globals = []
        w_list = []
        for id in range(args.num_groups):
            w_locals = []
            lens = []
            for idx in group_idxs_users[id]:
                local = LocalUpdate_Fedloss(args=args,dataset=dataset_train,idxs=dict_users[idx])

                w_local = local.train(round=iter,net=copy.deepcopy(net_list[id]).to(args.device))
                w_locals.append(w_local)
                lens.append(len(dict_users[idx]))
            # aggregation in group (weighted by each client's sample count)
            if len(lens) == 1:
                w_global = w_locals[0]
            else:
                w_global = Aggregation(w_locals,lens)
            # w_global = Aggregation(w_locals,lens)
            net_list[id].load_state_dict(w_global)
            w_globals.append(copy.deepcopy(w_global))

        # Uniform average of the per-group models -> candidate global model.
        agg_weights = [] 
        for i in range(num_net):
            agg_weights.append(1.0)
        w_global = Aggregation(w_globals,agg_weights)
        # NOTE(review): 'iter < 0' is never true, so the averaged model is
        # broadcast only when iter % 5 == 4; every other round runs the
        # server-side distillation branch — confirm intended.
        if iter < 0 or iter % 5 == 4:
            for i in range(num_net):
                net_list[i].load_state_dict(copy.deepcopy(w_global))
        else:  
            net_list_glob = []
            for id in range(args.num_groups):
                net_list_glob.append(copy.deepcopy(net_list[id]))          
            sever = SeverUpdate_Fedloss(args=args,net_list = net_list_glob,round=iter,dataset_global=dataset_train,dict_global=dict_global)
            w_list = sever.train()
            for i in range(num_net):
                net_list[i].load_state_dict(copy.deepcopy(w_list[i]))
        # for i in range(args.num_groups):
        #     net_list[i].load_state_dict(w_list[i])



        # acc.append(test_single(net_list,dataset_test,args))
        # Evaluation always uses the uniformly averaged global model.
        glob_net.load_state_dict(copy.deepcopy(w_global))
        acc.append(test_single(glob_net,dataset_test,args))

    save_result(acc,'test_acc',args)       


def test(net_list,dataset_test,args):
    """Evaluate `net_list` on the test set; print and return the accuracy."""
    accuracy, avg_loss = test_img(net_list, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, avg_loss))
    return accuracy.item()

def test_single(net_glo,dataset_test,args):
    """Evaluate a single (averaged) model; print and return its accuracy."""
    accuracy, avg_loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, avg_loss))
    return accuracy.item()


        
#################################################################################################
# NOTE: everything below re-defines the names above (KD, LocalUpdate_Fedloss,
# SeverUpdate_Fedloss, Fedloss, test, test_single). Python keeps the LAST binding,
# so only this second version is in effect at import time.
#################################################################################################
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import Aggregation
from models.test import test_img
from models.test import test_img_avg
from models.branchnet import BranchNet
from torch.autograd import Variable
# Parameter-name fragments used to select which parts of a ResNet-style
# network are trainable during server-side updates (see SeverUpdate_Fedloss).
layers0 = ['conv1.weight','bn1.weight','bn1.bias']  # stem: always trainable
# layers[j]: the cumulative set of sub-blocks trained by the depth-j model copy.
layers = [
    ['layer1'],
    ['layer1','layer2'],
    ['layer1','layer2','layer3'],
    ['layer1','layer2','layer3','layer4'],
    ['layer1','layer2','layer3','layer4','fc']    
]
# layers = ['layer1','layer2','layer3','layer4','fc']
# Flat variants; apparently unused in this half of the file.
layers1 = ['layer1']
layers2 = ['layer1','layer2']
layers3 = ['layer1','layer2','layer3']
layers4 = ['layer1','layer2','layer3','fc']  # NOTE(review): omits 'layer4' — confirm intentional
def KD(input_p,input_q,T = 1):
    """Distillation loss: batchmean KL divergence between the
    temperature-softened distributions of two logit tensors."""
    log_p = F.log_softmax(input_p / T, dim=1)
    soft_q = F.softmax(input_q / T, dim=1)
    divergence = nn.KLDivLoss(reduction="batchmean")
    return divergence(log_p, soft_q)

class LocalUpdate_Fedloss(object):
    """Client-side trainer that runs local epochs on a single user's split."""
    def __init__(self,args,dataset=None,idxs = None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader restricted to this client's sample indices.
        self.ldr_train = DataLoader(DatasetSplit(dataset,idxs),self.args.local_bs,shuffle=True)
    def train(self,round,net):
        """Train `net` for args.local_ep epochs; return the updated state_dict.

        `round` is the communication round, used only for SGD LR decay.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # LR decays exponentially with the communication round.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr*(self.args.lr_decay**round),
                                        momentum=self.args.momentum,weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        epoch_loss = []
        for iter in range(self.args.local_ep):
            batch_loss = []
            for batch_idx,(images,labels) in enumerate(self.ldr_train):
                images,labels = images.to(self.args.device),labels.to(self.args.device)
                net.zero_grad()
                # The model returns a dict; 'output' holds the classification logits.
                out_of_local = net(images)
                log_probs = out_of_local['output']
                loss = self.loss_func(log_probs,labels)
                loss.backward()
                optimizer.step()

                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))

        return net.state_dict()

class SeverUpdate_Fedloss(object):
    """Server-side blockwise mutual-distillation trainer.

    For each group, keeps `num_block` deep copies of the group's model;
    copy j trains only the stem (`layers0`) plus the sub-blocks listed in
    `layers[j]`.  train() optimizes each copy on the globally shared data
    with a cross-entropy loss plus a KL distillation term against the
    matching intermediate representation of one randomly chosen other
    group, visiting blocks from deepest to shallowest.
    """
    def __init__(self,args,net_list,round ,dataset_global=None,dict_global = None):
        """
        Args:
            args: experiment configuration namespace.
            net_list: one model per group (length args.num_groups).
            round: current communication round, used only for SGD LR decay.
            dataset_global: dataset holding the globally shared samples.
            dict_global: indices of dataset_global used for server training.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        self.net_state = []  # populated by train(): one state_dict per group
        self.num_groups = args.num_groups
        self.num_block = args.num_block
        # models[i][j]: group i's copy that trains the stem + layers[j].
        self.models = [[] for _ in range(self.num_groups)]
        # optimizer[i][j] drives models[i][j].
        self.optimizer = [[] for _ in range(self.num_groups)]
        # DataLoader over the globally shared sample indices.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global,dict_global),batch_size=self.args.sever_bs,shuffle=True)
        self.alpha = args.alpha  # weight of the distillation term
        for i in range(self.num_groups):
            model = net_list[i].to(self.args.device)
            for j in range(self.num_block):
                self.models[i].append(copy.deepcopy(model))
                # Freeze everything except the stem ...
                for name,p in self.models[i][j].named_parameters():
                    p.requires_grad = name in layers0
                # ... then re-enable the sub-blocks assigned to depth j.
                for name,p in self.models[i][j].named_parameters():
                    if any(layer in name for layer in layers[j]):
                        p.requires_grad = True
                # Frozen params receive no grads, so the optimizer skips them.
                if self.args.optimizer == 'sgd':
                    optimizer = torch.optim.SGD(self.models[i][j].parameters(), lr=self.args.lr*(self.args.lr_decay**round),
                                            momentum=self.args.momentum,weight_decay=self.args.weight_decay)
                elif self.args.optimizer == 'adam':
                    optimizer = torch.optim.Adam(self.models[i][j].parameters(), lr=self.args.lr)
                self.optimizer[i].append(optimizer)
    def train(self):
        """Run args.sever_ep epochs of blockwise distillation.

        Blocks are visited from deepest (num_block-1) to shallowest (0);
        after finishing a block, its weights seed the next (shallower)
        copy so subsequent training starts from updated parameters.
        Returns one state_dict per group, taken from the deepest copy.
        """
        for group_models in self.models:
            for model in group_models:
                model.train()
        for _ in range(self.args.sever_ep):
            for i in range(self.num_groups):
                for k in range(self.num_block-1,-1,-1):
                    for images,labels in self.gdr_train:
                        images,labels = images.to(self.args.device),labels.to(self.args.device)
                        images,labels = Variable(images),Variable(labels)
                        # Forward every group's depth-k copy on this batch.
                        outputs = [group_models[k](images) for group_models in self.models]
                        ce_loss = self.loss_func(outputs[i]['output'],labels)
                        # Distill from one randomly chosen other group.
                        peers = [l for l in range(self.num_groups) if l != i]
                        j = np.random.choice(peers,1,replace=False)[0]
                        # NOTE(review): Variable() does not detach, so gradients
                        # can also flow into group j's copy — confirm intended.
                        kl_loss = KD(outputs[i]['representation'+str(k+1)],Variable(outputs[j]['representation'+str(k+1)]),self.args.temp)
                        loss = ce_loss + self.alpha * kl_loss
                        self.optimizer[i][k].zero_grad()
                        loss.backward()
                        self.optimizer[i][k].step()
                    # Seed the next (shallower) copy with the weights just trained.
                    # BUGFIX: previously indexed models[i][k+1], which is out of
                    # range at k == num_block-1 (IndexError whenever num_block > 1);
                    # the k != 0 guard matches copying into models[i][k-1].
                    if k != 0:
                        self.models[i][k-1].load_state_dict(self.models[i][k].state_dict())
        for group_models in self.models:
            self.net_state.append(copy.deepcopy(group_models[self.num_block-1].state_dict()))
        return self.net_state
        


        

def Fedloss(args,net_list,dataset_train,dataset_test,dict_users,dict_global):
    """Main federated training loop over grouped models (second variant).

    Each round: split the sampled clients evenly across the groups, train
    each group's model locally, aggregate within each group, then either
    average all groups into one global model or run server-side blockwise
    distillation (SeverUpdate_Fedloss).  Records the test accuracy of the
    averaged global model every round and saves it at the end.
    """
    num_net = len(net_list)
    # for i in range(num_net):
    #     net_list[i].train()
    if num_net != args.num_groups:
        exit("error num_groups")
    glob_net = copy.deepcopy(net_list[0])
    acc = []
    for iter in range(args.epochs):

        print('*'*80)
        print('Round {:3d}'.format(iter))

        # Number of clients sampled this round.
        m = max(int(args.frac*args.num_users),1)

        idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        # Split the sampled clients into num_net equal contiguous groups;
        # any remainder clients (m % num_net) are dropped this round.
        group_idxs_users = []
        for i in range(num_net):
            group_users = []
            start = i*int(m/num_net)
            end = (i+1)*int(m/num_net)
            for j in range(start,end):
                group_users.append(idxs_users[j])
            group_idxs_users.append(group_users)
        # # idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        # list_users = [x for x in range(args.num_users)]
        # group_idxs_users = []
        # m1 = max(int(args.frac*args.num_users*args.group1_frac),1)
        # # group1_idxs_users = np.random.choice(range(0,int(args.num_users*args.group1_frac)),m1,replace=False)
        # group1_idxs_users = np.random.choice(list_users,m1,replace=False)
        # for x in group1_idxs_users:
        #     list_users.remove(x)
        # m2 = max(int(args.frac*args.num_users*args.group2_frac),1)
        # # group2_idxs_users = np.random.choice(range(int(args.num_users*args.group1_frac),int(args.num_users*(args.group1_frac+args.group2_frac))),m2,replace=False)
        # group2_idxs_users = np.random.choice(list_users,m2,replace=False)
        # # m3 = max(int(args.frac*args.num_users*args.group3_frac),1)
        # # group3_idxs_users = np.random.choice(range(int(args.num_users*(args.group1_frac+args.group2_frac)),args.num_users),m3,replace=False)
        # group_idxs_users.append(group1_idxs_users) 
        # group_idxs_users.append(group2_idxs_users)
        # # group_idxs_users.append(group3_idxs_users)
        w_globals = []
        w_list = []
        for id in range(args.num_groups):
            w_locals = []
            lens = []
            for idx in group_idxs_users[id]:
                local = LocalUpdate_Fedloss(args=args,dataset=dataset_train,idxs=dict_users[idx])

                w_local = local.train(round=iter,net=copy.deepcopy(net_list[id]).to(args.device))
                w_locals.append(w_local)
                lens.append(len(dict_users[idx]))
            # aggregation in group (weighted by each client's sample count)
            if len(lens) == 1:
                w_global = w_locals[0]
            else:
                w_global = Aggregation(w_locals,lens)
            # w_global = Aggregation(w_locals,lens)
            net_list[id].load_state_dict(w_global)
            w_globals.append(copy.deepcopy(w_global))

        # Uniform average of the per-group models -> candidate global model.
        agg_weights = [] 
        for i in range(num_net):
            agg_weights.append(1.0)
        w_global = Aggregation(w_globals,agg_weights)
        # Warm-up: for the first 50 rounds, and on every round with
        # iter % 5 == 4 afterwards, broadcast the plain average; otherwise
        # run the server-side distillation branch.
        if iter < 50 or iter % 5 == 4:
            for i in range(num_net):
                net_list[i].load_state_dict(w_global)
        else:  
            net_list_glob = []
            for id in range(args.num_groups):
                net_list_glob.append(copy.deepcopy(net_list[id]))          
            sever = SeverUpdate_Fedloss(args=args,net_list = net_list_glob,round=iter,dataset_global=dataset_train,dict_global=dict_global)
            w_list = sever.train()
            for i in range(num_net):
                net_list[i].load_state_dict(w_list[i])
        # for i in range(args.num_groups):
        #     net_list[i].load_state_dict(w_list[i])



        # acc.append(test_single(net_list,dataset_test,args))
        # Evaluation always uses the uniformly averaged global model.
        glob_net.load_state_dict(w_global)
        acc.append(test_single(glob_net,dataset_test,args))

    save_result(acc,'test_acc',args)       


def test(net_list,dataset_test,args):
    """Run test_img on `net_list`; print and return the test accuracy."""
    acc_value, loss_value = test_img(net_list, dataset_test, args)
    msg = "Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_value, loss_value)
    print(msg)
    return acc_value.item()

def test_single(net_glo,dataset_test,args):
    """Evaluate a single (averaged) model; print and return its accuracy."""
    acc_value, loss_value = test_img_avg(net_glo, dataset_test, args)
    msg = "Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(acc_value, loss_value)
    print(msg)
    return acc_value.item()


        


