import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.model_creator import *
from models.ResNet_Dery import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable
from sim.block_meta import MODEL_BLOCKS,MODEL_ZOO,PLANES 
from collections import OrderedDict
import wandb


def KD(input_p, input_q, T=1):
    """Knowledge-distillation loss between two logit tensors.

    Computes KL(softmax(input_q/T) || softmax(input_p/T)) with the
    'batchmean' reduction, where ``input_p`` plays the student role
    (log-probabilities) and ``input_q`` the teacher role (probabilities).

    Args:
        input_p: student logits, shape (batch, classes).
        input_q: teacher logits, shape (batch, classes).
        T: softmax temperature (default 1). Note: no T**2 rescaling is applied.

    Returns:
        Scalar KL-divergence tensor.
    """
    student_log_prob = F.log_softmax(input_p / T, dim=1)
    teacher_prob = F.softmax(input_q / T, dim=1)
    divergence = nn.KLDivLoss(reduction="batchmean")
    return divergence(student_log_prob, teacher_prob)

class LocalUpdate_FedDery(object):
    """Local client trainer for FedDery.

    Trains a (possibly block-composed) client model on this client's shard
    of the training data.  When ``flag`` is False, only parameters whose
    name contains 'concat' (the fusion layers) are updated; everything
    else is frozen.
    """

    def __init__(self, args, dataset=None, idxs=None):
        # args: experiment configuration (optimizer, lr, lr_decay, momentum,
        #       weight_decay, local_bs, local_ep, device, ...).
        # dataset / idxs: full training set plus the index subset owned by
        #       this client (wrapped by DatasetSplit).
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net, flag):
        """Run ``args.local_ep`` epochs of local training.

        Args:
            round: current communication round; used for SGD lr decay
                   (lr * lr_decay**round).
            net:   model to train in place; its forward returns a dict with
                   the logits under key 'output'.
            flag:  True  -> all parameters are trainable;
                   False -> only 'concat' parameters are trainable.

        Returns:
            The trained model's state_dict.
        """
        net.train()
        if flag:
            for param in net.parameters():
                param.requires_grad = True
        else:
            for name, param in net.named_parameters():
                # BUGFIX: the original wrote ``param.requires_grad_ = ...``,
                # which assigns a plain attribute shadowing the in-place
                # method Tensor.requires_grad_ and never froze anything.
                param.requires_grad = 'concat' in name

        # Hand only trainable parameters to the optimizer (consistent with
        # LocalUpdate_FedDery_Frozen below); frozen params then receive
        # neither gradient nor weight-decay updates.
        trainable = filter(lambda p: p.requires_grad, net.parameters())
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(trainable, lr=self.args.lr*(self.args.lr_decay**round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(trainable, lr=self.args.lr)

        epoch_loss = []
        for ep in range(self.args.local_ep):
            batch_loss = []
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                out_of_local = net(images)
                log_probs = out_of_local['output']  # model forward returns a dict of outputs
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()

                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))

        return net.state_dict()
#######

class LocalUpdate_FedDery_Frozen(object):
    """Local client trainer that always freezes everything except the
    'concat' (fusion) parameters.

    Unlike LocalUpdate_FedDery, there is no ``flag``: the block parameters
    are permanently frozen and only the concat layers are trained.
    """

    def __init__(self, args, dataset=None, idxs=None):
        # args: experiment configuration (optimizer, lr, lr_decay, momentum,
        #       weight_decay, local_bs, local_ep, device, ...).
        # dataset / idxs: full training set plus this client's index subset.
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train only the 'concat' parameters for ``args.local_ep`` epochs.

        Args:
            round: current communication round; used for SGD lr decay.
            net:   model to train in place; forward returns a dict with
                   logits under 'output'.

        Returns:
            The trained model's state_dict.
        """
        net.train()
        for name, param in net.named_parameters():
            # BUGFIX: the original wrote ``param.requires_grad_ = ...``,
            # which creates a plain attribute shadowing the in-place method
            # Tensor.requires_grad_; nothing was actually frozen and the
            # ``filter`` below selected every parameter.
            param.requires_grad = 'concat' in name

        trainable = filter(lambda p: p.requires_grad, net.parameters())
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(trainable, lr=self.args.lr*(self.args.lr_decay**round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(trainable, lr=self.args.lr)

        ##### frozen training
        epoch_loss = []
        for ep in range(self.args.local_ep):
            batch_loss = []
            for batch_idx, (images, labels) in enumerate(self.ldr_train):
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                out_of_local = net(images)
                log_probs = out_of_local['output']  # model forward returns a dict of outputs
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()

                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))
        return net.state_dict()


def FedDery(args,net_zoo,dataset_train,dataset_test,dict_users,dict_global):
    """Main federated training loop for FedDery with block-composed models.

    Each model in ``net_zoo`` is split into ``args.num_block`` blocks.
    Every round, the selected clients are partitioned into four hard-coded
    groups (boundaries ``user_index1``..``user_index3``); each group trains
    a model assembled from a fixed per-block choice between the zoo models.
    After local training, 'concat' (fusion) parameters are averaged per
    group, block parameters are averaged per (model, block) separately over
    the first and second half of the clients, and the averages are loaded
    back into the client models.

    Args:
        args: experiment configuration (epochs, frac, num_users, num_block,
            device, optimizer settings, ...).
        net_zoo: list of zoo models to split into blocks.
        dataset_train / dataset_test: training and test datasets.
        dict_users: mapping client id -> indices of that client's local data.
        dict_global: indices of server-held data (only referenced by the
            commented-out server-side distillation code).

    NOTE(review): several structures assume fixed sizes — the ``count``
    tables assume num_block == 4, and the group boundaries / ``i < 10``
    branch assume 10 selected clients per round (frac * num_users == 10).
    Confirm against the run configuration.
    """
    block_list = []
    acc = []

    # One test-accuracy history per zoo model.
    num_model = len(net_zoo)
    for i in range(num_model):
        acc.append([])
    # NOTE(review): ``turntable`` is computed but never used.
    turntable = int(args.frac*args.num_users)
    # Split the raw zoo models into per-model block lists, then rebuild
    # reference models (net_test) from those blocks.
    block_list = split_block(copy.deepcopy(net_zoo),args.num_block)
    net_test = []
    for i in range(num_model):
        net_test.append(CombinedSingleModel(block_list[i]).to(args.device))
    # net_test[0].load_state_dict(torch.load('./params/resnet0_params1.pth'),strict = False)
    # net_test.append(CombinedSingleModel(block_list[1]).to(args.device))
    # net_test[1].load_state_dict(torch.load('./params/resnet1_params1.pth'),strict = False)
    block_list = split_block_simple(copy.deepcopy(net_test),args.num_block)
    # Per-group averaged 'concat' parameters, carried across rounds.
    concat_para1 = None
    concat_para2 = None
    concat_para3 = None
    concat_para4 = None
    # Hard-coded group boundaries over the selected-client index.
    user_index1 = 3
    user_index2 = 5
    user_index3 = 8
    for iter in range(args.epochs):
        print('*'*80)
        print('Round {:3d}'.format(iter))
        m = max(int(args.frac*args.num_users),1)
        idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        model_list = []
        para1_list  =[]
        para2_list = []
        para3_list  =[]
        para4_list = []
        block_choice = []
        choose = []
        block_para_1 = []
        block_para_2 = []
        # ---- build each client's model from a fixed per-group block pattern ----
        # NOTE(review): ``iter > -1`` is always true; the else branch is dead
        # code left over from an earlier variant that special-cased round 0.
        if iter > -1:
            for i in range(m):
                choose.append([])
                block_choice = []
                if i < user_index1:
                    # Group 1: blocks 1-2 from model 0, blocks 3+ from model 1.
                    for j in range(args.num_block):
                        # choose[i].append(np.random.choice([0,1],1,replace=True)[0])
                        # choose[i].append(0)
                        if j  < 2:
                            choose[i].append(0)
                        else:
                            choose[i].append(1)
                        block_choice.append(copy.deepcopy(block_list[choose[i][j]][j][0]))
                elif i < user_index2:
                    # Group 2: all blocks from model 0.
                    for j in range(args.num_block):
                        # choose[i].append(np.random.choice([0,1],1,replace=True)[0])
                        # choose[i].append(0)
                        if j  < 2:
                            choose[i].append(0)
                        else:
                            choose[i].append(0)
                        block_choice.append(copy.deepcopy(block_list[choose[i][j]][j][0]))
                elif i < user_index3:
                    # Group 3: blocks 1-2 from model 1, blocks 3+ from model 0.
                    for j in range(args.num_block):
                        # choose[i].append(0)
                        if j  < 2:
                            choose[i].append(1)
                        else:
                            choose[i].append(0)
                        block_choice.append(copy.deepcopy(block_list[choose[i][j]][j][0]))
                else :
                    # Group 4: all blocks from model 1.
                    for j in range(args.num_block):
                        # choose[i].append(1)
                        if j  < 2:
                            choose[i].append(1)
                        else:
                            choose[i].append(1)
                        block_choice.append(copy.deepcopy(block_list[choose[i][j]][j][0]))
                print(choose[i])
                # NOTE(review): with m == 10 this branch is always taken.
                if i < 10:
                    model_list.append(CombinedModel(block_list=block_choice,choose = choose[i],planes=PLANES).to(args.device))
                else:
                    model_list.append(CombinedSingleModel(block_list=block_choice).to(args.device))
        else:
            # Dead branch (see note above): every client would get model 0's blocks.
            for i in range(m):
                choose.append([])
                block_choice = []
                for j in range(args.num_block):
                    if i < 5:
                        choose[i].append(0)
                    else:
                        choose[i].append(0)
                    block_choice.append(copy.deepcopy(block_list[choose[i][j]][j][0]))
                # print(block_choice[3])
                print(choose[i])
                model_list.append(CombinedSingleModel(block_list=block_choice).to(args.device))
        # print(model_list[0])
        # ---- warm-start each group's model with last round's averaged concat params ----
        if iter != 0:
            for i in range (m):
                if i < user_index1:
                    model_list[i].load_state_dict(concat_para1,strict = False)
                elif i< user_index2:
                    model_list[i].load_state_dict(concat_para2,strict = False)
                elif i < user_index3:
                    model_list[i].load_state_dict(concat_para3,strict = False)
                else:
                    model_list[i].load_state_dict(concat_para4,strict = False)

            # local = SeverUpdate_FedDery_KD(args=args,dataset=dataset_train,idxs = dict_global)
            # w_local1 = local.train(round=iter,net=copy.deepcopy(model_list[0].to(args.device)),net_t=copy.deepcopy(model_list[8].to(args.device)))
            # w_local2 = local.train(round=iter,net=copy.deepcopy(model_list[3].to(args.device)),net_t=copy.deepcopy(model_list[6].to(args.device)))
            # for i in range(m):
            #     if i < 3:
            #         model_list[i].load_state_dict(w_local1)
            #     elif i< 6:
            #         model_list[i].load_state_dict(w_local2)
        # Alternate full training and concat-only training between the two
        # halves of the groups: on even rounds groups 1-2 train only concat
        # params (flag=False) while groups 3-4 train everything, and vice
        # versa on odd rounds.
        if iter %2 == 0:
            flag1 = False
            flag2 = True
        else:
            flag1 = True
            flag2 = False
        # ---- local training on each selected client ----
        for index,idx in enumerate(idxs_users):
            if iter > -1:
                # local = LocalUpdate_FedDery_Frozen(args=args,dataset=dataset_train,idxs=dict_users[idx])
                # local = LocalUpdate_FedDery_KD(args=args,dataset=dataset_train,idxs=dict_users[idx])
                local = LocalUpdate_FedDery(args=args,dataset=dataset_train,idxs=dict_users[idx])
            # else :
                # local = LocalUpdate_FedDery(args=args,dataset=dataset_train,idxs=dict_users[idx])
            if index < user_index1:
                # w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),net_t=copy.deepcopy(net_test[1].to(args.device)))

                w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),flag=flag1)
                para1_list.append(w_local)
            elif index < user_index2:
                # w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),net_t=copy.deepcopy(net_test[0].to(args.device)))
                w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),flag=flag1)
                para2_list.append(w_local)
            elif index < user_index3:
                w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),flag = flag2)
                para3_list.append(w_local)
            else:
                w_local = local.train(round=iter,net=copy.deepcopy(model_list[index].to(args.device)),flag=flag2)
                para4_list.append(w_local)
            # concat_para = OrderedDict()
            # for key,para in w_local.items():
            #     if 'concat' in key:
            #         concat_para[key] = para
            # model_list[index].load_state_dict(concat_para,strict = False)
            # model_list[index].load_state_dict(w_local,strict = False)


        # ---- per-group averaging of the full local state dicts ----
        concat_para1 = AggregationNoData(para1_list)
        concat_para2 = AggregationNoData(para2_list)
        concat_para3 = AggregationNoData(para3_list)
        concat_para4 = AggregationNoData(para4_list)
        for i in range (m):
            if i < user_index1:
                model_list[i].load_state_dict(concat_para1,strict  = False)
            elif i < user_index2:
                model_list[i].load_state_dict(concat_para2,strict = False)
            elif i < user_index3:
                model_list[i].load_state_dict(concat_para3,strict = False)
            else:
                model_list[i].load_state_dict(concat_para4,strict = False)
        # ---- collect block parameters, separately for the first and second
        # half of the clients, keyed by (source model, block index) ----
        group_num = int(m/2)
        for i in range(num_model):
            block_para_1.append([])
            block_para_2.append([])
            for j in range(args.num_block):
                block_para_1[i].append([])
                block_para_2[i].append([])
        ######################################
        # NOTE(review): the count tables are hard-coded to 4 columns,
        # i.e. they assume args.num_block == 4.
        count = [[0,0,0,0],
                 [0,0,0,0]]
        # Pre-allocate one OrderedDict per (model, block) contribution.
        for i in range(group_num):
            for j in range(args.num_block):
                if choose[i][j] == 0:
                    block_para_1[0][j].append(OrderedDict())
                elif choose[i][j] == 1:
                    block_para_1[1][j].append(OrderedDict())
        #########################################
        # Copy each client's block{j+1} parameters into the slot for the
        # zoo model that block was drawn from.
        for i in range(group_num):
            for j in range(args.num_block):
                select_model = choose[i][j]
                for key,para in model_list[i].state_dict().items():
                    if 'block{:d}'.format(j+1) in key:
                        block_para_1[select_model][j][count[select_model][j]][key] = para
                count[select_model][j] = count[select_model][j] + 1
        ###############
        # Same collection for the second half of the clients.
        count = [[0,0,0,0],
                [0,0,0,0]]
        for i in range(group_num,m):
            for j in range(args.num_block):
                if choose[i][j] == 0:
                    block_para_2[0][j].append(OrderedDict())
                elif choose[i][j] == 1:
                    block_para_2[1][j].append(OrderedDict())
        for i in range(group_num,m):
            for j in range(args.num_block):
                select_model = choose[i][j]
                for key,para in model_list[i].state_dict().items():
                    if 'block{:d}'.format(j+1) in key:
                        block_para_2[select_model][j][count[select_model][j]][key] = para
                count[select_model][j] = count[select_model][j] + 1
        # ---- average each (model, block) group of parameters ----
        para_avg_1 = []
        para_avg_2 = []
        for i in range(num_model):
            para_avg_1.append([])
            para_avg_2.append([])
            for j in range(args.num_block):
                para_avg_1[i].append([])
                para_avg_1[i][j] = AggregationNoData(block_para_1[i][j])
                para_avg_2[i].append([])
                para_avg_2[i][j] = AggregationNoData(block_para_2[i][j])
                # para_avg[i][j] = block_para[i][j]

        # for i in range(num_model):
        #     for j in range(args.num_block):
        #         if para_avg[i][j] is not None:
        #             net_test[i].load_state_dict(para_avg[i][j],strict = False)
        # ---- load the averaged blocks back, matching each group's block
        # pattern, and snapshot one state dict per group for the next round ----
        for i in range(m):
            if i < user_index1:
                for j in range (args.num_block):
                    if j < 2:
                        model_list[i].load_state_dict(para_avg_1[0][j],strict=False)
                        # continue
                    else:
                        model_list[i].load_state_dict(para_avg_1[1][j],strict=False)
                concat_para1 = model_list[i].state_dict()
            elif i < user_index2 :
                for j in range (args.num_block):
                    if j < 2:
                        model_list[i].load_state_dict(para_avg_1[0][j],strict=False)
                    else:
                        model_list[i].load_state_dict(para_avg_1[0][j],strict=False)
                        # continue
                concat_para2 = model_list[i].state_dict()
            elif i < user_index3:
                for j in range (args.num_block):
                    if j < 2:
                        model_list[i].load_state_dict(para_avg_2[1][j],strict=False)
                    else:
                        model_list[i].load_state_dict(para_avg_2[0][j],strict=False)
                concat_para3 = model_list[i].state_dict()
            else:
                for j in range (args.num_block):
                    if j < 2:
                        model_list[i].load_state_dict(para_avg_2[1][j],strict=False)
                    else:
                        model_list[i].load_state_dict(para_avg_2[1][j],strict=False)
                concat_para4 = model_list[i].state_dict()

        # server = SeverUpdate_FedDery(args=args,net = copy.deepcopy(net_test[0]),round=iter,dataset_global = dataset_train,dict_global = dict_global)
        # w_1 = server.train()
        # net_test[0].load_state_dict(w_1)
        # server = SeverUpdate_FedDery(args=args,net = copy.deepcopy(net_test[1]),round=iter,dataset_global = dataset_train,dict_global = dict_global)
        # w_2 = server.train()
        # net_test[1].load_state_dict(w_2)

         # if iter == 100:
            #     torch.save(net_test[i].state_dict(),'./params/resnet{}_params1.pth'.format(i))

        # local = SeverUpdate_FedDery_KD(args=args,dataset=dataset_train,idxs = dict_global)
        # w_local1 = local.train(round=iter,net=copy.deepcopy(model_list[0].to(args.device)),net_t=copy.deepcopy(net_test[1].to(args.device)))
        # w_local2 = local.train(round=iter,net=copy.deepcopy(model_list[3].to(args.device)),net_t=copy.deepcopy(net_test[0].to(args.device)))
        # for i in range(m):
        #     if i < 3:
        #         model_list[i].load_state_dict(w_local1)
        #     elif i< 6:
        #         model_list[i].load_state_dict(w_local2)

        # NOTE(review): net_test is only re-split here, never retrained —
        # the server-side updates that would change it are commented out, so
        # the accuracy_cnn metrics below track the initial models. Confirm
        # this is intended.
        block_list = split_block_simple(copy.deepcopy(net_test),args.num_block)
        print(model_list[9])
        # ---- evaluation and logging ----
        acc_dict = {}
        for i in range(num_model):
            # print(net_test[i])
            a = test_single(net_test[i],dataset_test,args)
            acc_dict['accuracy_cnn{}'.format(i+1)] = a
            acc[i].append(a)
        ####
        for i in range(m):
            test_a = test_single(model_list[i],dataset_test,args)
            acc_dict['accuracy_local{}'.format(i+1)] = test_a
        wandb.log(acc_dict)
    save_result(acc,'test_acc',args)
    return

def split_block(net_zoo, num_block):
    """Partition every model in ``net_zoo`` into ``num_block`` Sequential blocks.

    The layer names belonging to block ``j`` of model ``i`` come from
    ``MODEL_BLOCKS[MODEL_ZOO[i]][j]``.  A spec of the form 'parent.k' selects
    element ``k`` of a Sequential child named like 'parent'; a bare name
    selects the whole child module.

    Args:
        net_zoo: list of models to split (modules are shared, not copied).
        num_block: number of blocks per model.

    Returns:
        Nested list ``result[i][j]`` = nn.Sequential for block j of model i.
    """
    result = []
    for idx, net in enumerate(net_zoo):
        spec_per_block = MODEL_BLOCKS[MODEL_ZOO[idx]]
        model_blocks = []
        for j in range(num_block):
            members = []
            for spec in spec_per_block[j]:
                if '.' in spec:
                    parts = spec.split('.')
                    parent, sub = parts[0], parts[1]
                else:
                    parent, sub = spec, None
                for child_name, child in net.named_children():
                    # NOTE(review): substring match, preserved from the
                    # original — 'layer1' would also match 'layer10'.
                    if parent in child_name:
                        if isinstance(child, torch.nn.Sequential):
                            members.append(child[int(sub)])
                        else:
                            members.append(child)
            model_blocks.append(members)
        result.append(model_blocks)
    # Wrap every collected member list into a single Sequential block.
    for model_blocks in result:
        for j in range(num_block):
            model_blocks[j] = nn.Sequential(*model_blocks[j])
    return result

def split_block_simple(net_zoo, num_block):
    """Group each model's direct children into blocks by name.

    For block index ``j`` (0-based), collects every direct child whose name
    contains the substring 'block{j+1}'.

    Args:
        net_zoo: list of models whose children are named 'block1', 'block2', ...
        num_block: number of blocks to collect per model.

    Returns:
        Nested list ``result[i][j]`` = list of child modules of model ``i``
        matching block ``j`` (modules are shared, not copied).
    """
    result = []
    for net in net_zoo:
        per_model = []
        for j in range(num_block):
            tag = 'block{:d}'.format(j + 1)
            # Substring match preserved from the original: e.g. a child
            # named 'block12' would also match tag 'block1'.
            per_model.append([child for name, child in net.named_children() if tag in name])
        result.append(per_model)
    return result
def test_single(net_glo, dataset_test, args):
    """Evaluate one model on the test set and return its accuracy.

    Delegates to ``test_img_avg``, prints the accuracy/loss pair, and
    returns the accuracy as a plain Python number (via ``.item()``).
    """
    accuracy, test_loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, test_loss))
    return accuracy.item()
