import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.model_creator import *
from models.ResNet_Dery import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable
from sim.block_meta import MODEL_BLOCKS,MODEL_ZOO,PLANES 
from collections import OrderedDict
import wandb


def KD(input_p, input_q, T=1):
    """Knowledge-distillation loss between two logit tensors.

    Computes batch-mean KL(softmax(input_q / T) || softmax(input_p / T)):
    `input_p` provides the log-probabilities (student side of KLDivLoss),
    `input_q` the target probabilities.

    Args:
        input_p: logits passed through log_softmax, shape (batch, classes).
        input_q: logits passed through softmax as the soft target.
        T: softmax temperature (default 1, i.e. plain softmax).

    Returns:
        Scalar tensor with the batch-mean KL divergence.
    """
    log_probs = F.log_softmax(input_p / T, dim=1)
    soft_targets = F.softmax(input_q / T, dim=1)
    return nn.KLDivLoss(reduction="batchmean")(log_probs, soft_targets)

class LocalUpdate_FedDery(object):
    """Local client trainer for FedDery.

    Wraps one client's data shard in a DataLoader and runs
    ``args.local_ep`` epochs of supervised cross-entropy training,
    returning the updated model weights.
    """

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs),
                                    self.args.local_bs, shuffle=True)

    def train(self, round, net, flag):
        """Train ``net`` on this client's shard and return its state_dict.

        Args:
            round: global communication round, used for SGD lr decay
                (lr * lr_decay ** round).
            net: model whose forward returns a dict with key 'output'.
            flag: True -> all parameters trainable; False -> only
                parameters whose name contains 'concat' are trained,
                everything else is frozen.

        Returns:
            The trained model's ``state_dict()``.
        """
        net.train()
        if flag:
            for param in net.parameters():
                param.requires_grad = True
        else:
            # Bug fix: the original wrote `param.requires_grad_ = True/False`,
            # which merely creates a new attribute shadowing the in-place
            # Tensor.requires_grad_() method and leaves requires_grad
            # untouched (so nothing was actually frozen). Set the flag
            # attribute itself.
            for name, param in net.named_parameters():
                param.requires_grad = 'concat' in name

        # Build the optimizer after the freeze decision so frozen params
        # simply never receive gradients.
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)

        epoch_loss = []
        for _ in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                # Skip size-1 batches: BatchNorm cannot compute batch
                # statistics on a single sample in train mode.
                if images.size(0) <= 1:
                    continue
                net.zero_grad()
                log_probs = net(images)['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            # Guard against ZeroDivisionError when every batch was skipped.
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()
#######



def FedDery(args,net_zoo,dataset_train,dataset_test,dict_users,dict_global):
    """Run the FedDery federated-training loop.

    Splits every backbone in ``net_zoo`` into ``args.num_block`` blocks and
    builds a num_model x num_model grid of combined models (first two blocks
    from model i, remaining blocks from model j).  Each round: samples
    ``num_group`` model "trees", trains their grid models on sampled clients,
    aggregates block weights per group weighted by client shard sizes,
    reloads them into the grid, and evaluates every combined model on
    ``dataset_test``.  Per-model accuracy histories are logged to wandb
    every round and saved at the end.

    NOTE(review): ``dict_global`` is never used in this function — confirm
    whether the parameter is still needed by callers.
    """
    block_list = []
    acc = []
    num_group = 2  # number of model trees trained per communication round
    pre_model = 2
    num_model = len(net_zoo)    
    # One accuracy history per (i, j) cell of the model grid.
    for i in range(num_model*num_model):
        acc.append([])
    turntable = int(args.frac*args.num_users)  # NOTE(review): unused below
    block_list = split_block(copy.deepcopy(net_zoo),args.num_block)
    net_test = []
    model_choice = []  # indices of the available base models
    for i in range(num_model):
        net_test.append(CombinedSingleModel(block_list[i],i).to(args.device))
        model_choice.append(i)
    # net_test[0].load_state_dict(torch.load('./params/resnet0_params1.pth'),strict = False)
    # net_test.append(CombinedSingleModel(block_list[1]).to(args.device))
    # net_test[1].load_state_dict(torch.load('./params/resnet1_params1.pth'),strict = False)
    # Re-split on the combined models, whose children are named 'blockN'.
    block_list = split_block_simple(copy.deepcopy(net_test),args.num_block)
    
    user_index = num_model * pre_model  # NOTE(review): unused below

    model_list = []   # model_list[i][j]: head blocks from model i, tail from model j
    concat_para = []  # latest aggregated state_dict per grid cell
    for i in range(num_model):
        model_list.append([])
        concat_para.append([])
        for j in range(num_model):
            choose = []
            block_choice = []
            for k in range(args.num_block):
                # First two blocks come from model i, the rest from model j.
                if k < 2:
                    choose.append(i)
                else:
                    choose.append(j)
                block_choice.append(copy.deepcopy(block_list[choose[k]][k][0]))
            print(choose)
            model_list[i].append(CombinedModel(block_list=block_choice,choose = choose,planes=PLANES).to(args.device))
            concat_para[i].append(None)
    for i in range(num_model):
        for j in range(num_model):
            total = sum([param.nelement() for param in model_list[i][j].parameters()])
            # Exact conversion: 1MB = 1024KB = 1,048,576 bytes.
            print('number of parameter of model{}{}: {:.4f}M'.format(i,j,(total / 1e6)))
            # print('modelNumber of parameter: % .4fM' % (total / 1e6))
    for iter in range(args.epochs):
        print('*'*80)
        print('Round {:3d}'.format(iter))
        m = max(int(args.frac*args.num_users),1)  # clients trained per group
        # m = num_model * num_group * pre_model
        # idxs_users = np.random.choice(range(args.num_users),m,replace=False)
        para_list = []  # para_list[group]: local state_dicts collected this round
        for i in range(num_group):
            para_list.append([])
        block_choice = []  

        ####choose the model tree
        model_choose = np.random.choice(model_choice,num_group,replace=False)
        print(model_choose)
        # if iter %2 == 0:
        #     flag1 = False
        #     flag2 = True
        # else:
        #     flag1 = True
        #     flag2 = False
        flag1 = True  # train all parameters during local updates
        flag2 =True   # NOTE(review): unused below
        count1 = 0    # NOTE(review): unused below
        count2 = 0    # NOTE(review): unused below
        choose = []   # per-trained-client block-to-model assignment
        lens = []     # lens[group]: client shard sizes (aggregation weights)
        for i in range(num_group):
            lens.append([])
            # Sample three client tiers sized by the clientX_frac ratios.
            # NOTE(review): the three sampling ranges all end at num_users, so
            # the tiers overlap on the tail of the user index space and the
            # same user can be drawn for more than one tier — confirm intended.
            m1 = int(m * args.client1_frac)
            m2 = int(m * args.client2_frac)
            m3 = int(m * args.client3_frac)
            idxs_users = []
            idxs_users.append(np.random.choice(range(0,int(args.num_users)),m1,replace=False))
            idxs_users.append(np.random.choice(range(int(args.num_users*args.client1_frac),int(args.num_users)),m2,replace=False))
            idxs_users.append(np.random.choice(range(int(args.num_users*(args.client1_frac+args.client2_frac)),int(args.num_users)),m3,replace=False))
            temp = 0  # NOTE(review): unused in this loop
            for index,client_list in enumerate(idxs_users):
                for num,idx in enumerate(client_list):
                    # A tier-`index` client trains grid model
                    # (model_choose[i], index): head blocks from the chosen
                    # tree, tail blocks from base model `index`.
                    choose.append([model_choose[i],model_choose[i],index,index])
                    local = LocalUpdate_FedDery(args=args,dataset=dataset_train,idxs=dict_users[idx])
                    w_local = local.train(round=iter,net=copy.deepcopy(model_list[model_choose[i]][index].to(args.device)),flag=flag1)
                    para_list[i].append(w_local)
                    lens[i].append(len(dict_users[idx]))
        
        print(choose)
        # group_num = int(m/num_group)
        # block_para_group[group][model][block]: list of per-client
        # state-dict slices for that block, filled below.
        block_para_group = []
        for p in range(num_group):
            block_para_group.append([])
            for i in range(num_model):
                block_para_group[p].append([])
                for j in range(args.num_block):
                    block_para_group[p][i].append([])
        ######################################
        # count[model][block]: write cursor into the per-block slot lists.
        # NOTE(review): hard-coded 3x4 shape assumes num_model <= 3 and
        # args.num_block <= 4 — confirm it matches the configured sizes.
        count = [[0,0,0,0],
                    [0,0,0,0],
                    [0,0,0,0]]
        for i in range(m*num_group):
            q =  int(i / m)  # group index of trained client i
            for j in range(args.num_block):
                # Pre-allocate one OrderedDict slot per contribution.
                block_para_group[q][choose[i][j]][j].append(OrderedDict())
                # if choose[i][j] == 0:
                #     block_para_group[p][0][j].append(OrderedDict())
                # elif choose[i][j] == 1:
                #     block_para_group[p][1][j].append(OrderedDict())
                # elif choose[i][j] == 2:
                #     block_para_group[p][2][j].append(OrderedDict())
        #########################################
        # Scatter every client's state_dict into per-(model, block) slices,
        # keyed on the 'blockN' prefix of each parameter name.
        for i in range(m*num_group):
            q = int(i / m)
            # Reset the cursors when crossing into the second group.
            # NOTE(review): this single reset at i == m is only correct for
            # num_group == 2 — confirm if num_group can change.
            if i == m:
                count = [[0,0,0,0],
                    [0,0,0,0],
                    [0,0,0,0]]
            for j in range(args.num_block):
                select_model = choose[i][j]
                # for key,para in model_list[i].state_dict().items():
                for key,para in para_list[q][i%m].items():
                    if 'block{:d}'.format(j+1) in key: 
                        block_para_group[q][select_model][j][count[select_model][j]][key] = para
                count[select_model][j] = count[select_model][j] + 1
                # print(count)
        ###############
        # count = [[0,0,0,0],
        #         [0,0,0,0]]
        # for i in range(group_num,m):
        #     for j in range(args.num_block):
        #         if choose[i][j] == 0:
        #             block_para_2[0][j].append(OrderedDict())
        #         elif choose[i][j] == 1:
        #             block_para_2[1][j].append(OrderedDict())
        # for i in range(group_num,m):
        #     for j in range(args.num_block):
        #         select_model = choose[i][j]
        #         for key,para in model_list[i].state_dict().items():
        #             if 'block{:d}'.format(j+1) in key: 
        #                 block_para_2[select_model][j][count[select_model][j]][key] = para
        #         count[select_model][j] = count[select_model][j] + 1        
        # para_avg_1 = []
        # para_avg_2 = []
        # Weighted-average each (group, model, block) slice, weights = shard sizes.
        para_avg = []
        for p in range(num_group):
            para_avg.append([])
            for i in range(num_model):
                para_avg[p].append([])
                for j in range(args.num_block):
                    para_avg[p][i].append([])
                    # para_avg[p][i][j] = AggregationNoData(block_para_group[p][i][j])
                    para_avg[p][i][j] = Aggregation(block_para_group[p][i][j],lens[p])

        # for i in range(num_model):
        #     for j in range(args.num_block):
        #         if para_avg[i][j] is not None:
        #             net_test[i].load_state_dict(para_avg[i][j],strict = False)
        # for p in range(num_group):

        # Load the aggregated blocks back into each trained tree's grid row:
        # head blocks (j < 2) come from the tree itself, tail blocks from
        # the column model y.
        for x in range(num_group):
            for y in range(num_model):
                for j in range (args.num_block):
                    if j < 2:
                        model_list[model_choose[x]][y].load_state_dict(para_avg[x][model_choose[x]][j],strict = False)
                        # continue
                    else:
                        model_list[model_choose[x]][y].load_state_dict(para_avg[x][y][j],strict = False)
                # NOTE(review): indexes with model_choice[x] (== x), not the
                # sampled model_choose[x] used just above — confirm which row
                # of concat_para is meant to cache these weights.
                concat_para[model_choice[x]][y] = model_list[model_choose[x]][y].state_dict()


        # block_list = split_block_simple(copy.deepcopy(net_test),args.num_block)
        
        acc_dict = {}
        # for i in range(num_model):
        #     # print(net_test[i])
        #     a = test_single(net_test[i],dataset_test,args)
        #     acc_dict['accuracy{}'.format(i+1)] = a
        #     acc[i].append(a)
        ####
        # Evaluate every grid model and log flattened accuracies to wandb.
        temp = 0
        for i in range(num_model):
            for j in range(num_model):
                test_a = test_single(model_list[i][j],dataset_test,args)
                acc_dict['accuracy_local{}'.format(temp+1)] = test_a
                acc[temp].append(test_a)
                temp = temp + 1
        wandb.log(acc_dict)
    save_result(acc,'test_acc',args)   
    return

def split_block(net_zoo, num_block):
    """Partition every network in ``net_zoo`` into ``num_block`` Sequentials.

    The layer names belonging to block j of model i are read from
    MODEL_BLOCKS[MODEL_ZOO[i]][j].  A name of the form "parent.k" selects
    element k of a Sequential child named like "parent"; a plain name
    selects the matching child module itself (matching is by substring on
    the child's name).

    Returns:
        Nested list where result[i][j] is an nn.Sequential containing the
        modules of block j of model i.
    """
    block_list = []
    for i, net in enumerate(net_zoo):
        per_model = []
        for j in range(num_block):
            members = []
            for layer in MODEL_BLOCKS[MODEL_ZOO[i]][j]:
                pieces = layer.split('.')
                top = pieces[0]
                sub = pieces[1] if len(pieces) > 1 else None
                for name, child in net.named_children():
                    if top in name:
                        if isinstance(child, torch.nn.Sequential):
                            members.append(child[int(sub)])
                        else:
                            members.append(child)
            per_model.append(members)
        block_list.append(per_model)
    return [[nn.Sequential(*members) for members in per_model]
            for per_model in block_list]

def split_block_simple(net_zoo, num_block):
    """Group each network's direct children by 'blockN' name.

    A child whose name contains 'block{j+1}' is assigned to block j.

    Returns:
        Nested list where result[i][j] is the list of child modules of
        net_zoo[i] whose names contain 'block{j+1}'.
    """
    result = []
    for net in net_zoo:
        per_model = []
        for j in range(num_block):
            tag = 'block{:d}'.format(j + 1)
            per_model.append([child for name, child in net.named_children()
                              if tag in name])
        result.append(per_model)
    return result
def test_single(net_glo, dataset_test, args):
    """Evaluate one model on the test set, print metrics, return accuracy.

    Delegates to ``test_img_avg`` and converts the accuracy tensor to a
    plain Python float via ``.item()``.
    """
    accuracy, test_loss = test_img_avg(net_glo, dataset_test, args)
    msg = "Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, test_loss)
    print(msg)
    return accuracy.item()
