import torch
import torch.nn.functional as F
import wandb
from torch.utils.data import DataLoader
from torch import nn
import copy
import numpy as np
from models.Update import DatasetSplit
# from models.aggregation import Aggregation_DepthFL
from models.aggregation import get_model_list_depthfl,select_clients_s
from models.test import test_img_depthfl
from utils.save_result import save_result
from tqdm import  tqdm
from collections import OrderedDict

def KD(input_p, input_q, T=1):
    """Temperature-scaled KL-divergence distillation loss.

    Treats `input_p` as the student logits and `input_q` as the teacher
    logits: softens both with temperature `T`, then returns
    KL(softmax(q/T) || softmax(p/T)) averaged over the batch.

    Args:
        input_p: logits tensor of shape (batch, classes).
        input_q: logits tensor of the same shape.
        T: softmax temperature (default 1).

    Returns:
        A scalar tensor holding the batch-mean KL divergence.
    """
    student_log_probs = F.log_softmax(input_p / T, dim=1)
    teacher_probs = F.softmax(input_q / T, dim=1)
    divergence = nn.KLDivLoss(reduction="batchmean")
    return divergence(student_log_probs, teacher_probs)

class LocalUpdate_DepthFL(object):
    """Client-side local trainer for DepthFL.

    Trains a (possibly depth-reduced) multi-exit model on one client's data
    shard. The loss is the sum of per-exit cross-entropy terms plus a mutual
    knowledge-distillation term (weight 0.1) between every pair of exits.
    """

    def __init__(self, args, dataset=None, idxs=None):
        """Build the local data loader for the sample indices of one client.

        Args:
            args: experiment configuration (local_bs, lr, device, ...).
            dataset: the full training dataset shared by all clients.
            idxs: indices of this client's samples within `dataset`.
        """
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs, args),
                                    self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Run `args.local_ep` epochs of local training on `net`.

        Args:
            round: global communication round, used to decay the SGD lr
                as lr * lr_decay ** round.
            net: the model to train in place (already on args.device).

        Returns:
            The trained model's state_dict.

        Raises:
            ValueError: if args.optimizer is neither 'sgd' nor 'adam'
                (the original code would hit a NameError instead).
        """
        net.train()
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))
        epoch_loss = []
        for ep in range(self.args.local_ep):
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                if self.args.dataset == 'widar':
                    # widar labels arrive as a non-long dtype; CE needs int64
                    labels = labels.long()
                # skip degenerate batches (size <= 1 breaks BatchNorm statistics
                # — presumably why the original skipped them; confirm)
                if images.size(0) <= 1:
                    continue
                optimizer.zero_grad()
                loss = torch.zeros(1).to(self.args.device)
                # `net` returns one output per exit branch
                output_list = net(images)
                for i, branch_output in enumerate(output_list):
                    loss += self.loss_func(branch_output, labels)
                    if len(output_list) > 1:
                        # mutual distillation: each branch learns from every
                        # other branch's (detached) prediction
                        for j, output in enumerate(output_list):
                            if j == i:
                                continue
                            loss += (
                                    0.1
                                    * KD(branch_output, output.detach())
                                    / (len(output_list) - 1)
                            )
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            # guard: every batch may have been skipped above; the original
            # divided by len(batch_loss) == 0 and crashed in that case
            if batch_loss:
                epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()


def DepthFL(args, dataset_train, dataset_test, dict_users):
    """Run the DepthFL federated training loop.

    Each round, every sampled client locally trains a depth-matched copy of
    its assigned model; all client state dicts are then merged into one
    global state dict by sample-weighted per-key averaging, and every model
    depth is evaluated on the test set.

    Args:
        args: experiment configuration (epochs, frac, num_users, device, ...).
        dataset_train: full training dataset, partitioned via dict_users.
        dataset_test: held-out dataset for per-round global evaluation.
        dict_users: mapping client id -> indices of that client's samples.

    Side effects: updates the models in net_zoo in place and persists the
    per-depth accuracy curves via save_result.
    """
    net_zoo, _ = get_model_list_depthfl(args)
    acc_list = [[] for _ in net_zoo]
    for rnd in range(args.epochs):
        print('*' * 80)
        print('Round {:3d}'.format(rnd))
        # idxs_users: one client-id list per model depth
        # (NOTE(review): inferred from usage of select_clients_s — confirm)
        _, idxs_users = select_clients_s(args)
        lens = []
        para_list = []
        for index, client_list in enumerate(idxs_users):
            for idx in client_list:
                local = LocalUpdate_DepthFL(args=args, dataset=dataset_train,
                                            idxs=dict_users[idx])
                w_local = local.train(round=rnd,
                                      net=copy.deepcopy(net_zoo[index]).to(args.device))
                para_list.append(copy.deepcopy(w_local))
                # weight each client's update by its number of samples
                lens.append(len(dict_users[idx]))
        # aggregate heterogeneous-depth state dicts against the deepest model
        w_glob = Aggregation_DepthFL(para_list, lens, net_zoo[-1].state_dict())

        # Test every depth of the global model.
        acc_dict = {}
        for idx, net in enumerate(net_zoo):
            # strict=False: shallower models hold only a subset of w_glob's keys
            net_zoo[idx].load_state_dict(w_glob, strict=False)
            a, l = test_depthfl(net, dataset_test, args)
            acc_list[idx].append(a)
            acc_dict['accuracy_local{}'.format(idx + 7)] = a
        # wandb.log(acc_dict)
    save_result(acc_list, 'test_acc', args)



def Aggregation_DepthFL(w, lens, global_model_param):
    """Sample-size-weighted average of heterogeneous client state dicts.

    Client dicts may contain only a subset of the global keys (shallower
    models hold fewer layers), so each key is averaged only over the clients
    that actually carry it, weighted by that client's sample count.

    Args:
        w: list of client state dicts.
        lens: per-client sample counts, aligned with `w`.
        global_model_param: state dict of the deepest global model; supplies
            the key set and the fallback value for keys no client has.

    Returns:
        A state dict with the same keys as `global_model_param`.
    """
    w_avg = copy.deepcopy(global_model_param)
    for key, param in w_avg.items():
        accum = param.new_zeros(param.size(), dtype=torch.float32)
        total = 0
        for client_w, n in zip(w, lens):
            if key in client_w:
                accum += client_w[key] * n
                total += n
        # Keep the previous global value when no client carries this key;
        # the original code divided by zero here and crashed.
        if total > 0:
            w_avg[key] = accum / total
    return w_avg

def test_depthfl(net_glob, dataset_test, args):
    """Evaluate `net_glob` on `dataset_test`, print the accuracy, and
    return the (accuracy, loss) pair from test_img_depthfl."""
    acc, loss = test_img_depthfl(net_glob, dataset_test, args)
    print("Testing accuracy: {:.2f}".format(acc))
    return acc, loss