import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch import nn 
import copy
import numpy as np
from models.Update import DatasetSplit
from utils.save_result import save_result
from models.aggregation import *
from models.test import test_img
from models.test import test_img_avg
from torch.autograd import Variable
import wandb
def KD(input_p,input_q,T = 1):
    """Batch-mean KL divergence between temperature-scaled distributions.

    Computes KL(softmax(input_q/T) || softmax(input_p/T)): `input_p` supplies
    the log-probabilities (prediction side) and `input_q` the target
    probabilities, both softened by temperature T (default 1).
    """
    log_pred = F.log_softmax(input_p / T, dim=1)
    target = F.softmax(input_q / T, dim=1)
    divergence = nn.KLDivLoss(reduction="batchmean")
    return divergence(log_pred, target)

class LocalUpdate_FedMMD(object):
    """Client-side trainer for FedMMD: plain cross-entropy training on a
    client's local shard of the training dataset."""

    def __init__(self, args, dataset=None, idxs=None):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        # DataLoader over this client's subset of the global dataset.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), self.args.local_bs, shuffle=True)

    def train(self, round, net):
        """Train `net` for `args.local_ep` epochs and return its state_dict.

        round: current federated round, used for exponential lr decay
               (applied to SGD only, matching the Adam branch's fixed lr).
        net: the model to train in place on `args.device` data.
        Raises ValueError for an unrecognized `args.optimizer` (the original
        code would hit a NameError on `optimizer` later instead).
        """
        net.train()
        if self.args.optimizer == 'sgd':
            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum,
                                        weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        epoch_loss = []
        for _ in range(self.args.local_ep):  # `_`: avoid shadowing builtin `iter`
            batch_loss = []
            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                net.zero_grad()
                out_of_local = net(images)
                # The model returns a dict; 'output' holds the classification logits.
                log_probs = out_of_local['output']
                loss = self.loss_func(log_probs, labels)
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss) / len(batch_loss))

        return net.state_dict()

class  SeverUpdate_FedMMD(object):
    """Server-side distillation step for FedMMD.

    Trains a student model `net_s` to mimic a (fixed) teacher `net_t` on
    server-held data: KL losses are applied to three intermediate
    representations and to the output logits.
    """
    def __init__(self,args,net_s, net_t,round,dataset_global=None,dict_global = None):
        self.args = args
        # Weight on the logit-distillation term in the loss.
        self.beta = 1.0
        self.loss_func = nn.CrossEntropyLoss()
        self.num_classes = args.num_classes
        
        # Loader over the server-held subset used for distillation.
        self.gdr_train = DataLoader(DatasetSplit(dataset_global,dict_global),batch_size=self.args.sever_bs,shuffle=True)
        # Weight on the representation-matching (kl_loss) term.
        self.alpha = args.alpha
        self.net_s= net_s.to(self.args.device)
        self.net_t = net_t.to(self.args.device)
            
            
        # Only the student is optimized; the teacher provides targets.
        if self.args.optimizer == 'sgd':
            self.optimizer = torch.optim.SGD(self.net_s.parameters(), lr=self.args.lr*(self.args.lr_decay**round),
                                    momentum=self.args.momentum,weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.net_s.parameters(), lr=self.args.lr)
              

    def train(self):
        """Distill teacher into student for `args.sever_ep` epochs and return
        the student's state_dict.

        NOTE(review): the teacher's gradients are not explicitly disabled
        (no torch.no_grad()/detach around net_t) — confirm whether that is
        intentional.
        """
        self.net_s.train()
        epoch_loss = []
        for iter in range(self.args.sever_ep):
            batch_loss = []
            for batch_idx,(images,labels) in enumerate(self.gdr_train):
                images,labels = images.to(self.args.device),labels.to(self.args.device)

                out_t = self.net_t(images)
                out_s = self.net_s(images)
                # Match the earliest representation directly.
                loss_re0 = KD(out_s['representation0'],out_t['representation0'])
                # For deeper levels the student is fed the TEACHER's previous
                # representation (model apparently supports a `level=` entry
                # point) so each stage is matched in isolation — presumably a
                # stage-wise distillation scheme; verify against the model code.
                loss_re1 = KD(self.net_s(out_t['representation0'],level = 1)['representation1'],out_t['representation1'])
                # print(self.net_s(out_t['representation1'],level = 2)['representation2'].size())
                # print(out_t['representation2'].size())
                loss_re2 = KD(self.net_s(out_t['representation1'],level = 2)['representation2'],out_t['representation2'])
                # Logit-level distillation.
                loss_log = KD(out_s['output'],out_t['output'])
                print("loss_re0:",loss_re0)
                print("loss_re1:",loss_re1)
                print("loss_re2:",loss_re2)
                print("loss_log:",loss_log)
                # Hand-tuned scale factors on the representation terms —
                # TODO: confirm these magic weights (0.01 / 0.001).
                kl_loss = (loss_re2+loss_re1)*0.01 + loss_re0*0.001
                print("kl_loss:",kl_loss)
                # ce_loss = self.loss_func(out_s['output'],labels) + self.beta*loss_log
                # Labels are unused here: only the teacher's outputs supervise.
                ce_loss = self.beta*loss_log
                # ce_loss = self.loss_func(out_s['output'],labels) 
                print("ce_loss:",ce_loss)
                loss = (self.alpha*kl_loss + ce_loss  ) /2
                # loss = ce_loss
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                batch_loss.append(loss.item())
            epoch_loss.append(sum(batch_loss)/len(batch_loss))
        print("epoch_loss:",epoch_loss)
        return self.net_s.state_dict()



def FedMMD(args, net_zoo, dataset_train, dataset_test, dict_users, dict_global):
    """Run FedMMD federated training with mutual distillation.

    Each round: (1) a random sample of clients locally trains one model slot
    of the zoo each; (2) on the server, every model distills from up to three
    randomly chosen peer models and the distilled weights are averaged with
    AggregationNoData; (3) every model is evaluated on `dataset_test`, with
    per-model accuracy logged to wandb. Accuracies are saved at the end.

    args: experiment configuration (frac, num_users, epochs, device, ...).
    net_zoo: list of models participating in mutual distillation.
    dict_users: per-client index sets into `dataset_train`.
    dict_global: index set of the server-held distillation data.
    """
    num_model = len(net_zoo)
    for i in range(num_model):
        net_zoo[i].train()

    # One persisted weight slot per sampled client position.
    turntable = int(args.frac * args.num_users)
    net_glob_arr = [copy.deepcopy(net_zoo[index].state_dict()) for index in range(turntable)]

    acc = [[] for _ in range(num_model)]
    for rnd in range(args.epochs):  # `rnd`: avoid shadowing builtin `iter`
        print('*' * 80)
        print('Round {:3d}'.format(rnd))
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)

        # Local phase: client `idx` trains the model held in slot `index`.
        for index, idx in enumerate(idxs_users):
            local = LocalUpdate_FedMMD(args=args, dataset=dataset_train, idxs=dict_users[idx])
            net_zoo[index].load_state_dict(net_glob_arr[index])
            w_local = local.train(round=rnd, net=copy.deepcopy(net_zoo[index]).to(args.device))
            net_glob_arr[index] = copy.deepcopy(w_local)

        for i in range(num_model):
            net_zoo[i].load_state_dict(net_glob_arr[i])

        # Server phase: each model distills from randomly chosen peers, then
        # the distilled weight dicts are averaged.
        net_zoo_arr = []
        for i in range(num_model):
            net_weight = []
            peers = [x for x in range(num_model) if x != i]
            # Cap at the available peer count: the original hard-coded 3 and
            # raised ValueError whenever the zoo had 3 models or fewer.
            random_t = np.random.choice(peers, min(3, len(peers)), replace=False)
            for j in random_t:
                sever = SeverUpdate_FedMMD(args=args, net_s=copy.deepcopy(net_zoo[i]),
                                           net_t=copy.deepcopy(net_zoo[j]), round=rnd,
                                           dataset_global=dataset_train, dict_global=dict_global)
                net_weight.append(copy.deepcopy(sever.train()))
            net_zoo_arr.append(AggregationNoData(net_weight))

        # Evaluation phase: load distilled weights and record accuracy.
        acc_dict = {}
        for i in range(num_model):
            net_zoo[i].load_state_dict(net_zoo_arr[i])
            a = test_single(net_zoo[i], dataset_test, args)
            acc_dict['accuracy_cnn{}'.format(i + 1)] = a
            acc[i].append(a)
        wandb.log(acc_dict)
    save_result(acc, 'test_acc', args)
        
def test_single(net_glo,dataset_test,args):
    """Evaluate `net_glo` on `dataset_test`, print the metrics, and return
    the accuracy as a plain Python number."""
    accuracy, test_loss = test_img_avg(net_glo, dataset_test, args)
    print("Testing accuracy: {:.2f}, Testing loss: {:.2f}".format(accuracy, test_loss))
    return accuracy.item()