import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset
from torch import nn 
import numpy as np

class DatasetSplit(Dataset):
    """A read-only view of ``dataset`` restricted to the positions in ``idxs``.

    Used to hand each federated client its own shard of a shared dataset.
    """

    def __init__(self, dataset, idxs, args):
        # ``args`` is accepted for call-site compatibility but unused here.
        self.dataset = dataset
        self.idxs = list(idxs)

    def __len__(self):
        # Length of the view, not of the underlying dataset.
        return len(self.idxs)

    def __getitem__(self, item):
        # Translate the view index into the underlying dataset's index.
        sample, target = self.dataset[self.idxs[item]]
        return sample, target

class KDLoss(nn.Module):
    """Cross-entropy loss with an optional KL-divergence distillation term.

    The distillation term softens both the prediction and the soft target
    with temperature ``T`` and is weighted by ``gamma``.  ``gamma`` defaults
    to 0, so distillation is inactive unless it is set externally.

    NOTE(review): ``nn.KLDivLoss()`` uses the default elementwise-``mean``
    reduction; ``reduction='batchmean'`` is the mathematically standard
    choice for KD — kept as-is here to preserve behavior, worth confirming.
    """

    def __init__(self):
        super(KDLoss, self).__init__()
        # Component losses / activations reused by callers (e.g. via .ce_loss).
        self.ce_loss = nn.CrossEntropyLoss()
        self.kld_loss = nn.KLDivLoss()
        self.softmax = nn.Softmax(dim=1)
        self.log_softmax = nn.LogSoftmax(dim=1)
        # Distillation temperature and weight (0 disables distillation).
        self.T = 3
        self.gamma = 0

    def loss_fn_kd(self, pred, target, soft_target, gamma_active=True):
        """Return CE(pred, target) plus, when ``gamma`` is nonzero and
        ``gamma_active`` is True, the temperature-scaled KD term."""
        loss = self.ce_loss(pred, target)
        if gamma_active and self.gamma:
            temp = self.T
            soft_student = self.log_softmax(pred / temp)
            soft_teacher = self.softmax(soft_target / temp)
            # T^2 rescaling keeps gradient magnitudes comparable across temperatures.
            loss = loss + self.kld_loss(soft_student, soft_teacher) * self.gamma * temp * temp
        return loss

class LocalUpdate_FedAvg(object):
    """Client-side local trainer for standard FedAvg.

    Trains the given model on this client's data shard for
    ``args.local_ep`` epochs and returns the updated state dict.
    """

    def __init__(self, args, dataset=None, idxs=None, verbose=False):
        self.args = args
        self.loss_func = nn.CrossEntropyLoss()
        self.selected_clients = []
        # drop_last avoids ragged final batches during local training.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs, args), batch_size=self.args.local_bs, shuffle=True,
                                    drop_last=True)
        self.verbose = verbose

    def train(self, round, net):
        """Run local training for one federated round.

        Args:
            round: Current round index; used for exponential lr decay with SGD.
            net: Model to train in place.

        Returns:
            The trained model's ``state_dict``.

        Raises:
            ValueError: If ``args.optimizer`` is neither 'sgd' nor 'adam'.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # Learning rate decays exponentially over communication rounds.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Previously an unknown optimizer fell through and crashed later
            # with a NameError on `optimizer`; fail fast and explicitly.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        predict_loss = 0.0
        for _ in range(self.args.local_ep):  # was `iter`, which shadowed the builtin
            if "transformer" in self.args.model:
                # Transformer models take an attention mask alongside the input
                # and return logits directly.
                for images, labels, mask in self.ldr_train:
                    images, labels, mask = images.to(self.args.device), labels.to(self.args.device), mask.to(
                        self.args.device)
                    net.zero_grad()
                    log_probs = net(images, mask)
                    loss = self.loss_func(log_probs, labels)
                    loss.backward()
                    optimizer.step()

                    predict_loss += loss.item()
            else:
                # Other models return a dict; logits live under 'output'.
                for images, labels in self.ldr_train:
                    images, labels = images.to(self.args.device), labels.to(self.args.device)
                    if self.args.dataset == 'widar':
                        # widar labels arrive as a non-long dtype; CE needs int64.
                        labels = labels.long()
                    net.zero_grad()
                    log_probs = net(images)['output']
                    loss = self.loss_func(log_probs, labels)
                    loss.backward()
                    optimizer.step()

                    predict_loss += loss.item()

        if self.verbose:
            info = '\nUser predict Loss={:.4f}'.format(predict_loss / (self.args.local_ep * len(self.ldr_train)))
            print(info)

        return net.state_dict()
        
class LocalUpdate_ScaleFL(object):
    """Client-side local trainer for ScaleFL with multi-exit networks.

    Each forward pass returns one output dict per exit; the last exit is
    trained with plain cross-entropy and the earlier exits with
    self-distillation (KD) against the last exit's logits.
    """

    def __init__(self, args, dataset=None, idxs=None, verbose=False):
        self.args = args
        self.loss_func = KDLoss()
        # drop_last avoids ragged final batches during local training.
        self.ldr_train = DataLoader(DatasetSplit(dataset, idxs, args), batch_size=self.args.local_bs, shuffle=True,
                                    drop_last=True)
        self.verbose = verbose

    def train(self, round, net, ee):
        """Run local multi-exit training for one federated round.

        Args:
            round: Current round index; used for lr decay and for gating KD.
            net: Multi-exit model to train in place.
            ee: Exit configuration forwarded to ``net(images, ee)``.

        Returns:
            Tuple of (state_dict, list of bools marking which parameters
            received gradients during this round).

        Raises:
            ValueError: If ``args.optimizer`` is neither 'sgd' nor 'adam'.
        """
        net.train()
        if self.args.optimizer == 'sgd':
            # Learning rate decays exponentially over communication rounds.
            optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr * (self.args.lr_decay ** round),
                                        momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adam':
            optimizer = torch.optim.Adam(net.parameters(), lr=self.args.lr)
        else:
            # Previously an unknown optimizer fell through and crashed later
            # with a NameError on `optimizer`; fail fast and explicitly.
            raise ValueError('unsupported optimizer: {}'.format(self.args.optimizer))

        predict_loss = 0.0
        for _ in range(self.args.local_ep):  # was `iter`, which shadowed the builtin

            for images, labels in self.ldr_train:
                images, labels = images.to(self.args.device), labels.to(self.args.device)
                if self.args.dataset == 'widar':
                    # widar labels arrive as a non-long dtype; CE needs int64.
                    labels = labels.long()
                net.zero_grad()
                outputs = net(images, ee)
                loss = 0.0
                last = len(outputs) - 1
                for j, out in enumerate(outputs):
                    # Deeper exits get a larger weight (j + 1).
                    if j == last:
                        loss += self.loss_func.ce_loss(out["output"], labels) * (j + 1)
                    else:
                        # Enable distillation only after 25% of total rounds,
                        # once the final exit is a reasonable teacher.
                        gamma_active = round > self.args.epochs * 0.25
                        loss += self.loss_func.loss_fn_kd(out["output"], labels, outputs[-1]["output"], gamma_active) * (j + 1)

                # Normalize by the triangular weight sum 1 + 2 + ... + n.
                loss /= len(outputs) * (len(outputs) + 1) / 2

                loss.backward()
                optimizer.step()

                predict_loss += loss.item()

        if self.verbose:
            info = '\nUser predict Loss={:.4f}'.format(predict_loss / (self.args.local_ep * len(self.ldr_train)))
            print(info)

        # `is not None` (identity), not `!= None`; flags which parameters were
        # actually updated, so the server can aggregate only the trained subnet.
        requires_grad = [param.grad is not None for param in net.parameters()]

        return net.state_dict(), requires_grad