import copy

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from Aggregation.BaseAggregation import BaseAggregation
from utils.utils import DatasetSplit, test


class FedNova(BaseAggregation):
    """FedNova aggregation strategy.

    Trains the selected clients locally with a decaying ("Nova") learning
    rate plus a FedProx-style proximal term, then aggregates by averaging
    row-normalized local weights.
    """
    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(FedNova, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)

    def local_train(self, choose_user_ids):
        """Run local training for each selected client.

        :param choose_user_ids: iterable of client indices into ``self.net``
            and ``self.user_groups`` chosen for this round.
        :return: None. Updates ``self.net[client_id]`` in place and appends a
            per-client evaluation record to ``self.client_loss_data``.
        """
        for client_id in choose_user_ids:
            model = self.net[client_id]
            model.train()

            train_loader = DataLoader(DatasetSplit(self.train_dataset, self.user_groups[client_id]),
                                      batch_size=self.config.local_bs, shuffle=True, num_workers=4)
            optimizer = self.get_optimizer(model)
            pbar = tqdm(range(self.config.local_ep), desc='LocalTrain', unit='item')
            # Frozen snapshot of the global model used as the proximal anchor.
            global_model = copy.deepcopy(self.global_model).to(self.config.device)
            for epoch in pbar:
                # Nova learning rate: shrinks as local epochs progress,
                # lr * local_ep / (local_ep + epoch + 1).
                local_lr = self.config.lr * (self.config.local_ep / (self.config.local_ep + epoch + 1))
                for param_group in optimizer.param_groups:
                    param_group['lr'] = local_lr

                for x, y in train_loader:
                    x, y = x.to(self.config.device), y.to(self.config.device)
                    model.zero_grad()
                    output = model(x)
                    loss = torch.nn.functional.cross_entropy(output, y)

                    # FedProx-style proximal term pulling the local model
                    # toward the round's global snapshot.
                    # NOTE(review): this accumulates the plain L2 norm, not the
                    # squared norm (mu/2 * ||w - w_t||^2) of the FedProx paper
                    # (see the previously commented-out variant) — confirm this
                    # is intentional before changing it.
                    proximal_term = 0.0
                    for w, w_t in zip(model.parameters(), global_model.parameters()):
                        proximal_term += (w - w_t).norm(2)

                    loss += (self.config.mu / 2) * proximal_term
                    loss.backward()
                    optimizer.step()

            # Evaluate once after all local epochs; `epoch` holds the last
            # epoch index from the loop above.
            acc, test_loss = test(model, self.test_loader, self.config)
            pbar.set_postfix(
                {"client_id": client_id, "epoch": epoch, "acc": acc, "test_loss": test_loss},
                refresh=True)
            print(f"client_id: {client_id}, epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
            self.client_loss_data.append({"client_id": int(client_id), "acc": acc, "test_loss": test_loss})

    def update_weights(self):
        """Aggregate the local models into a new global state dict.

        Every multi-dimensional parameter is row-normalized in place (1-D
        parameters such as biases are skipped), the normalized local weights
        are averaged element-wise, and the average is normalized once more.

        Note: local models and ``self.global_model`` are mutated in place by
        the normalization steps.

        :return: state dict holding the aggregated (normalized) weights.
        """
        global_model = self.global_model

        # Step 1: normalize each local model's weights in place.
        # Bug fix: the original used `param.name`, which does not exist on
        # torch tensors and raised AttributeError on any 1-D parameter;
        # named_parameters() supplies the name instead.
        local_model_weights = []
        for local_model in self.net:
            local_model.eval()
            with torch.no_grad():
                for name, param in local_model.named_parameters():
                    if param.dim() > 1:  # only matrices have rows to normalize
                        param.data = param.data / torch.norm(param.data, dim=1, keepdim=True)
                    else:
                        print(f"Skipping normalization for {name}: 1-D parameter")
            local_model_weights.append(local_model.state_dict())

        # Step 2: normalize the global model's weights the same way.
        global_model.eval()
        with torch.no_grad():
            for name, param in global_model.named_parameters():
                if param.dim() > 1:
                    param.data = param.data / torch.norm(param.data, dim=1, keepdim=True)
                else:
                    print(f"Skipping normalization for {name}: 1-D parameter")

        # Step 3: element-wise average of the normalized local weights.
        avg_local_model = global_model.state_dict()
        num_models = len(local_model_weights)
        for key in avg_local_model:
            avg_local_model[key] = sum(w[key] for w in local_model_weights) / num_models

        # Step 4: normalize the averaged weights once more.
        for key, param in avg_local_model.items():
            if param.dim() > 1:
                avg_local_model[key] = param / torch.norm(param, dim=1, keepdim=True)
            else:
                print(f"Skipping normalization for {key}: 1-D parameter")

        return avg_local_model
