import copy
import json
import os
import random
import time

import clip
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from tqdm import tqdm

from Aggregation.BaseAggregation import BaseAggregation
from models.nets import FedGPMAEfficientMLP
from utils.utils import test, DatasetSplit, DatasetSplitClip

class FedGPMAEfficient2(BaseAggregation):
    """FedGPMA aggregation with generator-driven model distillation.

    After each FedAvg round, the averaged global model is refined by
    distilling the selected clients' logits on samples synthesized by a
    frozen, pre-trained conditional generator. A pretrained ResNet-18
    (backbone frozen, fc head trainable) acts as an auxiliary teacher.
    """

    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(FedGPMAEfficient2, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)

        # Load the pre-trained global generator used to synthesize
        # distillation samples; it is never trained here.
        self.generator = torch.load(
            f"{self.config.save_dir}/{self.config.dataset}_{self.config.beta}/global_generator_{10}.pth")
        self.generator.to(self.config.device)
        self.generator.eval()

        # Pretrained ResNet-18: freeze the whole backbone, then replace the
        # classifier head so only the new fc layer remains trainable.
        # (BUGFIX: original referenced `models.resnet18` with no `models`
        # import in scope.)
        self.pre_train_model = torchvision.models.resnet18(pretrained=True)
        for param in self.pre_train_model.parameters():
            param.requires_grad = False

        num_ftrs = self.pre_train_model.fc.in_features
        # Match the classifier head to the dataset's class count
        # (CIFAR-10 -> 10 classes, otherwise CIFAR-100 -> 100 classes).
        num_classes = 10 if self.config.dataset == 'cifar10' else 100
        self.pre_train_model.fc = nn.Linear(num_ftrs, num_classes)
        # BUGFIX: move the model to the configured device so it can consume
        # generator output created there.
        self.pre_train_model.to(self.config.device)

        # BUGFIX: original passed the undefined name `resnet18`; optimize the
        # trainable fc head of self.pre_train_model.
        self.pre_train_optimizer = torch.optim.AdamW(
            self.pre_train_model.fc.parameters(), lr=self.config.lr)

    def get_optimizer(self, model):
        """Build the optimizer named by ``config.optimizer`` for *model*.

        Returns None when ``config.optimizer`` is neither "SGD" nor "AdamW".
        """
        if self.config.optimizer == "SGD":
            return torch.optim.SGD(model.parameters(), lr=self.config.lr,
                                   momentum=self.config.momentum)
        if self.config.optimizer == "AdamW":
            return torch.optim.AdamW(model.parameters(), lr=self.config.lr)
        return None

    def _sample_generator_batch(self):
        """Draw one synthetic batch (data, labels) from the frozen generator.

        Labels are uniform over the dataset's classes; z is standard normal
        of shape (local_bs, 100) on the configured device.
        """
        device = self.config.device
        z = torch.tensor(np.random.normal(0, 1, (self.config.local_bs, 100)),
                         dtype=torch.float32, device=device)
        num_classes = 100 if self.config.dataset == 'cifar100' else 10
        labels = [random.randint(0, num_classes - 1) for _ in range(self.config.local_bs)]
        gen_labels = torch.tensor(labels, dtype=torch.long, device=device)
        generate_data = self.generator(z, gen_labels)
        return generate_data, gen_labels

    def update_weights(self, choose_user_ids):
        """Aggregate client weights, then refine the global model by distillation.

        choose_user_ids: ids of the clients selected this round; their local
        models provide the teacher logits.
        """
        # BUGFIX: original called super(FedGPMAEfficient, ...) with an
        # undefined class name.
        w_avg = super(FedGPMAEfficient2, self).update_weights(choose_user_ids)
        self.global_model.load_state_dict(w_avg)
        self.global_model.to(self.config.device)

        # Evaluate the plain FedAvg result before distillation.
        acc, test_loss = test(self.global_model, self.test_loader, self.config)
        print(f"global acc: {acc}, test_loss: {test_loss}")

        # Warm up the pre-train model's fc head on generated data first.
        self.train_pre_model()

        self.global_model.eval()
        global_model_optimizer = self.get_optimizer(self.global_model)

        pbar = tqdm(range(200), desc='Model distillation', unit='item')
        for _ in pbar:
            generate_data, gen_labels = self._sample_generator_batch()
            self.global_model.zero_grad()
            self.pre_train_model.zero_grad()

            # Teacher logits: the selected clients' local models, frozen.
            teacher_logits = []
            for client_id in choose_user_ids:
                with torch.no_grad():
                    teacher_logits.append(self.net[client_id](generate_data))

            # Step 1: update the pre-train model's head against the client
            # ensemble.
            output_global = self.global_model(generate_data)
            pre_train_output = self.pre_train_model(generate_data)
            pre_train_loss = self.combined_loss(pre_train_output, gen_labels, teacher_logits)
            pre_train_loss.backward()
            self.pre_train_optimizer.step()

            # Step 2: distill the (just-updated) pre-train model into the
            # global model.
            pre_train_output2 = self.pre_train_model(generate_data)
            global_model_loss = self.combined_loss(output_global, gen_labels, [pre_train_output2])
            global_model_loss.backward()
            global_model_optimizer.step()

        # Evaluate again after distillation.
        acc, test_loss = test(self.global_model, self.test_loader, self.config)
        print(f"global acc: {acc}, test_loss: {test_loss}")

    def combined_loss(self, outputs, targets, teacher_logits,
                      temp=1.0, alpha=0.1):
        """Weighted sum of distillation KL and cross-entropy.

        outputs: student logits; targets: class indices; teacher_logits:
        list of teacher logit tensors averaged in the KL term.
        Loss = alpha * mean KL(student || teacher) + (1 - alpha) * CE.
        """
        loss = torch.tensor(0.0, device=self.config.device)
        # KL divergence against each teacher, temperature-scaled.
        if teacher_logits:
            total_kl_loss = torch.tensor(0.0, device=self.config.device)
            for teacher_logit in teacher_logits:
                total_kl_loss += F.kl_div(F.log_softmax(outputs / temp, dim=1),
                                          F.softmax(teacher_logit / temp, dim=1),
                                          reduction='batchmean') * (temp ** 2)
            loss += total_kl_loss / len(teacher_logits) * alpha
        # Cross-entropy on the generated labels.
        # BUGFIX: original multiplied by (1. - alpha - cosine_reg) where
        # `cosine_reg` was undefined; use the complementary weight.
        loss += F.cross_entropy(outputs, targets.squeeze()) * (1. - alpha)
        return loss

    def train(self):
        """Run the full federated training loop for ``config.epochs`` rounds."""
        print("begin train...")
        # Output directory is round-invariant; create it once up front.
        path = (f"{self.config.save_dir}/{self.config.aggregation}_{self.config.beta}/"
                f"{self.config.dataset}_{self.config.num_users}")
        os.makedirs(path, exist_ok=True)

        for epoch in range(1, self.config.epochs + 1):
            begin_time = time.time()
            # Sample the clients participating in this round.
            choose_user = int(self.config.frac * self.config.num_users)
            assert choose_user > 0, "choose_user > 0"
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local training on the selected clients.
            self.client_loss_data = []
            self.local_train(choose_user_ids)

            # Aggregate and distill (see update_weights).
            self.update_weights(choose_user_ids)

            # Persist the selected clients' local models.
            for choose_user_id in choose_user_ids:
                torch.save(self.net[choose_user_id], path +
                           f"/local_model_{choose_user_id}_{self.config.aggregation}_{epoch}.pth")

            # Push the updated global model back to the clients.
            self.distribute_model()

            # NOTE(review): metrics are recorded only at round 50 exactly;
            # `epoch % 50 == 0` may have been intended — confirm with author.
            if epoch == 50:
                acc, test_loss = test(self.global_model, self.test_loader, self.config)
                print(f"global epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
                self.loss_data.append(
                    {"global_epoch": epoch, "acc": acc, "test_loss": test_loss, "clients": self.client_loss_data})
            end_time = time.time()
            print(f"global epoch cost: {end_time-begin_time}s")

            # Persist the global model for this round.
            torch.save(self.global_model,
                       path +
                       f"/global_model_{self.config.aggregation}_{epoch}.pth")

        with open(f'{self.config.dataset}_{self.config.aggregation}_'
                  f'{self.config.partition}_{self.config.model}_'
                  f'{self.config.beta}_{self.config.num_users}.json', 'w') as f:
            json.dump(self.loss_data, f)

    def train_pre_model(self):
        """Fit the pre-train model's fc head on generator samples (200 steps)."""
        pbar = tqdm(range(200), desc='Train Pre-train Model', unit='item')
        for _ in pbar:
            generate_data, gen_labels = self._sample_generator_batch()
            self.pre_train_model.zero_grad()

            output = self.pre_train_model(generate_data)
            loss = F.cross_entropy(output, gen_labels)

            loss.backward()
            self.pre_train_optimizer.step()
