import json
import os
import random
import time
import clip
import copy
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from tqdm import tqdm

from Aggregation.BaseAggregation import BaseAggregation
from models.nets import FedGPMAEfficientMLP
from utils.utils import test, DatasetSplit, DatasetSplitClip
import torch.nn.functional as F

class FedGPMAEfficient(BaseAggregation):
    """FedGPMA aggregation with a generator-driven distillation step.

    After standard weight averaging, the chosen clients' classifier
    parameters (``fc1`` / ``fc``) are fed through a small aggregation MLP
    (``global_linear``) that is trained on samples synthesised by a
    pre-trained global generator; the MLP's output is written back into the
    global model's classifier head.
    """

    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(FedGPMAEfficient, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)

        # Load the pre-trained global generator. NOTE(review): the round-10
        # checkpoint is hard-coded (`global_generator_{10}.pth`) — confirm
        # this is the intended checkpoint rather than a leftover constant.
        self.generator = torch.load(f"{self.config.save_dir}/{self.config.dataset}_{self.config.beta}/global_generator_{10}.pth")
        self.generator.to(self.config.device)
        self.generator.eval()

        # NOTE(review): num_users=20 is hard-coded while the rest of the file
        # is driven by self.config.num_users — confirm the aggregation MLP
        # really expects exactly 20 client heads.
        if self.config.dataset == 'cifar10':
            self.global_linear = FedGPMAEfficientMLP(num_classes=10, dim=1600, num_users=20).to(self.config.device)
        else:
            self.global_linear = FedGPMAEfficientMLP(num_classes=100, dim=10816, num_users=20).to(self.config.device)

    def get_optimizer(self, model):
        """Build the optimizer named by ``config.optimizer`` for *model*.

        Supports "SGD" (with ``config.momentum``) and "AdamW", both at
        ``config.lr``; returns None for any other value, as before.
        """
        if self.config.optimizer == "SGD":
            return torch.optim.SGD(model.parameters(), lr=self.config.lr, momentum=self.config.momentum)
        if self.config.optimizer == "AdamW":
            return torch.optim.AdamW(model.parameters(), lr=self.config.lr)
        return None

    def update_weights(self, choose_user_ids):
        """Average the chosen clients' weights, then distill their classifier
        heads into the global model via ``global_linear``.

        Args:
            choose_user_ids: indices of the clients trained this round.
        """
        w_avg = super(FedGPMAEfficient, self).update_weights(choose_user_ids)
        self.global_model.load_state_dict(w_avg)
        self.global_model.to(self.config.device)

        # Evaluate the plain averaged model before distillation.
        acc, test_loss = test(self.global_model, self.test_loader, self.config)
        print(f"global acc: {acc}, test_loss: {test_loss}")

        self.global_model.eval()
        self.global_linear.train()
        optimizer = self.get_optimizer(self.global_linear)

        # Number of generator classes; hoisted out of the 2000-step loop.
        num_classes = 100 if self.config.dataset == 'cifar100' else 10

        # Snapshot each chosen client's fc1/fc parameters as plain tensors
        # (detach().clone() replaces deepcopy of live nn.Parameters).
        fc1_params = []
        fc1_params_bias = []
        fc_params = []
        fc_params_bias = []
        for user_id in choose_user_ids:
            client_net = self.net[user_id]
            fc1_params.append(client_net.fc1[0].weight.detach().clone().to(self.config.device))
            fc1_params_bias.append(client_net.fc1[0].bias.detach().clone().to(self.config.device))
            fc_params.append(client_net.fc.weight.detach().clone().to(self.config.device))
            fc_params_bias.append(client_net.fc.bias.detach().clone().to(self.config.device))

        pbar = tqdm(range(2000), desc='Model distillation', unit='item')
        for _ in pbar:
            # Sample latent noise and random class labels for the generator.
            # torch.tensor(...) replaces the deprecated Variable +
            # torch.cuda.FloatTensor/LongTensor pair and also works when
            # config.device is a CPU device; the numpy / random RNG streams
            # are kept so the sampled values match the original code.
            z = torch.tensor(np.random.normal(0, 1, (self.config.local_bs, 100)),
                             dtype=torch.float32, device=self.config.device)
            labels = [random.randint(0, num_classes - 1) for _ in range(self.config.local_bs)]
            gen_labels = torch.tensor(labels, dtype=torch.long, device=self.config.device)
            generate_data = self.generator(z, gen_labels)

            self.global_linear.zero_grad()
            out1, out = self.global_linear(fc1_params, fc_params)

            # Write the aggregated weights into the global classifier head.
            # NOTE(review): state_dict() returns detached tensors, so these
            # copy_ calls sever the autograd graph — loss.backward() below
            # likely cannot propagate gradients into global_linear, making
            # optimizer.step() a no-op. Confirm; torch.func.functional_call
            # would keep the graph intact if training the MLP is intended.
            self.global_model.state_dict()['fc1.0.weight'].copy_(out1)
            self.global_model.state_dict()['fc.weight'].copy_(out)

            # Classify the generated samples and distill.
            predictions = self.global_model(generate_data)
            loss = F.cross_entropy(predictions, gen_labels)

            loss.backward()
            optimizer.step()

        # Final write-back with the trained aggregation MLP, then re-evaluate.
        with torch.no_grad():
            out1, out = self.global_linear(fc1_params, fc_params)
            self.global_model.state_dict()['fc1.0.weight'].copy_(out1)
            self.global_model.state_dict()['fc.weight'].copy_(out)
        acc, test_loss = test(self.global_model, self.test_loader, self.config)
        print(f"global acc: {acc}, test_loss: {test_loss}")

    def train(self):
        """Top-level federated loop: local training, aggregation/distillation,
        per-round checkpointing, and a final JSON dump of the loss log."""
        print("begin train...")

        # The checkpoint directory is round-invariant: create it once, up
        # front, race-free (replaces the per-epoch exists()/makedirs pair).
        path = f"{self.config.save_dir}/{self.config.aggregation}_{self.config.beta}/{self.config.dataset}_{self.config.num_users}"
        os.makedirs(path, exist_ok=True)

        for epoch in range(1, self.config.epochs + 1):
            begin_time = time.time()

            # Sample the clients participating in this round.
            choose_user = int(self.config.frac * self.config.num_users)
            assert choose_user > 0, "choose_user > 0"
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local training on the chosen clients.
            self.client_loss_data = []
            self.local_train(choose_user_ids)

            # Aggregate (and distill) into the global model.
            self.update_weights(choose_user_ids)

            # Persist the local models for this round.
            for choose_user_id in choose_user_ids:
                torch.save(self.net[choose_user_id], path +
                           f"/local_model_{choose_user_id}_{self.config.aggregation}_{epoch}.pth")

            # Broadcast the updated global model to all clients.
            self.distribute_model()

            # NOTE(review): metrics are recorded only when epoch == 50;
            # `epoch % 50 == 0` may have been intended — confirm.
            if epoch == 50:
                acc, test_loss = test(self.global_model, self.test_loader, self.config)
                print(f"global epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
                self.loss_data.append(
                    {"global_epoch": epoch, "acc": acc, "test_loss": test_loss, "clients": self.client_loss_data})
            end_time = time.time()
            print(f"global epoch cost: {end_time-begin_time}s")

            # Persist the global model for this round.
            torch.save(self.global_model,
                       path +
                       f"/global_model_{self.config.aggregation}_{epoch}.pth")
        with open(f'{self.config.dataset}_{self.config.aggregation}_'
                  f'{self.config.partition}_{self.config.model}_'
                  f'{self.config.beta}_{self.config.num_users}.json', 'w') as f:
            json.dump(self.loss_data, f)
