import copy
import json
import os
import random
import time

import clip
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from tqdm import tqdm
from transformers import CLIPModel, CLIPProcessor

from Aggregation.BaseAggregation import BaseAggregation
from utils.utils import test, DatasetSplit, DatasetSplitClip
import torch.nn.functional as F


class AdapterClip(BaseAggregation):
    """
    FedGPMA.

    Federated aggregation with CLIP-guided knowledge distillation: after the
    base class performs the usual weight aggregation, the global model is
    refined by distilling from the selected clients' models on synthetic
    images produced by a pre-trained conditional generator, with CLIP
    zero-shot predictions used as additional pseudo-labels.
    """
    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(AdapterClip, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)

        # Frozen CLIP backbone used to produce image/text embeddings.
        self.clip_model, self.preprocess = clip.load('ViT-B/32', self.config.device)
        self.clip_model.eval()

        # CLIP-preprocessed copies of the datasets (224x224, CLIP normalization).
        # Only CIFAR-10/100 get a dedicated copy; other datasets are reused as-is
        # and converted on the fly in calculate_clip_vec.
        data_dir = 'dataset'
        if self.config.dataset == "cifar10":
            self.train_dataset_clip = datasets.CIFAR10(data_dir, train=True, download=True, transform=self.preprocess)
            self.test_dataset_clip = datasets.CIFAR10(data_dir, train=False, download=True, transform=self.preprocess)
        elif self.config.dataset == "cifar100":
            self.train_dataset_clip = datasets.CIFAR100(data_dir, train=True, download=True, transform=self.preprocess)
            self.test_dataset_clip = datasets.CIFAR100(data_dir, train=False, download=True, transform=self.preprocess)
        else:
            self.train_dataset_clip = self.train_dataset
            self.test_dataset_clip = self.test_dataset

        # One zero-shot text prompt per class.
        if self.config.dataset in ['mnist', 'svhn']:
            self.text_inputs = torch.cat([clip.tokenize(f"a photo of the digit {c}") for c in range(10)]).to(
                self.config.device)
        else:
            self.text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in test_dataset.classes]).to(
                self.config.device)

        # FIX: encode under no_grad -- the text features are constants, and the
        # original call kept them attached to an autograd graph for the whole run.
        with torch.no_grad():
            self.text_features = self.clip_model.encode_text(self.text_inputs)
        # NOTE(review): these features are NOT L2-normalized before the
        # similarity in update_weights; standard CLIP zero-shot classification
        # normalizes both image and text features -- confirm this is intended.

        # Load the pre-trained global generator (checkpoint index 10 hard-coded).
        # NOTE(review): torch.load of a full model object unpickles arbitrary
        # code; only load checkpoints from trusted sources.
        self.generator = torch.load(f"{self.config.save_dir}/{self.config.dataset}_{self.config.beta}/global_generator_{10}.pth")
        self.generator.to(self.config.device)
        self.generator.eval()

    def calculate_clip_vec(self, x):
        """
        Encode a batch of images into CLIP image features (no gradients).

        :param x: image tensor of shape (B, C, H, W); either already
                  CLIP-preprocessed (B, 3, 224, 224) or raw dataset images
                  that still need the CLIP preprocessing pipeline.
        :return: CLIP image-feature tensor of shape (B, feature_dim).
        """
        with torch.no_grad():
            # FIX: compare only the per-image shape. The original compared the
            # full shape against (local_bs, 3, 224, 224), so a final partial
            # batch of already-preprocessed 224x224 images fell through to the
            # PIL branch and was normalized twice.
            if x.shape[1:] == (3, 224, 224):
                image_features = self.clip_model.encode_image(x)
            else:
                images = [transforms.ToPILImage()(x[i]) for i in range(x.shape[0])]
                images = [self.preprocess(image) for image in images]
                image_features = self.clip_model.encode_image(torch.stack(images, dim=0).to(self.config.device))

        return image_features

    def update_weights(self, choose_user_ids):
        """
        Aggregate client weights, then refine the global model by distillation.

        Runs 200 distillation steps on generator-synthesized images: the
        student (global model) is trained against the selected clients'
        teacher outputs plus CLIP zero-shot pseudo-labels.

        :param choose_user_ids: ids of the clients selected for this round.
        """
        w_avg = super(AdapterClip, self).update_weights(choose_user_ids)
        self.global_model.load_state_dict(w_avg)

        self.global_model.to(self.config.device)

        self.global_model.train()
        optimizer = self.get_optimizer(self.global_model)

        num_classes = 100 if self.config.dataset == 'cifar100' else 10

        pbar = tqdm(range(200), desc='Model distillation', unit='item')
        for _ in pbar:
            # FIX: replace the deprecated Variable wrapper and the legacy
            # CUDA-only torch.cuda.FloatTensor/LongTensor constructors with
            # device-agnostic factory functions (same N(0,1) noise and uniform
            # label distributions as before).
            z = torch.randn(self.config.local_bs, 100, device=self.config.device)
            gen_labels = torch.randint(0, num_classes, (self.config.local_bs,), device=self.config.device)
            generate_data = self.generator(z, gen_labels)

            self.global_model.zero_grad()

            clip_vectors = self.calculate_clip_vec(generate_data)

            # Teacher outputs from the selected clients, computed frozen.
            teacher_logits = []
            teacher_feats = []
            for client_id in choose_user_ids:
                with torch.no_grad():
                    output_i, z_i = self.net[client_id](generate_data, clip_vectors)
                    teacher_logits.append(output_i)
                    teacher_feats.append(z_i)

            output_global, z_global = self.global_model(generate_data, clip_vectors)

            # Distillation loss (KL to teachers + cosine feature alignment +
            # CE on the sampled labels), weighted by (1 - alpha).
            loss = (1 - self.config.alpha) * self.combined_loss(
                output_global, gen_labels, teacher_logits, teacher_feats, z_global,
                ['kl', 'cosine', 'ce', 'l2-'])
            # CLIP zero-shot pseudo-label term, weighted by alpha.
            similarity = (100.0 * clip_vectors @ self.text_features.T).softmax(dim=-1)
            loss += self.config.alpha * F.cross_entropy(output_global, similarity.argmax(dim=1))

            loss.backward()
            optimizer.step()

    def combined_loss(self, outputs, targets, teacher_logits, z, z_global,
                      loss_item=('kl', 'cosine', 'ce', 'l2'), temp=1.0, alpha=0.1, cosine_reg=0.05):
        """
        Combined distillation loss for the global (student) model.

        :param outputs: student logits, shape (B, num_classes).
        :param targets: ground-truth labels (any shape squeezable to (B,)).
        :param teacher_logits: list of per-client teacher logit tensors.
        :param z: list of per-client teacher feature tensors [z_1, ..., z_m].
        :param z_global: student feature tensor.
        :param loss_item: which terms to enable ('kl', 'cosine', 'ce'); an L2
                          weight-regularization option existed historically but
                          is no longer implemented.
        :param temp: softmax temperature for the KL term.
        :param alpha: weight of the KL term (CE gets 1 - alpha - cosine_reg).
        :param cosine_reg: weight of the cosine feature-alignment term.
        :return: scalar loss tensor.
        """
        loss = torch.tensor(0.0, device=self.config.device)
        # FIX: test membership instead of fixed positions -- the original
        # required each option at an exact index and raised IndexError for
        # shorter lists. Identical behavior for the existing call sites.
        # Temperature-scaled KL divergence to each teacher, averaged.
        if 'kl' in loss_item:
            total_kl_loss = torch.tensor(0.0, device=self.config.device)
            for teacher_logit in teacher_logits:
                total_kl_loss += F.kl_div(F.log_softmax(outputs / temp, dim=1),
                                          F.softmax(teacher_logit / temp, dim=1),
                                          reduction='batchmean') * (temp ** 2)
            loss += total_kl_loss / len(teacher_logits) * alpha
        # Cosine alignment between student features and each teacher's features.
        if 'cosine' in loss_item:
            cosine_similarities = [F.cosine_similarity(z_global, z_i) for z_i in z]
            loss += (1.0 - torch.mean(torch.stack(cosine_similarities))) * cosine_reg
        # Cross-entropy against the sampled labels.
        if 'ce' in loss_item:
            loss += F.cross_entropy(outputs, targets.squeeze()) * (1. - alpha - cosine_reg)

        return loss

    def train(self):
        """
        Run the full federated training loop for config.epochs rounds.

        Each round: sample a fraction of clients, train them locally,
        aggregate + distill into the global model, checkpoint all models, and
        redistribute the global model. Loss statistics are dumped to JSON at
        the end.
        """
        print("begin train...")
        for epoch in range(1, self.config.epochs + 1):
            # Checkpoint directory for this configuration.
            path = f"{self.config.save_dir}/{self.config.aggregation}_{self.config.beta}/{self.config.dataset}_{self.config.num_users}"
            # FIX: exist_ok avoids the check-then-create race of the original
            # os.path.exists / os.makedirs pair.
            os.makedirs(path, exist_ok=True)

            begin_time = time.time()
            # Sample the clients participating in this round.
            choose_user = int(self.config.frac * self.config.num_users)
            assert choose_user > 0, "choose_user > 0"
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local training on each selected client.
            self.client_loss_data = []
            self.local_train(choose_user_ids, epoch)

            # Aggregate and distill into the global model.
            self.update_weights(choose_user_ids)

            # Save this round's local models.
            for choose_user_id in choose_user_ids:
                torch.save(self.net[choose_user_id], path +
                           f"/local_model_{choose_user_id}_{self.config.aggregation}_{epoch}.pth")

            # Push the updated global model back to the clients.
            self.distribute_model()

            # Evaluation.
            # NOTE(review): this evaluates only at round 50 exactly; if
            # periodic evaluation was intended it should be
            # `epoch % 50 == 0` -- confirm.
            if epoch == 50:
                acc, test_loss = self.test(self.global_model, self.test_loader, type=1)
                print(f"global epoch: {epoch}, acc: {acc}, test_loss: {test_loss}")
                self.loss_data.append(
                    {"global_epoch": epoch, "acc": acc, "test_loss": test_loss, "clients": self.client_loss_data})
            end_time = time.time()
            print(f"global epoch cost: {end_time-begin_time}s")

            # Save the global model.
            torch.save(self.global_model,
                       path +
                       f"/global_model_{self.config.aggregation}_{epoch}.pth")
        with open(f'{self.config.dataset}_{self.config.aggregation}_'
                  f'{self.config.partition}_{self.config.model}_'
                  f'{self.config.beta}_{self.config.num_users}.json', 'w') as f:
            json.dump(self.loss_data, f)

    def local_train(self, choose_user_ids, global_epoch):
        """
        Train each selected client's model on its local data shard.

        :param choose_user_ids: ids of the clients selected for this round.
        :param global_epoch: current global round (currently unused; kept for
                             interface compatibility).
        """
        for client_id in choose_user_ids:
            model = self.net[client_id]
            model.train()

            train_loader = DataLoader(
                DatasetSplitClip(self.train_dataset, self.user_groups[client_id], self.train_dataset_clip),
                batch_size=self.config.local_bs, shuffle=True, num_workers=4)
            optimizer = self.get_optimizer(model)
            pbar = tqdm(range(self.config.local_ep), desc=f'LocalTrain-{client_id}', unit='item')
            for _ in pbar:
                for x, y, x_clip in train_loader:
                    x, y = x.to(self.config.device), y.to(self.config.device)
                    # CLIP features are computed under no_grad and act as a
                    # frozen auxiliary input to the client model.
                    clip_vector = self.calculate_clip_vec(x_clip.to(self.config.device))
                    model.zero_grad()
                    output, z = model(x, clip_vector)
                    loss = F.cross_entropy(output, y)
                    loss.backward()
                    optimizer.step()

    def test(self, model, test_loader, type=1):
        """
        Evaluate a model on a test loader.

        :param model: model returning (logits, features).
        :param test_loader: DataLoader over the evaluation set.
        :param type: 1 -> also feed CLIP image features; otherwise pass None.
        :return: (accuracy in percent, mean cross-entropy loss).
        """
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.config.device), target.to(self.config.device)
                if type == 1:
                    output, _ = model(data, self.calculate_clip_vec(data))
                else:
                    output, _ = model(data, None)
                # FIX: size_average=False is deprecated (removed in recent
                # torch); reduction='sum' is the modern equivalent -- sum the
                # per-sample losses, then divide by the dataset size below.
                test_loss += F.cross_entropy(output, target, reduction='sum').item()
                pred = torch.max(output, 1)[1]
                correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        acc = 100. * correct / len(test_loader.dataset)
        return acc, test_loss
