import copy
import os
import time

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from tqdm import tqdm

from Aggregation.BaseAggregation import BaseAggregation
from utils.utils import DatasetSplit, GenerationMD


class Generation(BaseAggregation):
    """Federated conditional-GAN (ACGAN-style) trainer.

    Each selected client trains a local generator/discriminator pair on its
    own data shard (``local_train``); the server then trains the global pair
    on a meta-dataset sampled from the local generators (``GenerationMD`` in
    ``update_weights``) and broadcasts the global weights back to every
    client.
    """

    # Per-dataset model hyper-parameters:
    # (n_classes, latent_dim, img_size, channels)
    _DATASET_SPECS = {
        "mnist": (10, 100, 28, 1),
        "fmnist": (10, 100, 28, 1),
        "svhn": (10, 100, 32, 3),
        "cifar10": (10, 100, 32, 3),
        "cifar100": (100, 100, 32, 3),
    }

    def __init__(self, config, train_dataset, test_dataset, user_groups, traindata_cls_counts):
        super(Generation, self).__init__(config, train_dataset, test_dataset, user_groups, traindata_cls_counts)

        self.config = config
        self.train_dataset = train_dataset
        self.test_dataset = test_dataset
        self.test_loader = DataLoader(test_dataset, batch_size=self.config.local_bs,
                                      shuffle=False, num_workers=4)
        self.user_groups = user_groups
        self.traindata_cls_counts = traindata_cls_counts

        # Previously an unknown dataset silently left these attributes unset
        # and failed much later with an AttributeError; fail fast instead.
        try:
            (self.n_classes, self.latent_dim,
             self.img_size, self.channels) = self._DATASET_SPECS[self.config.dataset]
        except KeyError:
            raise ValueError(f"Unsupported dataset: {self.config.dataset!r}")

        # Initialize the global models (DCGAN-style weight init).
        self.global_generator = Generator(self.n_classes, self.latent_dim, self.img_size, self.channels)
        self.global_discriminator = Discriminator(self.n_classes, self.img_size, self.channels)
        self.global_generator.apply(weights_init_normal)
        self.global_discriminator.apply(weights_init_normal)

        # Every client starts from an independent copy of the global models.
        self.local_generators = [copy.deepcopy(self.global_generator)
                                 for _ in range(self.config.num_users)]
        self.local_discriminators = [copy.deepcopy(self.global_discriminator)
                                     for _ in range(self.config.num_users)]

        self.optimizer_G = torch.optim.Adam(self.global_generator.parameters(),
                                            lr=self.config.lr, betas=(self.config.b1, self.config.b2))
        self.optimizer_D = torch.optim.Adam(self.global_discriminator.parameters(),
                                            lr=self.config.lr, betas=(self.config.b1, self.config.b2))

        # ACGAN losses: BCE for the real/fake head, cross-entropy for the
        # auxiliary class head.
        self.adversarial_loss = torch.nn.BCELoss()
        self.auxiliary_loss = torch.nn.CrossEntropyLoss()

    def _gan_step(self, generator, discriminator, optimizer_G, optimizer_D, imgs, labels):
        """Run one ACGAN optimization step on a single batch.

        Works on whatever device ``config.device`` names (the original code
        hard-coded ``torch.cuda.*`` tensor types and broke on CPU).

        :param generator: generator module to update.
        :param discriminator: discriminator module to update.
        :param optimizer_G: optimizer over ``generator``'s parameters.
        :param optimizer_D: optimizer over ``discriminator``'s parameters.
        :param imgs: batch of real images.
        :param labels: class labels for ``imgs``.
        :return: ``(g_loss, d_loss, d_acc)`` — generator loss tensor,
            discriminator loss tensor, and the discriminator's
            class-prediction accuracy (float) over real + fake samples.
        """
        device = self.config.device
        imgs = imgs.to(device).float()
        labels = labels.to(device).long()
        batch_size = imgs.shape[0]

        # Adversarial ground truths.
        valid = torch.ones(batch_size, 1, device=device)
        fake = torch.zeros(batch_size, 1, device=device)

        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()

        # Sample noise and target labels as generator input.
        z = torch.tensor(np.random.normal(0, 1, (batch_size, self.latent_dim)),
                         dtype=torch.float, device=device)
        gen_labels = torch.tensor(np.random.randint(0, self.n_classes, batch_size),
                                  dtype=torch.long, device=device)

        gen_imgs = generator(z, gen_labels)

        # Generator is rewarded for fooling the discriminator and for
        # producing images classified as the requested label.
        validity, pred_label = discriminator(gen_imgs)
        g_loss = 0.5 * (self.adversarial_loss(validity, valid) + self.auxiliary_loss(pred_label, gen_labels))

        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()

        # Loss for real images.
        real_pred, real_aux = discriminator(imgs)
        d_real_loss = (self.adversarial_loss(real_pred, valid) + self.auxiliary_loss(real_aux, labels)) / 2

        # Loss for fake images (detach: no generator gradients here).
        fake_pred, fake_aux = discriminator(gen_imgs.detach())
        d_fake_loss = (self.adversarial_loss(fake_pred, fake) + self.auxiliary_loss(fake_aux, gen_labels)) / 2

        d_loss = (d_real_loss + d_fake_loss) / 2

        # Class-prediction accuracy over the concatenated real + fake batch.
        pred = np.concatenate([real_aux.detach().cpu().numpy(), fake_aux.detach().cpu().numpy()], axis=0)
        gt = np.concatenate([labels.detach().cpu().numpy(), gen_labels.detach().cpu().numpy()], axis=0)
        d_acc = np.mean(np.argmax(pred, axis=1) == gt)

        d_loss.backward()
        optimizer_D.step()

        return g_loss, d_loss, d_acc

    def _broadcast_global_weights(self):
        """Copy the current global G/D weights into every client's models."""
        global_weights_G = self.global_generator.state_dict()
        global_weights_D = self.global_discriminator.state_dict()

        for i in range(self.config.num_users):
            self.local_generators[i].load_state_dict(copy.deepcopy(global_weights_G))
            self.local_discriminators[i].load_state_dict(copy.deepcopy(global_weights_D))

    def update_weights(self, global_epoch):
        """Train the global GAN on a meta-dataset sampled from the local
        generators, then broadcast the global weights to all clients.

        :param global_epoch: current federated round (for logging only).
        """
        # Meta-dataset drawn from the clients' local generators.
        dataloader = DataLoader(GenerationMD(self.local_generators, self.traindata_cls_counts,
                                             self.user_groups, self.latent_dim, self.n_classes,
                                             self.config.device),
                                batch_size=self.config.local_bs, shuffle=True, num_workers=4)

        self.global_generator.to(self.config.device)
        self.global_discriminator.to(self.config.device)

        self.global_generator.train()
        self.global_discriminator.train()

        for epoch in range(self.config.global_ep):
            for i, (imgs, labels) in enumerate(dataloader):
                g_loss, d_loss, d_acc = self._gan_step(
                    self.global_generator, self.global_discriminator,
                    self.optimizer_G, self.optimizer_D, imgs, labels)

                print(
                    "[Global %d] [ Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
                    % (global_epoch, epoch, self.config.global_ep, i, len(dataloader),
                       d_loss.item(), 100 * d_acc, g_loss.item())
                )

        # Distribute the new global parameters.
        self._broadcast_global_weights()

    @staticmethod
    def _average_state_dicts(models):
        """Element-wise average of the state dicts of *models* (FedAvg)."""
        avg = copy.deepcopy(models[0].state_dict())
        for key in avg.keys():
            for model in models[1:]:
                avg[key] += model.state_dict()[key]
            if 'num_batches_tracked' in key:
                # Integer BatchNorm buffer: true_divide avoids the
                # integer-division error raised by torch.div on int tensors.
                avg[key] = avg[key].true_divide(len(models))
            else:
                avg[key] = torch.div(avg[key], len(models))
        return avg

    def update_global_weights(self):
        """Aggregate (FedAvg) the local models.

        :return: ``(global_G_avg, global_D_avg)`` averaged state dicts for
            the generator and discriminator respectively.
        """
        with torch.no_grad():
            global_G_avg = self._average_state_dicts(self.local_generators)
            global_D_avg = self._average_state_dicts(self.local_discriminators)

        return global_G_avg, global_D_avg

    def train(self):
        """Run the full federated training loop for ``config.epochs`` rounds."""
        print("begin train...")
        ckpt_dir = f"{self.config.save_dir}/{self.config.dataset}_{self.config.beta}"
        for epoch in range(1, self.config.epochs + 1):
            # Resume support: if this round's global checkpoint already
            # exists, load it and skip the round.
            gen_path = f"{ckpt_dir}/global_generator_{epoch}.pth"
            if os.path.isfile(gen_path):
                # NOTE: checkpoints are whole pickled models (torch.save of
                # the module), so load the object and copy its state dict.
                generator = torch.load(gen_path)
                discriminator = torch.load(f"{ckpt_dir}/global_discriminator_{epoch}.pth")
                self.global_generator.load_state_dict(generator.state_dict())
                self.global_discriminator.load_state_dict(discriminator.state_dict())
                print(f'The {epoch}-th round of model training has been completed.')

                # Distribute the restored global parameters.
                self._broadcast_global_weights()
                continue

            begin_time = time.time()

            # Sample the clients participating in this round.
            choose_user = int(self.config.frac * self.config.num_users)
            assert choose_user > 0, "choose_user > 0"
            choose_user_ids = np.random.choice(self.config.num_users, choose_user, replace=False)

            # Local client training.
            self.local_train(choose_user_ids)

            # Server-side aggregation: train the global GAN and redistribute.
            self.update_weights(epoch)

            if epoch % self.config.sample_interval == 0:
                self.sample_image(10, epoch)

                # Save the global models for this round.
                os.makedirs(ckpt_dir, exist_ok=True)
                torch.save(self.global_generator, f"{ckpt_dir}/global_generator_{epoch}.pth")
                torch.save(self.global_discriminator, f"{ckpt_dir}/global_discriminator_{epoch}.pth")

            end_time = time.time()
            print('-------------------------------------------------------------------')
            print(f'Global Epoch {epoch} cost {end_time-begin_time}s')

    def local_train(self, choose_user_ids):
        """Train the selected clients' local GANs on their own data shards.

        :param choose_user_ids: ids of the clients selected this round.
        :return: None (the local models are updated in place).
        """
        for client_id in choose_user_ids:
            generator = self.local_generators[client_id]
            discriminator = self.local_discriminators[client_id]

            generator.train()
            discriminator.train()

            generator.to(self.config.device)
            discriminator.to(self.config.device)

            # Fresh optimizers each round, matching the original schedule.
            optimizer_G = torch.optim.Adam(generator.parameters(),
                                           lr=self.config.lr, betas=(self.config.b1, self.config.b2))
            optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                           lr=self.config.lr, betas=(self.config.b1, self.config.b2))

            train_loader = DataLoader(DatasetSplit(self.train_dataset, self.user_groups[client_id]),
                                      batch_size=self.config.local_bs, shuffle=True, num_workers=4)
            pbar = tqdm(range(self.config.local_ep), desc=f'LocalTrain-{client_id}', unit='item')
            for _ in pbar:
                for imgs, labels in train_loader:
                    self._gan_step(generator, discriminator, optimizer_G, optimizer_D, imgs, labels)

    def get_optimizer(self, model):
        """Build the optimizer named by ``config.optimizer`` for *model*.

        :param model: module whose parameters the optimizer will update.
        :return: the optimizer, or ``None`` when ``config.optimizer`` is not
            recognized (only "SGD" is supported here).
        """
        optimizer = None
        if self.config.optimizer == "SGD":
            optimizer = torch.optim.SGD(model.parameters(), lr=self.config.lr, momentum=self.config.momentum)

        return optimizer

    def sample_image(self, n_row, epoch):
        """Saves a grid of generated digits ranging from 0 to n_classes."""
        self.global_generator.eval()
        device = next(self.global_generator.parameters()).device

        # Sample noise (device-aware; the original hard-coded CUDA tensors).
        z = torch.tensor(np.random.normal(0, 1, (n_row ** 2, self.latent_dim)),
                         dtype=torch.float, device=device)
        # One label per column, repeated for each of the n_row rows.
        labels = torch.tensor([num for _ in range(n_row) for num in range(n_row)],
                              dtype=torch.long, device=device)
        with torch.no_grad():
            gen_imgs = self.global_generator(z, labels)

        out_dir = f'images/{self.config.dataset}/beta_{self.config.beta}'
        os.makedirs(out_dir, exist_ok=True)
        save_image(gen_imgs.data, f"{out_dir}/{epoch}.png", nrow=n_row, normalize=True)


def weights_init_normal(m):
    """DCGAN-style initializer for use with ``nn.Module.apply``.

    Conv-like layers get N(0, 0.02) weights; BatchNorm2d layers get
    N(1, 0.02) weights and zero bias. Every other module is left untouched.
    """
    name = type(m).__name__
    if "Conv" in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm2d" in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)


class Generator(nn.Module):
    """Conditional DCGAN-style generator: (noise, label) -> image in [-1, 1]."""

    def __init__(self, n_classes, latent_dim, img_size, channels):
        super(Generator, self).__init__()

        self.label_emb = nn.Embedding(n_classes, latent_dim)

        # Spatial size of the feature map fed to the conv stack; the two
        # Upsample(x2) stages below bring it back up to img_size.
        self.init_size = img_size // 4
        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))

        # NOTE(review): the positional 0.8 in BatchNorm2d sets eps, not
        # momentum — kept as-is to match the reference ACGAN code; confirm
        # the intent before changing it.
        layers = [
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, channels, 3, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.conv_blocks = nn.Sequential(*layers)

    def forward(self, noise, labels):
        """Modulate the noise by the label embedding and decode to an image."""
        conditioned = self.label_emb(labels) * noise
        features = self.l1(conditioned)
        features = features.view(features.size(0), 128, self.init_size, self.init_size)
        return self.conv_blocks(features)


class Discriminator(nn.Module):
    """ACGAN discriminator: image -> (real/fake probability, class probabilities)."""

    def __init__(self, n_classes, img_size, channels):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """Returns layers of each discriminator block (stride-2 conv halves the spatial size)."""
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                # NOTE(review): the positional 0.8 sets BatchNorm2d's eps,
                # not momentum — kept to match the reference ACGAN code.
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block

        self.conv_blocks = nn.Sequential(
            *discriminator_block(channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )

        # Spatial size after the four stride-2 convs (k=3, p=1): each stage
        # maps s -> (s + 1) // 2. Computed instead of hard-coded so the
        # heads stay correct for any img_size (still 2 for 28 and 32, the
        # sizes used by this project's datasets).
        ds_size = img_size
        for _ in range(4):
            ds_size = (ds_size + 1) // 2

        # Output layers: adversarial (real/fake) head and auxiliary class head.
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
        # dim=1 makes the previously implicit (deprecated) softmax axis explicit.
        self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, n_classes), nn.Softmax(dim=1))

    def forward(self, img):
        """Return (validity in (0, 1), per-class probabilities) for a batch of images."""
        out = self.conv_blocks(img)
        out = out.view(out.shape[0], -1)
        validity = self.adv_layer(out)
        label = self.aux_layer(out)

        return validity, label