import argparse
import os
import random
from datetime import datetime
from os.path import join

import torch
from scipy.linalg import sqrtm
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.models import inception_v3
from tqdm import tqdm

from ALE.data.dataset import Dataset
from ALE.models.confg_model import config_model
from common_utils.Evaluator import Evaluator
from common_utils.utils import load_args
import numpy as np
from common_models import gan
# Select GPU when available.
# NOTE(review): `device` is never referenced below — all tensors stay on CPU.
# Confirm whether the models/tensors were meant to be moved with `.to(device)`.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class Generator(nn.Module):
    """MLP generator mapping a latent code to a data vector in [0, 1]."""

    def __init__(self, latent_dim, output_dim):
        super(Generator, self).__init__()
        # latent_dim -> 256 -> 512 -> output_dim; the final sigmoid squashes
        # each output feature into [0, 1].
        layers = [
            nn.Linear(latent_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, output_dim),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Generate a batch of samples from latent codes ``z``."""
        return self.model(z)
class Discriminator(nn.Module):
    """MLP discriminator scoring how "real" a data vector looks (0..1)."""

    def __init__(self, input_dim):
        super(Discriminator, self).__init__()
        # input_dim -> 512 -> 256 -> 1; sigmoid yields a validity score
        # in [0, 1].
        layers = [
            nn.Linear(input_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Return a (batch, 1) validity score for each input sample."""
        return self.model(x)
class GAN(nn.Module):
    """Couples a generator/discriminator pair for one combined forward pass."""

    def __init__(self, generator, discriminator):
        super(GAN, self).__init__()
        self.generator = generator
        self.discriminator = discriminator

    def forward(self, z):
        """Return ``(generated_data, validity)`` for latent codes ``z``.

        ``generated_data`` is the generator's output and ``validity`` is the
        discriminator's score of that output.
        """
        fake = self.generator(z)
        score = self.discriminator(fake)
        return fake, score


# Compute the FID between real and generated samples.
def calculate_fid(real_data, generated_data, batch_size, latent_dim, generator):
    """Compute the Fréchet Inception Distance between two sample sets.

    Args:
        real_data: array of real samples, shape (N, ...).
        generated_data: array of already-generated samples, shape (M, ...).
        batch_size: mini-batch size for Inception feature extraction.
        latent_dim, generator: kept only for backward compatibility with
            existing callers; no longer used. The original implementation
            ignored ``generated_data`` and re-sampled fresh noise through
            ``generator``, always with a full ``batch_size`` even on the last
            partial batch, so the generated feature count could mismatch M.
            Features are now computed from ``generated_data`` itself.

    Returns:
        The scalar FID value.

    NOTE(review): ``inception_v3`` expects (B, 3, 299, 299) image tensors,
    but the callers in this file pass flat feature vectors — confirm the
    intended feature extractor for this data.
    """
    # Inception-v3 with its classification head replaced by Identity acts as
    # a feature extractor; eval() freezes dropout/batch-norm statistics.
    inception_model = inception_v3(pretrained=True, transform_input=False)
    inception_model.eval()
    inception_model.fc = nn.Identity()

    def _extract_features(data):
        # Featurize `data` batch by batch; no gradients are needed.
        feats = []
        with torch.no_grad():
            for i in range(0, data.shape[0], batch_size):
                batch = torch.FloatTensor(data[i:i + batch_size])
                feats.append(inception_model(batch).numpy())
        return np.concatenate(feats, axis=0)

    real_features = _extract_features(real_data)
    generated_features = _extract_features(generated_data)

    # Fit a Gaussian (mean, covariance) to each feature set and compare them.
    mu_real, sigma_real = np.mean(real_features, axis=0), np.cov(real_features, rowvar=False)
    mu_generated, sigma_generated = np.mean(generated_features, axis=0), np.cov(generated_features, rowvar=False)

    return calculate_frechet_distance(mu_real, sigma_real, mu_generated, sigma_generated)


# Fréchet distance between two Gaussians.
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
    """Return the Fréchet distance between N(mu1, sigma1) and N(mu2, sigma2).

    Formula: ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2)).
    """
    mean_term = np.sum(np.square(mu1 - mu2))
    covmean = sqrtm(sigma1.dot(sigma2))

    # sqrtm may introduce a tiny imaginary component from numerical noise;
    # keep only the real part.
    if np.iscomplexobj(covmean):
        covmean = covmean.real

    cov_term = np.trace(sigma1 + sigma2 - 2.0 * covmean)
    return mean_term + cov_term


# Adversarial training loop (least-squares GAN objective).
def train_gan(generator, discriminator, gan, epochs, batch_size, latent_dim, known_data, unknown_data):
    """Train the generator/discriminator pair and report FID each epoch.

    Args:
        generator, discriminator, gan: the three modules defined above.
        epochs: number of passes over ``known_data``.
        batch_size: mini-batch size for both players.
        latent_dim: dimensionality of the noise fed to the generator.
        known_data: (N, features) numpy array of real "known-class" samples.
        unknown_data: (M, features) numpy array used only when computing FID.

    Bug fixed vs. the original: ``GAN.forward`` returns
    ``(generated_data, validity)`` but the generator step unpacked it as
    ``validity, _ = gan(noise)``, so the MSE loss compared the generated
    samples (batch, features) against (batch, 1) labels instead of the
    discriminator scores.
    """
    # MSE against 0/1 labels is the least-squares GAN objective.
    criterion = nn.MSELoss()
    optimizer_G = optim.Adam(generator.parameters(), lr=0.0002)
    optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0002)

    for epoch in range(epochs):
        for _ in tqdm(range(known_data.shape[0] // batch_size)):
            # ---- discriminator step ----
            optimizer_D.zero_grad()

            # Real samples should score 1.
            idx = np.random.randint(0, known_data.shape[0], batch_size)
            real_batch = torch.FloatTensor(known_data[idx])
            loss_real = criterion(discriminator(real_batch), torch.ones(batch_size, 1))

            # Fake samples should score 0; detach so no gradient reaches G here.
            noise = torch.FloatTensor(np.random.normal(0, 1, (batch_size, latent_dim)))
            generated_data = generator(noise)
            loss_fake = criterion(discriminator(generated_data.detach()), torch.zeros(batch_size, 1))

            d_loss = (loss_real + loss_fake) / 2
            d_loss.backward()
            optimizer_D.step()

            # ---- generator step ----
            optimizer_G.zero_grad()

            # A second noise batch stands in for "unknown-class" samples.
            noise_unknown = torch.FloatTensor(np.random.normal(0, 1, (batch_size, latent_dim)))

            # FIX: unpack (generated_data, validity) in the right order so the
            # loss is computed on the discriminator scores.
            _, validity_known = gan(noise)
            _, validity_unknown = gan(noise_unknown)

            # Flipped labels: the generator tries to make D output 1 on fakes.
            target_real = torch.ones(batch_size, 1)
            g_loss = criterion(validity_known, target_real) + criterion(validity_unknown, target_real)
            g_loss.backward()
            optimizer_G.step()

        # Once per epoch: generate as many samples as real ones and score FID
        # over the combined known + unknown data.
        noise_fid_known = torch.FloatTensor(np.random.normal(0, 1, (known_data.shape[0], latent_dim)))
        noise_fid_unknown = torch.FloatTensor(np.random.normal(0, 1, (unknown_data.shape[0], latent_dim)))
        generated_data_fid_known = generator(noise_fid_known).detach().numpy()
        generated_data_fid_unknown = generator(noise_fid_unknown).detach().numpy()

        generated_data_fid = np.concatenate([generated_data_fid_known, generated_data_fid_unknown], axis=0)

        fid = calculate_fid(np.concatenate([known_data, unknown_data], axis=0), generated_data_fid, batch_size,
                            latent_dim, generator)
        print(f"Epoch {epoch}, D Loss: {d_loss.item()}, G Loss: {g_loss.item()}, FID: {fid}")


# --- Demo driver ---------------------------------------------------------
# Synthetic stand-in data: some samples belong to known classes, while the
# "unknown class" samples are what the generator is meant to cover.
num_known_classes = 20
num_unknown_classes = 10
num_features = 100

known_data = np.random.rand(100, num_features)
unknown_data = np.random.rand(50, num_features)

# Hyper-parameters and model construction.
latent_dim = 50
generator = Generator(latent_dim, num_features)
discriminator = Discriminator(num_features)
gan = GAN(generator, discriminator)

if __name__ == "__main__":
    # FIX: guard the training run so importing this module no longer kicks
    # off 50 epochs of training (plus a pretrained Inception download) as a
    # side effect; running the file as a script behaves exactly as before.
    train_gan(generator, discriminator, gan, epochs=50, batch_size=32, latent_dim=latent_dim, known_data=known_data,
              unknown_data=unknown_data)
