import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from model.Gan_attack import Generator, Discriminator
import datetime
import csv
import numpy as np

from noise_layers.jpeg import Jpeg, JpegSS
from noise_layers.gaussian_noise import Gaussian_Noise
from noise_layers.quantization import Quantization
from noise_layers.salt_pepper_noise import SP

def train(generator, discriminator, train_loader, test_loader, num_epochs, model_dir, log_file, save_image_dir, normalize=False):
    """Adversarially train a noise-mimicking generator against a discriminator.

    Per outer epoch: 5 discriminator sub-epochs over ``train_loader`` (real =
    Gaussian-noised images, fake = generator output), then 5 generator
    sub-epochs, then a no-grad evaluation pass over ``test_loader``.  Losses
    are appended to ``log_file`` as CSV rows and the weights with the lowest
    test loss seen so far are checkpointed into ``model_dir``.

    Args:
        generator: model called as ``generator(images, matrix)`` -> fake images.
        discriminator: model returning raw logits (paired with BCEWithLogitsLoss).
        train_loader: DataLoader yielding ``(images, labels)`` training batches.
        test_loader: DataLoader yielding ``(images, labels)`` evaluation batches.
        num_epochs: number of outer epochs.
        model_dir: directory receiving ``dis_best_model.pth`` / ``gen_best_model.pth``.
        log_file: CSV log path (overwritten at start of training).
        save_image_dir: directory receiving one sample image grid per epoch.
        normalize: True when images are scaled to [-1, 1]; forwarded to
            ``save_images`` so it can rescale for saving.  Defaults to False,
            matching this script's configuration.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Discriminator outputs raw logits, so BCEWithLogitsLoss (not BCELoss).
    bce_loss = nn.BCEWithLogitsLoss().to(device)
    l1_loss = nn.L1Loss().to(device)
    generator.to(device)
    discriminator.to(device)

    # Adam with library defaults (lr=1e-3, betas=(0.9, 0.999)).
    optimizer_generator = optim.Adam(generator.parameters())
    optimizer_discriminator = optim.Adam(discriminator.parameters())

    # Attack target for this run: Gaussian noise.  Other runs swap in one of:
    # jpeg = Jpeg().to(device); quantization = Quantization().to(device); salt = SP().to(device)
    gaussian = Gaussian_Noise().to(device)

    # Initial "best" sentinels; the first epoch below these triggers a checkpoint.
    best_d_loss = 10
    best_g_loss = 10

    # newline="" is the csv-module-documented way to open a CSV file for writing.
    with open(log_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Epoch", "tra-G-Loss", "tra-D-Loss", "tes-G-Loss", "tes-D-Loss"])

        for epoch in range(num_epochs):
            is_first_save = True
            train_d_loss = 0.0
            train_g_loss = 0.0
            test_d_loss = 0.0
            test_g_loss = 0.0
            print("#######Train D-G epoch: ", epoch, "#######")
            # ---- training phase ----
            with torch.enable_grad():
                # Train the discriminator for 5 sub-epochs.
                print("Train discriminator:")
                for i in range(5):
                    loss_discriminator_ = 0
                    for train_images, _ in train_loader:
                        train_images = train_images.to(device)
                        # Random binary 128x128 watermark pattern, shared across the batch.
                        matrix = torch.Tensor(np.random.choice([0, 1], size=[1, 128, 128])).to(device)
                        matrix = matrix.unsqueeze(0).expand(train_images.size(0), 1, 128, 128)
                        # "Real" samples for D are the Gaussian-noised originals.
                        noise_images = gaussian(train_images)
                        real_labels = torch.ones(noise_images.size(0)).to(device)
                        fake_labels = torch.zeros(noise_images.size(0)).to(device)

                        optimizer_discriminator.zero_grad()
                        outputs_real = discriminator(noise_images)
                        loss_real = bce_loss(outputs_real, real_labels)

                        # "Fake" samples are generator output conditioned on the matrix.
                        fake_images = generator(train_images, matrix)
                        outputs_fake = discriminator(fake_images)
                        loss_fake = bce_loss(outputs_fake, fake_labels)

                        loss_discriminator = (loss_real + loss_fake) / 2
                        loss_discriminator.backward()
                        # NOTE(review): each batch loss is scaled by 1/5 before summing,
                        # so the printed value is not a plain per-batch mean — kept as-is.
                        loss_discriminator_ += loss_discriminator.item() / 5
                        optimizer_discriminator.step()
                    print("D-epoch: ", i, "loss: ", loss_discriminator_)

                # Train the generator for 5 sub-epochs.
                print("Train generator:")
                for i in range(5):
                    loss_generator_ = 0
                    for train_images, _ in train_loader:
                        train_images = train_images.to(device)
                        optimizer_generator.zero_grad()
                        matrix = torch.Tensor(np.random.choice([0, 1], size=[1, 128, 128])).to(device)
                        matrix = matrix.unsqueeze(0).expand(train_images.size(0), 1, 128, 128)
                        fake_images = generator(train_images, matrix)
                        noise_images = gaussian(train_images)
                        real_labels = torch.ones(fake_images.size(0)).to(device)
                        outputs_fake = discriminator(fake_images)
                        # Adversarial term: fool D into scoring fakes as real.
                        loss_generator = bce_loss(outputs_fake, real_labels)
                        # Reconstruction term: fakes should resemble the noised images.
                        loss_image = l1_loss(noise_images, fake_images)
                        loss_generator += loss_image * 0.01
                        loss_generator.backward()
                        loss_generator_ += loss_generator.item() / 5
                        optimizer_generator.step()
                    print("G-epoch: ", i, "loss: ", loss_generator_)
                # NOTE(review): only the final (5th) sub-epoch's accumulated loss is
                # recorded for the epoch metrics — kept to preserve the logged values.
                train_g_loss += loss_generator_
                train_d_loss += loss_discriminator_

            train_epoch_gloss = train_g_loss / len(train_loader)
            train_epoch_dloss = train_d_loss / len(train_loader)

            # ---- evaluation phase ----
            with torch.no_grad():
                for test_images, _ in test_loader:
                    test_images = test_images.to(device)
                    noise_images = gaussian(test_images)
                    real_labels = torch.ones(noise_images.size(0)).to(device)
                    fake_labels = torch.zeros(noise_images.size(0)).to(device)

                    outputs_real = discriminator(noise_images)
                    loss_real = bce_loss(outputs_real, real_labels)

                    matrix = torch.Tensor(np.random.choice([0, 1], size=[1, 128, 128])).to(device)
                    matrix = matrix.unsqueeze(0).expand(test_images.size(0), 1, 128, 128)
                    fake_images = generator(test_images, matrix)

                    # One forward pass on the fakes serves both D- and G-loss
                    # (the original ran the discriminator twice on identical input).
                    outputs_fake = discriminator(fake_images)
                    loss_fake = bce_loss(outputs_fake, fake_labels)
                    loss_discriminator = (loss_real + loss_fake) / 2
                    loss_generator = bce_loss(outputs_fake, real_labels)

                    if is_first_save:  # only save images from the first batch
                        is_first_save = False
                        save_images(test_images.cpu()[:8, :, :, :],
                                  noise_images[:8, :, :, :].cpu(), fake_images[:8, :, :, :].cpu(), epoch, save_image_dir, normalize)

                    # Accumulate plain floats, not GPU tensors.
                    test_g_loss += loss_generator.item()
                    test_d_loss += loss_discriminator.item()

            test_epoch_gloss = test_g_loss / len(test_loader)
            test_epoch_dloss = test_d_loss / len(test_loader)

            # Format losses as 4-decimal strings for the CSV log.
            train_gloss_str = "{:.4f}".format(train_epoch_gloss)
            train_dloss_str = "{:.4f}".format(train_epoch_dloss)
            test_gloss_str = "{:.4f}".format(test_epoch_gloss)
            test_dloss_str = "{:.4f}".format(test_epoch_dloss)

            writer.writerow([epoch+1, train_gloss_str, train_dloss_str, test_gloss_str, test_dloss_str])

            print(f"Epoch [{epoch + 1}/{num_epochs}], Train G Loss: {train_epoch_gloss:.4f}, Train D Loss: {train_epoch_dloss:.4f}, Test G Loss: {test_epoch_gloss:.4f}, Test D Loss: {test_epoch_dloss:.4f}")

            # Checkpoint the weights whenever a new best test loss is reached.
            if test_epoch_dloss < best_d_loss:
                best_d_loss = test_epoch_dloss
                save_path = os.path.join(model_dir, "dis_best_model.pth")
                torch.save(discriminator.state_dict(), save_path)

            if test_epoch_gloss < best_g_loss:
                best_g_loss = test_epoch_gloss
                save_path = os.path.join(model_dir, "gen_best_model.pth")
                torch.save(generator.state_dict(), save_path)


    print(f"Training log saved to {log_file}.")
    print("Training finished.")

def save_images(original_images, noise_images, attack_images, epoch, folder, normalize):
    """Save a side-by-side comparison grid for one evaluation batch.

    The grid stacks five row groups: original images, noised images,
    generator ("attack") images, and the two amplified residuals
    ``|original - noised| * 5`` and ``|original - attack| * 5``.

    Args:
        original_images: batch of clean images.
        noise_images: same batch after the noise layer.
        attack_images: same batch produced by the generator.
        epoch: zero-based epoch index; file is named ``epoch-{epoch+1}.png``.
        folder: destination directory (must exist).
        normalize: True when tensors are in [-1, 1] and must be rescaled
            to [0, 1] before saving.
    """
    images = original_images.cpu()
    noise_images = noise_images.cpu()
    attack_images = attack_images.cpu()

    # Scale values to [0, 1] from the original [-1, 1] range.
    if normalize:
        images = (images + 1) / 2
        noise_images = (noise_images + 1) / 2
        attack_images = (attack_images + 1) / 2

    # Amplify the residuals so small perturbations are visible in the grid.
    revise_image = torch.abs(images - noise_images) * 5
    revise_image_ = torch.abs(images - attack_images) * 5

    stacked_images = torch.cat([images, noise_images, attack_images, revise_image, revise_image_], dim=0)
    filename = os.path.join(folder, 'epoch-{}.png'.format(epoch + 1))
    torchvision.utils.save_image(stacked_images, filename, normalize=False)


if __name__ == '__main__':
    generator = Generator()
    discriminator = Discriminator(hidden_size=64)

    # Every run gets its own timestamped directory under attack_runs/.
    file_dir = "attack_runs"
    os.makedirs(file_dir, exist_ok=True)

    # Timestamp formatted as YYYY-MM-DD_HH-MM-SS.
    current_time = datetime.datetime.now()
    time_str = current_time.strftime("%Y-%m-%d_%H-%M-%S")

    name = "run-Gaussian-no-normalize" + "_" + time_str
    train_dir = os.path.join(file_dir, name)
    os.makedirs(train_dir, exist_ok=True)

    train_data_dir = "data/train"  # training images (ImageFolder layout)
    test_data_dir = "data/val"     # validation images
    batch_size = 32
    num_epochs = 40000

    model_dir = os.path.join(train_dir, "model")       # best-model checkpoints
    os.makedirs(model_dir, exist_ok=True)

    log_dir = os.path.join(train_dir, "log")           # CSV loss log
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, "log.csv")

    save_image_dir = os.path.join(train_dir, "image")  # per-epoch sample grids
    os.makedirs(save_image_dir, exist_ok=True)

    normalize = False   # keep pixel values in [0, 1]; no [-1, 1] rescaling
    image_size = 128    # must match the 128x128 watermark matrix in train()

    # Image preprocessing; an empty Compose is the identity transform used
    # in place of Normalize when normalize is False.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomCrop((image_size, image_size), pad_if_needed=True),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) if normalize else transforms.Compose([]),
        ]),
        'test': transforms.Compose([
            transforms.CenterCrop((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) if normalize else transforms.Compose([]),
        ])
    }

    # ImageFolder datasets and their loaders.
    train_dataset = ImageFolder(train_data_dir, transform=data_transforms['train'])
    test_dataset = ImageFolder(test_data_dir, transform=data_transforms['test'])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

    train(generator, discriminator, train_loader, test_loader, num_epochs, model_dir, log_file, save_image_dir)