import torch
import torch.nn as nn
from torch.autograd import grad
# from pytorch_msssim import ms_ssim
import os
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
from dataset2 import MyDataset
from model2 import MyModel, GAN
from PerceptualLoss import PerceptualLossFn
from pred2 import prediction

class WGANGP:
    """WGAN-GP loss helper (Gulrajani et al., 2017).

    Provides the critic (discriminator) loss — a Wasserstein distance
    estimate plus a gradient penalty — and the matching generator loss.
    """

    def __init__(self, device, lambda_gp=10):
        self.device = device          # device for penalty bookkeeping tensors
        self.lambda_gp = lambda_gp    # weight of the gradient-penalty term

    def discriminator_loss(self, real_output, fake_output, real_samples, fake_samples, mask, discriminator):
        """Return the critic loss: Wasserstein estimate + lambda_gp * penalty.

        The penalty is evaluated on random per-sample interpolations between
        real and fake samples, pushing the critic's gradient L2 norm toward 1.
        `discriminator` is called as `discriminator(interpolates, mask)`.
        """
        # Wasserstein distance estimate: critic maximizes E[real] - E[fake].
        wasserstein = -torch.mean(real_output) + torch.mean(fake_output)

        # Random interpolation coefficient per sample, broadcast over C/H/W.
        eps = torch.rand(real_samples.size(0), 1, 1, 1, device=self.device)
        mixed = eps * real_samples + (1 - eps) * fake_samples
        mixed.requires_grad_(True)

        critic_on_mixed = discriminator(mixed, mask)

        # d(critic)/d(mixed); all-ones grad_outputs sums per-element gradients.
        # create_graph=True so the penalty itself is differentiable for backward().
        ones = torch.ones_like(critic_on_mixed, device=self.device, requires_grad=False)
        grads = torch.autograd.grad(
            outputs=critic_on_mixed,
            inputs=mixed,
            grad_outputs=ones,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]

        # Penalize deviation of each sample's gradient L2 norm from 1.
        flat_grads = grads.view(grads.size(0), -1)
        penalty = ((flat_grads.norm(2, dim=1) - 1) ** 2).mean()

        return wasserstein + self.lambda_gp * penalty

    def generator_loss(self, fake_output):
        """Generator loss: maximize the critic's score on generated samples."""
        return -torch.mean(fake_output)

class NonSaturatingGANLoss:
    """Classic GAN loss with the non-saturating generator objective.

    Both losses use BCE on probabilities, so the discriminator is expected
    to output values in (0, 1) (e.g. after a sigmoid).
    """

    def __init__(self, device):
        self.device = device
        self.criterion = nn.BCELoss()

    def discriminator_loss(self, real_output, fake_output):
        """Discriminator BCE: push real outputs toward 1, fake toward 0."""
        ones = torch.ones_like(real_output, device=self.device)
        zeros = torch.zeros_like(fake_output, device=self.device)
        return self.criterion(real_output, ones) + self.criterion(fake_output, zeros)

    def generator_loss(self, fake_output):
        """Non-saturating generator loss: maximize log(D(G(z))).

        Same BCE criterion, but the target is all-ones — the generator is
        rewarded when the discriminator classifies its samples as real.
        """
        ones = torch.ones_like(fake_output, device=self.device)
        return self.criterion(fake_output, ones)


if __name__ == '__main__':
    # ---- Configuration -----------------------------------------------------
    img_rootPath = r'../JPEGImages'               # dataset root directory
    saveModel_dir = r'checkpoints'                # checkpoint output directory
    verification = r'verification'                # output dir for per-epoch sample predictions
    perceptualLoss_weight = r'vgg16-397923af.pth' # pretrained VGG16 weights for perceptual loss
    load_model = r''                              # checkpoint to resume from ('' = from scratch)
    epoch = 1000                                  # total training epochs
    batch_size = 16
    d_lr = 1e-5                                   # discriminator (critic) learning rate
    g_lr = 1e-3                                   # generator learning rate


    # Train/test split driven by the 0.98 ratio argument
    # (presumably 98% train / 2% test — confirm against dataset2.MyDataset).
    train_ds = MyDataset(img_rootPath, 'train', 0.98)
    test_ds = MyDataset(img_rootPath, 'test', 0.98)
    train_dl = DataLoader(train_ds, shuffle=True, batch_size=batch_size, drop_last=True)
    test_dl = DataLoader(test_ds, shuffle=True, batch_size=batch_size, drop_last=True)


    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MyModel()
    if load_model:
        # strict=False tolerates checkpoints whose keys only partially match.
        # NOTE(review): no map_location here — a CUDA-saved checkpoint would
        # fail to load on a CPU-only machine; confirm intended usage.
        model.load_state_dict(torch.load(load_model), strict=False)
    model.to(device)
    gan = GAN()
    gan.to(device)

    # Separate optimizers: generator (model) and discriminator (gan).
    optim = torch.optim.Adam(model.parameters(), g_lr, betas=(0.5, 0.999))
    gan_optim = torch.optim.Adam(gan.parameters(), d_lr, betas=(0.5, 0.999))
    # loss_fn = nn.L1Loss()
    perceptualLoss_fn = PerceptualLossFn(perceptualLoss_weight, device)
    gan_loss_fn = WGANGP(device)
    maxAcc_test = 0

    for e in range(epoch):
        model.train()
        with tqdm(train_dl) as train_dl_buffer:

            gan_total_loss = 0
            main_total_loss = 0
            for batch, (img, mask, target) in enumerate(train_dl_buffer):
                img = img.to(device)
                mask = mask.to(device)
                target = target.to(device)

                # ---- Discriminator (critic) step ----------------------------
                # Generate under no_grad: only the critic is updated here, so
                # no generator graph is needed.
                with torch.no_grad():
                    out_img = model(img, mask)

                # local_target = []
                # local_out_img = []
                # for target_batch, out_img_batch, local_area_batch in zip(target, out_img, local_area):
                #     x1, x2, y1, y2 = local_area_batch
                #     local_target.append(target_batch[:, y1:y2, x1:x2])
                #     local_out_img.append(out_img_batch[:, y1:y2, x1:x2])
                # local_target = torch.stack(local_target, dim=0)
                # local_out_img = torch.stack(local_out_img, dim=0)

                real = gan(target, mask)
                fake = gan(out_img, mask)

                # WGAN-GP critic loss (includes the gradient penalty).
                loss = gan_loss_fn.discriminator_loss(real, fake, target, out_img, mask, gan)

                gan_optim.zero_grad()
                loss.backward()
                gan_optim.step()
                gan_total_loss += loss.item()

                # ---- Generator step -----------------------------------------
                # Re-run the generator WITH grad so its parameters receive
                # gradients through the critic and the perceptual loss.
                out_img = model(img, mask)

                # local_target = []
                # local_out_img = []
                # for target_batch, out_img_batch, local_area_batch in zip(target, out_img, local_area):
                #     x1, x2, y1, y2 = local_area_batch
                #     local_target.append(target_batch[:, y1:y2, x1:x2])
                #     local_out_img.append(out_img_batch[:, y1:y2, x1:x2])
                # local_target = torch.stack(local_target, dim=0)
                # local_out_img = torch.stack(local_out_img, dim=0)

                fake = gan(out_img, mask)
                # mse_loss = loss_fn(out_img*mask, target*mask)
                perceptual_loss = perceptualLoss_fn(out_img, target)
                gan_loss = gan_loss_fn.generator_loss(fake)

                # Adversarial term is heavily down-weighted vs perceptual loss.
                loss = 5e-5*gan_loss + perceptual_loss

                # Zero the critic's grads too: backward() deposits gradients
                # into gan's params, but only optim.step() (generator) runs.
                optim.zero_grad()
                gan_optim.zero_grad()
                loss.backward()
                optim.step()
                main_total_loss += loss.item()

                train_dl_buffer.set_description("Epoch {}: mainloss {:.8f} ganloss {:.8f}".format(e+1, main_total_loss/(batch+1), gan_total_loss/(batch+1)))

        # Save a checkpoint after every epoch (overwrites the previous one).
        path = os.path.join(saveModel_dir, 'checkpoint.pth')
        torch.save(model.state_dict(), path)
        print("保存模型===》{}".format(path))

        # # Model evaluation (disabled — requires pytorch_msssim import above)
        # total_acc = 0
        # model.eval()
        # with tqdm(test_dl) as test_dl_buffer:
        #     for batch, (img, mask, target) in enumerate(test_dl_buffer):

        #         with torch.no_grad():
        #             out = model(img.to(device), mask.to(device))
        #             acc = ms_ssim(out, target.to(device), data_range=1, size_average=True)

        #         total_acc += acc.item()
        #         batch += 1
        #         test_dl_buffer.set_description("Epoch {}: testAcc {:.4f}".format(e+1, total_acc/batch))

        # if total_acc/batch > maxAcc_test:
        #     maxAcc_test = total_acc/batch
        #     path = os.path.join(saveModel_dir, 'best_test.pth')
        #     torch.save(model.state_dict(), path)
        #     print("保存模型===》{}".format(path))

        # Dump predictions for 10 random test images as a visual sanity check.
        for i in range(10):
            img_path = test_ds.img_path_arr[random.randint(0, test_ds.__len__()-1)]
            save_path = os.path.join(verification, f'{i}.jpg')
            prediction(model, img_path, save_path, device)