import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
from models import TransformerGenerator, Discriminator
import matplotlib.pyplot as plt
import torchvision
import random

class FogDataset(Dataset):
    """Paired clear/foggy image dataset.

    Expects the directory layout::

        <data_dir>/<split>/clear/<name>.<ext>
        <data_dir>/<split>/fog/<name>*.<ext>   # one or more fog variants per clear image

    Each item pairs a clear image (generator input) with one randomly
    chosen fog variant of that image (target output).
    """

    def __init__(self, data_dir, split='train', img_size=256):
        self.data_dir = data_dir
        self.split = split
        self.img_size = img_size
        self.clear_dir = os.path.join(data_dir, split, 'clear')
        self.fog_dir = os.path.join(data_dir, split, 'fog')

        # Sort listings for a deterministic index order — os.listdir order
        # is arbitrary and varies across filesystems/runs.
        all_clear = sorted(os.listdir(self.clear_dir))
        fog_files = sorted(os.listdir(self.fog_dir))

        # Map each clear image to its fog variants. splitext (rather than
        # split('.')) keeps filenames that contain extra dots intact.
        # NOTE(review): substring matching can false-pair e.g. "img1" with
        # "img10_fog.png" — confirm the naming convention rules this out.
        self.clear_images = []
        self.fog_images = {}
        for clear_image in all_clear:
            base_name = os.path.splitext(clear_image)[0]
            matches = [f for f in fog_files if base_name in f]
            # Skip clear images with no fog counterpart; keeping them would
            # crash later in random.choice([]) inside __getitem__.
            if matches:
                self.clear_images.append(clear_image)
                self.fog_images[clear_image] = matches

    def __len__(self):
        return len(self.clear_images)

    def __getitem__(self, idx):
        """Return a (clear, fog) tensor pair; the fog variant is random."""
        clear_name = self.clear_images[idx]
        clear_img_path = os.path.join(self.clear_dir, clear_name)
        # Randomly select one of this image's fog variants.
        fog_img_path = os.path.join(self.fog_dir, random.choice(self.fog_images[clear_name]))

        clear_img = Image.open(clear_img_path).convert('RGB')
        fog_img = Image.open(fog_img_path).convert('RGB')

        # Paired random horizontal flip — training-time augmentation only,
        # applied identically to both images to keep them aligned.
        if self.split == 'train' and np.random.random() < 0.5:
            clear_img = clear_img.transpose(Image.FLIP_LEFT_RIGHT)
            fog_img = fog_img.transpose(Image.FLIP_LEFT_RIGHT)

        clear_img = clear_img.resize((self.img_size, self.img_size))
        fog_img = fog_img.resize((self.img_size, self.img_size))

        clear_img = torchvision.transforms.ToTensor()(clear_img)
        fog_img = torchvision.transforms.ToTensor()(fog_img)

        # Clear image is the model input; the foggy image is the target.
        return clear_img, fog_img

def train_model():
    """Train a clear->fog image-translation GAN (pix2pix-style).

    Uses an LSGAN (MSE) adversarial loss plus an L1 reconstruction loss
    weighted x100. Validates with PSNR and a simplified global SSIM each
    epoch; saves checkpoints and metric plots under CHECKPOINT_DIR every
    20 epochs (and after the first epoch).
    """
    # --- Hyperparameters ---
    IMG_SIZE = 256
    BATCH_SIZE = 8
    LR = 0.0002  # low initial learning rate for GAN stability
    EPOCHS = 500
    CHECKPOINT_DIR = 'checkpoints_true'
    nhead = 8          # attention heads in the transformer generator
    in_channels = 3

    # Use the GPU when available instead of hard-coding .cuda(), so the
    # script also runs on CPU-only machines (identical behavior on GPU).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --- Models ---
    generator = TransformerGenerator(in_channels=in_channels, out_channels=3, nhead=nhead).to(device)
    discriminator = Discriminator(in_channels=3).to(device)

    # --- Data ---
    train_dataset = FogDataset('data', split='train', img_size=IMG_SIZE)
    val_dataset = FogDataset('data', split='val', img_size=IMG_SIZE)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

    # --- Losses ---
    criterion_gan = nn.MSELoss()  # least-squares GAN loss
    criterion_l1 = nn.L1Loss()    # pixel-wise reconstruction loss

    # --- Optimizers (beta1=0.5 per the DCGAN/pix2pix recipe) ---
    d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=LR, betas=(0.5, 0.999))
    g_optimizer = torch.optim.Adam(generator.parameters(), lr=LR, betas=(0.5, 0.999))

    # --- LR schedules: decay 10x every 30 epochs ---
    scheduler_g = torch.optim.lr_scheduler.StepLR(g_optimizer, step_size=30, gamma=0.1)
    scheduler_d = torch.optim.lr_scheduler.StepLR(d_optimizer, step_size=30, gamma=0.1)

    os.makedirs(CHECKPOINT_DIR, exist_ok=True)

    # Per-epoch history used for the saved plots.
    train_loss_g = []
    train_loss_d = []
    val_psnr = []
    val_ssim = []

    def compute_metrics(gen, real):
        """Return (PSNR in dB, global SSIM) for a batch pair.

        Assumes inputs lie in [0, 1] (max signal = 1 for PSNR). The SSIM
        here is the simplified global-statistics variant, not the windowed
        reference implementation — use torchmetrics for exact SSIM.
        """
        mse = torch.mean((gen - real) ** 2)
        # Epsilon guards against log10(inf) when the images are identical.
        psnr = 10 * torch.log10(1.0 / (mse + 1e-10))

        mu_x = torch.mean(gen)
        mu_y = torch.mean(real)
        sigma_x = torch.std(gen)
        sigma_y = torch.std(real)
        sigma_xy = torch.mean((gen - mu_x) * (real - mu_y))
        ssim = (2 * mu_x * mu_y + 1e-6) * (2 * sigma_xy + 1e-6) / \
               ((mu_x**2 + mu_y**2 + 1e-6) * (sigma_x**2 + sigma_y**2 + 1e-6))
        return psnr.item(), ssim.item()

    for epoch in range(EPOCHS):
        generator.train()
        discriminator.train()
        epoch_loss_g = 0.0
        epoch_loss_d = 0.0

        for i, (clear, fog) in enumerate(train_loader):
            clear = clear.to(device)
            fog = fog.to(device)

            # Single generator forward shared by both steps. The original
            # ran the generator twice per batch (once under no_grad for
            # the D step); detach() below already blocks D-step gradients
            # from reaching the generator, so one forward is sufficient.
            fake = generator(clear)

            # --- Discriminator step: real -> 1, fake -> 0 ---
            d_optimizer.zero_grad()
            real_pred = discriminator(fog)
            fake_pred = discriminator(fake.detach())
            loss_real = criterion_gan(real_pred, torch.ones_like(real_pred))
            loss_fake = criterion_gan(fake_pred, torch.zeros_like(fake_pred))
            loss_d = (loss_real + loss_fake) * 0.5
            loss_d.backward()
            d_optimizer.step()

            # --- Generator step: fool D and stay close to the target fog ---
            g_optimizer.zero_grad()
            gen_pred = discriminator(fake)
            loss_g_gan = criterion_gan(gen_pred, torch.ones_like(gen_pred))
            loss_g_l1 = criterion_l1(fake, fog) * 100  # heavy L1 weight (pix2pix-style)
            loss_g = loss_g_gan + loss_g_l1
            loss_g.backward()
            g_optimizer.step()

            # Accumulate losses for the epoch averages.
            epoch_loss_d += loss_d.item()
            epoch_loss_g += loss_g.item()

            if i % 100 == 0:
                print(f'Epoch [{epoch+1}/{EPOCHS}], Step [{i}/{len(train_loader)}], '
                      f'LossD: {loss_d.item():.4f}, LossG: {loss_g.item():.4f}')

        # Record the per-epoch mean losses.
        avg_loss_g = epoch_loss_g / len(train_loader)
        avg_loss_d = epoch_loss_d / len(train_loader)
        train_loss_g.append(avg_loss_g)
        train_loss_d.append(avg_loss_d)

        # --- Validation ---
        generator.eval()
        epoch_psnr = 0.0
        epoch_ssim = 0.0
        with torch.no_grad():
            for val_clear, val_fog in val_loader:
                val_clear = val_clear.to(device)
                val_fog = val_fog.to(device)
                generated = generator(val_clear)

                psnr, ssim = compute_metrics(generated, val_fog)
                epoch_psnr += psnr
                epoch_ssim += ssim

        avg_psnr = epoch_psnr / len(val_loader)
        avg_ssim = epoch_ssim / len(val_loader)
        val_psnr.append(avg_psnr)
        val_ssim.append(avg_ssim)
        print(f'Epoch [{epoch+1}/{EPOCHS}], Val PSNR: {avg_psnr:.2f} dB, SSIM: {avg_ssim:.4f}')

        # --- Periodic checkpoints and metric plots ---
        if (epoch + 1) % 20 == 0 or epoch == 0:
            torch.save(generator.state_dict(), os.path.join(CHECKPOINT_DIR, f'generator_epoch_{epoch+1}.pth'))
            torch.save(discriminator.state_dict(), os.path.join(CHECKPOINT_DIR, f'discriminator_epoch_{epoch+1}.pth'))

            epochs_axis = range(1, epoch + 2)
            plt.figure(figsize=(15, 5))

            plt.subplot(131)
            plt.plot(epochs_axis, train_loss_g, label='Generator Loss')
            plt.plot(epochs_axis, train_loss_d, label='Discriminator Loss')
            plt.title('Training Loss')
            plt.xlabel('Epoch')
            plt.legend()

            plt.subplot(132)
            plt.plot(epochs_axis, val_psnr, label='PSNR')
            plt.title('Validation PSNR')
            plt.xlabel('Epoch')
            plt.legend()  # label was previously set but never displayed

            plt.subplot(133)
            plt.plot(epochs_axis, val_ssim, label='SSIM')
            plt.title('Validation SSIM')
            plt.xlabel('Epoch')
            plt.legend()

            plt.tight_layout()
            plt.savefig(os.path.join(CHECKPOINT_DIR, f'training_metrics_epoch_{epoch+1}.png'))
            plt.close()

        # Step the LR schedulers once per epoch.
        scheduler_g.step()
        scheduler_d.step()
        print(f'Epoch {epoch+1} | G_lr: {g_optimizer.param_groups[0]["lr"]:.6f}, D_lr: {d_optimizer.param_groups[0]["lr"]:.6f}')

