import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import sys
import matplotlib.pyplot as plt
from PIL import Image
from skimage.color import rgb2lab
from torchvision import transforms

# 添加项目根目录到路径
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from models.colorization_model import ColorizationGenerator, ColorizationDiscriminator
from utils.data_utils import get_data_loaders, lab_to_rgb, save_checkpoint, load_checkpoint

def get_fixed_samples(val_dir, device, img_size=256, num_samples=3):
    """
    Load a fixed set of sample images from the "samples" directory.

    The samples directory is resolved as the sibling "samples" folder of
    *val_dir* (i.e. <data_dir>/samples when val_dir is <data_dir>/val).

    Args:
        val_dir (str): Validation directory; only used to locate the
            sibling "samples" directory.
        device: Target device (kept for interface compatibility; tensors
            are returned on CPU and moved to the device by the caller).
        img_size (int): Side length the images are resized to.
        num_samples (int): Maximum number of samples to load.

    Returns:
        list[tuple[torch.Tensor, torch.Tensor]]: (L, ab) pairs where L has
        shape (1, H, W) normalized to [-1, 1] and ab has shape (2, H, W)
        normalized to roughly [-1, 1].
    """
    # Resolve the fixed samples directory next to the validation directory.
    samples_dir = os.path.join(os.path.dirname(val_dir), "samples")
    print(f"Using fixed sample directory: {samples_dir}")

    # Collect color images, skipping pre-rendered grayscale companions.
    color_images = [
        f for f in sorted(os.listdir(samples_dir))
        if f.endswith(('.jpg', '.jpeg', '.png')) and not f.endswith('_gray.jpg')
    ]
    # Bug fix: honor num_samples instead of loading every image found.
    color_images = color_images[:num_samples]

    transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
    ])

    samples = []
    for img_file in color_images:
        # Read the color image and resize/convert it to a tensor in [0, 1].
        color_path = os.path.join(samples_dir, img_file)
        color_img = Image.open(color_path).convert('RGB')
        color_tensor = transform(color_img)

        # Convert RGB (floats in [0, 1]) to the LAB color space.
        img_lab = rgb2lab(color_tensor.permute(1, 2, 0).numpy())

        # Split channels and normalize: L in [0, 100] -> [-1, 1],
        # ab in roughly [-110, 110] -> [-1, 1].
        L = img_lab[:, :, 0] / 50.0 - 1.0
        ab = img_lab[:, :, 1:] / 110.0

        # Convert to channel-first float tensors.
        L = torch.from_numpy(L).unsqueeze(0).float()
        ab = torch.from_numpy(ab).permute(2, 0, 1).float()

        samples.append((L, ab))

    return samples

def train(args):
    """
    Train the grayscale-image colorization GAN.

    Sets up data loaders, models, losses, optimizers and LR schedulers,
    then alternates training and validation epochs, logging losses to
    TensorBoard, periodically saving sample grids and checkpoints.

    Args:
        args: Parsed command-line arguments (see main()).
    """
    # Create output directories.
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'checkpoints'), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'samples'), exist_ok=True)

    # Select device.
    device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
    print(f"Using device: {device}")

    # Data loaders.
    train_loader, val_loader = get_data_loaders(
        args.data_dir,
        batch_size=args.batch_size,
        img_size=args.img_size,
        num_workers=args.num_workers
    )
    print(f"Training set size: {len(train_loader.dataset)}, Validation set size: {len(val_loader.dataset)}")

    # Models: generator predicts ab channels from L; discriminator sees L+ab.
    generator = ColorizationGenerator(in_channels=1, out_channels=2).to(device)
    discriminator = ColorizationDiscriminator(in_channels=3).to(device)

    # Losses: adversarial loss on logits plus L1 pixel loss.
    criterion_gan = nn.BCEWithLogitsLoss()
    criterion_pixelwise = nn.L1Loss()

    # Optimizers (beta1=0.5 is the usual GAN setting).
    optimizer_g = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizer_d = optim.Adam(discriminator.parameters(), lr=args.lr, betas=(0.5, 0.999))

    # LR schedulers driven by validation losses.
    # NOTE(review): `verbose=True` is deprecated/removed in recent PyTorch
    # releases — confirm the installed torch version before upgrading.
    scheduler_g = optim.lr_scheduler.ReduceLROnPlateau(optimizer_g, mode='min', factor=0.5, patience=5, verbose=True)
    scheduler_d = optim.lr_scheduler.ReduceLROnPlateau(optimizer_d, mode='min', factor=0.5, patience=5, verbose=True)

    # TensorBoard writer.
    writer = SummaryWriter(log_dir=os.path.join(args.output_dir, 'logs'))

    # Resume from checkpoint if one was given and exists.
    # NOTE(review): only the generator state is restored here; the
    # discriminator restarts from scratch on resume.
    start_epoch = 0
    if args.resume and os.path.exists(args.resume):
        print(f"Loading checkpoint: {args.resume}")
        start_epoch = load_checkpoint(generator, optimizer_g, args.resume)
        print(f"Continuing training from epoch {start_epoch}")

    # Training loop.
    print("Starting training...")
    for epoch in range(start_epoch, args.epochs):
        print(f"Epoch {epoch+1}/{args.epochs}")

        # One training pass over the training set.
        train_g_loss, train_d_loss = train_epoch(
            generator, discriminator,
            train_loader, optimizer_g, optimizer_d,
            criterion_gan, criterion_pixelwise,
            device, args.lambda_pixel
        )

        # Validation pass (also produces fixed visualization samples).
        val_g_loss, val_d_loss, val_samples = validate(
            generator, discriminator,
            val_loader, criterion_gan, criterion_pixelwise,
            device, args.lambda_pixel, args.data_dir
        )

        # Step the plateau schedulers on the validation losses.
        scheduler_g.step(val_g_loss)
        scheduler_d.step(val_d_loss)

        # Log losses.
        writer.add_scalar('Loss/train_generator', train_g_loss, epoch)
        writer.add_scalar('Loss/train_discriminator', train_d_loss, epoch)
        writer.add_scalar('Loss/val_generator', val_g_loss, epoch)
        writer.add_scalar('Loss/val_discriminator', val_d_loss, epoch)

        # Save a 3x3 grid of sample colorizations every sample_interval epochs.
        if epoch % args.sample_interval == 0:
            fig, axes = plt.subplots(3, 3, figsize=(12, 12))
            for i, (L, ab_real, ab_fake) in enumerate(val_samples[:3]):
                # Input grayscale (L channel).
                axes[i, 0].imshow(L[0], cmap='gray')
                axes[i, 0].set_title('Grayscale')
                axes[i, 0].axis('off')

                # Ground-truth color image.
                rgb_real = lab_to_rgb(L.unsqueeze(0), ab_real.unsqueeze(0))[0]
                axes[i, 1].imshow(rgb_real)
                axes[i, 1].set_title('Real Color')
                axes[i, 1].axis('off')

                # Generated color image.
                rgb_fake = lab_to_rgb(L.unsqueeze(0), ab_fake.unsqueeze(0))[0]
                axes[i, 2].imshow(rgb_fake)
                axes[i, 2].set_title('Generated Color')
                axes[i, 2].axis('off')

            plt.tight_layout()
            plt.savefig(os.path.join(args.output_dir, 'samples', f'epoch_{epoch+1}.png'))

            # Bug fix: log the figure to TensorBoard BEFORE closing it
            # (the original closed first, then logged the closed figure).
            writer.add_figure('Samples', fig, epoch)
            plt.close(fig)

        # Periodic checkpoints for both networks.
        if (epoch + 1) % args.save_interval == 0:
            save_checkpoint(
                generator, optimizer_g, epoch + 1,
                os.path.join(args.output_dir, 'checkpoints', f'generator_epoch_{epoch+1}.pth')
            )
            save_checkpoint(
                discriminator, optimizer_d, epoch + 1,
                os.path.join(args.output_dir, 'checkpoints', f'discriminator_epoch_{epoch+1}.pth')
            )

    # Save final models.
    save_checkpoint(
        generator, optimizer_g, args.epochs,
        os.path.join(args.output_dir, 'checkpoints', 'generator_final.pth')
    )
    save_checkpoint(
        discriminator, optimizer_d, args.epochs,
        os.path.join(args.output_dir, 'checkpoints', 'discriminator_final.pth')
    )

    writer.close()
    print("Training completed!")

def train_epoch(generator, discriminator, dataloader, optimizer_g, optimizer_d,
                criterion_gan, criterion_pixelwise, device, lambda_pixel):
    """
    Run one training epoch of the colorization GAN.

    Args:
        generator: Generator model (L -> ab).
        discriminator: Discriminator model, called as discriminator(L, ab).
        dataloader: Iterable of batches with 'L' and 'ab' tensors.
        optimizer_g: Generator optimizer.
        optimizer_d: Discriminator optimizer.
        criterion_gan: Adversarial loss (expects raw logits).
        criterion_pixelwise: Pixel-wise loss (L1).
        device: Device to run on.
        lambda_pixel: Weight of the pixel-wise loss term.

    Returns:
        tuple[float, float]: (average generator loss, average discriminator loss).
    """
    generator.train()
    discriminator.train()

    g_loss_sum = 0.0
    d_loss_sum = 0.0

    for batch_idx, batch in enumerate(dataloader):
        # Fetch the grayscale (L) and true color (ab) channels.
        L = batch['L'].to(device)
        ab_real = batch['ab'].to(device)

        #------------------------
        # Train the discriminator
        #------------------------
        optimizer_d.zero_grad()

        # Generate fake ab channels.
        ab_fake = generator(L)

        # Discriminator on real color.
        # Bug fix: build the label maps from the discriminator's actual
        # output shape instead of a hard-coded 30x30 patch, which only
        # matched one specific img_size/architecture combination.
        real_output = discriminator(L, ab_real)
        d_real_loss = criterion_gan(real_output, torch.ones_like(real_output))

        # Discriminator on fake color (detach so G gets no gradient here).
        fake_output = discriminator(L, ab_fake.detach())
        d_fake_loss = criterion_gan(fake_output, torch.zeros_like(fake_output))

        # Average the two discriminator terms.
        d_loss = (d_real_loss + d_fake_loss) / 2

        d_loss.backward()
        optimizer_d.step()

        #------------------------
        # Train the generator
        #------------------------
        optimizer_g.zero_grad()

        # Generator tries to fool the discriminator (target = real).
        fake_output = discriminator(L, ab_fake)
        g_loss_gan = criterion_gan(fake_output, torch.ones_like(fake_output))

        # Pixel-wise L1 loss against the real ab channels.
        g_loss_pixel = criterion_pixelwise(ab_fake, ab_real)

        # Total generator loss.
        g_loss = g_loss_gan + lambda_pixel * g_loss_pixel

        g_loss.backward()
        optimizer_g.step()

        # Accumulate losses.
        g_loss_sum += g_loss.item()
        d_loss_sum += d_loss.item()

        # Progress report every 10 batches.
        if (batch_idx + 1) % 10 == 0:
            print(f"Batch [{batch_idx+1}/{len(dataloader)}] - D loss: {d_loss.item():.4f}, G loss: {g_loss.item():.4f}")

    # Average over the number of batches.
    g_loss_avg = g_loss_sum / len(dataloader)
    d_loss_avg = d_loss_sum / len(dataloader)

    print(f"Training - Generator loss: {g_loss_avg:.4f}, Discriminator loss: {d_loss_avg:.4f}")

    return g_loss_avg, d_loss_avg

def validate(generator, discriminator, dataloader, criterion_gan, criterion_pixelwise,
             device, lambda_pixel, data_dir):
    """
    Evaluate the model on the validation set and colorize fixed samples.

    Args:
        generator: Generator model (L -> ab).
        discriminator: Discriminator model, called as discriminator(L, ab).
        dataloader: Validation loader yielding batches with 'L' and 'ab'.
        criterion_gan: Adversarial loss (expects raw logits).
        criterion_pixelwise: Pixel-wise loss (L1).
        device: Device to run on.
        lambda_pixel: Weight of the pixel-wise loss term.
        data_dir (str): Dataset root; fixed samples live in <data_dir>/samples.

    Returns:
        tuple: (g_loss_avg, d_loss_avg, samples) where samples is a list of
        (L, ab_real, ab_fake) CPU tensors for visualization.
    """
    generator.eval()
    discriminator.eval()

    g_loss_sum = 0.0
    d_loss_sum = 0.0

    # Fixed samples are resolved from <data_dir>/samples (via the sibling
    # of the val directory inside get_fixed_samples).
    val_dir = os.path.join(data_dir, 'val')
    fixed_samples = get_fixed_samples(val_dir, device, num_samples=3)

    samples = []

    with torch.no_grad():
        # Colorize each fixed sample for visualization.
        for L, ab_real in fixed_samples:
            L = L.to(device).unsqueeze(0)            # add batch dimension
            ab_real = ab_real.to(device).unsqueeze(0)

            ab_fake = generator(L)

            # Store without the batch dimension, moved back to CPU.
            samples.append((L[0].cpu(), ab_real[0].cpu(), ab_fake[0].cpu()))

        # Accumulate losses over the validation set.
        batch_count = 0
        for batch in dataloader:
            L = batch['L'].to(device)
            ab_real = batch['ab'].to(device)

            ab_fake = generator(L)

            # Discriminator losses. Bug fix: size the label maps from the
            # discriminator's actual output instead of a hard-coded 30x30
            # patch (which only matched one img_size/architecture combo).
            real_output = discriminator(L, ab_real)
            d_real_loss = criterion_gan(real_output, torch.ones_like(real_output))

            fake_output = discriminator(L, ab_fake)
            d_fake_loss = criterion_gan(fake_output, torch.zeros_like(fake_output))

            d_loss = (d_real_loss + d_fake_loss) / 2

            # Generator losses (adversarial + weighted L1).
            g_loss_gan = criterion_gan(fake_output, torch.ones_like(fake_output))
            g_loss_pixel = criterion_pixelwise(ab_fake, ab_real)
            g_loss = g_loss_gan + lambda_pixel * g_loss_pixel

            g_loss_sum += g_loss.item()
            d_loss_sum += d_loss.item()
            batch_count += 1

    # Guard against an empty validation loader.
    g_loss_avg = g_loss_sum / batch_count if batch_count > 0 else 0
    d_loss_avg = d_loss_sum / batch_count if batch_count > 0 else 0

    print(f"Validation - Generator loss: {g_loss_avg:.4f}, Discriminator loss: {d_loss_avg:.4f}")
    print(f"Using fixed sample directory as test samples")

    return g_loss_avg, d_loss_avg, samples

def main():
    """Build the command-line interface and launch training."""
    parser = argparse.ArgumentParser(description='灰度图像着色模型训练')
    add = parser.add_argument

    # Dataset options
    add('--data_dir', type=str, required=True, help='数据集目录')
    add('--output_dir', type=str, default='output', help='输出目录')
    add('--img_size', type=int, default=256, help='图像大小')

    # Training options
    add('--batch_size', type=int, default=8, help='批次大小')
    add('--epochs', type=int, default=100, help='训练轮次')
    add('--lr', type=float, default=0.0002, help='学习率')
    add('--lambda_pixel', type=float, default=100.0, help='像素级损失权重')
    add('--no_cuda', action='store_true', help='不使用CUDA')
    add('--num_workers', type=int, default=4, help='数据加载线程数')

    # Checkpointing options
    add('--resume', type=str, default=None, help='恢复训练的检查点路径')
    add('--save_interval', type=int, default=10, help='保存检查点的间隔轮次')
    add('--sample_interval', type=int, default=5, help='保存样本的间隔轮次')

    train(parser.parse_args())

# Script entry point: parse CLI arguments and run training.
if __name__ == '__main__':
    main() 