# -*- coding: utf-8 -*-
# PyTorch_SuperResolution_GPU.py
"""
PyTorch长时间训练实验：图像超分辨率重建
使用SRCNN模型对低分辨率图像进行超分辨率重建
支持从检查点恢复训练，适合长时间运行和容器环境
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from PIL import Image
import os
import time
import argparse
import numpy as np
from math import log10
import json
from datetime import datetime
import torch.nn.functional as F
import glob
import random

# Working directory: all relative paths below (data, outputs) resolve here.
# NOTE(review): assumes the container layout provides this path — confirm.
WORKSPACE_DIR = "/home/vscode/workspace"
os.chdir(WORKSPACE_DIR)

# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用的设备: {device}")

if torch.cuda.is_available():
    print(f"GPU名称: {torch.cuda.get_device_name(0)}")

# Command-line arguments (parsed at import time; this module is a script).
parser = argparse.ArgumentParser(description='PyTorch超分辨率训练')
parser.add_argument('--batch_size', type=int, default=16, help='训练批量大小')
parser.add_argument('--epochs', type=int, default=1000, help='训练轮数')
parser.add_argument('--lr', type=float, default=0.001, help='学习率')
parser.add_argument('--resume', type=str, default=None, help='恢复训练的检查点路径')
parser.add_argument('--checkpoint_interval', type=int, default=10, help='保存检查点的间隔')
parser.add_argument('--log_interval', type=int, default=100, help='日志记录间隔')
parser.add_argument('--dataset_path', type=str, default='./data/DIV2K', help='数据集路径')
parser.add_argument('--scale_factor', type=int, default=4, help='超分辨率缩放因子')
parser.add_argument('--patch_size', type=int, default=128, help='训练图像块大小')
args = parser.parse_args()

# Output directory for training logs and the final model.
output_dir = os.path.join(WORKSPACE_DIR, 'super_resolution_output')
os.makedirs(output_dir, exist_ok=True)

# Checkpoint directory, written/pruned by save_checkpoint below.
checkpoint_dir = os.path.join(output_dir, 'checkpoints')
os.makedirs(checkpoint_dir, exist_ok=True)

# SRCNN super-resolution model.
class SRCNN(nn.Module):
    """Classic three-layer SRCNN operating on a bicubic-upsampled input.

    The low-resolution input is first upsampled by ``scale_factor`` with
    bicubic interpolation, then refined by feature extraction (9x9),
    non-linear mapping (1x1) and reconstruction (5x5) convolutions.
    Attribute names (conv1/conv2/conv3) are part of the checkpoint format.
    """

    def __init__(self, scale_factor=4):
        super(SRCNN, self).__init__()
        self.scale_factor = scale_factor
        # Paddings (4, 0, 2) keep spatial size constant through each conv.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=9, padding=4)
        self.conv2 = nn.Conv2d(64, 32, kernel_size=1, padding=0)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=5, padding=2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Upsample ``x`` by ``scale_factor`` and refine it; returns an RGB tensor."""
        upsampled = F.interpolate(x, scale_factor=self.scale_factor,
                                  mode='bicubic', align_corners=False)
        features = self.relu(self.conv1(upsampled))
        mapped = self.relu(self.conv2(features))
        # No activation on the last layer: output is an unbounded residual image.
        return self.conv3(mapped)

# Custom dataset producing (low-res, high-res) training pairs from real images.
class SuperResolutionDataset(Dataset):
    """Yields (lr_image, hr_image) CHW float tensor pairs in [0, 1].

    HR patches are cropped from images found under ``<dataset_path>/train``
    or ``<dataset_path>/valid``; the LR counterpart is produced by bicubic
    downsampling with ``scale_factor``. Falls back to CIFAR10 when no images
    are found.

    Fix vs. original: images smaller than ``patch_size`` previously made the
    random/center crop raise, and the exception handler silently substituted
    random noise as training data. Undersized images are now bicubic-upscaled
    so every sample is a real image patch.
    """

    def __init__(self, dataset_path, transform=None, train=True, scale_factor=4, patch_size=128):
        self.dataset_path = dataset_path
        self.transform = transform  # accepted for API compatibility; not applied
        self.train = train
        self.scale_factor = scale_factor
        self.patch_size = patch_size

        # Collect image paths for the requested split.
        split = 'train' if train else 'valid'
        self.image_paths = []
        for pattern in ('*.png', '*.jpg'):
            self.image_paths += glob.glob(
                os.path.join(dataset_path, split, '**', pattern), recursive=True)

        # Fallback dataset when no images are present.
        if len(self.image_paths) == 0:
            print(f"警告: 在 {dataset_path} 中没有找到图像文件，使用备用数据集")
            # Use a torchvision built-in dataset (CIFAR10) as a stand-in.
            from torchvision.datasets import CIFAR10
            self.use_cifar = True
            self.cifar_dataset = CIFAR10(root='./data', train=train, download=True)
        else:
            self.use_cifar = False
            print(f"找到 {len(self.image_paths)} 张图像")

    def __len__(self):
        if self.use_cifar:
            return len(self.cifar_dataset)
        return len(self.image_paths)

    def _downscale(self, hr_image, lr_size):
        """Bicubic-downsample a CHW tensor to (lr_size, lr_size)."""
        return F.interpolate(hr_image.unsqueeze(0),
                             size=(lr_size, lr_size),
                             mode='bicubic',
                             align_corners=False).squeeze(0)

    def _ensure_min_size(self, img):
        """Upscale a PIL image (bicubic) so both sides are >= patch_size.

        Prevents the crop below from raising on undersized images.
        """
        w = max(img.width, self.patch_size)
        h = max(img.height, self.patch_size)
        if (w, h) != (img.width, img.height):
            img = img.resize((w, h), Image.BICUBIC)
        return img

    def __getitem__(self, idx):
        if self.use_cifar:
            # Fallback path: CIFAR10 images are used whole (no cropping).
            img, _ = self.cifar_dataset[idx]
            hr_image = transforms.ToTensor()(img)
            lr_size = hr_image.shape[1] // self.scale_factor
            return self._downscale(hr_image, lr_size), hr_image

        # Load a real image from disk.
        img_path = self.image_paths[idx]
        try:
            hr_image = Image.open(img_path).convert('RGB')
            hr_image = self._ensure_min_size(hr_image)

            if self.train:
                # Augmentation: random patch, horizontal flip, 90° rotation.
                i = random.randint(0, hr_image.height - self.patch_size)
                j = random.randint(0, hr_image.width - self.patch_size)
                hr_image = hr_image.crop((j, i, j + self.patch_size, i + self.patch_size))

                if random.random() > 0.5:
                    hr_image = hr_image.transpose(Image.FLIP_LEFT_RIGHT)

                if random.random() > 0.5:
                    hr_image = hr_image.rotate(90)
            else:
                # Validation uses a deterministic center crop.
                center_x, center_y = hr_image.width // 2, hr_image.height // 2
                half_size = self.patch_size // 2
                hr_image = hr_image.crop((
                    center_x - half_size, center_y - half_size,
                    center_x + half_size, center_y + half_size
                ))

            # To tensor, normalized to [0, 1].
            hr_image = transforms.ToTensor()(hr_image)
            lr_size = self.patch_size // self.scale_factor
            return self._downscale(hr_image, lr_size), hr_image

        except Exception as e:
            # Keep a long-running job alive on a corrupt/unreadable file.
            print(f"加载图像 {img_path} 时出错: {e}")
            hr_image = torch.rand(3, self.patch_size, self.patch_size)
            lr_image = self._downscale(hr_image, self.patch_size // self.scale_factor)
            return lr_image, hr_image

# Build the train/validation data loaders.
def create_data_loaders(batch_size):
    """Construct train and validation DataLoaders from the CLI-configured dataset.

    Both splits share scale factor and patch size from ``args``; only the
    train loader shuffles. Returns ``(train_loader, val_loader)``.
    """
    dataset_kwargs = dict(
        scale_factor=args.scale_factor,
        patch_size=args.patch_size,
    )
    train_dataset = SuperResolutionDataset(args.dataset_path, train=True, **dataset_kwargs)
    val_dataset = SuperResolutionDataset(args.dataset_path, train=False, **dataset_kwargs)

    loader_kwargs = dict(batch_size=batch_size, num_workers=4, pin_memory=True)
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    val_loader = DataLoader(val_dataset, shuffle=False, **loader_kwargs)

    return train_loader, val_loader

# Peak signal-to-noise ratio metric.
def psnr(original, compressed):
    """Return the PSNR (dB) between two tensors assumed to lie in [0, 1].

    Identical tensors get a sentinel value of 100 instead of infinity.
    """
    mse = torch.mean((original - compressed) ** 2)
    if mse == 0:
        return 100
    max_pixel = 1.0
    rmse = torch.sqrt(mse)
    return 20 * log10(max_pixel / rmse)

# One training epoch.
def train(model, train_loader, criterion, optimizer, epoch, scheduler=None):
    """Train for one epoch; returns (mean loss, mean PSNR in dB).

    ``scheduler`` is accepted for API compatibility but not used here —
    the caller steps it once per epoch.
    """
    model.train()
    loss_sum = 0.0
    psnr_sum = 0.0

    for step, (lr_batch, hr_batch) in enumerate(train_loader):
        lr_batch = lr_batch.to(device)
        hr_batch = hr_batch.to(device)

        # Standard step: forward, loss, backward, update.
        optimizer.zero_grad()
        preds = model(lr_batch)
        batch_loss = criterion(preds, hr_batch)
        batch_loss.backward()
        optimizer.step()

        loss_sum += batch_loss.item()
        step_psnr = psnr(hr_batch, preds)
        psnr_sum += step_psnr

        if step % args.log_interval == 0:
            seen = step * len(lr_batch)
            total = len(train_loader.dataset)
            pct = 100. * step / len(train_loader)
            print(f'Train Epoch: {epoch} [{seen}/{total} '
                  f'({pct:.0f}%)]\tLoss: {batch_loss.item():.6f}\t'
                  f'PSNR: {step_psnr:.2f} dB')

    num_batches = len(train_loader)
    return loss_sum / num_batches, psnr_sum / num_batches

# Validation pass.
def validate(model, val_loader, criterion):
    """Evaluate on the validation set; returns (mean loss, mean PSNR in dB)."""
    model.eval()
    loss_sum = 0.0
    psnr_sum = 0.0

    # No gradients needed for evaluation.
    with torch.no_grad():
        for lr_batch, hr_batch in val_loader:
            lr_batch = lr_batch.to(device)
            hr_batch = hr_batch.to(device)
            preds = model(lr_batch)
            loss_sum += criterion(preds, hr_batch).item()
            psnr_sum += psnr(hr_batch, preds)

    num_batches = len(val_loader)
    avg_loss = loss_sum / num_batches
    avg_psnr = psnr_sum / num_batches

    print(f'Validation set: Average loss: {avg_loss:.4f}, PSNR: {avg_psnr:.2f} dB')
    return avg_loss, avg_psnr

# Persist training state to disk.
def save_checkpoint(model, optimizer, epoch, loss, psnr, is_best=False):
    """Write an epoch checkpoint (and 'model_best.pth' when ``is_best``).

    Only the 5 most recent epoch checkpoints are kept on disk.
    """
    state = {
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'loss': loss,
        'psnr': psnr
    }

    filename = os.path.join(checkpoint_dir, f'checkpoint_epoch_{epoch}.pth')
    torch.save(state, filename)

    if is_best:
        best_filename = os.path.join(checkpoint_dir, 'model_best.pth')
        torch.save(state, best_filename)

    # Prune old checkpoints, keeping the 5 newest. Sort by the NUMERIC epoch
    # embedded in the filename: the original lexicographic sort ordered e.g.
    # 'checkpoint_epoch_100.pth' before 'checkpoint_epoch_20.pth', so recent
    # checkpoints could be deleted while stale ones survived.
    def _epoch_of(name):
        try:
            return int(name[len('checkpoint_epoch_'):].rsplit('.', 1)[0])
        except ValueError:
            return -1  # unparseable names sort oldest and are pruned first

    checkpoints = sorted(
        (f for f in os.listdir(checkpoint_dir) if f.startswith('checkpoint_epoch_')),
        key=_epoch_of)
    if len(checkpoints) > 5:
        for old_checkpoint in checkpoints[:-5]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))

# Restore training state from disk.
def load_checkpoint(model, optimizer, checkpoint_path):
    """Load a checkpoint in place and return (start_epoch, loss, psnr).

    The stored 'epoch' is the last *completed* epoch (save_checkpoint runs
    after the epoch finishes), so training resumes at epoch + 1 — the
    original returned the stored epoch and re-trained it.
    Returns (0, inf, 0) when the file does not exist.
    """
    if os.path.isfile(checkpoint_path):
        print(f"加载检查点 '{checkpoint_path}'")
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only
        # host; load_state_dict then copies tensors onto the model's device.
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print(f"加载检查点完成，从第 {start_epoch} 轮开始训练")
        return start_epoch, checkpoint['loss'], checkpoint['psnr']
    else:
        print(f"未找到检查点 '{checkpoint_path}'")
        return 0, float('inf'), 0
    
def warmup_scheduler(optimizer, current_epoch, warmup_epochs=10, base_lr=0.001):
    """Linear learning-rate warm-up.

    During the first ``warmup_epochs`` epochs the LR ramps linearly from
    ``base_lr / warmup_epochs`` up to ``base_lr``; afterwards this is a no-op.
    """
    if current_epoch >= warmup_epochs:
        return
    warm_lr = base_lr * (current_epoch + 1) / warmup_epochs
    for group in optimizer.param_groups:
        group['lr'] = warm_lr
# Main training loop.
def main():
    """Train the SRCNN model end-to-end, checkpointing and logging along the way."""
    # Model, loss and optimizer.
    model = SRCNN(scale_factor=args.scale_factor).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5)  # halve LR every 200 epochs
    
    # Data loaders.
    train_loader, val_loader = create_data_loaders(args.batch_size)
    
    # Resume from a checkpoint or start fresh.
    start_epoch = 0
    best_psnr = 0
    
    if args.resume:
        # NOTE(review): best_loss is never used afterwards, and the scheduler
        # state is not restored — after a resume the StepLR schedule restarts
        # from epoch 0. Confirm whether that is intended.
        start_epoch, best_loss, best_psnr = load_checkpoint(model, optimizer, args.resume)
    
    # Per-epoch training log (serialized to JSON periodically).
    log_data = {
        'train_loss': [],
        'train_psnr': [],
        'val_loss': [],
        'val_psnr': [],
        'epoch_times': []
    }
    
    print("开始训练超分辨率模型...")
    
    # NOTE(review): warmup_scheduler is defined in this file but never called.
    for epoch in range(start_epoch, args.epochs):
        start_time = time.time()
        
        # Train for one epoch.
        train_loss, train_psnr = train(model, train_loader, criterion, optimizer, epoch, scheduler)
        
        # Validate.
        val_loss, val_psnr = validate(model, val_loader, criterion)
        
        # Advance the LR schedule.
        scheduler.step()
        # Clamp the learning rate to a floor of 1e-5.
        # NOTE(review): StepLR recomputes lr from its own state, so the next
        # scheduler.step() can override this manual clamp — verify.
        current_lr = scheduler.get_last_lr()[0]
        if current_lr < 1e-5:
            for param_group in optimizer.param_groups:
                param_group['lr'] = 1e-5
        
        # Record epoch metrics.
        epoch_time = time.time() - start_time
        log_data['train_loss'].append(train_loss)
        log_data['train_psnr'].append(train_psnr)
        log_data['val_loss'].append(val_loss)
        log_data['val_psnr'].append(val_psnr)
        log_data['epoch_times'].append(epoch_time)
        
        # Checkpointing: track the best validation PSNR seen so far.
        is_best = val_psnr > best_psnr
        if is_best:
            best_psnr = val_psnr
            
        # NOTE(review): checkpoints (including model_best.pth) are written only
        # on interval epochs — a best epoch between intervals is not persisted.
        if epoch % args.checkpoint_interval == 0:
            save_checkpoint(model, optimizer, epoch, val_loss, val_psnr, is_best)
        
        # Per-epoch summary.
        print(f'Epoch {epoch} 完成, 耗时: {epoch_time:.2f}秒, '
              f'学习率: {scheduler.get_last_lr()[0]:.6f}')
        
        # Periodically flush the training log to disk.
        if epoch % 10 == 0:
            with open(os.path.join(output_dir, 'training_log.json'), 'w') as f:
                json.dump(log_data, f, indent=4)
    
    print("训练完成!")
    
    # Save the final model weights (state_dict only, no optimizer).
    final_model_path = os.path.join(output_dir, 'final_model.pth')
    torch.save(model.state_dict(), final_model_path)
    print(f"最终模型已保存到: {final_model_path}")
    
    # Save the final training log.
    with open(os.path.join(output_dir, 'final_training_log.json'), 'w') as f:
        json.dump(log_data, f, indent=4)

if __name__ == '__main__':
    main()