import os
import sys
import torch
import argparse
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import logging
from torch.utils.tensorboard import SummaryWriter

# Configure logging (INFO level, timestamped messages)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Check for CUDA and force CPU usage
def setup_cpu_environment():
    """Return a CPU ``torch.device``, warning if a GPU is present.

    Clears ``CUDA_VISIBLE_DEVICES`` so later code does not pick up a GPU.
    NOTE(review): setting this env var after torch has already been imported
    may not hide initialized devices — training still stays on CPU because
    the returned device is used for every ``.to()`` call.
    """
    # Same logger instance as the module-level one (same logger name).
    log = logging.getLogger(__name__)
    if not torch.cuda.is_available():
        log.info("使用CPU进行训练")
    else:
        log.warning("检测到CUDA可用，但将强制使用CPU")
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    return torch.device('cpu')

# Heavily simplified RRDBNet model
class SimpleRRDBNet(torch.nn.Module):
    """Tiny stand-in for RRDBNet: first conv, residual blocks, output conv.

    Output resolution equals input resolution; a LeakyReLU is applied to the
    final convolution's output.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=8, num_block=1, num_grow_ch=4):
        super().__init__()
        # Initial feature-extraction convolution.
        self.conv_first = torch.nn.Conv2d(num_in_ch, num_feat, kernel_size=3, padding=1)
        self.relu = torch.nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # num_block simplified residual blocks: conv -> LeakyReLU -> conv.
        # (Created in the same order as before so seeded init is reproducible.)
        blocks = [
            torch.nn.Sequential(
                torch.nn.Conv2d(num_feat, num_grow_ch, kernel_size=3, padding=1),
                torch.nn.LeakyReLU(negative_slope=0.2, inplace=True),
                torch.nn.Conv2d(num_grow_ch, num_feat, kernel_size=3, padding=1),
            )
            for _ in range(num_block)
        ]
        self.res_blocks = torch.nn.ModuleList(blocks)

        self.conv_out = torch.nn.Conv2d(num_feat, num_out_ch, kernel_size=3, padding=1)

    def forward(self, x):
        feat = self.conv_first(x)
        for block in self.res_blocks:
            feat = feat + block(feat)  # residual connection
        return self.relu(self.conv_out(feat))

# Simple dataset class
class SimpleImageDataset(Dataset):
    """Paired HR/LR image dataset.

    Pairs files by identical names present in both directories, resizes both
    sides to ``gt_size`` x ``gt_size`` and normalizes pixels to [-1, 1].
    Each item is a dict ``{'lq': lr_tensor, 'gt': hr_tensor}``.
    """

    # Accepted image extensions (matched case-insensitively).
    IMAGE_EXTS = ('.png', '.jpg', '.jpeg')

    def __init__(self, hr_dir, lr_dir, gt_size=16):
        self.hr_dir = hr_dir
        self.lr_dir = lr_dir
        self.gt_size = gt_size

        # Collect image file names from both directories.
        # Fix: match extensions case-insensitively so e.g. ".PNG" is not skipped.
        self.hr_files = sorted(f for f in os.listdir(hr_dir)
                               if f.lower().endswith(self.IMAGE_EXTS))
        self.lr_files = sorted(f for f in os.listdir(lr_dir)
                               if f.lower().endswith(self.IMAGE_EXTS))

        # Only names present in both directories form valid pairs.
        self.common_files = sorted(set(self.hr_files) & set(self.lr_files))

        logger.info(f"找到 {len(self.common_files)} 对HR/LR图像")

        # ToTensor -> [0, 1], then Normalize -> [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

    def __len__(self):
        return len(self.common_files)

    def __getitem__(self, idx):
        filename = self.common_files[idx]

        hr_path = os.path.join(self.hr_dir, filename)
        lr_path = os.path.join(self.lr_dir, filename)

        try:
            hr_img = Image.open(hr_path).convert('RGB')
            lr_img = Image.open(lr_path).convert('RGB')

            # Resize both sides to the same square size so tensors align.
            # NOTE(review): LR is upscaled to gt_size too, so the model learns
            # a same-resolution restoration mapping, not an upscaling one.
            hr_img = hr_img.resize((self.gt_size, self.gt_size), Image.BICUBIC)
            lr_img = lr_img.resize((self.gt_size, self.gt_size), Image.BICUBIC)

            return {'lq': self.transform(lr_img), 'gt': self.transform(hr_img)}
        except Exception as e:
            # Bug fix: log the actual file name instead of the literal "(unknown)".
            logger.error(f"处理图像 {filename} 时出错: {e}")
            # Best-effort fallback so training can continue
            # (a production pipeline should skip or resample instead).
            return {'lq': torch.zeros(3, self.gt_size, self.gt_size),
                    'gt': torch.zeros(3, self.gt_size, self.gt_size)}

# Training helpers and entry point
def _first_nonempty_dir(candidates):
    """Return the first path in *candidates* that exists and is non-empty, else None."""
    for path in candidates:
        if os.path.exists(path) and len(os.listdir(path)) > 0:
            return path
    return None


def train_simple_model(args):
    """Run a minimal CPU fine-tuning loop.

    Locates HR/LR dataset directories, trains ``SimpleRRDBNet`` with L1 loss,
    logs to TensorBoard, and saves checkpoints under ``args.save_dir``.
    Returns True on success, False when no usable dataset is found.
    """
    # Force CPU execution.
    device = setup_cpu_environment()

    # Locate the HR and LR directories among a few known layouts.
    logger.info("正在查找数据集...")
    # NOTE(review): project root is hard-coded for this machine — consider a CLI flag.
    project_root = 'd:\\AIJumpProject\\LowLevelVision'

    possible_hr_paths = [
        os.path.join(project_root, 'HR'),
        os.path.join(project_root, 'datasets', 'HR'),
        os.path.join(project_root, 'datasets', 'display_dataset', 'HR')
    ]

    possible_lr_paths = [
        os.path.join(project_root, 'LR'),
        os.path.join(project_root, 'datasets', 'LR'),
        os.path.join(project_root, 'datasets', 'display_dataset', 'LR')
    ]

    hr_dir = _first_nonempty_dir(possible_hr_paths)
    lr_dir = _first_nonempty_dir(possible_lr_paths)

    if hr_dir is None or lr_dir is None:
        logger.error("找不到有效的HR或LR数据集目录!")
        logger.error(f"已尝试的HR路径: {possible_hr_paths}")
        logger.error(f"已尝试的LR路径: {possible_lr_paths}")
        return False

    logger.info(f"找到数据集目录:")
    logger.info(f"  HR目录: {hr_dir}")
    logger.info(f"  LR目录: {lr_dir}")

    # Build dataset and loader.
    dataset = SimpleImageDataset(hr_dir, lr_dir, gt_size=args.gt_size)
    if len(dataset) == 0:
        logger.error("数据集中没有找到有效的图像对!")
        return False

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0  # multiprocessing disabled in CPU mode
    )

    # Build the model on the CPU device.
    logger.info("创建简化的Real-ESRGAN模型...")
    model = SimpleRRDBNet(
        num_in_ch=3,
        num_out_ch=3,
        num_feat=args.num_feat,
        num_block=args.num_block,
        num_grow_ch=args.num_grow_ch
    ).to(device)

    # Loss and optimizer.
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Output directories.
    os.makedirs(args.save_dir, exist_ok=True)

    tb_log_dir = os.path.join(args.save_dir, 'tensorboard_logs')
    os.makedirs(tb_log_dir, exist_ok=True)
    writer = SummaryWriter(log_dir=tb_log_dir)
    logger.info(f"TensorBoard日志目录已创建: {tb_log_dir}")
    logger.info(f"请在浏览器中访问: http://localhost:6006/ 查看训练进度")

    # Training loop.
    logger.info("开始训练...")
    model.train()

    # Fix: ensure the SummaryWriter is closed (event files flushed) even if
    # training raises part-way through.
    try:
        num_batches = len(dataloader)  # hoisted loop invariant
        for epoch in range(args.epochs):
            epoch_loss = 0.0
            for i, batch in enumerate(dataloader):
                lr_img = batch['lq'].to(device)
                hr_img = batch['gt'].to(device)

                # Forward pass.
                optimizer.zero_grad()
                output = model(lr_img)

                # L1 reconstruction loss.
                loss = criterion(output, hr_img)

                # Backward pass and parameter update.
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

                # Periodic progress logging.
                if (i + 1) % args.print_freq == 0:
                    logger.info(f"Epoch [{epoch+1}/{args.epochs}], Batch [{i+1}/{num_batches}], Loss: {loss.item():.4f}")

                # TensorBoard scalars per step.
                global_step = epoch * num_batches + i
                writer.add_scalar('train/loss', loss.item(), global_step)
                writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], global_step)

            # Per-epoch average loss.
            avg_loss = epoch_loss / num_batches
            logger.info(f"Epoch [{epoch+1}/{args.epochs}] Average Loss: {avg_loss:.4f}")

            # Periodic checkpoint.
            if (epoch + 1) % args.save_freq == 0:
                model_path = os.path.join(args.save_dir, f'model_epoch_{epoch+1}.pth')
                torch.save(model.state_dict(), model_path)
                logger.info(f"模型已保存到: {model_path}")

        # Final checkpoint.
        final_model_path = os.path.join(args.save_dir, 'final_model.pth')
        torch.save(model.state_dict(), final_model_path)
    finally:
        writer.close()

    logger.info(f"最终模型已保存到: {final_model_path}")
    logger.info(f"TensorBoard日志已保存到: {tb_log_dir}")
    logger.info("训练完成!")
    logger.info(f"请在浏览器中访问: http://localhost:6006/ 查看训练可视化结果")

    return True

# Entry point
def main():
    """Parse CLI arguments, run the simple CPU fine-tune, and exit accordingly."""
    parser = argparse.ArgumentParser(description='简单的Real-ESRGAN CPU微调脚本')
    # (flag, value type, default, help text) — registered in display order.
    cli_options = [
        ('--batch_size', int, 1, '批量大小'),
        ('--epochs', int, 5, '训练轮数'),
        ('--lr', float, 1e-5, '学习率'),
        ('--gt_size', int, 16, '图像大小'),
        ('--num_feat', int, 8, '特征通道数'),
        ('--num_block', int, 1, '残差块数量'),
        ('--num_grow_ch', int, 4, '生长通道数'),
        ('--print_freq', int, 1, '打印频率'),
        ('--save_freq', int, 1, '保存频率'),
        ('--save_dir', str, './simple_cpu_results', '保存目录'),
    ]
    for flag, value_type, default, help_text in cli_options:
        parser.add_argument(flag, type=value_type, default=default, help=help_text)

    args = parser.parse_args()

    # Exit code mirrors the training outcome: 0 on success, 1 on failure.
    if train_simple_model(args):
        logger.info("CPU训练成功完成!")
        sys.exit(0)
    logger.error("CPU训练失败!")
    sys.exit(1)

# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()