import sys
sys.path.append(".")

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
import glob
import os
import json
from datetime import datetime
import time
import argparse 

from UnetCBAM.net import GeneratorWithCBAM as Generator, Discriminator

# -------------------- Data loading --------------------
class PairedDataset(Dataset):
    """Paired grayscale image dataset for pix2pix-style training.

    Pairs are formed by sorted filename order: the i-th PNG in
    ``sample_dir`` (input, key ``'A'``) is matched with the i-th PNG in
    ``real_dir`` (target, key ``'B'``).
    """

    def __init__(self, sample_dir, real_dir, transform=None):
        self.sample_paths = sorted(glob.glob(f"{sample_dir}/*.png"))
        self.real_paths = sorted(glob.glob(f"{real_dir}/*.png"))
        # Fail fast on mismatched directories: the original assumed equal
        # counts and would raise IndexError mid-epoch otherwise.
        if len(self.sample_paths) != len(self.real_paths):
            raise ValueError(
                f"sample/real image counts differ: "
                f"{len(self.sample_paths)} vs {len(self.real_paths)}"
            )
        self.transform = transform

    def __getitem__(self, index):
        sample_img = Image.open(self.sample_paths[index]).convert('L')
        real_img = Image.open(self.real_paths[index]).convert('L')

        if self.transform:
            # Re-seed torch's RNG with the same value before each transform
            # call so random torchvision ops (flip / rotation) are applied
            # identically to both images of the pair.
            seed = torch.randint(0, 2**32, size=(1,)).item()
            torch.manual_seed(seed)
            sample_img = self.transform(sample_img)
            torch.manual_seed(seed)
            real_img = self.transform(real_img)

        return {'A': sample_img, 'B': real_img}

    def __len__(self):
        return len(self.sample_paths)


# Persist training statistics
def save_train_data(file_path: str, data_list: list):
    """Serialize the per-epoch training records to a JSON file.

    Writes UTF-8 with non-ASCII characters kept as-is; failures are
    reported on stdout rather than raised (best-effort persistence).
    """
    try:
        with open(file_path, 'w', encoding='utf-8') as fp:
            # Pretty-print and keep non-ASCII characters readable.
            json.dump(data_list, fp, indent=4, ensure_ascii=False)
    except Exception as e:
        print(f"保存文件时出错: {e}")
    else:
        print(f"数据已成功保存至 {file_path}")


def resume_train_data(resume_model_path):
    """Load the ``train_data.json`` stored next to a checkpoint file.

    Args:
        resume_model_path: Path to a checkpoint (.pth); the JSON log is
            expected to live in the same directory.

    Returns:
        The list of per-epoch record dicts previously written by
        ``save_train_data``.
    """
    resume_train_data_path = os.path.join(os.path.dirname(resume_model_path), "train_data.json")
    # Read with the same encoding save_train_data writes (utf-8 with
    # ensure_ascii=False); the platform default could fail on Windows.
    with open(resume_train_data_path, "r", encoding="utf-8") as f:
        resume_train_data_dict = json.load(f)
    return resume_train_data_dict

def format_time(seconds):
    """Render a duration in seconds as an ``HH:MM:SS`` string."""
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"


if __name__ == "__main__":

    # Entry point: trains a pix2pix-style GAN (U-Net+CBAM generator vs. a
    # conditional discriminator — exact architectures live in UnetCBAM.net).
    current_time = datetime.now()
    time_str = current_time.strftime("%Y-%m-%d-%H-%M-%S")
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Weight of the L1 reconstruction loss relative to the adversarial loss.
    L1_LAMBDA = 150
    # Each run writes its checkpoints into a fresh timestamped directory.
    SAVE_DIR = os.path.join("UnetCBAM/weights", time_str)
    os.makedirs(SAVE_DIR, exist_ok=True)

    parser = argparse.ArgumentParser()
    parser.add_argument("--resume", type=str, default=None, 
                        help="Path to checkpoint to resume training (e.g., 'weights/2023-01-01/pix2pix_checkpoint_1000.pth')")
    # Number of epochs to run in THIS session; when resuming, they are added
    # on top of the checkpoint's epoch counter (not "train up to" a total).
    parser.add_argument("--epoch_nums", type=int, default=500)
    parser.add_argument("--datasets_dir", type=str, default="dataset")
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--batch_size", type=int, default=16)
    args = parser.parse_args()

    EPOCHS = args.epoch_nums
    LR = args.learning_rate
    BATCH_SIZE = args.batch_size

    # Paired data layout: <datasets_dir>/sample (inputs), <datasets_dir>/real (targets).
    sample_dir = os.path.join(args.datasets_dir, "sample")
    real_dir = os.path.join(args.datasets_dir, "real")

    netG = Generator().to(DEVICE)
    netD = Discriminator().to(DEVICE)

    optimizer_G = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.999))
    optimizer_D = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.999))

    train_data = []
    start_epoch = 0
    best_loss = float('inf')

    # If a resume checkpoint was given, restore model and optimizer state.
    if args.resume:
        # NOTE(review): no map_location here — a checkpoint saved on GPU will
        # fail to load on a CPU-only machine; consider map_location=DEVICE.
        checkpoint = torch.load(args.resume)
        # NOTE(review): strict=False silently ignores missing/unexpected
        # generator keys — convenient after architecture tweaks, but it can
        # mask a genuinely incompatible checkpoint.
        netG.load_state_dict(checkpoint['generator'], strict=False)
        netD.load_state_dict(checkpoint['discriminator'])
        optimizer_G.load_state_dict(checkpoint['optimizer_G'])
        optimizer_D.load_state_dict(checkpoint['optimizer_D'])
        print(f"Loaded checkpoint from {args.resume}, resuming from epoch {checkpoint['epoch'] + 1}")
        start_epoch = checkpoint['epoch'] + 1
        # Recover the loss history and the best generator loss seen so far,
        # so "best" checkpoints stay comparable across sessions.
        train_data = resume_train_data(args.resume)
        for data in train_data:
            if data["avg_loss_G"] < best_loss:
                best_loss = data["avg_loss_G"]
    else:
        print("Training from scratch")

    # NOTE(review): Resize(256) with an int scales only the SHORTER edge to
    # 256, keeping aspect ratio — confirm source images are square if the
    # networks expect exact 256x256 inputs.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomHorizontalFlip(p=0.3),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])

    dataset = PairedDataset(sample_dir, real_dir, transform=transform)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)

    # MSE adversarial loss (LSGAN-style) plus L1 reconstruction loss.
    criterion_GAN = nn.MSELoss()
    criterion_L1 = nn.L1Loss()

    # -------------------- Training loop --------------------
    train_start_time = time.time()
    for epoch in range(start_epoch, start_epoch + EPOCHS):
        netG.train()
        netD.train()

        epoch_loss_G = 0.0
        epoch_loss_D = 0.0

        for i, batch in enumerate(dataloader):
            real_A = batch['A'].to(DEVICE)  # input image
            real_B = batch['B'].to(DEVICE)  # ground-truth target

            # ---- Train the generator ----
            optimizer_G.zero_grad()

            fake_B = netG(real_A)
            # Conditional discriminator: scores the (input, output) pair.
            pred_fake = netD(real_A, fake_B)

            # G is rewarded when D labels its output as real (all-ones
            # target) while staying close to the target image in L1.
            loss_GAN = criterion_GAN(pred_fake, torch.ones_like(pred_fake))
            loss_L1 = criterion_L1(fake_B, real_B) * L1_LAMBDA
            loss_G = loss_GAN + loss_L1

            loss_G.backward()
            optimizer_G.step()

            # ---- Train the discriminator ----
            optimizer_D.zero_grad()

            # Real pairs should be classified as real (ones).
            pred_real = netD(real_A, real_B)
            loss_real = criterion_GAN(pred_real, torch.ones_like(pred_real))

            # Fake pairs should be classified as fake (zeros); detach() stops
            # discriminator gradients from flowing back into the generator.
            pred_fake = netD(real_A, fake_B.detach())
            loss_fake = criterion_GAN(pred_fake, torch.zeros_like(pred_fake))

            # Halve the combined term so D's effective step size matches G's.
            loss_D = (loss_real + loss_fake) * 0.5
            loss_D.backward()
            optimizer_D.step()

            epoch_loss_G += loss_G.item()
            epoch_loss_D += loss_D.item()


        avg_loss_G = epoch_loss_G / len(dataloader)
        avg_loss_D = epoch_loss_D / len(dataloader)

        # Epoch is the 1-based count WITHIN this session; the printed/logged
        # value (Epoch + start_epoch) is the global epoch number.
        Epoch = epoch + 1 - start_epoch
        time_now = time.time()
        spend_time = time_now - train_start_time
        # Simple linear extrapolation of the remaining wall-clock time.
        remain_time = spend_time * ((EPOCHS - Epoch) / Epoch)
        formatted_spend_time = format_time(spend_time)
        formatted_remain_time = format_time(remain_time)
        print(
            f"Epoch [{Epoch + start_epoch:4d}/{start_epoch + EPOCHS}] | Loss G: {avg_loss_G:.4f} | Loss D: {avg_loss_D:.4f} | Spend Time: {formatted_spend_time} | Remaining Time {formatted_remain_time}"
        )
        train_data.append(
            {
                "Epoch": Epoch + start_epoch,
                "avg_loss_G": avg_loss_G,
                "avg_loss_D": avg_loss_D,
                "spend_time": spend_time,
                "remain_time": remain_time
            }
        )
        # Periodic checkpoint every 100 global epochs.
        # NOTE(review): 'loss' stores best_loss, which may predate this epoch
        # — verify that is the intended meaning of the field.
        if (epoch + 1) % 100 == 0:
            torch.save({
                'generator': netG.state_dict(),
                'discriminator': netD.state_dict(),
                'optimizer_G': optimizer_G.state_dict(),
                'optimizer_D': optimizer_D.state_dict(),
                'epoch': epoch,
                'loss': best_loss
            }, os.path.join(SAVE_DIR, f"checkpoint_{epoch + 1}.pth"))

        # Save the best model (lowest average generator loss so far), first
        # deleting any previous best_*.pth in this run's directory.
        if avg_loss_G < best_loss:
            pths = [file for file in os.listdir(SAVE_DIR) if file.endswith(".pth")]
            for pth in pths:
                if "best" in pth:
                    best_pth_path = os.path.join(SAVE_DIR, pth)
                    try:
                        os.remove(best_pth_path)
                        print(f"已删除文件: {best_pth_path}")
                    except OSError as e:
                        print(f"删除文件 {best_pth_path} 时出错: {e}")


            best_loss = avg_loss_G
            torch.save(
                {
                    'generator': netG.state_dict(),
                    'discriminator': netD.state_dict(),
                    'optimizer_G': optimizer_G.state_dict(),
                    'optimizer_D': optimizer_D.state_dict(),
                    'epoch': epoch,
                    'loss': best_loss
                }, 
                os.path.join(SAVE_DIR, f"best_checkpoint_{epoch + 1}.pth")
            )

    # Persist the full loss history next to this run's checkpoints.
    save_train_data(
        file_path=os.path.join(SAVE_DIR, "train_data.json"),
        data_list=train_data
    )
