# train_webfg400.py (optimized version)
import time
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms, models
from torchvision.models import (
    ResNet50_Weights,
    ConvNeXt_Large_Weights
)
import argparse
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import numpy as np

# Allow loading truncated images (consistent with training)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Suppress PIL's palette-transparency UserWarning
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="PIL")

def parse_args():
    """Build and parse the command-line interface for this training script.

    Returns:
        argparse.Namespace with all training hyper-parameters and paths.
    """
    p = argparse.ArgumentParser(description="Fine-tune ConvNeXt-V2 on WebFG-400")
    # Paths and naming.
    p.add_argument('--data_dir', type=str, default='.', help='Directory containing train/')
    p.add_argument('--model_dir', type=str, default='./model', help='Directory to save models and logs')
    p.add_argument('--save_model_name', type=str, default='best_model', help='Base name for saved models')
    p.add_argument(
        '--model_name', type=str, default='convnext_v2_large',
        choices=['resnet50', 'convnext_v2_large'],
    )
    # Optimization hyper-parameters.
    p.add_argument('--batch_size', type=int, default=16)
    p.add_argument('--num_epochs', type=int, default=50)
    p.add_argument('--lr', type=float, default=1e-5)
    p.add_argument('--val_ratio', type=float, default=0.1, help='Validation split ratio')
    p.add_argument('--num_workers', type=int, default=4)
    p.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
    # Checkpointing and stopping criteria.
    p.add_argument('--resume', type=str, default=None, help='Path to checkpoint to resume from')
    p.add_argument('--target_acc', type=float, default=88.0, help='Target validation accuracy (%) to stop')
    p.add_argument('--early_stop_patience', type=int, default=7, help='Patience for early stopping')
    p.add_argument('--early_stop_delta', type=float, default=0.1, help='Min improvement to reset patience')
    return p.parse_args()

def get_transform(model_name, is_train=True):
    """Return the torchvision preprocessing pipeline for train or eval.

    Args:
        model_name: accepted for API symmetry; currently unused — both
            backbones share the same ImageNet normalization statistics.
        is_train: when True, include stochastic augmentation; otherwise
            use a deterministic resize-only pipeline.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    if not is_train:
        # Evaluation: fixed resize, no randomness.
        return transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            normalize,
        ])

    # Training: resize + random crop, flips, color jitter, and random
    # erasing (applied on the tensor, before normalization).
    return transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.1),
        transforms.ToTensor(),
        transforms.RandomErasing(p=0.25, scale=(0.02, 0.33), ratio=(0.3, 3.3)),
        normalize,
    ])

def build_model(model_name, num_classes):
    """Construct an ImageNet-pretrained backbone with a new classifier head.

    Args:
        model_name: one of 'resnet50' or 'convnext_v2_large'.
        num_classes: output dimension of the replacement head.

    Returns:
        The torchvision model, ready for fine-tuning.

    Raises:
        ValueError: if model_name is not a supported architecture.
    """
    if model_name == 'resnet50':
        model = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        return model

    if model_name == 'convnext_v2_large':
        # NOTE(review): torchvision's convnext_large is ConvNeXt (V1);
        # the 'v2' in the CLI name does not match the loaded weights — confirm intent.
        model = models.convnext_large(weights=ConvNeXt_Large_Weights.IMAGENET1K_V1)
        old_head = model.classifier[2]
        model.classifier[2] = nn.Linear(old_head.in_features, num_classes)

        # Freeze the earliest feature modules to reduce trainable parameters.
        # features[:3] spans the stem and early blocks — presumably the first
        # stages; exact stage boundaries depend on torchvision's layout.
        for frozen_param in model.features[:3].parameters():
            frozen_param.requires_grad = False
        print("✅ Frozen first 3 stages of ConvNeXt backbone.")
        return model

    raise ValueError(f"Unsupported model: {model_name}")

def create_train_val_datasets(dataset, val_ratio=0.1, seed=42):
    """Split a dataset into stratified train/validation Subsets.

    Args:
        dataset: an ImageFolder-style dataset exposing ``.targets`` (one
            class index per sample) and ``__len__``.
        val_ratio: fraction of samples assigned to validation.
        seed: RNG seed for the split. Defaults to the previously
            hard-coded 42, so existing callers get an identical split;
            new callers may vary it for cross-validation-style runs.

    Returns:
        (train_subset, val_subset): two torch ``Subset`` views over
        ``dataset`` with per-class proportions preserved in both.
    """
    targets = np.asarray(dataset.targets)
    indices = np.arange(len(dataset))

    # Stratify on the labels so every class keeps the same train/val ratio.
    train_indices, val_indices = train_test_split(
        indices,
        test_size=val_ratio,
        stratify=targets,
        random_state=seed,  # reproducible split
    )

    return Subset(dataset, train_indices), Subset(dataset, val_indices)

class TransformDataset(torch.utils.data.Dataset):
    """Wrap a subset so a (possibly None) transform is applied per sample.

    This lets train and val Subsets of the same underlying ImageFolder
    carry different preprocessing pipelines.
    """

    def __init__(self, subset, transform):
        # subset: any indexable yielding (sample, label) pairs.
        # transform: callable applied to the sample, or a falsy value to skip.
        self.subset = subset
        self.transform = transform

    def __getitem__(self, index):
        sample, label = self.subset[index]
        return (self.transform(sample), label) if self.transform else (sample, label)

    def __len__(self):
        return len(self.subset)
    
@torch.no_grad()
def evaluate(model, loader, device):
    """Compute top-1 accuracy (in percent) of *model* over *loader*.

    Puts the model in eval mode; gradients are disabled by the decorator.
    """
    model.eval()
    num_correct = 0
    num_seen = 0
    for batch_inputs, batch_labels in loader:
        batch_inputs = batch_inputs.to(device)
        batch_labels = batch_labels.to(device)
        predictions = model(batch_inputs).argmax(dim=1)
        num_correct += (predictions == batch_labels).sum().item()
        num_seen += batch_labels.size(0)
    return 100. * num_correct / num_seen

def _save_checkpoint(path, model, optimizer, epoch, best_val_acc, model_name, num_classes):
    """Persist model/optimizer state plus the metadata --resume validates."""
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
        'best_val_acc': best_val_acc,
        'model_name': model_name,
        'num_classes': num_classes,
    }, path)

def main():
    """Entry point: parse args, build data/model, run the training loop.

    Side effects: reads images from ``<data_dir>/train``, writes checkpoint
    files into ``<model_dir>``, and prints progress to stdout. Stops early
    when the target accuracy is reached or patience is exhausted.
    """
    args = parse_args()
    device = torch.device(args.device)
    print(f"Using device: {device}")

    train_dir = os.path.join(args.data_dir, 'train')
    assert os.path.exists(train_dir), f"Train directory not found: {train_dir}"

    model_dir = Path(args.model_dir)
    model_dir.mkdir(parents=True, exist_ok=True)

    # Load the full dataset WITHOUT a transform so the train/val subsets
    # can each receive their own pipeline after the split.
    full_dataset = datasets.ImageFolder(root=train_dir, transform=None)
    num_classes = len(full_dataset.classes)
    print(f"Dataset: {len(full_dataset)} images, {num_classes} classes")

    # Stratified split keeps per-class proportions equal in both subsets.
    train_subset, val_subset = create_train_val_datasets(full_dataset, val_ratio=args.val_ratio)
    print(f"Train samples: {len(train_subset)}, Val samples: {len(val_subset)}")

    # Augmentation on the train split only; eval is deterministic.
    train_dataset = TransformDataset(train_subset, get_transform(args.model_name, is_train=True))
    val_dataset = TransformDataset(val_subset, get_transform(args.model_name, is_train=False))

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.num_workers, pin_memory=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                            num_workers=args.num_workers, pin_memory=True)

    # Model, loss and optimizer. Label smoothing + larger weight decay
    # act as regularizers for the webly-supervised (noisy) labels.
    model = build_model(args.model_name, num_classes).to(device)
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=5e-4)

    # Training state.
    start_epoch = 0
    best_val_acc = 0.0
    epochs_no_improve = 0

    print(f"Start training {args.model_name} with {args.val_ratio*100:.1f}% val split...")

    # Optionally resume from a checkpoint, validating its metadata first.
    if args.resume:
        print(f"Loading checkpoint from {args.resume}")
        checkpoint = torch.load(args.resume, map_location=device, weights_only=True)
        if checkpoint['model_name'] != args.model_name:
            raise ValueError(f"Model name mismatch!")
        if checkpoint['num_classes'] != num_classes:
            raise ValueError(f"Class count mismatch!")
        model.load_state_dict(checkpoint['model_state_dict'])
        if 'optimizer_state_dict' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint.get('epoch', 0)
        best_val_acc = checkpoint.get('best_val_acc', 0.0)
        print(f"Resumed from epoch {start_epoch}, best val acc: {best_val_acc:.2f}%")

    print(f"Target acc: {args.target_acc}%, Patience: {args.early_stop_patience}, Delta: {args.early_stop_delta}%")

    for epoch in range(start_epoch, args.num_epochs):
        model.train()
        running_loss = 0.0
        num_batches = 0
        epoch_start_time = time.time()

        # FIX: the original wrapped this loop in a tqdm bar with
        # file=open(os.devnull, 'w') — leaking a file handle every epoch —
        # while also passing disable=True, making the bar (and its
        # set_postfix updates) dead code. The bar is dropped entirely and
        # the average train loss is reported in the epoch summary instead.
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            num_batches += 1

        # Guard against an empty loader so the summary never divides by zero.
        avg_train_loss = running_loss / max(num_batches, 1)

        # Validation pass (drives checkpointing and stopping decisions).
        val_acc = evaluate(model, val_loader, device)
        epoch_duration = time.time() - epoch_start_time
        print(f"Epoch {epoch+1}: Train Loss = {avg_train_loss:.4f}, "
              f"Val Acc = {val_acc:.2f}%, Time: {epoch_duration:.2f}s")

        # Stop as soon as the target accuracy is hit.
        if val_acc >= args.target_acc:
            print(f"🎯 Target accuracy {args.target_acc}% reached at epoch {epoch+1}!")
            _save_checkpoint(
                f'{args.model_dir}/{args.save_model_name}_target_reached.pth',
                model, optimizer, epoch + 1, max(best_val_acc, val_acc),
                args.model_name, num_classes,
            )
            break

        # Early stopping: only improvements larger than delta reset patience.
        if val_acc > best_val_acc + args.early_stop_delta:
            best_val_acc = val_acc
            epochs_no_improve = 0
            _save_checkpoint(
                f'{args.model_dir}/{args.save_model_name}.pth',
                model, optimizer, epoch + 1, best_val_acc,
                args.model_name, num_classes,
            )
            print(f"🎉 New best model saved! Val Acc: {val_acc:.2f}%")
        else:
            epochs_no_improve += 1
            print(f"No significant improvement for {epochs_no_improve} epoch(s).")

        if epochs_no_improve >= args.early_stop_patience:
            print(f"🛑 Early stopping triggered after {args.early_stop_patience} epochs.")
            break

    print(f"Training finished. Best Val Acc: {best_val_acc:.2f}%")

# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()