# train_webfg400.py (optimized version)
import time
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms, models
from torchvision.models import (
    ResNet50_Weights,
    ConvNeXt_Large_Weights
)
import argparse
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import numpy as np
from collections import Counter
from torch.utils.data import WeightedRandomSampler
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR, SequentialLR

# Allow loading truncated images (consistent with training)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Disable the decompression-bomb pixel limit
from PIL import Image
Image.MAX_IMAGE_PIXELS = None

# Suppress PIL palette-transparency warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="PIL")
warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*__array_wrap__.*")

def parse_args():
    """Parse command-line arguments for WebFG-400 fine-tuning."""
    # (flag, add_argument keyword options) — declared as data, registered below.
    arg_specs = [
        ('--data_dir', dict(type=str, default='.', help='Directory containing train/')),
        ('--model_dir', dict(type=str, default='./model', help='Directory to save models and logs')),
        ('--save_model_name', dict(type=str, default='best_model', help='Base name for saved models')),
        ('--model_name', dict(type=str, default='convnext_v2_large',
                              choices=['resnet50', 'convnext_v2_large'])),
        ('--batch_size', dict(type=int, default=16)),
        ('--num_epochs', dict(type=int, default=50)),
        ('--lr', dict(type=float, default=1e-5)),
        ('--val_ratio', dict(type=float, default=0.1, help='Validation split ratio')),
        ('--num_workers', dict(type=int, default=4)),
        ('--device', dict(type=str, default='cuda' if torch.cuda.is_available() else 'cpu')),
        ('--resume', dict(type=str, default=None, help='Path to checkpoint to resume from')),
        ('--target_acc', dict(type=float, default=88.0, help='Target validation accuracy (%) to stop')),
        ('--early_stop_patience', dict(type=int, default=7, help='Patience for early stopping')),
        ('--early_stop_delta', dict(type=float, default=0.1, help='Min improvement to reset patience')),
    ]
    parser = argparse.ArgumentParser(description="Fine-tune ConvNeXt-V2 on WebFG-400")
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()

def get_transform(model_name, is_train=True):
    """Return the preprocessing pipeline for training or evaluation.

    `model_name` is accepted for interface compatibility but is currently
    unused: both supported architectures share the same ImageNet-style
    preprocessing (224x224 input, ImageNet mean/std normalization).
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if not is_train:
        # Eval: deterministic resize straight to the model input size.
        return transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            normalize,
        ])
    # Train: resize larger, then random crop plus photometric / auto augmentation.
    return transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.1),
        transforms.AutoAugment(transforms.AutoAugmentPolicy.IMAGENET),
        transforms.ToTensor(),
        normalize,
    ])

def build_model(model_name, num_classes):
    """Instantiate an ImageNet-pretrained backbone with a fresh `num_classes` head.

    Raises:
        ValueError: if `model_name` is not one of the supported choices.

    NOTE(review): the 'convnext_v2_large' choice actually loads torchvision's
    ConvNeXt-Large (V1) weights — no ConvNeXt-V2 is involved in this code path.
    """
    if model_name == 'resnet50':
        net = models.resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
        net.fc = nn.Linear(net.fc.in_features, num_classes)
        return net
    if model_name == 'convnext_v2_large':
        net = models.convnext_large(weights=ConvNeXt_Large_Weights.IMAGENET1K_V1)
        net.classifier[2] = nn.Linear(net.classifier[2].in_features, num_classes)
        return net
    raise ValueError(f"Unsupported model: {model_name}")

# Called after build_model() to construct optimizer parameter groups.
def set_layerwise_lr(model, base_lr=1e-4, head_lr=1e-3, weight_decay=5e-2):
    """Build optimizer parameter groups with a lower LR for the backbone.

    Supports torchvision ConvNeXt-style models (`.features` backbone +
    `.classifier` head) and ResNet-style models (`.fc` head); any other
    architecture falls back to a single group at `head_lr`.

    Args:
        model: network whose parameters are being grouped.
        base_lr: learning rate for the pretrained backbone.
        head_lr: learning rate for the freshly initialized head.
        weight_decay: applied uniformly to every group.

    Returns:
        A list of param-group dicts suitable for a torch.optim optimizer.
    """
    if hasattr(model, 'features') and hasattr(model, 'classifier'):
        # ConvNeXt layout. (The original handled only this case and raised
        # AttributeError when main() was run with --model_name resnet50.)
        backbone_params = list(model.features.parameters())
        head_params = list(model.classifier.parameters())
    elif hasattr(model, 'fc'):
        # ResNet layout: the head is `.fc`; every other parameter is backbone.
        head_params = list(model.fc.parameters())
        head_ids = {id(p) for p in head_params}
        backbone_params = [p for p in model.parameters() if id(p) not in head_ids]
    else:
        # Unknown architecture: explicit single-group fallback at the head LR.
        return [{"params": list(model.parameters()), "lr": head_lr, "weight_decay": weight_decay}]
    return [
        {"params": backbone_params, "lr": base_lr, "weight_decay": weight_decay},
        {"params": head_params, "lr": head_lr, "weight_decay": weight_decay},
    ]

def create_train_val_datasets(dataset, val_ratio=0.1):
    """Split `dataset` into stratified train/val Subsets.

    Stratifies on the dataset's `.targets` so each class keeps the same
    proportion in both splits; the fixed random_state makes the split
    reproducible across runs.
    """
    labels = np.array(dataset.targets)
    all_indices = np.arange(len(dataset))

    # Per-class stratified split, reproducible via the fixed seed.
    train_idx, val_idx = train_test_split(
        all_indices,
        test_size=val_ratio,
        stratify=labels,
        random_state=42,
    )

    return Subset(dataset, train_idx), Subset(dataset, val_idx)

# Custom loss function for long-tailed class distributions
class LogitAdjustedLoss(nn.Module):
    """Logit-adjusted cross entropy for long-tailed classification.

    Adds log class-priors to the logits before cross entropy so that rare
    classes are not systematically under-predicted.

    Args:
        cls_num_list: 1-D float tensor of per-class sample counts.
        tau: temperature dividing the prior term (1.0 = plain adjustment).
    """
    def __init__(self, cls_num_list, tau=1.0):
        super().__init__()
        # Register the priors as a buffer so that .to(device)/.cuda() on the
        # module moves them along with it. (The original hard-coded .cuda(),
        # which crashed on CPU-only machines, and a plain tensor attribute
        # would not be moved by the caller's `.to(device)`.)
        priors = torch.log(cls_num_list / cls_num_list.sum())
        self.register_buffer("cls_priors", priors)
        self.tau = tau

    def forward(self, logits, labels):
        # Guard against logits living on a different device than the buffer.
        adjusted_logits = logits + self.cls_priors.to(logits.device) / self.tau
        return nn.CrossEntropyLoss()(adjusted_logits, labels)

class TransformDataset(torch.utils.data.Dataset):
    """Wrap a subset and lazily apply `transform` to each sample (not its label)."""

    def __init__(self, subset, transform):
        self.subset = subset
        self.transform = transform

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, index):
        sample, label = self.subset[index]
        # A falsy transform (None) means pass the sample through untouched.
        return (self.transform(sample) if self.transform else sample, label)
    
@torch.no_grad()
def evaluate(model, loader, device):
    """Return top-1 accuracy (%) of `model` over every batch in `loader`."""
    model.eval()
    hits = 0
    seen = 0
    for batch_inputs, batch_labels in loader:
        batch_inputs = batch_inputs.to(device)
        batch_labels = batch_labels.to(device)
        predictions = model(batch_inputs).argmax(dim=1)
        hits += (predictions == batch_labels).sum().item()
        seen += batch_labels.size(0)
    return 100. * hits / seen

def main():
    """Fine-tune a torchvision classifier on a WebFG-400 `train/` tree.

    Pipeline: stratified train/val split of an ImageFolder dataset,
    class-balanced sampling (WeightedRandomSampler), logit-adjusted cross
    entropy, layer-wise LRs with linear warmup + cosine annealing, optional
    checkpoint resume, early stopping, and best-model checkpointing.
    """
    args = parse_args()
    device = torch.device(args.device)
    print(f"Using device: {device}")

    train_dir = os.path.join(args.data_dir, 'train')
    # NOTE(review): `assert` is stripped under `python -O`; raising would be safer.
    assert os.path.exists(train_dir), f"Train directory not found: {train_dir}"

    model_dir = Path(args.model_dir)
    model_dir.mkdir(parents=True, exist_ok=True)

    # Step 1: load the full dataset to obtain class info and indices to split
    full_dataset = datasets.ImageFolder(root=train_dir, transform=None)
    num_classes = len(full_dataset.classes)
    print(f"Full dataset size: {len(full_dataset)}, Classes: {num_classes}")

    # Step 2: stratified index split (reproducible via the fixed seed)
    targets = np.array(full_dataset.targets)
    indices = np.arange(len(full_dataset))
    train_indices, val_indices = train_test_split(
        indices,
        test_size=args.val_ratio,
        stratify=targets,
        random_state=42
    )
    print(f"Train indices: {len(train_indices)}, Val indices: {len(val_indices)}")

    # Step 3: build independent train/val ImageFolders (no is_valid_file needed)
    train_transform = get_transform(args.model_name, is_train=True)
    val_transform = get_transform(args.model_name, is_train=False)

    # Use the default loader and validation logic directly.
    # NOTE(review): each ImageFolder re-scans the whole directory tree; the
    # samples lists are overwritten below, so this only costs startup time.
    train_dataset = datasets.ImageFolder(root=train_dir, transform=train_transform)
    val_dataset = datasets.ImageFolder(root=train_dir, transform=val_transform)

    # Force an identical class-index mapping across all three datasets (critical!)
    train_dataset.class_to_idx = full_dataset.class_to_idx
    val_dataset.class_to_idx = full_dataset.class_to_idx

    # Manually restrict samples and targets to each split's indices
    train_dataset.samples = [full_dataset.samples[i] for i in train_indices]
    train_dataset.targets = [full_dataset.targets[i] for i in train_indices]
    val_dataset.samples = [full_dataset.samples[i] for i in val_indices]
    val_dataset.targets = [full_dataset.targets[i] for i in val_indices]

    # Step 4: create the DataLoaders
    class_counts = Counter(full_dataset.targets)
    class_freq = np.array([class_counts[i] for i in range(num_classes)], dtype=np.float32)

    # Per-sample weights: 1/sqrt(freq) softens rebalancing compared to 1/freq.
    # NOTE(review): combined with LogitAdjustedLoss below this compensates for
    # class imbalance twice — confirm that is intentional.
    sample_weights = [1.0 / np.sqrt(class_freq[target]) for target in train_dataset.targets]
    sampler = WeightedRandomSampler(
        weights=sample_weights,
        num_samples=len(sample_weights),
        replacement=True
    )

    # NOTE(review): train_loader lacks pin_memory=True while val_loader has it.
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, 
                            sampler=sampler, num_workers=args.num_workers)    
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                            num_workers=args.num_workers, pin_memory=True)

    # Model & optimizer
    model = build_model(args.model_name, num_classes).to(device)

    # Enable LogitAdjustedLoss (key for long-tailed data).
    # NOTE(review): make sure the class priors inside the loss actually end up
    # on `device` — .to(device) only relocates registered buffers/parameters.
    cls_num_list = torch.tensor([class_counts[i] for i in range(num_classes)], dtype=torch.float32)
    criterion = LogitAdjustedLoss(cls_num_list, tau=1.0).to(device)    

    # optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=5e-2)
    # optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=5e-4)  # larger weight decay
    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)

    # Layer-wise LRs: backbone at lr*0.1, head at lr.
    # NOTE(review): set_layerwise_lr reads .features/.classifier, which exist on
    # ConvNeXt but not on resnet50 — verify before using --model_name resnet50.
    param_groups = set_layerwise_lr(model, base_lr=args.lr * 0.1, head_lr=args.lr)
    optimizer = optim.AdamW(param_groups, lr=args.lr)  # per-group lrs override this default

    # Linear warmup for ~5% of epochs, then cosine annealing for the rest.
    warmup_epochs = max(1, int(0.05 * args.num_epochs))
    scheduler_warmup = LinearLR(optimizer, start_factor=0.1, total_iters=warmup_epochs)
    scheduler_cosine = CosineAnnealingLR(optimizer, T_max=args.num_epochs - warmup_epochs)
    scheduler = SequentialLR(optimizer, [scheduler_warmup, scheduler_cosine], [warmup_epochs])
    # scheduler = CosineAnnealingLR(optimizer, T_max=args.num_epochs - start_epoch)

    # Training loop
    # Initialize training state
    start_epoch = 0
    best_val_acc = 0.0
    epochs_no_improve = 0

    print(f"Start training {args.model_name} with {args.val_ratio*100:.1f}% val split...")

    # Resume if specified.
    # NOTE(review): scheduler state is neither saved in checkpoints nor restored
    # here, so the LR schedule restarts from its beginning after a resume.
    if args.resume:
        print(f"Loading checkpoint from {args.resume}")
        checkpoint = torch.load(args.resume, map_location=device, weights_only=True)
        if checkpoint['model_name'] != args.model_name:
            raise ValueError(f"Model name mismatch!")
        if checkpoint['num_classes'] != num_classes:
            raise ValueError(f"Class count mismatch!")
        model.load_state_dict(checkpoint['model_state_dict'])
        if 'optimizer_state_dict' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint.get('epoch', 0)
        best_val_acc = checkpoint.get('best_val_acc', 0.0)
        print(f"Resumed from epoch {start_epoch}, best val acc: {best_val_acc:.2f}%")

    print(f"Target acc: {args.target_acc}%, Patience: {args.early_stop_patience}, Delta: {args.early_stop_delta}%")


    for epoch in range(start_epoch, args.num_epochs):
        model.train()
        running_loss = 0.0

        # === Record the epoch start time ===
        epoch_start_time = time.time()
        epoch_start_time_str = time.strftime("%Y-%m-%d %H:%M:%S")
        print(f"Epoch {epoch+1} started at: {epoch_start_time_str}")

        # NOTE(review): disable=True already silences tqdm; the os.devnull
        # handle opened here is never closed (one leaked fd per epoch).
        pbar = tqdm(
            train_loader, desc=f"Epoch {epoch+1}/{args.num_epochs}",
            file=open(os.devnull, 'w'),  # discard tqdm output
            disable=True  # disable it outright
        )
        
        for inputs, labels in pbar:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            
            running_loss += loss.item()
            # Shows the running mean loss (a no-op while the bar is disabled).
            pbar.set_postfix(loss=running_loss/len(train_loader))

        scheduler.step()

        # Validation phase (the key signal for checkpointing / stopping!)
        val_acc = evaluate(model, val_loader, device)
        epoch_duration = time.time() - epoch_start_time        
        print(f"Epoch {epoch+1}: Val Acc = {val_acc:.2f}%, Time: {epoch_duration:.2f}s")

        # Check target accuracy
        if val_acc >= args.target_acc:
            print(f"🎯 Target accuracy {args.target_acc}% reached at epoch {epoch+1}!")
            torch.save({
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch + 1,
                'best_val_acc': max(best_val_acc, val_acc),
                'model_name': args.model_name,
                'num_classes': num_classes,
            }, f'{args.model_dir}/{args.save_model_name}_target_reached.pth')
            break

        # Early stopping logic: only a > delta improvement resets the counter.
        if val_acc > best_val_acc + args.early_stop_delta:
            best_val_acc = val_acc
            epochs_no_improve = 0
            torch.save({
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'epoch': epoch + 1,
                'best_val_acc': best_val_acc,
                'model_name': args.model_name,
                'num_classes': num_classes,
            }, f'{args.model_dir}/{args.save_model_name}.pth')
            print(f"🎉 New best model saved! Val Acc: {val_acc:.2f}%")
        else:
            epochs_no_improve += 1
            print(f"No significant improvement for {epochs_no_improve} epoch(s).")

        if epochs_no_improve >= args.early_stop_patience:
            print(f"🛑 Early stopping triggered after {args.early_stop_patience} epochs.")
            break

    print(f"Training finished. Best Val Acc: {best_val_acc:.2f}%")

if __name__ == '__main__':
    main()