# validate_model.py
import os
import torch
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms, models
from torchvision.models import ResNet50_Weights, ConvNeXt_Large_Weights
from sklearn.model_selection import train_test_split
import numpy as np
import argparse
from pathlib import Path

# Allow PIL to load truncated image files instead of raising
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Suppress PIL UserWarnings (e.g. EXIF / palette noise)
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="PIL")

def get_transform(model_name, is_train=False):
    """Return the evaluation transform, identical to the training script's.

    `model_name` is accepted for interface parity with the training code;
    both supported architectures share the same 224x224 ImageNet
    preprocessing (resize -> tensor -> normalize).
    """
    # Guard clause: this script only performs validation.
    if is_train:
        raise NotImplementedError("Validation should use is_train=False")
    return transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

def build_model(model_name, num_classes, checkpoint_path, device):
    """Build the architecture, swap in a `num_classes` head, and load a checkpoint.

    Args:
        model_name: 'resnet50' or 'convnext_v2_large'.
        num_classes: output dimension of the replaced classification head.
        checkpoint_path: path to a .pth file containing 'model_state_dict'.
        device: torch.device (or device string) to place the model on.

    Returns:
        The model on `device`, switched to eval mode.

    Raises:
        ValueError: if `model_name` is not one of the supported names.
    """
    # weights=None: the checkpoint fully overwrites every parameter below
    # (load_state_dict is strict by default, so the state dict must be
    # complete) — downloading the ImageNet-pretrained weights first would
    # be pure wasted bandwidth and startup time.
    if model_name == 'convnext_v2_large':
        model = models.convnext_large(weights=None)
        model.classifier[2] = torch.nn.Linear(model.classifier[2].in_features, num_classes)
    elif model_name == 'resnet50':
        model = models.resnet50(weights=None)
        model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    # weights_only=True refuses arbitrary pickled objects in the checkpoint.
    checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=True)
    model.load_state_dict(checkpoint['model_state_dict'])
    model = model.to(device)
    model.eval()
    return model

def create_val_subset(dataset, val_ratio=0.1, random_state=42):
    """Reproduce the training script's stratified split; return its val part.

    Issues the exact same train_test_split call (same stratify labels,
    ratio and seed) as training, so the held-out indices match the ones
    the model never saw during fine-tuning.
    """
    labels = np.asarray(dataset.targets)
    all_indices = np.arange(len(dataset))
    _, held_out = train_test_split(
        all_indices,
        test_size=val_ratio,
        stratify=labels,
        random_state=random_state,
    )
    return Subset(dataset, held_out)

class TransformDataset(torch.utils.data.Dataset):
    """Wrap an indexable dataset and lazily apply `transform` to each sample.

    Lets the raw ImageFolder be split by index first, with the transform
    attached afterwards, so train/val can use different preprocessing.
    """

    def __init__(self, subset, transform):
        self.subset = subset
        self.transform = transform

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, index):
        sample, label = self.subset[index]
        # Falsy transform (None) means "pass the sample through untouched".
        return (self.transform(sample) if self.transform else sample), label

@torch.no_grad()
def evaluate(model, loader, device):
    """Compute top-1 accuracy (in percent) of `model` over `loader`.

    Args:
        model: a module already loaded with weights; set to eval mode here.
        loader: iterable yielding (inputs, labels) tensor batches.
        device: device to move each batch to before the forward pass.

    Returns:
        float: 100 * correct / total; 0.0 for an empty loader.
    """
    model.eval()
    correct = 0
    total = 0
    for inputs, labels in loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        # Top-1 prediction per sample along the class dimension.
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()
    # Guard: an empty loader previously raised ZeroDivisionError.
    return 100.0 * correct / total if total else 0.0

def main():
    """CLI entry point: rebuild the training-time validation split and report accuracy.

    Steps: parse args -> load ImageFolder -> reproduce the stratified
    val split -> attach eval transform -> load checkpoint -> evaluate.
    """
    parser = argparse.ArgumentParser(description="Validate a trained model on a subset of training data")
    parser.add_argument('--data_dir', type=str, required=True, help='Directory containing train/')
    parser.add_argument('--checkpoint', type=str, required=True, help='Path to model .pth checkpoint')
    parser.add_argument('--model_name', type=str, choices=['resnet50', 'convnext_v2_large'], required=True)
    parser.add_argument('--val_ratio', type=float, default=0.1, help='Validation ratio (same as training)')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
    args = parser.parse_args()

    device = torch.device(args.device)
    print(f"Using device: {device}")

    train_dir = os.path.join(args.data_dir, 'train')
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`,
    # which would let a missing directory fail later with a confusing error.
    if not os.path.isdir(train_dir):
        raise FileNotFoundError(f"Train directory not found: {train_dir}")

    # 1. Load the full training set without a transform (attached later).
    full_dataset = datasets.ImageFolder(root=train_dir, transform=None)
    num_classes = len(full_dataset.classes)
    print(f"Loaded dataset: {len(full_dataset)} images, {num_classes} classes")

    # 2. Stratified validation split, identical to the one used in training.
    val_subset = create_val_subset(full_dataset, val_ratio=args.val_ratio)
    print(f"Validation subset size: {len(val_subset)}")

    # 3. Attach the (deterministic) evaluation transform.
    val_dataset = TransformDataset(val_subset, get_transform(args.model_name, is_train=False))
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True
    )

    # 4. Build the model and load checkpoint weights.
    model = build_model(args.model_name, num_classes, args.checkpoint, device)
    print(f"Model loaded from: {args.checkpoint}")

    # 5. Evaluate and report top-1 accuracy.
    acc = evaluate(model, val_loader, device)
    print(f"Validation Accuracy on {args.val_ratio*100:.1f}% of training data: {acc:.2f}%")

# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()