import argparse
import importlib
import os

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# Import necessary modules (project-local)
from dataset.dataset_dataloader import create_dataloader
from model.loss import *

# Function to dynamically import a class from a string name
def get_class_by_name(module_name, class_name):
    """Dynamically import and return ``class_name`` from ``module_name``.

    Args:
        module_name: Dotted module path, e.g. ``'model.loss'``.
        class_name: Name of the attribute (typically a class) to fetch.

    Returns:
        The attribute named ``class_name`` of the imported module.

    Raises:
        ImportError: If the module cannot be imported.
        AttributeError: If the module has no attribute ``class_name``.
    """
    # importlib.import_module is the documented high-level API and avoids
    # the fromlist quirk of the low-level __import__ builtin.
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Function to dynamically import a model class from a string name
def get_model_class_by_name(module_base, class_name):
    """Import a model class from the module named after it.

    Project convention: each backbone lives in its own module named after
    the class, e.g. ``model.backbone.SwinTransformer.SwinTransformer``.

    Args:
        module_base: Package holding the model modules, e.g. ``'model.backbone'``.
        class_name: Both the submodule name and the class name inside it.

    Returns:
        The class object ``module_base.class_name.class_name``.

    Raises:
        ImportError: If the submodule cannot be imported.
        AttributeError: If the submodule lacks an attribute ``class_name``.
    """
    # importlib.import_module replaces the low-level __import__/fromlist idiom.
    module = importlib.import_module(f'{module_base}.{class_name}')
    return getattr(module, class_name)

def parse_args():
    """Define and parse the command-line options for this prediction script."""
    ap = argparse.ArgumentParser(description='Configuration for model prediction')

    # --- data settings ---
    ap.add_argument('--data_dir', type=str,
                    default=r'..\split_data\webfg400_train',
                    help='Directory for split data')

    # --- model settings ---
    ap.add_argument('--model_name', type=str, default='SwinTransformer',
                    help='选择模型的类名，例如：resnet101')
    ap.add_argument('--num_classes', type=int, default=400,
                    help='Number of classes')
    ap.add_argument('--weight_path', type=str,
                    default=r'D:\competition\2026quanqiu\code\weights\best_accuracy_epoch144_acc43.01.pth',
                    help='Path to the trained model weights (.pth file)')

    # --- prediction settings ---
    ap.add_argument('--batch_size', type=int, default=128,
                    help='Batch size for prediction')
    ap.add_argument('--augmentation_config', type=str,
                    default='augmentation_config.yml',
                    help='路径到数据增广配置的YAML文件')

    # --- other settings ---
    ap.add_argument('--random_seed', type=int, default=42,
                    help='Random seed for reproducibility')

    return ap.parse_args()

def load_model(args, device):
    """Instantiate the configured backbone and load its trained weights.

    Args:
        args: Parsed CLI namespace; reads ``model_name``, ``num_classes``
            and ``weight_path``.
        device: ``torch.device`` onto which weights are mapped and the
            model is moved.

    Returns:
        The model, on ``device`` and in eval mode.

    Raises:
        Whatever ``torch.load`` / ``load_state_dict`` raised, re-raised
        unchanged after logging the failure.
    """
    # Resolve the backbone class dynamically from model.backbone.<model_name>.
    model_class = get_model_class_by_name('model.backbone', args.model_name)
    model = model_class(num_classes=args.num_classes)

    # Load trained weights.
    # NOTE(security): torch.load unpickles the checkpoint file — only load
    # weights from trusted sources (consider weights_only=True if the file
    # is a plain state_dict and the installed torch version supports it).
    try:
        state_dict = torch.load(args.weight_path, map_location=device)
        model.load_state_dict(state_dict, strict=True)
        print(f"✓ Successfully loaded model weights from {args.weight_path}")
    except Exception as e:
        print(f"✗ Error loading model weights from {args.weight_path}: {str(e)}")
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (idiomatic over `raise e`).
        raise

    # Move model to device and switch to evaluation mode for inference.
    model.to(device)
    model.eval()

    return model

def predict_validation_set(args):
    """Evaluate the trained model on the validation split.

    Loads the model, builds the data loaders, runs one pass over the
    validation loader, and prints a results summary.

    Args:
        args: Parsed CLI namespace (model, data and prediction settings).

    Returns:
        Tuple ``(accuracy, avg_loss)`` — accuracy in percent, and the loss
        averaged over validation batches.
    """
    # Pick GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    # Seed RNGs for reproducibility.
    torch.manual_seed(args.random_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.random_seed)

    # Build the model with trained weights.
    print(f"Loading model: {args.model_name}")
    model = load_model(args, device)

    # Build data loaders; only the validation loader is consumed below.
    print("Creating data loaders...")
    train_loader, val_loader = create_dataloader(
        args,
        batch_size=args.batch_size,
        augmentation_config_path=args.augmentation_config
    )

    print("Starting prediction on validation set...")
    model.eval()

    # Loss is reported for reference alongside accuracy.
    criterion = get_class_by_name('model.loss', 'CrossEntropyLoss')()

    n_correct = 0
    n_samples = 0
    loss_sum = 0.0

    with torch.no_grad():
        for batch_x, batch_y in tqdm(val_loader, desc='Validation Prediction'):
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            # Forward pass and per-batch loss.
            logits = model(batch_x)
            loss_sum += criterion(logits, batch_y).item()

            # Accumulate accuracy statistics (argmax over class dimension).
            preds = logits.data.max(1)[1]
            n_correct += (preds == batch_y).sum().item()
            n_samples += batch_y.size(0)

    # Final metrics: percent accuracy, mean per-batch loss.
    accuracy = 100 * n_correct / n_samples
    avg_loss = loss_sum / len(val_loader)

    # Summary report.
    banner = "=" * 60
    print("\n" + banner)
    print("PREDICTION RESULTS")
    print(banner)
    print(f"Model: {args.model_name}")
    print(f"Weight Path: {args.weight_path}")
    print(f"Dataset: {args.data_dir}")
    print(f"Number of Classes: {args.num_classes}")
    print(f"Validation Set Size: {n_samples}")
    print(f"Validation Accuracy: {accuracy:.2f}%")
    print(f"Validation Loss: {avg_loss:.4f}")
    print(banner)

    return accuracy, avg_loss

if __name__ == "__main__":
    args = parse_args()

    # Echo the effective configuration before running.
    divider = "-" * 40
    print("Prediction Configuration:")
    print(divider)
    for arg, value in vars(args).items():
        print(f"{arg}: {value}")
    print(divider)

    # Top-level boundary: report any failure instead of crashing with a trace.
    try:
        accuracy, loss = predict_validation_set(args)
    except Exception as e:
        print(f"❌ Prediction failed: {str(e)}")
    else:
        print(f"\n🎯 Final Results:")
        print(f"   Validation Accuracy: {accuracy:.2f}%")
        print(f"   Validation Loss: {loss:.4f}")