"""
模型相关工具模块
包含模型创建、设备管理、模型文件操作等功能
"""
import os
import glob
from datetime import datetime


def get_device():
    """Return the best available torch device: CUDA, then MPS, then CPU.

    Returns:
        torch.device: the selected compute device.
    """
    # Lazy import so merely importing this module doesn't pay torch's
    # (slow) startup cost at application launch.
    import torch

    # Preference order: NVIDIA GPU > Apple-silicon GPU > CPU fallback.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


def get_optimal_workers():
    """Return the DataLoader worker count to use on this platform."""
    # Worker subprocesses are known to misbehave under macOS and inside the
    # Flask-SocketIO server process, so data loading always stays in the
    # main process (0 workers) regardless of platform or device.
    return 0


def create_resnext_model(num_classes, device):
    """Create a pretrained ResNeXt-50 with a classifier head for `num_classes`.

    Args:
        num_classes: number of output classes for the final linear layer.
        device: torch device to move the model onto.

    Returns:
        The initialized model, already on `device`.
    """
    # Lazy imports keep torch/torchvision out of the module import path.
    import torch.nn as nn
    from torchvision import models

    try:
        # torchvision >= 0.13 weights API.
        # Only the constructor call is inside the try: a failure while
        # replacing the head or moving to `device` must NOT trigger the
        # legacy fallback (it would mask the real error).
        backbone = models.resnext50_32x4d(
            weights=models.ResNeXt50_32X4D_Weights.DEFAULT
        )
    except Exception as e:
        # Older torchvision releases only understand `pretrained=`.
        print(f"Warning: {e}. Falling back to legacy weight loading.")
        backbone = models.resnext50_32x4d(pretrained=True)

    # Swap the ImageNet head for one sized to our label set.
    backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
    return backbone.to(device)


def get_latest_model(model_folder):
    """Find the most recently created trained model in `model_folder`.

    Returns the bare filename of the newest 'trained_model_*.pth' file,
    or None when no model exists or lookup fails.
    """
    pattern = os.path.join(model_folder, 'trained_model_*.pth')
    try:
        candidates = glob.glob(pattern)
        if not candidates:
            return None
        # Newest by filesystem creation time.
        newest = max(candidates, key=os.path.getctime)
        return os.path.basename(newest)
    except Exception as e:
        print(f"Error getting latest model: {e}")
        return None


def save_model_and_labels(model, labels, model_folder):
    """Persist a model checkpoint and its labels under a shared timestamp.

    Args:
        model: model whose state_dict() is checkpointed.
        labels: JSON-serializable label list to save alongside the model.
        model_folder: destination directory for both files.

    Returns:
        The model checkpoint filename (not the full path).
    """
    import torch
    import json

    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_filename = f"trained_model_{stamp}.pth"

    # Checkpoint the weights only (state_dict), not the full module object.
    torch.save(model.state_dict(), os.path.join(model_folder, model_filename))

    # The labels file shares the timestamp so it can be paired with the
    # checkpoint later (see load_model_and_labels).
    with open(os.path.join(model_folder, f"labels_{stamp}.json"),
              'w', encoding='utf-8') as f:
        json.dump(labels, f, ensure_ascii=False, indent=4)

    return model_filename


def load_model_and_labels(model_name, model_folder, device):
    """Load a saved model checkpoint and its companion labels file.

    Args:
        model_name: checkpoint filename, e.g. "trained_model_<stamp>.pth".
        model_folder: directory holding both the checkpoint and labels files.
        device: torch device the weights are mapped onto.

    Returns:
        (model, labels_list) tuple.

    Raises:
        FileNotFoundError: when the name is malformed or either file is missing.
    """
    import torch
    import json

    model_path = os.path.join(model_folder, model_name)

    # Derive the labels filename from the timestamp embedded in the model
    # name. Guard the parse: a name without the expected prefix previously
    # crashed with an opaque IndexError on split()[1]; report it as a
    # missing-file condition instead, which callers already handle.
    _, sep, stamp_part = model_name.partition('trained_model_')
    if not sep or not stamp_part.endswith('.pth'):
        raise FileNotFoundError(f"模型文件或标签文件不存在: {model_name}")
    labels_filename = "labels_" + stamp_part[:-len('.pth')] + '.json'
    labels_path = os.path.join(model_folder, labels_filename)

    if not os.path.exists(model_path) or not os.path.exists(labels_path):
        raise FileNotFoundError(f"模型文件或标签文件不存在: {model_name}")

    # Load labels first: their count determines the classifier head size.
    with open(labels_path, 'r', encoding='utf-8') as f:
        labels_list = json.load(f)

    # Rebuild the architecture, then restore the saved weights onto `device`.
    model = create_resnext_model(len(labels_list), device)
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load checkpoints from trusted sources.
    model.load_state_dict(torch.load(model_path, map_location=device))

    return model, labels_list


def cleanup_gpu_memory(device):
    """Release cached accelerator memory for the given device, if possible.

    No-op for CPU devices.
    """
    import torch

    dev_type = device.type
    if dev_type == 'cuda':
        torch.cuda.empty_cache()
        return
    if dev_type == 'mps':
        # torch.mps.empty_cache only exists in newer torch builds; probe
        # for it instead of assuming.
        mps_mod = getattr(torch, 'mps', None)
        empty_cache = getattr(mps_mod, 'empty_cache', None)
        if empty_cache is not None:
            empty_cache()
