import psutil
import platform
import json
import os

class ModelSelector:
    """Recommend an LLM model and runtime parameters based on host hardware."""

    def __init__(self):
        # Probe the host once at construction; recommend_model() reads this snapshot.
        self.system_info = self.get_system_info()

    def get_system_info(self):
        """Collect host hardware information.

        Returns:
            dict with keys: cpu_cores (int), total_memory (int, GB, rounded),
            system (str), cpu_model (str), gpu_available (bool), and — when a
            CUDA device is present — gpu_name (str) and gpu_memory (float, GB).
        """
        # psutil.cpu_count(logical=False) may return None when the physical
        # core count cannot be determined (some platforms/containers).
        # Fall back to the logical count, then to 1, so downstream
        # arithmetic (min(cores, 8)) never sees None.
        physical_cores = psutil.cpu_count(logical=False)
        info = {
            "cpu_cores": physical_cores or psutil.cpu_count() or 1,
            "total_memory": round(psutil.virtual_memory().total / (1024**3)),  # total RAM in GB
            "system": platform.system(),
            "cpu_model": platform.processor(),
        }

        # GPU detection is best-effort: torch may not be installed at all.
        try:
            import torch
            info["gpu_available"] = torch.cuda.is_available()
            if info["gpu_available"]:
                info["gpu_name"] = torch.cuda.get_device_name(0)
                info["gpu_memory"] = torch.cuda.get_device_properties(0).total_memory / (1024**3)  # GB
        except ImportError:
            info["gpu_available"] = False

        return info

    def recommend_model(self):
        """Recommend a model and runtime configuration for this host.

        Returns:
            dict with keys: model (dict: name, quantization, description),
            threads (int), context_length (int), system_info (dict).
        """
        memory = self.system_info["total_memory"]
        # Defensive: tolerate a missing or None core count so this method
        # never crashes on unusual psutil results.
        cores = self.system_info.get("cpu_cores") or 1

        # Pick the largest model the available RAM can comfortably host.
        if memory >= 64:
            model = {
                "name": "mixtral:8x7b",
                "quantization": "Q4_K_M",
                "description": "Mixtral 8x7B - 高性能多专家模型"
            }
        elif memory >= 32:
            model = {
                "name": "codellama:13b",
                "quantization": "Q4_K_M",
                "description": "CodeLlama 13B - 适合编程任务的大型模型"
            }
        elif memory >= 16:
            model = {
                "name": "codellama:7b",
                "quantization": "Q4_0",
                "description": "CodeLlama 7B - 平衡性能与资源占用"
            }
        else:
            model = {
                "name": "llama2:7b",
                "quantization": "Q4_0",
                "description": "Llama2 7B - 基础模型，适合资源受限环境"
            }

        config = {
            "model": model,
            "threads": min(cores, 8),  # cap suggested thread count at 8
            "context_length": 4096,    # default context window
            "system_info": self.system_info
        }

        return config
