import sys
from config.ssq_config import logger
import joblib  # NOTE(review): unconditional import — if joblib is absent this line raises before the guarded re-import below ever runs; one of the two is redundant
from typing import List, Dict, Tuple
import multiprocessing

# Framework/dependency imports (tolerate missing installs: each name is bound
# to None when the package is absent, and callers check for None before use)
try:
    import torch
except ImportError:
    torch = None

try:
    import pynvml
except ImportError:
    pynvml = None

try:
    import lightgbm as lgb
except ImportError:
    lgb = None

try:
    import xgboost as xgb
except ImportError:
    xgb = None

try:
    import sklearn
except ImportError:
    sklearn = None

try:
    import joblib
except ImportError:
    joblib = None

try:
    import optuna
except ImportError:
    optuna = None

def init_torch_device():
    """Pick the torch device to train on: CUDA if available, else CPU.

    Returns:
        torch.device: ``cuda`` when a GPU is visible, otherwise ``cpu``;
        ``None`` when PyTorch is not installed (the module-level import guard
        bound ``torch`` to None, so no device object can be constructed).
    """
    # FIX: the original wrapped this in `except ImportError`, but a missing
    # PyTorch shows up here as `torch is None` — the attribute access raised
    # an uncaught AttributeError, and the fallback `torch.device("cpu")`
    # could never succeed either. Check for None explicitly instead.
    if torch is None:
        logger.warning("未安装PyTorch, LSTM模型功能不可用！请执行 `pip install torch` 安装")
        return None
    if torch.cuda.is_available():
        device = torch.device("cuda")
        logger.info(f"PyTorch使用GPU加速: {torch.cuda.get_device_name(0)}")
    else:
        device = torch.device("cpu")
        logger.info("PyTorch使用CPU计算")
    return device
    
def _query_gpu_details() -> List[Dict]:
    """Collect one info row per visible CUDA GPU (or a single placeholder row).

    Uses pynvml (nvidia-ml-py) for precise free/used memory when available,
    falling back to PyTorch's built-in queries otherwise.
    """
    gpu_detail_list: List[Dict] = []
    if torch and torch.cuda.is_available():
        gpu_count = torch.cuda.device_count()
        # Initialise pynvml (nvidia-ml-py) for accurate memory figures.
        nvml_available = False
        try:
            if pynvml:
                pynvml.nvmlInit()
                nvml_available = True
                logger.info("提示: 成功导入 pynvml(nvidia-ml-py), 将使用其查询精准显存信息")
            else:
                logger.warning("提示: 未安装 nvidia-ml-py(pynvml), 请执行 'pip install nvidia-ml-py3' 安装以获取精准显存")
        except Exception as e:
            logger.error(f"提示: pynvml 初始化失败: {str(e)[:50]}, 将降级使用PyTorch内置函数查询显存")

        for gpu_idx in range(gpu_count):
            gpu_prop = torch.cuda.get_device_properties(gpu_idx)
            base_info = {
                "设备索引": gpu_idx,
                "GPU型号": gpu_prop.name,
                "总显存(GB)": round(gpu_prop.total_memory / 1024**3, 2),
                "空闲显存(GB)": "未知",
                "已用显存(GB)": "未知",
                "计算能力": f"{gpu_prop.major}.{gpu_prop.minor}",
                "当前使用": "是" if gpu_idx == torch.cuda.current_device() else "否"
            }

            if nvml_available:
                # Preferred path: NVML reports device-wide free/used memory.
                try:
                    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_idx)
                    mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                    base_info["空闲显存(GB)"] = round(mem_info.free / 1024**3, 2)
                    base_info["已用显存(GB)"] = round(mem_info.used / 1024**3, 2)
                except Exception as e:
                    logger.warning(f"警告: GPU {gpu_idx} 显存查询失败(pynvml异常): {str(e)[:50]}")
            else:
                # Fallback path. FIX: the original called torch.cuda.memory_free(),
                # which does not exist in PyTorch and always threw into the
                # warning branch; torch.cuda.mem_get_info() returns
                # (free_bytes, total_bytes) for the given device.
                try:
                    free_mem = torch.cuda.mem_get_info(gpu_idx)[0] / 1024**3
                    used_mem = torch.cuda.memory_allocated(gpu_idx) / 1024**3
                    base_info["空闲显存(GB)"] = round(free_mem, 2)
                    base_info["已用显存(GB)"] = round(used_mem, 2)
                    logger.info(f"提示: GPU {gpu_idx} 降级使用PyTorch内置函数查询显存")
                except Exception as e:
                    logger.warning(f"警告: GPU {gpu_idx} 显存查询失败(PyTorch降级方案): {str(e)[:50]}")

            gpu_detail_list.append(base_info)

        # Release NVML resources once every GPU has been queried.
        if nvml_available:
            try:
                pynvml.nvmlShutdown()
            except Exception as e:
                logger.error(f"警告: 关闭pynvml资源失败: {str(e)[:50]}")
    else:
        # No usable GPU: emit a placeholder row so the table still renders.
        gpu_detail_list.append({
            "设备索引": "-",
            "GPU型号": "-",
            "总显存(GB)": "-",
            "空闲显存(GB)": "-",
            "已用显存(GB)": "-",
            "计算能力": "-",
            "当前使用": "-"
        })
        logger.warning("警告: 未检测到可用PyTorch GPU环境, 将使用CPU训练(可能较慢)")
    return gpu_detail_list


def _collect_component_versions() -> Dict:
    """Return component name -> version string (or '未安装' when absent)."""
    return {
        "Python": sys.version.split()[0],
        "joblib": joblib.__version__ if joblib else "未安装",
        "optuna": optuna.__version__ if optuna else "未安装",
        "PyTorch": torch.__version__ if torch else "未安装",
        "LightGBM": lgb.__version__ if lgb else "未安装",
        "XGBoost": xgb.__version__ if xgb else "未安装",
        "XGBoost-CUDA支持": xgb.build_info().get("USE_CUDA", False) if xgb else "未安装",
        "scikit-learn": sklearn.__version__ if sklearn else "未安装"
    }


def _log_gpu_table(gpu_detail_list: List[Dict]) -> None:
    """Log the collected GPU rows as a fixed-width table."""
    logger.info("[GPU详细信息]")
    logger.info("-" * 100)
    gpu_headers = ["设备索引", "GPU型号", "总显存(GB)", "空闲显存(GB)", "已用显存(GB)", "计算能力", "当前使用"]
    header_format = "{:<8} {:<30} {:<12} {:<12} {:<12} {:<10} {:<6}"
    logger.info(header_format.format(*gpu_headers))
    logger.info("-" * 100)
    for gpu_info in gpu_detail_list:
        # Truncate over-long GPU model names so the column stays aligned.
        logger.info(header_format.format(
            gpu_info["设备索引"],
            gpu_info["GPU型号"][:28] + "..." if len(gpu_info["GPU型号"]) > 30 else gpu_info["GPU型号"],
            gpu_info["总显存(GB)"],
            gpu_info["空闲显存(GB)"],
            gpu_info["已用显存(GB)"],
            gpu_info["计算能力"],
            gpu_info["当前使用"]
        ))


def _log_version_table(version_info: Dict) -> None:
    """Log the dependency-version mapping as a two-column table."""
    logger.info("[依赖组件版本信息]")
    logger.info("-" * 80)
    version_headers = ["组件名称", "版本/状态"]
    version_format = "{:<20} {:<50}"
    logger.info(version_format.format(*version_headers))
    logger.info("-" * 80)
    for name, value in version_info.items():
        # Booleans (the XGBoost-CUDA flag) render as 是/否; everything else as text.
        display_value = "是" if value is True else "否" if value is False else str(value)
        logger.info(version_format.format(name, display_value[:48] + "..." if len(str(display_value)) > 50 else display_value))


def check_environment_with_gpu_detail() -> bool:
    """Combined environment check: GPU details + dependency-component versions.

    Logs a per-GPU detail table and a component-version table, then verifies
    that the core dependencies (Python, PyTorch, scikit-learn) are importable.

    Returns:
        bool: True when all core dependencies are present, False otherwise.
    """
    logger.info("=" * 80)
    logger.info("                      运行环境综合检查(GPU+依赖组件)")
    logger.info("=" * 80)

    # 1. GPU details, 2. component versions, 3. table output — same order
    # (and thus same log-line order) as the original monolithic body.
    gpu_detail_list = _query_gpu_details()
    version_info = _collect_component_versions()
    _log_gpu_table(gpu_detail_list)
    _log_version_table(version_info)

    # 4. Summary: core components that must be importable for training.
    logger.info("=" * 80)
    try:
        # The "Python" entry is trivially True — the interpreter is running.
        # (The original tested `sys.version.split()[0] is not None`, which is
        # always True as well.)
        required_components = [
            ("Python", True),
            ("PyTorch", torch is not None),
            ("scikit-learn", sklearn is not None)
        ]
        missing_required = [name for name, exists in required_components if not exists]

        if missing_required:
            logger.warning(f"环境检查失败: 缺少核心依赖组件: {', '.join(missing_required)}")
            return False
        logger.info("环境检查通过: 所有核心依赖组件已安装, GPU信息正常获取")
        return True
    except Exception as e:
        logger.critical(f"环境检查异常: {str(e)}")
        return False

def check_gpu_and_get_process_num(
    gpu_process_ratio: int = 2,
    max_process_limit: int = 16,
    min_process_num: int = 1
) -> Tuple[bool, int]:
    """Shared GPU detection + suggested worker count for multi-process training.

    Probes PyTorch first (most reliable), then LightGBM, then XGBoost; all
    three may be absent (the module-level import guards bind them to None).

    Args:
        gpu_process_ratio: processes per GPU (default 2; 3-4 for 16 GB cards,
            1-2 for 8 GB cards).
        max_process_limit: hard cap on the suggestion (default 16, to avoid
            resource contention from too many workers).
        min_process_num: floor on the suggestion (default 1).

    Returns:
        Tuple[bool, int]: (usable GPU present?, suggested process count).
    """
    has_gpu = False
    gpu_count = 0

    # -------------------------- 1. Joint GPU detection across frameworks --------------------------
    # PyTorch (highest priority, most generic)
    if torch is not None and torch.cuda.is_available():
        gpu_count = torch.cuda.device_count()
        has_gpu = gpu_count > 0
        logger.info(f"PyTorch检测到GPU: 数量={gpu_count}, 设备列表={[torch.cuda.get_device_name(i) for i in range(gpu_count)]}")
    # LightGBM (supplementary)
    elif lgb is not None:
        # NOTE(review): lightgbm exposes no `available_devices()` in current
        # releases, so this call is expected to land in the except branch.
        # Kept as-is (the exception is caught and logged) — TODO confirm
        # against the installed LightGBM version before relying on it.
        try:
            devices = lgb.available_devices()
            gpu_count = devices.count("gpu")
            has_gpu = gpu_count > 0
            logger.info(f"LightGBM检测到GPU数量: {gpu_count}")
        except Exception as e:
            logger.error(f"LightGBM GPU检测失败: {str(e)}")
    # XGBoost (supplementary)
    elif xgb is not None:
        try:
            build_info = xgb.build_info()
            xgb_cuda_available = build_info.get("USE_CUDA", False)
            if xgb_cuda_available:
                # XGBoost does not report a GPU count. FIX: in this branch
                # torch is either None or sees no CUDA device, so the
                # original `torch.cuda.device_count()` returned 0, leaving
                # gpu_count=0 with has_gpu=True and a suggestion that
                # collapsed to the minimum; floor the estimate at 1 GPU.
                gpu_count = max(torch.cuda.device_count(), 1) if torch else 1
                has_gpu = True
                logger.info(f"XGBoost编译支持CUDA, 预估GPU数量: {gpu_count}")
            else:
                logger.error("XGBoost编译时未启用CUDA")
        except Exception as e:
            logger.error(f"XGBoost GPU检测失败: {str(e)}")
    else:
        logger.error("未检测到XGBoost/LightGBM/PyTorch, 无法进行GPU兼容性检测")

    # -------------------------- 2. Suggested process count --------------------------
    if has_gpu:
        # GPU mode: GPUs × per-GPU ratio, capped by the hard limit.
        suggested_process_num = min(gpu_count * gpu_process_ratio, max_process_limit)
        logger.info(f"GPU模式 - 建议进程数: {suggested_process_num} (GPU数量={gpu_count} × 单GPU进程比={gpu_process_ratio})")
    else:
        # CPU mode: half the core count, never below the floor.
        cpu_core_num = multiprocessing.cpu_count()
        suggested_process_num = max(cpu_core_num // 2, min_process_num)
        suggested_process_num = min(suggested_process_num, max_process_limit)
        logger.info(f"CPU模式 - 建议进程数: {suggested_process_num} (CPU核心数={cpu_core_num} ÷ 2)")

    # Final clamp into [min_process_num, max_process_limit].
    suggested_process_num = max(min(suggested_process_num, max_process_limit), min_process_num)
    logger.info(f"最终建议多进程训练进程数: {suggested_process_num}")

    return has_gpu, suggested_process_num
