import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.feature_selection import mutual_info_classif
from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder
from sklearn.impute import SimpleImputer
import warnings
warnings.filterwarnings('ignore')


def get_importance(model_path):
    """Load a LightGBM model and print its features ranked by gain importance.

    :param model_path: path to a saved LightGBM model file
    :return: None; the ranking is printed one "feature: gain" line at a time,
             in descending order of gain (values rounded to 2 decimals)
    """
    def safe_load_model(path):
        """Safely load a LightGBM Booster; returns None if all strategies fail."""
        try:
            return lgb.Booster(model_file=path)
        except lgb.LightGBMError as e:
            print(f"加载模型失败: {e}")
            # Fallback: read the file contents and rebuild the booster
            # from the raw model string.
            try:
                with open(path, 'r') as f:
                    model_str = f.read()
                return lgb.Booster(model_str=model_str)
            except Exception as e2:
                print(f"替代加载方式失败: {e2}")
                return None

    model_recall = safe_load_model(model_path)
    # BUG FIX: the original code dereferenced model_recall unconditionally,
    # crashing with AttributeError when both loading strategies failed.
    if model_recall is None:
        return None

    # Gain-based importance is what the ranking uses; the original also
    # computed split-based importance but never used it, so it is dropped.
    importance_gain = np.round(
        model_recall.feature_importance(importance_type='gain'), 2
    )
    feature_names = model_recall.feature_name()

    # Print features in descending order of gain.
    sorted_idx = importance_gain.argsort()[::-1]
    for idx in sorted_idx:
        print(f"{feature_names[idx]}: {importance_gain[idx]}")

def mutual_info_analysis(df, target_col='label', n_bins=15, sample_size=100000, top_n=140):
    """
    Optimized mutual-information analysis: imputes missing values, samples
    large datasets (stratified by class), discretizes continuous features,
    and ranks features by mutual information with the target.

    :param df: input DataFrame
    :param target_col: name of the target column
    :param n_bins: number of quantile bins for discretizing continuous features
    :param sample_size: maximum number of rows used for the analysis
    :param top_n: number of top-ranked features to return
    :return: DataFrame with columns 'feature' and 'mi_score', sorted by
             mi_score descending, truncated to the top_n rows
    """
    # 1. Prepare data: drop the target, then auxiliary columns.
    # BUG FIX: the original also built an unused `features` frame that
    # hardcoded 'label' (ignoring target_col) and raised if any listed
    # column was missing. errors='ignore' keeps this robust when an
    # auxiliary column (date/code/ret3) is absent.
    X = df.drop(columns=[target_col])
    X = X.drop(columns=['date', 'code', 'ret3'], errors='ignore')
    y = df[target_col]

    # 2. Impute missing numeric values with the column median.
    num_cols = X.select_dtypes(include=['float64']).columns
    if not num_cols.empty:
        num_imputer = SimpleImputer(strategy='median')
        X[num_cols] = num_imputer.fit_transform(X[num_cols])

    # 3. Sample rows, stratified by class to preserve the label distribution.
    if len(X) > sample_size:
        unique_classes, class_counts = np.unique(y, return_counts=True)
        sample_idx = []

        for cls in unique_classes:
            cls_idx = np.where(y == cls)[0]
            if len(cls_idx) > 0:
                # Proportional allocation, at least one row per class,
                # never more than the class actually has.
                sample_size_cls = max(1, int(sample_size * len(cls_idx) / len(X)))
                sample_size_cls = min(sample_size_cls, len(cls_idx))
                sample_idx.extend(
                    np.random.choice(cls_idx, size=sample_size_cls, replace=False)
                )

        # Top up with unused rows if rounding left us short of sample_size.
        if len(sample_idx) < sample_size:
            remaining = sample_size - len(sample_idx)
            unused_idx = np.setdiff1d(np.arange(len(X)), sample_idx)
            if len(unused_idx) > 0:
                if remaining < len(unused_idx):
                    sample_idx.extend(np.random.choice(unused_idx, size=remaining, replace=False))
                else:
                    sample_idx.extend(unused_idx)

        X_sampled = X.iloc[sample_idx]
        y_sampled = y.iloc[sample_idx]
    else:
        X_sampled = X.copy()
        y_sampled = y.copy()

    print(f"使用 {len(X_sampled)} 个样本进行分析 ({len(X_sampled)/len(X):.1%} 总数据)")

    # 4. Discretize continuous (float64) features into ordinal quantile bins.
    cont_features = X_sampled.select_dtypes(include=['float64']).columns
    if not cont_features.empty:
        discretizer = KBinsDiscretizer(n_bins=n_bins, encode='ordinal', strategy='quantile')
        X_sampled[cont_features] = discretizer.fit_transform(X_sampled[cont_features])

    # 5. Label-encode categorical features (NaN mapped to a 'Missing' level).
    cat_features = X_sampled.select_dtypes(include=['object', 'category']).columns
    for col in cat_features:
        le = LabelEncoder()
        if X_sampled[col].isnull().any():
            X_sampled[col] = X_sampled[col].fillna('Missing')
        X_sampled[col] = le.fit_transform(X_sampled[col])

    # 6. Mutual information between each feature and the target.
    mi_scores = mutual_info_classif(X_sampled, y_sampled, random_state=42)

    # 7. Rank features by MI score and return the top_n.
    mi_results = pd.DataFrame({
        'feature': X_sampled.columns,
        'mi_score': mi_scores
    }).sort_values('mi_score', ascending=False)

    return mi_results.head(top_n)



def analyze_nan_columns(df, plot=True):
    """
    Analyse the NaN situation of each column in a DataFrame.

    Parameters:
    df -- input DataFrame
    plot -- reserved visualisation flag; currently unused, kept only for
            backward compatibility with existing callers

    Returns:
    DataFrame with one row per column: 'column', 'nan_count',
    'nan_percentage', 'dtype' and 'rank' (1 = most NaNs), sorted by
    nan_count descending, numeric values rounded to 2 decimals.
    """
    # 1-2. Per-column NaN counts and their percentage of all rows.
    nan_counts = df.isna().sum()
    nan_percentages = (nan_counts / len(df)) * 100

    # 3. Assemble the statistics frame.
    nan_stats = pd.DataFrame({
        'column': df.columns,
        'nan_count': nan_counts.values,
        'nan_percentage': nan_percentages.values,
        'dtype': df.dtypes.values
    })

    # 4. Sort by NaN count and rank (1 = column with the most NaNs).
    nan_stats = nan_stats.sort_values('nan_count', ascending=False)
    nan_stats['rank'] = range(1, len(nan_stats) + 1)

    # 5. Whole-frame summary.
    total_nan = nan_counts.sum()
    total_percentage = (total_nan / (len(df) * len(df.columns))) * 100
    nan_stats = nan_stats.round(2)

    print(f"总 NaN 值数量: {total_nan}")
    print(f"总 NaN 占比: {total_percentage:.2f}%")
    print(f"包含 NaN 的列数: {len(nan_counts[nan_counts > 0])}/{len(df.columns)}")

    # 6. Detailed per-column table.
    print("\n各列 NaN 统计:")
    print(nan_stats[['rank', 'column', 'dtype', 'nan_count', 'nan_percentage']].to_string(index=False))

    # BUG FIX: the docstring promised this DataFrame but the original
    # function fell off the end and returned None.
    return nan_stats
    
