import warnings

import numpy as np
import pandas as pd

import lightgbm as lgb
import shap
from joblib import Parallel, delayed
from scipy.stats import pearsonr, spearmanr
from sklearn.feature_selection import mutual_info_regression
from sklearn.model_selection import cross_val_score
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_sample_weight

# def analyze_feature_impact_optimized(model, X, y, model_type='recall', n_jobs=-1, sample_size=5000):
#     """
#     优化版特征影响分析 - 针对大数据集优化
#     :param model: 已训练模型
#     :param X: 特征数据
#     :param y: 标签数据
#     :param model_type: 'recall' 或 'precision'
#     :param n_jobs: 并行任务数
#     :param sample_size: 采样大小
#     :return: 特征影响DataFrame
#     """
#     # 1. 数据采样 (减少计算量)
#     if len(X) > sample_size:
#         sample_idx = np.random.choice(len(X), size=sample_size, replace=False)
#         X_sampled = X.iloc[sample_idx]
#         y_sampled = y.iloc[sample_idx]
#     else:
#         X_sampled = X.copy()
#         y_sampled = y.copy()
    
#     # 2. 内置特征重要性 (快速计算)
#     importance = pd.DataFrame({
#         'feature': model.feature_name(),
#         'gain': model.feature_importance(importance_type='gain'),
#         'split': model.feature_importance(importance_type='split')
#     })
    
#     # 3. 并行计算SHAP值 (使用树路径依赖近似法)
#     def calculate_shap():
#         # 使用更快的近似算法
#         explainer = shap.TreeExplainer(
#             model, 
#             feature_perturbation="tree_path_dependent"  # 避免创建背景数据集
#         )
        
#         # 聚焦关键类别（类型3）
#         if model_type == 'recall':
#             # 召回模型：关注类型3的正向贡献
#             shap_target = explainer.shap_values(X_sampled)[3]
#             return np.abs(shap_target).mean(axis=0), np.abs(shap_target).std(axis=0)
#         else:
#             # 精准模型：关注类型3的负向贡献
#             shap_target = explainer.shap_values(X_sampled)[3]
#             return np.abs(shap_target).mean(axis=0), np.abs(shap_target).std(axis=0)
    
#     shap_mean, shap_std = calculate_shap()
#     print(importance)
#     print(shap_mean)
#     importance['shap_mean'] = shap_mean
#     importance['shap_std'] = shap_std
    
#     # 4. 并行计算置换重要性
#     base_preds = model.predict(X_sampled)
#     base_score = base_preds.max(axis=1).mean()
    
#     def permutation_importance(col):
#         X_permuted = X_sampled.copy()
#         X_permuted[col] = np.random.permutation(X_permuted[col])
#         perm_preds = model.predict(X_permuted)
#         return base_score - perm_preds.max(axis=1).mean()
    
#     # 并行计算
#     permutation_scores = Parallel(n_jobs=n_jobs)(
#         delayed(permutation_importance)(col) for col in X_sampled.columns
#     )
#     importance['permutation_score'] = permutation_scores
    
#     # 5. 并行计算特征稳定性
#     pred_values = base_preds.argmax(axis=1)  # 使用类别预测代替概率
    
#     def feature_stability(col):
#         # 使用更快的pearsonr计算
#         with np.errstate(invalid='ignore'):
#             corr, _ = pearsonr(pred_values, X_sampled[col].values)
#         return -np.abs(corr) if not np.isnan(corr) else 0
    
#     stability_scores = Parallel(n_jobs=n_jobs)(
#         delayed(feature_stability)(col) for col in X_sampled.columns
#     )
#     importance['stability'] = stability_scores
    
#     return importance.sort_values('permutation_score', ascending=False)
import numpy as np
import pandas as pd
import shap
import lightgbm as lgb
from joblib import Parallel, delayed
from scipy.stats import pearsonr
from sklearn.utils import resample
import warnings

# # 忽略SHAP的警告
# warnings.filterwarnings("ignore", category=UserWarning, module="shap")

# def analyze_feature_impact_optimized(model, X, y, model_type='recall', n_jobs=-1, sample_size=5000):
#     """
#     优化版特征影响分析 - 修复维度问题并进一步优化
#     :param model: 已训练模型
#     :param X: 特征数据
#     :param y: 标签数据
#     :param model_type: 'recall' 或 'precision'
#     :param n_jobs: 并行任务数
#     :param sample_size: 采样大小
#     :return: 特征影响DataFrame
#     """
#     # 1. 数据采样 (减少计算量)
#     if len(X) > sample_size:
#         # 分层采样保持类别分布
#         sample_idx = []
#         for cls in np.unique(y):
#             cls_idx = np.where(y == cls)[0]
#             sample_size_cls = max(1, int(sample_size * len(cls_idx) / len(X)))
#             sample_idx.extend(np.random.choice(cls_idx, size=sample_size_cls, replace=False))
#         X_sampled = X.iloc[sample_idx]
#         y_sampled = y.iloc[sample_idx]
#     else:
#         X_sampled = X.copy()
#         y_sampled = y.copy()
    
#     # 2. 内置特征重要性 (快速计算)
#     importance = pd.DataFrame({
#         'feature': model.feature_name(),
#         'gain': model.feature_importance(importance_type='gain'),
#         'split': model.feature_importance(importance_type='split')
#     })
    
#     # 3. 修复SHAP值计算维度问题
#     try:
#         # 使用更快的近似算法
#         explainer = shap.TreeExplainer(
#             model, 
#             feature_perturbation="tree_path_dependent"  # 避免创建背景数据集
#         )
        
#         # 计算所有类别的SHAP值
#         shap_values = explainer.shap_values(X_sampled)
        
#         # 检查SHAP值的结构
#         if isinstance(shap_values, list):
#             # 多分类问题：SHAP值是一个列表，每个元素对应一个类别
#             if model_type == 'recall':
#                 # 召回模型：关注类型3的正向贡献
#                 shap_target = shap_values[3]  # 假设类型3是索引3
#             else:
#                 # 精准模型：关注类型3的负向贡献
#                 shap_target = shap_values[3]
            
#             # 确保shap_target是二维数组
#             if len(shap_target.shape) == 1:
#                 shap_target = shap_target.reshape(-1, 1)
            
#             importance['shap_mean'] = np.abs(shap_target).mean(axis=0)
#             importance['shap_std'] = np.abs(shap_target).std(axis=0)
#         else:
#             # 二分类问题：SHAP值是单个数组
#             importance['shap_mean'] = np.abs(shap_values).mean(axis=0)
#             importance['shap_std'] = np.abs(shap_values).std(axis=0)
#     except Exception as e:
#         print(f"SHAP计算失败: {e}")
#         # 使用内置特征重要性作为后备
#         importance['shap_mean'] = importance['gain'] / importance['gain'].max()
#         importance['shap_std'] = 0.0
    
#     # 4. 优化置换重要性计算 (并行+批处理)
#     base_preds = model.predict(X_sampled)
#     base_score = base_preds.max(axis=1).mean() if base_preds.ndim > 1 else base_preds.mean()
    
#     # 分批处理特征，减少内存使用
#     def batch_permutation_importance(cols):
#         results = {}
#         for col in cols:
#             X_permuted = X_sampled.copy()
#             X_permuted[col] = np.random.permutation(X_permuted[col].values)
#             perm_preds = model.predict(X_permuted)
#             perm_score = perm_preds.max(axis=1).mean() if perm_preds.ndim > 1 else perm_preds.mean()
#             results[col] = base_score - perm_score
#         return results
    
#     # 将特征分成批次
#     batch_size = 20  # 每批处理20个特征
#     features = list(X_sampled.columns)
#     batches = [features[i:i+batch_size] for i in range(0, len(features), batch_size)]
    
#     # 并行处理批次
#     batch_results = Parallel(n_jobs=n_jobs)(
#         delayed(batch_permutation_importance)(batch) for batch in batches
#     )
    
#     # 合并结果
#     permutation_scores = {}
#     for batch_result in batch_results:
#         permutation_scores.update(batch_result)
    
#     importance['permutation_score'] = importance['feature'].map(permutation_scores)
    
#     # 5. 优化特征稳定性计算
#     pred_values = base_preds.argmax(axis=1) if base_preds.ndim > 1 else (base_preds > 0.5).astype(int)
    
#     def feature_stability(col):
#         with np.errstate(invalid='ignore'):
#             # 使用更快的spearman相关系数
#             corr, _ = pearsonr(pred_values, X_sampled[col].values)
#         return -np.abs(corr) if not np.isnan(corr) else 0
    
#     stability_scores = Parallel(n_jobs=n_jobs)(
#         delayed(feature_stability)(col) for col in features
#     )
    
#     importance['stability'] = stability_scores
    
#     # 确保所有列都有值
#     importance.fillna(0, inplace=True)
    
#     return importance.sort_values('permutation_score', ascending=False)
import numpy as np
import pandas as pd
import shap
import lightgbm as lgb
from joblib import Parallel, delayed
from scipy.stats import pearsonr
import warnings

# Silence SHAP's UserWarnings globally (e.g. about feature_perturbation).
warnings.filterwarnings("ignore", category=UserWarning, module="shap")

def analyze_feature_impact_optimized(model, X, y, model_type='recall', n_jobs=-1, sample_size=5000):
    """
    Optimized feature-impact analysis (robust to empty classes while sampling).

    Produces one row per model feature combining four signals:
      - LightGBM built-in importance ('gain', 'split')
      - mean/std of |SHAP| for the target class ('shap_mean', 'shap_std')
      - permutation importance on mean prediction confidence ('permutation_score')
      - negative |corr(prediction, feature)| as a stability proxy ('stability')

    :param model: trained LightGBM Booster exposing feature_name(),
                  feature_importance() and predict()
    :param X: feature DataFrame
    :param y: label Series aligned with X (used only for stratified sampling)
    :param model_type: 'recall' or 'precision' (both currently target class 3)
    :param n_jobs: joblib worker count for the parallel sections
    :param sample_size: cap on rows used for the expensive computations
    :return: importance DataFrame sorted by permutation_score descending
    """
    # 1. Stratified sampling to cut computation cost (skips empty classes).
    if len(X) > sample_size:
        sample_idx = []
        unique_classes, class_counts = np.unique(y, return_counts=True)

        for cls in unique_classes:
            cls_idx = np.where(y == cls)[0]
            cls_count = len(cls_idx)

            if cls_count > 0:
                # Proportional share of the sample for this class, at least 1.
                sample_size_cls = max(1, int(sample_size * cls_count / len(X)))
                # Never request more rows than the class actually has.
                sample_size_cls = min(sample_size_cls, cls_count)

                sample_idx.extend(
                    np.random.choice(cls_idx, size=sample_size_cls, replace=False)
                )

        # Top up with unused rows if proportional rounding left us short.
        if len(sample_idx) < sample_size:
            remaining = sample_size - len(sample_idx)
            unused_idx = np.setdiff1d(np.arange(len(X)), sample_idx)
            if len(unused_idx) > 0:
                if remaining < len(unused_idx):
                    sample_idx.extend(np.random.choice(unused_idx, size=remaining, replace=False))
                else:
                    sample_idx.extend(unused_idx)

        X_sampled = X.iloc[sample_idx]
        y_sampled = y.iloc[sample_idx]
    else:
        X_sampled = X.copy()
        y_sampled = y.copy()

    print(f"采样完成: {len(X_sampled)} 个样本")

    # 2. Built-in feature importance (cheap).
    print("计算内置特征重要性...")
    importance = pd.DataFrame({
        'feature': model.feature_name(),
        'gain': model.feature_importance(importance_type='gain'),
        'split': model.feature_importance(importance_type='split')
    })

    # 3. SHAP values (falls back to normalized gain on any failure).
    print("计算SHAP值...")
    try:
        # tree_path_dependent avoids building a background dataset.
        explainer = shap.TreeExplainer(
            model,
            feature_perturbation="tree_path_dependent"
        )
        shap_values = explainer.shap_values(X_sampled)

        target_class = 3  # class of interest; assumed to be index 3
        if isinstance(shap_values, list):
            # Older SHAP multiclass layout: list of (n_samples, n_features).
            if target_class < len(shap_values):
                shap_target = shap_values[target_class]
            else:
                shap_target = shap_values[0]  # target class absent; use first
        elif getattr(shap_values, 'ndim', 2) == 3:
            # Newer SHAP multiclass layout: (n_samples, n_features, n_classes).
            shap_target = shap_values[:, :, min(target_class, shap_values.shape[2] - 1)]
        else:
            # Binary / regression: a single (n_samples, n_features) array.
            shap_target = shap_values

        # Guarantee a 2-D matrix before aggregating.
        if shap_target.ndim == 1:
            shap_target = shap_target.reshape(-1, 1)

        importance['shap_mean'] = np.abs(shap_target).mean(axis=0)
        importance['shap_std'] = np.abs(shap_target).std(axis=0)
    except Exception as e:
        print(f"SHAP计算失败: {e}")
        # Fall back to built-in gain (guard against an all-zero gain column).
        max_gain = importance['gain'].max()
        importance['shap_mean'] = importance['gain'] / max_gain if max_gain > 0 else 0.0
        importance['shap_std'] = 0.0

    # 4. Permutation importance on mean prediction confidence.
    print("计算置换重要性...")
    base_preds = model.predict(X_sampled)

    # Multiclass predictions are (n, n_classes); binary is a 1-D vector.
    if base_preds.ndim > 1:
        base_score = base_preds.max(axis=1).mean()
    else:
        base_score = base_preds.mean()

    def permutation_importance(col):
        # Shuffle one column and measure the drop in mean confidence.
        X_permuted = X_sampled.copy()
        X_permuted[col] = np.random.permutation(X_permuted[col].values)
        perm_preds = model.predict(X_permuted)

        if perm_preds.ndim > 1:
            perm_score = perm_preds.max(axis=1).mean()
        else:
            perm_score = perm_preds.mean()

        return base_score - perm_score

    permutation_scores = Parallel(n_jobs=n_jobs)(
        delayed(permutation_importance)(col) for col in X_sampled.columns
    )

    importance['permutation_score'] = permutation_scores

    # 5. Stability: negative |corr| between predicted class and feature values.
    print("计算特征稳定性...")
    if base_preds.ndim > 1:
        pred_values = base_preds.argmax(axis=1)
    else:
        pred_values = (base_preds > 0.5).astype(int)

    def feature_stability(col):
        try:
            with np.errstate(invalid='ignore'):
                corr, _ = pearsonr(pred_values, X_sampled[col].values)
                return -np.abs(corr) if not np.isnan(corr) else 0
        except Exception:  # e.g. constant column -> undefined correlation
            return 0

    stability_scores = Parallel(n_jobs=n_jobs)(
        delayed(feature_stability)(col) for col in X_sampled.columns
    )

    importance['stability'] = stability_scores

    # Guarantee a fully numeric frame before sorting.
    importance.fillna(0, inplace=True)

    print("分析完成!")
    return importance.sort_values('permutation_score', ascending=False)

    
# def analyze_feature_impact(model, X, y, model_type='recall'):
#     """
#     一、综合评估特征影响
#     :param model: 已训练模型
#     :param X: 特征数据
#     :param y: 标签数据
#     :param model_type: 'recall' 或 'precision'
#     :return: 特征影响DataFrame
#     """
#     # 1. 内置特征重要性
#     importance = pd.DataFrame({
#         'feature': model.feature_name(),
#         'gain': model.feature_importance(importance_type='gain'),
#         'split': model.feature_importance(importance_type='split')
#     })
    
#     # 2. SHAP值分析
#     explainer = shap.TreeExplainer(model)
#     shap_values = explainer.shap_values(X)
    
#     # 聚焦关键类别（类型3）
#     if model_type == 'recall':
#         shap_target = shap_values[3]  # 召回模型关注类型3的正向贡献
#     else:
#         shap_target = shap_values[3]  # 精准模型关注类型3的负向贡献
    
#     importance['shap_mean'] = np.abs(shap_target).mean(axis=0)
#     importance['shap_std'] = np.abs(shap_target).std(axis=0)
    
#     # 3. 特征置换重要性
#     base_score = model.predict(X).max(axis=1).mean()
#     permutation_scores = []
    
#     for col in X.columns:
#         X_permuted = X.copy()
#         X_permuted[col] = np.random.permutation(X_permuted[col])
#         perm_score = model.predict(X_permuted).max(axis=1).mean()
#         permutation_scores.append(base_score - perm_score)
    
#     importance['permutation_score'] = permutation_scores
    
#     # 4. 特征稳定性分析
#     stability_scores = []
#     for col in X.columns:
#         corr = np.corrcoef(model.predict(X), X[col])[0, 1]
#         stability_scores.append(-np.abs(corr))  # 负相关表示不稳定
    
#     importance['stability'] = stability_scores
    
#     return importance.sort_values('permutation_score', ascending=False)












def eliminate_negative_features(model, X, y, model_type, impact_df, n_rounds=3):
    """
    Iteratively drop features whose impact metrics look harmful.

    :param model: trained model used for scoring candidate feature sets
    :param X: feature DataFrame
    :param y: labels aligned with X
    :param model_type: 'recall' or 'precision' (selects the drop criterion)
    :param impact_df: output of the impact analysis (must contain columns
                      'feature', 'permutation_score', 'stability', 'shap_std')
    :param n_rounds: maximum elimination rounds
    :return: optimized feature-name list
    """
    best_features = set(X.columns)
    best_score = evaluate_model(model, X, y, model_type)

    print(f"初始性能: {best_score:.4f}")

    for i in range(n_rounds):
        # Select removal candidates by feature NAME. The frame's integer
        # index does not match column names, so the previous `.index`-based
        # lookup never removed anything from best_features.
        if model_type == 'recall':
            # Recall model: drop low-contribution AND unstable features.
            mask = (
                (impact_df['permutation_score'] < np.quantile(impact_df['permutation_score'], 0.3)) &
                (impact_df['stability'] < np.quantile(impact_df['stability'], 0.3))
            )
        else:
            # Precision model: drop high-volatility features.
            mask = (
                (impact_df['shap_std'] > np.quantile(impact_df['shap_std'], 0.7)) |
                (impact_df['stability'] > np.quantile(impact_df['stability'], 0.7))
            )
        candidates = set(impact_df.loc[mask, 'feature'])

        current_features = best_features - candidates

        if not current_features:
            print("无更多特征可剔除")
            break

        # The criterion is static, so once the candidate set stops changing
        # best_features there is nothing new to evaluate.
        if current_features == best_features:
            print("无更多特征可剔除")
            break

        # Score the reduced feature set.
        X_sub = X[list(current_features)]
        new_score = evaluate_model(model, X_sub, y, model_type)

        print(f"轮次 {i+1}: 特征数 {len(current_features)}, 性能 {new_score:.4f}")

        if new_score > best_score:
            best_score = new_score
            best_features = current_features
            print(f"✅ 性能提升至 {best_score:.4f}")
        else:
            print("⛔ 性能未提升，恢复原特征集")

    return list(best_features)

def evaluate_model(model, X, y, model_type):
    """
    Score a model according to its role.

    :param model: model whose predict() returns per-class probabilities
    :param X: features to score
    :param y: true labels
    :param model_type: 'recall' -> recall of class 3; otherwise precision of class 3
    :return: the requested metric, or 0.0 when it is undefined (no actual /
             predicted class-3 samples) instead of NaN or a zero-division
    """
    preds = model.predict(X).argmax(axis=1)
    y = np.asarray(y)

    # Fraction of all samples that are true positives for class 3
    # (shared numerator of both metrics).
    true_pos = np.mean((preds == 3) & (y == 3))

    if model_type == 'recall':
        denom = np.mean(y == 3)       # share of actual class-3 samples
    else:
        denom = np.mean(preds == 3)   # share of predicted class-3 samples

    # Guard the degenerate case rather than dividing by zero.
    return true_pos / denom if denom > 0 else 0.0





def optimize_model(original_model, X_train, y_train, model_type):
    """
    Main model-optimization entry point.

    Analyzes feature impact, prunes harmful features, then retrains a
    LightGBM model on the reduced feature set with role-specific parameters.

    :param original_model: already-trained LightGBM Booster
    :param X_train: training features (DataFrame)
    :param y_train: training labels
    :param model_type: 'recall' or 'precision'
    :return: (optimized_model, best_features) tuple
    """
    # 1. Feature-impact analysis. analyze_feature_impact_optimized is the
    #    implementation actually defined in this module (the plain
    #    analyze_feature_impact is commented out and would raise NameError).
    print("="*50)
    print(f"开始优化 {model_type} 模型...")
    impact_df = analyze_feature_impact_optimized(original_model, X_train, y_train, model_type)

    # 2. Drop features that hurt the target metric.
    best_features = eliminate_negative_features(
        original_model, X_train, y_train, model_type, impact_df
    )
    print(f"最终精选特征({len(best_features)}个): {best_features[:5]}...")

    # 3. Retrain on the selected features only.
    X_train_optimized = X_train[best_features]

    # Role-specific hyper-parameters.
    if model_type == 'recall':
        params = {
            'objective': 'multiclass',
            'num_class': 4,
            'metric': 'multi_logloss',
            'num_leaves': 127,
            'learning_rate': 0.05,
            'feature_fraction': 0.8  # keep more features to protect recall
        }
    else:
        params = {
            'objective': 'multiclass',
            'num_class': 4,
            'metric': 'multi_logloss',
            'num_leaves': 63,
            'learning_rate': 0.01,
            'feature_fraction': 0.5  # fewer features per tree for precision
        }

    # Up-weight the class of interest (class 3).
    class_weights = {3: 30} if model_type == 'recall' else {3: 50}
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    # Retrain. NOTE(review): lgb.early_stopping requires at least one
    # validation set; passing the training set here only keeps the callback
    # from raising — substitute a real holdout split when one is available.
    train_set = lgb.Dataset(X_train_optimized, y_train, weight=sample_weights)
    optimized_model = lgb.train(
        params,
        train_set,
        num_boost_round=1000,
        valid_sets=[train_set],
        callbacks=[lgb.early_stopping(50)]
    )

    return optimized_model, best_features


        










''' ==================================================== 旧方法 =============================================================='''
def mutual_info_analysis(df, target_col='ret3', exclude_cols=None):
    """
    Compute mutual information between every feature column and the target.

    :param df: input DataFrame
    :param target_col: target column name
    :param exclude_cols: non-feature columns to skip; defaults to
                         ['date', 'code', 'ret3'] (None default avoids the
                         shared-mutable-default-argument pitfall)
    :return: DataFrame (columns: feature, mi_score) sorted by mi_score desc
    """
    if exclude_cols is None:
        exclude_cols = ['date', 'code', 'ret3']

    # Feature columns are everything not explicitly excluded.
    feature_cols = [col for col in df.columns if col not in exclude_cols]

    # Drop rows with missing values in any used column.
    data = df[feature_cols + [target_col]].dropna()

    # Mutual information of each feature with the target.
    mi_scores = mutual_info_regression(data[feature_cols], data[target_col])

    result = pd.DataFrame({'feature': feature_cols, 'mi_score': mi_scores}) \
                   .sort_values('mi_score', ascending=False)
    return result

# 调用示例
# mi_result = mutual_info_analysis(df)


def spearman_analysis(df, target_col='ret3', exclude_cols=None):
    """
    Spearman correlation (and p-value) of each feature column vs the target.

    :param df: input DataFrame
    :param target_col: target column name
    :param exclude_cols: columns to skip; defaults to ['date', 'code', 'ret3']
                         (None default avoids the mutable-default pitfall)
    :return: DataFrame (feature, spearman_corr, p_value) sorted by |corr|
             descending; features with < 2 valid rows get NaN and sort last
    """
    if exclude_cols is None:
        exclude_cols = ['date', 'code', 'ret3']

    feature_cols = [col for col in df.columns if col not in exclude_cols]
    results = []

    for col in feature_cols:
        # Drop rows where either side of the pair is missing.
        valid_data = df[[col, target_col]].dropna()
        if len(valid_data) < 2:  # spearmanr needs at least 2 samples
            # Use NaN (not None) so the column stays numeric and the
            # sort key `abs` does not fail on object dtype.
            results.append({'feature': col, 'spearman_corr': np.nan, 'p_value': np.nan})
            continue

        corr, p_value = spearmanr(valid_data[col], valid_data[target_col])
        results.append({'feature': col, 'spearman_corr': corr, 'p_value': p_value})

    # sort_values places NaN last regardless of the key function.
    return pd.DataFrame(results).sort_values('spearman_corr', key=abs, ascending=False)

# 调用示例
# spearman_result = spearman_analysis(df)



def shap_analysis(df, target_col='ret3', exclude_cols=None, model_params=None):
    """
    SHAP feature importance from a LightGBM regressor fit on df.

    :param df: input DataFrame
    :param target_col: target column name
    :param exclude_cols: columns to skip; defaults to ['date', 'code', 'ret3']
                         (None default avoids the mutable-default pitfall)
    :param model_params: optional LightGBM parameter dict (defaults to a
                         small regression configuration)
    :return: (shap_values matrix, importance DataFrame sorted by mean |SHAP|)
    """
    if exclude_cols is None:
        exclude_cols = ['date', 'code', 'ret3']

    # Build the training matrix, dropping rows with missing values.
    feature_cols = [col for col in df.columns if col not in exclude_cols]
    data = df[feature_cols + [target_col]].dropna()
    X, y = data[feature_cols], data[target_col]

    # Default model parameters.
    if model_params is None:
        model_params = {
            'objective': 'regression',
            'n_estimators': 100,
            'random_state': 42,
            'verbosity': -1
        }

    # Fit a fresh model on the full (cleaned) data.
    model = lgb.LGBMRegressor(**model_params).fit(X, y)

    # SHAP values for every sample/feature pair.
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X)

    # Importance = mean absolute SHAP value per feature.
    shap_importance = pd.DataFrame({
        'feature': feature_cols,
        'shap_importance': np.abs(shap_values).mean(axis=0)
    }).sort_values('shap_importance', ascending=False)

    return shap_values, shap_importance

# 调用示例
# shap_values, shap_imp = shap_analysis(df)












def spearman_correlation_analysis(
    df: pd.DataFrame,
    x_columns: list,
    y_col: str,
    nan_handling: str = 'drop',
    alpha: float = 0.05
) -> pd.DataFrame:
    """
    Batch-compute Spearman rank correlations between feature columns and a target.

    Parameters:
        df (DataFrame): source data containing all analysed columns
        x_columns (list): feature column names to test
        y_col (str): target column name
        nan_handling (str): 'drop' (drop rows with NaN) or 'fill'
                            (fill each column with its mean)
        alpha (float): significance threshold, default 0.05

    Returns:
        DataFrame with one row per feature: coefficient (4 decimals),
        p-value (6 decimals) and a significance flag. Columns with fewer
        than 2 valid rows are skipped.
    """
    results = []

    for x_col in x_columns:
        # Select the column pair and apply the chosen NaN policy.
        temp_df = df[[x_col, y_col]].copy()

        if nan_handling == 'drop':
            temp_df = temp_df.dropna()
        elif nan_handling == 'fill':
            temp_df[x_col] = temp_df[x_col].fillna(temp_df[x_col].mean())
            temp_df[y_col] = temp_df[y_col].fillna(temp_df[y_col].mean())

        # Spearman needs at least two observations.
        if len(temp_df) < 2:
            continue

        corr, p_value = spearmanr(temp_df[x_col], temp_df[y_col])

        # Judge significance on the raw p-value and round only for display;
        # the old code rounded p to 3 decimals first, destroying the
        # precision the 6-decimal output column claims to carry.
        significance = '显著' if p_value < alpha else '不显著'

        results.append({
            '特征列': x_col,
            '斯皮尔曼系数': round(corr, 4),
            'p值': round(p_value, 6),
            '显著性': significance
        })

    return pd.DataFrame(results)
