| | import pandas as pd |
| | import numpy as np |
| | from scipy.stats import pearsonr |
| | import warnings |
| | from concurrent.futures import ThreadPoolExecutor, as_completed |
| | import time |
| | warnings.filterwarnings('ignore') |
| |
|
| | |
class Config:
    """Static configuration for the IC-analysis script (paths, label, runtime knobs)."""

    # Raw parquet inputs.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"

    # Pre-aggregated parquet inputs (preferred when USE_AGGREGATED_DATA is True;
    # main() falls back to the raw paths if these files are missing).
    AGGREGATED_TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    AGGREGATED_TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"

    # Name of the target column; every other column is treated as a feature.
    LABEL_COLUMN = "label"

    # Thread count for the parallel IC computation.
    MAX_WORKERS = 4
    # Whether to try the aggregated parquet first.
    USE_AGGREGATED_DATA = True

    # Where the CSV reports are written.
    OUTPUT_DIR = "./ic_analysis_results"
    # Also write the merged per-feature report (detailed_ic_analysis.csv).
    SAVE_DETAILED_RESULTS = True
| |
|
def fast_ic_calculation(df, features, label_col, max_workers=4):
    """Compute each feature's IC (Pearson correlation with the label) in parallel.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame holding both the features and the label column.
    features : list of str
        Feature column names to evaluate.
    label_col : str
        Name of the label column.
    max_workers : int
        Number of worker threads for the thread pool.

    Returns
    -------
    (ic_values, p_values) : tuple of pd.Series
        Per-feature Pearson correlation and its p-value, indexed by feature
        name.  (Fix: the old docstring documented only ``ic_values`` even
        though the function has always returned both Series.)

    Notes
    -----
    Features for which the correlation is undefined (e.g. constant columns,
    where ``pearsonr`` yields NaN) or where ``pearsonr`` raises are reported
    as ``(ic=0.0, p=1.0)`` so NaNs cannot poison downstream aggregates such
    as ``ic_values.mean()``.
    """

    print(f"开始计算特征IC值 (特征数量: {len(features)})")
    start_time = time.time()

    # Hoist the label Series: one lookup instead of one per submitted task.
    label_data = df[label_col]

    def calculate_ic(feature):
        """Compute (feature, ic, p_value) for one column; never raises."""
        try:
            ic, p_value = pearsonr(df[feature], label_data)
            if np.isnan(ic):
                # Constant/degenerate column: treat like the failure case
                # instead of propagating NaN.
                return feature, 0.0, 1.0
            return feature, ic, p_value
        except Exception as e:
            print(f"计算特征 {feature} 的IC值时出错: {e}")
            return feature, 0.0, 1.0

    ic_dict = {}
    p_value_dict = {}

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature for feature in features}

        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic, p_value = future.result()
            ic_dict[feature] = ic
            p_value_dict[feature] = p_value
            completed += 1

            # Coarse progress log so huge feature sets stay observable.
            if completed % 50 == 0:
                print(f"IC计算进度: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")

    ic_values = pd.Series(ic_dict)
    p_values = pd.Series(p_value_dict)

    print(f"IC值计算耗时: {time.time() - start_time:.2f}秒")

    return ic_values, p_values
| |
|
def calculate_feature_statistics(df, features, label_col):
    """Compute per-feature descriptive statistics.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame holding the feature columns.
    features : list of str
        Feature column names to summarize.
    label_col : str
        Label column name.  Currently unused by the computation; kept so the
        signature matches the other analysis helpers in this module.

    Returns
    -------
    stats_df : pd.DataFrame
        One row per feature with columns: feature, mean, std, min, max,
        missing_count/ratio, zero_count/ratio, outlier_count/ratio.
        A feature whose statistics fail to compute gets a row of NaNs.

    Notes
    -----
    "Outlier" means |x - mean| > 3 * std (classic 3-sigma rule).
    (Fix: removed the unused ``label_data`` local from the original.)
    """

    print("计算特征统计信息...")

    # Columns that get NaN when a feature's statistics cannot be computed.
    nan_keys = ('mean', 'std', 'min', 'max',
                'missing_count', 'missing_ratio',
                'zero_count', 'zero_ratio',
                'outlier_count', 'outlier_ratio')

    stats_data = []
    n_rows = len(df)  # hoisted: identical for every column of df

    for feature in features:
        try:
            feature_data = df[feature]

            mean_val = feature_data.mean()
            std_val = feature_data.std()

            missing_count = feature_data.isna().sum()
            zero_count = (feature_data == 0).sum()
            # 3-sigma outliers; NaN comparisons are False, so NaNs never count.
            outlier_count = ((feature_data - mean_val).abs() > 3 * std_val).sum()

            stats_data.append({
                'feature': feature,
                'mean': mean_val,
                'std': std_val,
                'min': feature_data.min(),
                'max': feature_data.max(),
                'missing_count': missing_count,
                'missing_ratio': missing_count / n_rows,
                'zero_count': zero_count,
                'zero_ratio': zero_count / n_rows,
                'outlier_count': outlier_count,
                'outlier_ratio': outlier_count / n_rows,
            })

        except Exception as e:
            print(f"计算特征 {feature} 统计信息时出错: {e}")
            stats_data.append({'feature': feature, **{k: np.nan for k in nan_keys}})

    return pd.DataFrame(stats_data)
| |
|
def create_ic_analysis_report(ic_values, p_values, stats_df, output_dir):
    """Build, save, and print the IC analysis report.

    Parameters
    ----------
    ic_values : pd.Series
        Per-feature IC values, indexed by feature name.
    p_values : pd.Series
        Per-feature p-values, indexed by feature name.
    stats_df : pd.DataFrame
        Per-feature statistics (must contain a 'feature' column).
    output_dir : str
        Directory the CSV reports are written to (created if missing).

    Returns
    -------
    report_df : pd.DataFrame
        Merged report sorted by |IC| descending, with an 'ic_rank' column.

    Notes
    -----
    Fix: the original passed the index-aligned Series ``p_values < 0.05``
    into a ``pd.DataFrame`` constructor alongside positional ``.values``
    arrays — correct only while ``ic_values`` and ``p_values`` happened to
    share the same index order.  We now reindex ``p_values`` explicitly to
    ``ic_values.index`` so the columns can never silently mis-align.
    """

    print("创建IC分析报告...")

    import os
    os.makedirs(output_dir, exist_ok=True)

    # Align p-values to the IC index explicitly, then build from plain arrays.
    p_aligned = p_values.reindex(ic_values.index)
    report_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'ic_abs': np.abs(ic_values.values),
        'p_value': p_aligned.values,
        'is_significant': (p_aligned < 0.05).values,
    })

    # Attach descriptive statistics; left join keeps every IC row even if a
    # feature is missing from stats_df.
    report_df = report_df.merge(stats_df, on='feature', how='left')

    report_df = report_df.sort_values('ic_abs', ascending=False)

    # method='min' gives ties the same (best) rank.
    report_df['ic_rank'] = report_df['ic_abs'].rank(ascending=False, method='min')

    if Config.SAVE_DETAILED_RESULTS:
        detailed_path = os.path.join(output_dir, 'detailed_ic_analysis.csv')
        report_df.to_csv(detailed_path, index=False)
        print(f"详细IC分析报告已保存: {detailed_path}")

    # Compact summary without the descriptive-statistics columns.
    simple_df = report_df[['feature', 'ic_value', 'ic_abs', 'ic_rank', 'p_value', 'is_significant']].copy()
    simple_path = os.path.join(output_dir, 'ic_analysis_summary.csv')
    simple_df.to_csv(simple_path, index=False)
    print(f"IC分析摘要已保存: {simple_path}")

    stats_path = os.path.join(output_dir, 'feature_statistics.csv')
    stats_df.to_csv(stats_path, index=False)
    print(f"特征统计信息已保存: {stats_path}")

    # Console summary.
    print("\n" + "="*60)
    print("IC分析摘要")
    print("="*60)
    print(f"总特征数量: {len(ic_values)}")
    print(f"平均IC值: {ic_values.mean():.4f}")
    print(f"IC值标准差: {ic_values.std():.4f}")
    print(f"最大IC值: {ic_values.max():.4f}")
    print(f"最小IC值: {ic_values.min():.4f}")
    print(f"显著特征数量 (p < 0.05): {(p_values < 0.05).sum()}")
    print(f"正IC值特征数量: {(ic_values > 0).sum()}")
    print(f"负IC值特征数量: {(ic_values < 0).sum()}")

    print(f"\nTop 10 最高IC值特征:")
    top_10 = report_df.head(10)
    for _, row in top_10.iterrows():
        significance = "***" if row['is_significant'] else ""
        print(f"  {row['ic_rank']:2.0f}. {row['feature']:20s} IC={row['ic_value']:6.4f} (p={row['p_value']:.4f}) {significance}")

    print(f"\nBottom 10 最低IC值特征:")
    bottom_10 = report_df.tail(10)
    for _, row in bottom_10.iterrows():
        significance = "***" if row['is_significant'] else ""
        print(f"  {row['ic_rank']:2.0f}. {row['feature']:20s} IC={row['ic_value']:6.4f} (p={row['p_value']:.4f}) {significance}")

    return report_df
| |
|
def main():
    """Entry point: load training data, preprocess, compute per-feature ICs,
    and write the CSV reports to Config.OUTPUT_DIR."""
    print("="*60)
    print("开始IC值分析")
    print("="*60)

    # 1. Load data — prefer the aggregated parquet, fall back to the raw
    # train parquet if the aggregated file does not exist.
    print("\n1. 加载数据...")
    if Config.USE_AGGREGATED_DATA:
        try:
            train_df = pd.read_parquet(Config.AGGREGATED_TRAIN_PATH)
            print(f"使用聚合后的训练数据: {train_df.shape}")
        except FileNotFoundError:
            print("聚合数据文件不存在,使用原始数据...")
            train_df = pd.read_parquet(Config.TRAIN_PATH)
            print(f"使用原始训练数据: {train_df.shape}")
    else:
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        print(f"使用原始训练数据: {train_df.shape}")

    # 2. Every column except the label is treated as a feature.
    print("\n2. 获取特征列表...")
    features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    print(f"特征数量: {len(features)}")

    # 3. Preprocessing, done in two deliberate passes:
    #    pass 1 fills NaNs with each column's median (0 when the median
    #    itself is NaN, i.e. an all-NaN column); then +/-inf are converted
    #    to NaN and pass 2 fills those the same way.  NOTE(review): pass-1
    #    medians are computed while inf values are still present, so the two
    #    passes can use different medians — this ordering appears intentional;
    #    confirm before collapsing into a single pass.
    print("\n3. 数据预处理...")

    for col in features + [Config.LABEL_COLUMN]:
        if train_df[col].isna().any():
            median_val = train_df[col].median()
            train_df[col] = train_df[col].fillna(median_val if not pd.isna(median_val) else 0)

    # Convert infinities to NaN, then repeat the median fill for them.
    train_df = train_df.replace([np.inf, -np.inf], np.nan)
    for col in features + [Config.LABEL_COLUMN]:
        if train_df[col].isna().any():
            median_val = train_df[col].median()
            train_df[col] = train_df[col].fillna(median_val if not pd.isna(median_val) else 0)

    print(f"预处理后数据形状: {train_df.shape}")

    # 4. Per-feature Pearson IC vs. the label, computed in parallel.
    print("\n4. 计算IC值...")
    ic_values, p_values = fast_ic_calculation(train_df, features, Config.LABEL_COLUMN, Config.MAX_WORKERS)

    # 5. Descriptive statistics per feature.
    print("\n5. 计算特征统计信息...")
    stats_df = calculate_feature_statistics(train_df, features, Config.LABEL_COLUMN)

    # 6. Merge everything into the CSV/console report.
    print("\n6. 创建分析报告...")
    report_df = create_ic_analysis_report(ic_values, p_values, stats_df, Config.OUTPUT_DIR)

    # 7. Also persist the raw IC/p-value pairs on their own.
    print("\n7. 保存原始IC值...")
    ic_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'p_value': p_values.values
    })
    ic_path = f"{Config.OUTPUT_DIR}/ic_values.csv"
    ic_df.to_csv(ic_path, index=False)
    print(f"IC值已保存: {ic_path}")

    print("\n" + "="*60)
    print("IC值分析完成!")
    print("="*60)
    print(f"所有结果已保存到目录: {Config.OUTPUT_DIR}")
    print("生成的文件:")
    print("- ic_values.csv: 原始IC值")
    print("- ic_analysis_summary.csv: IC分析摘要")
    print("- detailed_ic_analysis.csv: 详细IC分析报告")
    print("- feature_statistics.csv: 特征统计信息")
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|