import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import EditedNearestNeighbours
import warnings
warnings.filterwarnings('ignore')

def load_data(file_path):
    """Read the Excel file at *file_path* and drop low-GC rows.

    Rows whose 'GC含量' is below 0.395 are removed. Returns the filtered
    DataFrame, or None when reading/filtering fails for any reason.
    """
    try:
        data = pd.read_excel(file_path)
        print(f"数据加载成功，共{len(data)}行，{len(data.columns)}列")
        print("\n=== 根据'GC含量'进行数据过滤 ===")

        rows_before = len(data)
        print(f"过滤前总行数: {rows_before}")

        # Keep only rows meeting the GC-content threshold.
        kept = data[data['GC含量'] >= 0.395].copy()
        rows_after = len(kept)

        print(f"因'GC含量' < 0.395，已删除 {rows_before - rows_after} 行")
        print(f"过滤后剩余行数: {rows_after}")
        return kept
    except Exception as e:
        print(f"数据加载失败: {e}")
        return None

def analyze_data_distribution(data):
    """Print abnormality statistics at the woman and record level.

    Returns a DataFrame indexed by '孕妇代码' with columns
    ['检测次数', '异常次数', '异常比例'] (count / sum / mean of '是否异常',
    rounded to 3 decimals).
    """
    print("\n=== 数据分布分析 ===")

    # Per-woman aggregate: record count, abnormal count, abnormal ratio.
    per_woman = (
        data.groupby('孕妇代码')
        .agg({'是否异常': ['count', 'sum', 'mean']})
        .round(3)
    )
    per_woman.columns = ['检测次数', '异常次数', '异常比例']

    n_women = len(per_woman)
    print(f"总孕妇数: {n_women}")
    print(f"总检测次数: {len(data)}")

    # A woman is "abnormal" if any of her records is abnormal.
    n_normal_women = (per_woman['异常比例'] == 0).sum()
    n_abnormal_women = (per_woman['异常比例'] > 0).sum()

    print(f"完全正常孕妇数: {n_normal_women}")
    print(f"有异常记录孕妇数: {n_abnormal_women}")
    print(f"孕妇异常比例: {n_abnormal_women / n_women:.3f}")

    # Record-level abnormality share.
    n_abnormal_records = data['是否异常'].sum()
    print(f"异常检测记录数: {n_abnormal_records}")
    print(f"检测记录异常比例: {n_abnormal_records / len(data):.3f}")

    return per_woman

def stratified_split_by_pregnant_women(data, test_size=0.2, random_state=42):
    """Split records into train/validation sets at the pregnant-woman level.

    Sampling is stratified on whether a woman has any abnormal record, so
    both sets get a comparable abnormal share, and every record of a given
    woman lands in exactly one set.

    Returns (train_data, test_data) as independent row-subset copies.
    """
    print(f"\n=== 按孕妇进行{test_size*100:.0f}%分层抽样 ===")

    # A woman counts as abnormal if any of her records is abnormal.
    woman_label = data.groupby('孕妇代码')['是否异常'].max()
    normal_codes = woman_label[woman_label == 0].index.tolist()
    abnormal_codes = woman_label[woman_label == 1].index.tolist()

    print(f"正常孕妇数: {len(normal_codes)}")
    print(f"异常孕妇数: {len(abnormal_codes)}")

    # Draw test_size of each stratum (at least one woman per stratum).
    np.random.seed(random_state)
    n_normal = max(1, int(len(normal_codes) * test_size))
    n_abnormal = max(1, int(len(abnormal_codes) * test_size))
    picked_normal = np.random.choice(normal_codes, n_normal, replace=False)
    picked_abnormal = np.random.choice(abnormal_codes, n_abnormal, replace=False)

    # Validation set = sampled women; training set = everyone else.
    test_codes = list(picked_normal) + list(picked_abnormal)
    train_codes = list(set(data['孕妇代码'].unique()) - set(test_codes))

    train_data = data[data['孕妇代码'].isin(train_codes)].copy()
    test_data = data[data['孕妇代码'].isin(test_codes)].copy()

    print(f"训练集孕妇数: {len(train_codes)}")
    print(f"验证集孕妇数: {len(test_codes)}")
    print(f"训练集记录数: {len(train_data)}")
    print(f"验证集记录数: {len(test_data)}")

    # Sanity check: the two abnormal ratios should be comparable.
    print(f"训练集异常比例: {train_data['是否异常'].mean():.3f}")
    print(f"验证集异常比例: {test_data['是否异常'].mean():.3f}")

    return train_data, test_data

def prepare_features_and_labels(data):
    """Split *data* into a feature matrix X, label vector y and the
    feature-column names.

    Columns in the exclusion list (identifiers, dates, leakage-prone or
    deliberately dropped measurements) are removed; every remaining column
    is treated as a feature. Missing feature values are filled with the
    per-column median of the numeric columns.

    Returns (X, y, feature_columns) where y is the '是否异常' column.
    """
    # Non-feature columns. NOTE: the original list contained '孕妇代码'
    # twice; the duplicate is removed here (membership test unchanged).
    exclude_columns = ['序号', '孕妇代码', '末次月经', '检测日期',
                       '是否异常', 'IVF妊娠', '胎儿是否健康', '染色体的非整倍体',
                       'Unnamed: 20', 'Unnamed: 21', '检测孕周', '怀孕次数',
                       'GC含量', '在参考基因组上比对的比例', '21号染色体的Z值',
                       '重复读段的比例', '检测抽血次数', '生产次数']

    feature_columns = [col for col in data.columns if col not in exclude_columns]

    X = data[feature_columns].copy()
    y = data['是否异常'].copy()

    # Fill missing values with per-column medians. numeric_only=True keeps
    # this from raising when a non-numeric feature column slips through
    # (pandas >= 2.0 no longer skips object columns in DataFrame.median()).
    X = X.fillna(X.median(numeric_only=True))

    print(f"特征数量: {len(feature_columns)}")
    print(f"样本数量: {len(X)}")
    print(f"异常样本数: {y.sum()}")
    print(f"正常样本数: {len(y) - y.sum()}")

    return X, y, feature_columns

def apply_smote_enn(X_train, y_train, random_state=42):
    """Rebalance the training set with combined SMOTE + ENN resampling.

    SMOTE synthesizes minority-class samples, then ENN removes noisy ones.
    Falls back to the untouched (X_train, y_train) when resampling fails
    (e.g. too few minority samples for SMOTE's k neighbors).
    """
    print("\n=== 应用SMOTE+ENN样本均衡 ===")

    print("原始训练集分布:")
    print(f"正常样本: {(y_train == 0).sum()}")
    print(f"异常样本: {(y_train == 1).sum()}")
    print(f"异常比例: {y_train.mean():.3f}")

    try:
        # Combined over-sampling (SMOTE) and cleaning (ENN) step.
        resampler = SMOTEENN(
            smote=SMOTE(random_state=random_state, k_neighbors=3),
            enn=EditedNearestNeighbours(n_neighbors=3),
            random_state=random_state,
        )
        X_bal, y_bal = resampler.fit_resample(X_train, y_train)

        print("\n均衡后训练集分布:")
        print(f"正常样本: {(y_bal == 0).sum()}")
        print(f"异常样本: {(y_bal == 1).sum()}")
        print(f"异常比例: {y_bal.mean():.3f}")
        print(f"总样本数变化: {len(y_train)} -> {len(y_bal)}")

        return X_bal, y_bal
    except Exception as e:
        print(f"SMOTE+ENN处理失败: {e}")
        print("回退到原始数据")
        return X_train, y_train

def save_processed_data(X_train, y_train, X_test, y_test, feature_columns):
    """Write the (balanced) training set and the validation set to Excel.

    Each set is flattened into one DataFrame: the feature columns plus a
    trailing '是否异常' label column. Returns (train_filename,
    test_filename) on success, or (None, None) on any write failure.
    """
    print("\n=== 保存处理后的数据 ===")

    def _as_frame(X, y):
        # Rebuild a flat table: features first, label appended last.
        frame = pd.DataFrame(X, columns=feature_columns)
        frame['是否异常'] = y.values if hasattr(y, 'values') else y
        return frame

    train_df = _as_frame(X_train, y_train)
    test_df = _as_frame(X_test, y_test)

    train_filename = 'data_train_balanced.xlsx'
    test_filename = 'data_test_validation.xlsx'

    try:
        train_df.to_excel(train_filename, index=False)
        test_df.to_excel(test_filename, index=False)

        print(f"训练集已保存: {train_filename} ({len(train_df)}行)")
        print(f"验证集已保存: {test_filename} ({len(test_df)}行)")

        return train_filename, test_filename
    except Exception as e:
        print(f"保存文件失败: {e}")
        return None, None

def main():
    """Entry point: load, analyze, split, rebalance and save the dataset."""
    print("=== 数据预处理脚本 data_process_5.py ===")

    # Step 1: load and GC-filter the raw table; bail out on failure.
    data = load_data('data_after_process_4.xlsx')
    if data is None:
        return

    # Step 2: report class distribution (return value only printed inside).
    analyze_data_distribution(data)

    # Step 3: hold out 20% of pregnant women as a validation set.
    train_data, test_data = stratified_split_by_pregnant_women(
        data, test_size=0.2, random_state=42
    )

    # Step 4: build feature matrices and labels for both splits.
    X_train, y_train, feature_columns = prepare_features_and_labels(train_data)
    X_test, y_test, _ = prepare_features_and_labels(test_data)

    # Step 5: rebalance ONLY the training split; validation keeps its
    # original distribution.
    X_train_balanced, y_train_balanced = apply_smote_enn(
        X_train, y_train, random_state=42
    )

    # Step 6: persist both splits to Excel.
    train_file, test_file = save_processed_data(
        X_train_balanced, y_train_balanced, X_test, y_test, feature_columns
    )

    # Step 7: summary report.
    print("\n=== 处理完成总结 ===")
    print(f"原始数据: {len(data)}条记录，{len(data['孕妇代码'].unique())}位孕妇")
    print(f"训练集: {len(y_train_balanced)}条记录（经过SMOTE+ENN均衡处理）")
    print(f"验证集: {len(y_test)}条记录（保持原始分布）")

    if train_file and test_file:
        print(f"✅ 数据处理成功！")
        print(f"📁 训练数据文件: {train_file}")
        print(f"📁 验证数据文件: {test_file}")
    else:
        print("❌ 数据保存失败！")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()