# ques0_prepare_data\sheet2_女胎检测数据_fillnan_sifted.csv
import warnings
warnings.filterwarnings('ignore')
import util_for_output_zh

import os,pdb
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
from util_set_zh_matplot import plt
import seaborn as sns
from sklearn.linear_model import Ridge  # 改用更简单的模型
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error
from skopt import gp_minimize
from skopt.space import Real
from typing import Tuple, List, Dict
from joblib import Parallel, delayed  # 并行处理
from sklearn.cluster import KMeans  # 用于预分组

# Pandas display options: show all columns, up to 100 rows, and full cell
# contents without truncation when printing DataFrames.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)

# Ensure the output directory for this question's artifacts exists.
os.makedirs('ques4_prepare_data', exist_ok=True)

def main():
    """Entry point: run the currently active pipeline stage.

    stage1 (outlier cleaning) and stage1_1 (missing-value filling) are
    assumed to have been run already; only stage2 (model training and
    evaluation) is enabled here.
    """
    # stage1()
    # stage1_1()
    stage2()

def check_missing_values(X):
    """Report missing values in the feature matrix ``X``.

    Prints a per-column count/percentage summary of columns containing
    nulls. When any exist, also saves a null-location heatmap
    (ques4_prepare_data/features_missing_values_heatmap.png) and a CSV
    report (ques4_prepare_data/features_missing_values_report.csv).

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix to inspect.
    """
    # Compute the per-column null counts once; the original recomputed
    # X.isnull().sum() separately for the count and the percentage.
    null_counts = X.isnull().sum()

    missing_data = pd.DataFrame({
        '缺失值数量': null_counts,
        '缺失值比例(%)': null_counts / len(X) * 100,
    })

    # Keep only the columns that actually have missing values.
    missing_data = missing_data[missing_data['缺失值数量'] > 0]

    if missing_data.empty:
        print("特征矩阵X中没有缺失值")
        return

    print("特征矩阵X中的缺失值情况:")
    print(missing_data)

    # Visualize where the nulls occur (rows x columns).
    plt.figure(figsize=(10, 6))
    sns.heatmap(X.isnull(), yticklabels=False, cbar=False, cmap='viridis')
    plt.title('特征缺失值热图')
    plt.savefig('ques4_prepare_data/features_missing_values_heatmap.png')
    plt.close()

    # Persist the summary table for later inspection.
    missing_data.to_csv('ques4_prepare_data/features_missing_values_report.csv')


def stage1():
    """Clean the female-fetus NIPT dataset.

    Steps:
      1. Normalize the two date columns ('末次月经', '检测日期') to
         'YYYY-MM-DD' (strips time suffixes, expands compact 'YYYYMMDD').
      2. Collapse the categorical pregnancy count '≥3' to the number 3.
      3. Save one boxplot per feature into ques4_prepare_data/.
      4. Replace z-score outliers (|z| > 3) with the column median.

    Reads  ques0_prepare_data/sheet2_女胎检测数据_fillnan_sifted.csv and
    writes ques4_prepare_data/sheet2_女胎检测数据_fillnan_sifted_outlier.csv.
    """
    # NOTE: the original path used a backslash ('ques0_prepare_data\s...'),
    # which emits an invalid-escape SyntaxWarning and breaks on POSIX.
    df = pd.read_csv('ques0_prepare_data/sheet2_女胎检测数据_fillnan_sifted.csv')

    def _strip_time(value):
        # '2023-02-01 00:00:00' -> '2023-02-01'; NaN passes through untouched.
        if pd.isnull(value):
            return value
        value = str(value)
        if ' ' in value:
            value = value.split(' ')[0]
        return value

    def _clean_test_date(value):
        # Handles both '2023-06-07 00:00:00' and the compact '20230702' form.
        value = _strip_time(value)
        if pd.isnull(value):
            return value
        if len(value) == 8 and value.isdigit():
            # 20230702 -> 2023-07-02 (slicing a digits-only string cannot
            # raise, so no try/except is needed here).
            value = f"{value[:4]}-{value[4:6]}-{value[6:8]}"
        return value

    # Normalize the date columns when present.
    if '末次月经' in df.columns:
        df['末次月经'] = df['末次月经'].apply(_strip_time)
    if '检测日期' in df.columns:
        df['检测日期'] = df['检测日期'].apply(_clean_test_date)

    # Collapse the categorical pregnancy count '≥3' to the numeric cap 3.
    df['怀孕次数'] = df['怀孕次数'].apply(lambda x: 3 if x == '≥3' else x)

    # Sequencing / QC features subject to outlier handling.
    features = [
        '原始读段数', '在参考基因组上比对的比例', '重复读段的比例', '唯一比对的读段数',
        'GC含量', '13号染色体的Z值', '18号染色体的Z值', '21号染色体的Z值',
        'X染色体的Z值', 'X染色体浓度', '13号染色体的GC含量',
        '18号染色体的GC含量', '21号染色体的GC含量'
    ]

    def _plot_boxplots(frame, cols, output_dir):
        # One boxplot PNG per feature, saved under output_dir.
        for col in cols:
            plt.figure(figsize=(8, 6))
            sns.boxplot(data=frame, y=col)
            plt.title(f'Boxplot of {col}')
            plt.savefig(f'{output_dir}/{col}_boxplot.png')
            plt.close()

    _plot_boxplots(df, features, 'ques4_prepare_data')

    def _handle_outliers_zscore(frame, cols, threshold=3):
        # Replace values whose z-score exceeds `threshold` with the column
        # median (more robust than the mean for skewed distributions).
        cleaned = frame.copy()
        for col in cols:
            if col in cleaned.columns:
                z_scores = (cleaned[col] - cleaned[col].mean()) / cleaned[col].std()
                outliers = np.abs(z_scores) > threshold
                cleaned.loc[outliers, col] = cleaned[col].median()
        return cleaned

    df_clean = _handle_outliers_zscore(df, features)

    # Persist the cleaned table without the index column.
    output_path = 'ques4_prepare_data/sheet2_女胎检测数据_fillnan_sifted_outlier.csv'
    df_clean.to_csv(output_path, index=False)

    print(f"处理后的数据已保存到: {output_path}")
    print("箱线图已保存到 ques4_prepare_data 文件夹")
    print(df_clean.head())

def stage1_1():
    """Fill remaining missing '末次月经' (last menstrual period) values.

    For rows with a missing LMP date, back-calculate it from the test date
    and the gestational age parsed from '检测孕周'; any values still missing
    afterwards fall back to the column mode. Re-saves the cleaned CSV in
    place.
    """
    from util_for_ques0 import parse_gestational_age, calculate_last_menstrual_date
    data = pd.read_csv('ques4_prepare_data/sheet2_女胎检测数据_fillnan_sifted_outlier.csv')
    check_missing_values(data)

    # Derive gestational age in days from the textual '检测孕周' column.
    if '检测孕周' in data.columns:
        data['孕周天数'] = data['检测孕周'].apply(parse_gestational_age)
    else:
        data['孕周天数'] = None

    if '末次月经' in data.columns and '检测日期' in data.columns and '孕周天数' in data.columns:
        missing_lmp = data['末次月经'].isnull()
        if missing_lmp.any():
            # Back-calculate LMP = test date - gestational days.
            data.loc[missing_lmp, '末次月经'] = data.loc[missing_lmp].apply(
                lambda row: calculate_last_menstrual_date(row['检测日期'], row['孕周天数']), axis=1
            )
            # Fall back to the mode for rows the back-calculation left empty.
            # Plain assignment instead of chained `fillna(..., inplace=True)`,
            # which is deprecated under pandas copy-on-write and may not
            # write back.
            if data['末次月经'].isnull().any():
                lmp_mode = data['末次月经'].mode()[0]
                data['末次月经'] = data['末次月经'].fillna(lmp_mode)

    check_missing_values(data)
    # index=False: the original wrote the index too, injecting a spurious
    # 'Unnamed: 0' column into the file that stage2 reads back.
    data.to_csv('ques4_prepare_data/sheet2_女胎检测数据_fillnan_sifted_outlier.csv', index=False)


# features = [ 原始读段数 在参考基因组上比对的比例   重复读段的比例  唯一比对的读段数  
# GC含量  13号染色体的Z值  18号染色体的Z值  21号染色体的Z值   X染色体的Z值
# X染色体浓度  13号染色体的GC含量  18号染色体的GC含量  21号染色体的GC含量 ]
# target= [染色体的非整倍体]  这里 存在 '正常' 'T13' 'T18' 等 7 类值
# 思路 ： 输入 feature 预测 target
'''
创新性染色体非整倍体预测方案
方案概述
我将提出一个创新的多算法融合方案来预测染色体非整倍体状态，结合了以下创新点：

1.
多模型集成学习框架

2.
特征重要性自适应加权

3.
时间序列特征提取（针对同一孕妇多次检测数据）

4.
不平衡数据处理策略
'''
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
import tensorflow as tf
from keras.layers import Dense, Input, Conv1D, LSTM, MultiHeadAttention, Concatenate, Flatten
from keras.models import Model
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
def stage2():
    """Train a dynamically weighted ensemble to predict aneuploidy class.

    Pipeline:
      1. Load the cleaned female-fetus dataset produced by stage1/stage1_1.
      2. Add temporal (growth-rate) features for women with repeated tests.
      3. Label-encode the 7-class target, oversample with SMOTE, standardize.
      4. Train base models (XGBoost, 1D-CNN, self-attention net) and a meta
         learner that fuses their predicted probabilities with attention.
      5. Print per-model classification reports and save a SHAP
         feature-importance plot to ques4_Feature_Importanc.png.
    """
    data = pd.read_csv('ques4_prepare_data/sheet2_女胎检测数据_fillnan_sifted_outlier.csv')

    def extract_temporal_features(df):
        """Add per-woman growth-rate features across her time-ordered tests."""
        df = df.copy()
        processed_groups = []
        # Group by the pregnant-woman code; sort each group by test date so
        # pct_change compares consecutive visits.
        for _, group in df.groupby('孕妇代码'):
            group = group.sort_values('检测日期')
            for col in ['原始读段数', 'GC含量', '13号染色体的Z值', '18号染色体的Z值', '21号染色体的Z值']:
                # Period-over-period growth rate; the first visit has no
                # predecessor, so its NaN becomes 0.
                group[f'{col}_增长率'] = group[col].pct_change().fillna(0)
                # Rolling mean (window=2) of the growth rate.
                group[f'{col}_平均变化率'] = (
                    group[col].pct_change().rolling(window=2, min_periods=1).mean().fillna(0)
                )
            processed_groups.append(group)
        return pd.concat(processed_groups) if processed_groups else df

    data = extract_temporal_features(data)

    # Encode the target labels ('正常', 'T13', 'T18', ...) as integers.
    encoder = LabelEncoder()
    data['染色体的非整倍体'] = encoder.fit_transform(data['染色体的非整倍体'])

    # Base sequencing / QC features.
    features = ['原始读段数', '在参考基因组上比对的比例', '重复读段的比例', '唯一比对的读段数',
                'GC含量', '13号染色体的Z值', '18号染色体的Z值', '21号染色体的Z值',
                'X染色体的Z值', 'X染色体浓度', '13号染色体的GC含量',
                '18号染色体的GC含量', '21号染色体的GC含量']

    # Append the derived temporal features, if any were created.
    temporal_feats = [col for col in data.columns if any(x in col for x in ['增长率', '平均变化率'])]
    features += temporal_feats

    X = data[features]
    y = data['染色体的非整倍体']

    # Sanity-check the feature matrix before resampling.
    check_missing_values(X)

    # BUG FIX: removed a leftover `pdb.set_trace()` breakpoint here — it
    # halted every non-interactive run of this stage.

    # Balance the heavily skewed classes with SMOTE oversampling.
    smote = SMOTE(random_state=42)
    X_res, y_res = smote.fit_resample(X, y)

    # Standardize features to zero mean / unit variance.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_res)

    # ---- Base models ------------------------------------------------------
    def build_xgboost_model(X, y):
        """Gradient-boosted trees: strong baseline for tabular data."""
        model = XGBClassifier(
            n_estimators=200,
            max_depth=5,
            learning_rate=0.1,
            subsample=0.8,
            colsample_bytree=0.8,
            use_label_encoder=False,
            eval_metric='mlogloss'
        )
        model.fit(X, y)
        return model

    def build_cnn_model(input_shape, num_classes):
        """1D CNN: captures local patterns across adjacent features."""
        inputs = Input(shape=input_shape)
        x = Conv1D(64, 3, activation='relu')(inputs)
        x = Conv1D(128, 3, activation='relu')(x)
        x = Flatten()(x)
        outputs = Dense(num_classes, activation='softmax')(x)
        model = Model(inputs, outputs)
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model

    def build_attention_model(input_shape, num_classes):
        """Self-attention network: learns pairwise feature interactions."""
        inputs = Input(shape=input_shape)
        x = MultiHeadAttention(num_heads=4, key_dim=64)(inputs, inputs)
        x = Flatten()(x)
        x = Dense(64, activation='relu')(x)
        outputs = Dense(num_classes, activation='softmax')(x)
        model = Model(inputs, outputs)
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model

    # ---- Class-weighted loss ----------------------------------------------
    import keras

    class AdaptiveLoss(keras.losses.Loss):
        """Sparse categorical cross-entropy weighted by each sample's true class."""

        def __init__(self, class_weights):
            super().__init__()
            # BUG FIX: tf.gather needs a dense tensor indexed by label; the
            # original passed the raw {label: weight} dict straight through,
            # which tf.gather cannot consume.
            if isinstance(class_weights, dict):
                class_weights = [class_weights[i] for i in sorted(class_weights)]
            self.class_weights = tf.constant(class_weights, dtype=tf.float32)

        def call(self, y_true, y_pred):
            # Look up each sample's weight from its integer label.
            weights = tf.gather(self.class_weights, tf.cast(y_true, tf.int32))
            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
            return tf.reduce_mean(loss * weights)

    # Inverse-frequency class weights computed on the resampled labels.
    class_counts = np.bincount(y_res)
    total = np.sum(class_counts)
    class_weights = {i: total/(len(class_counts)*count) for i, count in enumerate(class_counts)}

    # ---- Meta learner ------------------------------------------------------
    def build_meta_learner(input_shape, num_base_models, num_classes):
        """Fuse base-model probabilities with attention over model outputs."""
        from keras.layers import Reshape  # not among the module-level imports

        inputs = Input(shape=input_shape)

        # Feature-importance branch: a softmax gate over the meta features.
        feat_importance = Dense(input_shape[0], activation='softmax')(inputs)
        weighted_inputs = inputs * feat_importance

        # One small prediction head per base model.
        base_outputs = []
        for _ in range(num_base_models):
            x = Dense(64, activation='relu')(weighted_inputs)
            x = Dense(32, activation='relu')(x)
            base_outputs.append(Dense(num_classes, activation='softmax')(x))

        # Attention fusion across the per-model outputs. Use a Keras Reshape
        # layer instead of raw tf.reshape on a symbolic tensor — the latter
        # breaks under Keras 3.
        concat_outputs = Concatenate()(base_outputs)
        reshaped = Reshape((num_base_models, num_classes))(concat_outputs)
        attention = MultiHeadAttention(num_heads=2, key_dim=num_classes)(reshaped, reshaped)
        attention = Flatten()(attention)

        outputs = Dense(num_classes, activation='softmax')(attention)

        model = Model(inputs, outputs)
        model.compile(optimizer='adam',
                loss=AdaptiveLoss(class_weights),
                metrics=['accuracy'])
        return model

    # ---- Training ----------------------------------------------------------
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_res, test_size=0.2, random_state=42)

    xgb_model = build_xgboost_model(X_train, y_train)

    # Neural models expect a (samples, features, 1) channel dimension.
    X_train_nn = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test_nn = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    num_classes = len(np.unique(y_res))

    cnn_model = build_cnn_model((X_train.shape[1], 1), num_classes)
    cnn_model.fit(X_train_nn, y_train, epochs=50, batch_size=32, validation_split=0.1, verbose=0)

    attention_model = build_attention_model((X_train.shape[1], 1), num_classes)
    attention_model.fit(X_train_nn, y_train, epochs=50, batch_size=32, validation_split=0.1, verbose=0)

    # Stack base-model probability outputs as meta features.
    xgb_pred = xgb_model.predict_proba(X_train)
    cnn_pred = cnn_model.predict(X_train_nn)
    attention_pred = attention_model.predict(X_train_nn)

    meta_features = np.concatenate([xgb_pred, cnn_pred, attention_pred], axis=1)
    meta_learner = build_meta_learner((meta_features.shape[1],), 3, num_classes)
    meta_learner.fit(meta_features, y_train, epochs=100, batch_size=32, validation_split=0.1, verbose=0)

    # ---- Evaluation ---------------------------------------------------------
    def evaluate_model(model, X_test, y_test, model_type='xgb'):
        """Print a classification report for either model family."""
        if model_type == 'xgb':
            y_pred = model.predict(X_test)
        else:
            # Neural models need the channel dim and argmax over probabilities.
            y_pred = np.argmax(model.predict(X_test.reshape(X_test.shape[0], X_test.shape[1], 1)), axis=1)
        print(classification_report(y_test, y_pred, target_names=encoder.classes_))

    print("XGBoost 性能:")
    evaluate_model(xgb_model, X_test, y_test)

    print("\nCNN 性能:")
    evaluate_model(cnn_model, X_test, y_test, 'nn')

    print("\n注意力模型 性能:")
    evaluate_model(attention_model, X_test, y_test, 'nn')

    # Ensemble prediction through the meta learner.
    xgb_test_pred = xgb_model.predict_proba(X_test)
    cnn_test_pred = cnn_model.predict(X_test_nn)
    attention_test_pred = attention_model.predict(X_test_nn)

    meta_test_features = np.concatenate([xgb_test_pred, cnn_test_pred, attention_test_pred], axis=1)
    ensemble_pred = np.argmax(meta_learner.predict(meta_test_features), axis=1)

    print("\n集成模型 性能:")
    print(classification_report(y_test, ensemble_pred, target_names=encoder.classes_))

    # ---- SHAP feature importance (tree model only) -------------------------
    import shap

    explainer = shap.TreeExplainer(xgb_model)
    shap_values = explainer.shap_values(X_test)

    plt.figure(figsize=(10, 8))
    shap.summary_plot(shap_values, X_test, feature_names=features, class_names=encoder.classes_)
    plt.title('Feature Importance for Chromosomal Abnormality Prediction')
    plt.tight_layout()
    plt.savefig('ques4_Feature_Importanc.png')
    # plt.show()

# Script entry point: run the pipeline when executed directly.
if __name__ == '__main__':
    main()