import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
from util_set_zh_matplot import plt
import seaborn as sns
from pathlib import Path
import pdb
import os

import util_for_output_zh

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score,mean_squared_error
import statsmodels.api as sm
from geneticalgorithm import geneticalgorithm as ga
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
import joblib

# Configure pandas display before inspecting data: setting every limit to
# None disables truncation so full frames print in the console.
for _display_opt in ('display.max_columns', 'display.max_rows',
                     'display.width', 'display.max_colwidth'):
    pd.set_option(_display_opt, None)

def main():
    """Entry point: run the currently active analysis stage.

    Earlier stages are kept commented out so they can be re-enabled
    for re-running the exploratory steps.
    """
    # stage1()
    # stage2_get_heatmap_on_y()
    stage3()

def stage1():
    """Sanity check: load the processed NIPT results and print the first rows."""
    frame = pd.read_csv('nipt_processed_results.csv')
    print(frame.head())

def stage2_get_heatmap_on_y():
    """Compute Spearman correlations between candidate predictors and the
    final Y-chromosome concentration; save the matrix to CSV and a heatmap
    to visualization/ques1_y_heatmap.png.
    """
    df = pd.read_csv('nipt_processed_results.csv')
    # '怀孕次数' (pregnancy count) contains the literal string '≥3'.
    # Collapse it to '3' AND convert to a numeric dtype: a string column
    # would be silently dropped by .corr() (or raise on newer pandas),
    # so it would never appear in the correlation matrix.
    df['怀孕次数'] = (
        df['怀孕次数'].apply(lambda x: '3' if x == '≥3' else str(x)).astype(float)
    )
    # Candidate features to correlate against the target.
    features = ['年龄', '身高', '体重', '平均孕周', '孕妇BMI',
                '平均原始读段数', '平均唯一读段数', '怀孕次数', '生产次数',
                '加权平均_Y染色体的Z值', 'Y染色体的Z值_标准差']

    target = 'Y染色体浓度_末次'  # could also use the mean concentration or the delta

    # Spearman (rank) correlation: robust to non-linear monotone relations.
    corr_matrix = df[features + [target]].corr(method='spearman')
    corr_matrix.to_csv('ques1_y_correlation_matrix.csv')

    # Draw and save the heatmap. Ensure the output directory exists:
    # previously only stage3 created it, so running stage2 alone failed.
    os.makedirs("visualization", exist_ok=True)
    plt.figure(figsize=(12, 10))
    sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', center=0)
    plt.title("Y染色体浓度与各指标相关性热力图")
    plt.savefig(os.path.join("visualization", "ques1_y_heatmap.png"), bbox_inches='tight')
    # Release the figure so repeated calls do not accumulate open figures.
    plt.close()

def stage3(model_dir='rf_ga_models', result_dir='rf_ga_results', visualization_dir='visualization'):
    """Predict Y-chromosome concentration with a random forest whose
    hyperparameters are tuned by a genetic algorithm; a linear regression
    is trained as a baseline for comparison.

    Parameters
    ----------
    model_dir : str
        Directory for the serialized models and scaler.
    result_dir : str
        Directory for the per-sample prediction CSV.
    visualization_dir : str
        Directory for the comparison / feature-importance plots.

    Returns
    -------
    tuple
        (best GA parameter vector, RF test metrics dict, LR test metrics dict).
    """
    # Create output directories (idempotent).
    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(visualization_dir, exist_ok=True)

    # 1. Load data; keep only samples with > 3,000,000 raw reads.
    df = pd.read_csv('nipt_processed_results.csv')
    df = df[df['平均原始读段数'] > 3000000]

    # 2. Features and target.
    features = [
        '孕妇BMI',
        '体重',          # complements BMI
        '平均孕周',
        '年龄'
    ]
    target = 'Y染色体浓度_末次'

    X = df[features]
    y = df[target]

    # 3. Train/test split (70% / 30%).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )

    # 4. Standardize features (fit on train only to avoid scaling leakage).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    # 5. GA objective: test-set MSE of a random forest built from the gene
    # vector. NOTE(review): scoring candidates on the held-out test set
    # leaks test information into hyperparameter selection; a CV score on
    # the training set would be the cleaner objective — kept as-is to
    # preserve existing behavior.
    def objective_function(params):
        n_estimators = int(params[0])
        max_depth = int(params[1]) if params[1] > 0 else None
        min_samples_split = int(params[2])
        min_samples_leaf = int(params[3])
        max_features = params[4]  # real gene in [0, 1], mapped below

        # Map the [0, 1] gene onto sklearn's max_features choices:
        # 'sqrt', 'log2', or a fraction of the features.
        if max_features < 0.3:
            max_features = 'sqrt'
        elif max_features < 0.6:
            max_features = 'log2'
        else:
            max_features = max(0.1, min(1.0, max_features))

        rf = RandomForestRegressor(
            n_estimators=n_estimators,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            max_features=max_features,
            random_state=42,
            n_jobs=-1
        )
        rf.fit(X_train_scaled, y_train)
        y_pred = rf.predict(X_test_scaled)
        return mean_squared_error(y_test, y_pred)

    # 6. Search bounds for each gene.
    varbound = np.array([
        [50, 500],     # n_estimators
        [5, 50],       # max_depth
        [2, 20],       # min_samples_split
        [1, 10],       # min_samples_leaf
        [0, 1]         # max_features (continuous; mapped to 'sqrt'/'log2'/fraction)
    ])

    # 7. GA hyperparameters.
    algorithm_param = {
        'max_num_iteration': 30,
        'population_size': 20,
        'mutation_probability': 0.1,
        'elit_ratio': 0.05,
        'crossover_probability': 0.5,
        'parents_portion': 0.3,
        'crossover_type': 'uniform',
        'max_iteration_without_improv': 10
    }

    print("开始遗传算法参数优化...")
    model_ga = ga(
        function=objective_function,
        dimension=5,
        # The first four genes are integers; the max_features gene must stay
        # real-valued on [0, 1]. The previous variable_type='int' collapsed
        # it to {0, 1}, so the 'log2' and fractional branches of the mapping
        # were unreachable.
        variable_type_mixed=np.array([['int'], ['int'], ['int'], ['int'], ['real']]),
        variable_boundaries=varbound,
        algorithm_parameters=algorithm_param,
        function_timeout=300
    )
    model_ga.run()
    best_params = model_ga.best_variable

    # 8. Decode the max_features gene of the best individual (same mapping
    # as in objective_function).
    if best_params[4] < 0.3:
        best_max_features = 'sqrt'
    elif best_params[4] < 0.6:
        best_max_features = 'log2'
    else:
        best_max_features = max(0.1, min(1.0, best_params[4]))

    # 9. Refit a forest with the best hyperparameters.
    best_rf = RandomForestRegressor(
        n_estimators=int(best_params[0]),
        max_depth=int(best_params[1]) if best_params[1] > 0 else None,
        min_samples_split=int(best_params[2]),
        min_samples_leaf=int(best_params[3]),
        max_features=best_max_features,
        random_state=42,
        n_jobs=-1
    )
    best_rf.fit(X_train_scaled, y_train)

    # 10. Linear regression baseline.
    lr_model = LinearRegression()
    lr_model.fit(X_train_scaled, y_train)

    # 11. Shared evaluation helper: predictions plus MSE/RMSE/R2.
    def evaluate_model(model, X, y_true):
        y_pred = model.predict(X)
        mse = mean_squared_error(y_true, y_pred)
        r2 = r2_score(y_true, y_pred)
        return {
            'y_pred': y_pred,
            'MSE': mse,
            'RMSE': np.sqrt(mse),
            'R2': r2,
            '预测值示例': y_pred[:5]  # first 5 predictions as a sample
        }

    # Evaluate the random forest.
    rf_train_metrics = evaluate_model(best_rf, X_train_scaled, y_train)
    rf_test_metrics = evaluate_model(best_rf, X_test_scaled, y_test)

    # Evaluate the linear baseline.
    lr_train_metrics = evaluate_model(lr_model, X_train_scaled, y_train)
    lr_test_metrics = evaluate_model(lr_model, X_test_scaled, y_test)

    # 12. Statistical significance of the linear model via statsmodels OLS.
    X_train_sm = sm.add_constant(X_train_scaled)  # add intercept column
    sm_model = sm.OLS(y_train, X_train_sm).fit()
    print("\n=== 线性模型统计显著性检验结果 ===")
    print(sm_model.summary())

    # 13. Predicted-vs-true scatter plots for both models.
    plt.figure(figsize=(12, 10))

    # Random forest panel.
    plt.subplot(2, 1, 1)
    plt.scatter(y_test, rf_test_metrics['y_pred'], alpha=0.6, color='blue', label='随机森林')
    plt.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=2)  # identity line
    plt.xlabel('真实Y染色体浓度')
    plt.ylabel('预测Y染色体浓度')
    plt.title(f'随机森林预测效果 (测试集, R^2={rf_test_metrics["R2"]:.4f})')
    plt.grid(True)
    plt.legend()

    # Linear regression panel.
    plt.subplot(2, 1, 2)
    plt.scatter(y_test, lr_test_metrics['y_pred'], alpha=0.6, color='red', label='线性回归')
    plt.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=2)  # identity line
    plt.xlabel('真实Y染色体浓度')
    plt.ylabel('预测Y染色体浓度')
    plt.title(f'线性回归预测效果 (测试集, R^2={lr_test_metrics["R2"]:.4f})')
    plt.grid(True)
    plt.legend()

    plt.tight_layout()
    plt.savefig(os.path.join(visualization_dir, 'y_chromosome_prediction_comparison.png'), bbox_inches='tight')
    plt.close()

    # Feature importance bar chart (descending).
    plt.figure(figsize=(10, 6))
    importances = best_rf.feature_importances_
    indices = np.argsort(importances)[::-1]
    plt.bar(range(X.shape[1]), importances[indices])
    plt.xticks(range(X.shape[1]), [features[i] for i in indices], rotation=45)
    plt.title('随机森林特征重要性')
    plt.tight_layout()
    plt.savefig(os.path.join(visualization_dir, 'feature_importance.png'), bbox_inches='tight')
    plt.close()

    # 14. Report key metrics.
    print("\n=== 最优参数 ===")
    print(f"n_estimators: {int(best_params[0])}")
    print(f"max_depth: {int(best_params[1]) if best_params[1] > 0 else None}")
    print(f"min_samples_split: {int(best_params[2])}")
    print(f"min_samples_leaf: {int(best_params[3])}")
    print(f"max_features: {best_max_features}")

    print("\n=== 随机森林 - 训练集指标 ===")
    print(f"MSE: {rf_train_metrics['MSE']:.4f}")
    print(f"RMSE: {rf_train_metrics['RMSE']:.4f}")
    print(f"R²: {rf_train_metrics['R2']:.4f}")

    print("\n=== 随机森林 - 测试集指标 ===")
    print(f"MSE: {rf_test_metrics['MSE']:.4f}")
    print(f"RMSE: {rf_test_metrics['RMSE']:.4f}")
    print(f"R²: {rf_test_metrics['R2']:.4f}")

    print("\n=== 线性回归 - 测试集指标 ===")
    print(f"MSE: {lr_test_metrics['MSE']:.4f}")
    print(f"RMSE: {lr_test_metrics['RMSE']:.4f}")
    print(f"R²: {lr_test_metrics['R2']:.4f}")

    # 15. Feature-importance table.
    feature_importance = pd.DataFrame({
        'Feature': features,
        'Importance': best_rf.feature_importances_
    }).sort_values('Importance', ascending=False)

    print("\n=== 随机森林特征重要性 ===")
    print(feature_importance)

    # Persist models and the fitted scaler.
    joblib.dump(best_rf, os.path.join(model_dir, 'random_forest_y_chromosome.pkl'))
    joblib.dump(lr_model, os.path.join(model_dir, 'linear_regression_y_chromosome.pkl'))
    joblib.dump(scaler, os.path.join(model_dir, 'scaler_y_chromosome.pkl'))

    # Persist per-sample predictions and absolute errors.
    result_df = pd.DataFrame({
        '真实值': y_test.values,
        '随机森林预测值': rf_test_metrics['y_pred'],
        '线性回归预测值': lr_test_metrics['y_pred'],
        '随机森林误差': np.abs(y_test.values - rf_test_metrics['y_pred']),
        '线性回归误差': np.abs(y_test.values - lr_test_metrics['y_pred'])
    })
    result_df.to_csv(os.path.join(result_dir, 'y_chromosome_prediction.csv'),
                    index=False, encoding='utf-8-sig')

    return best_params, rf_test_metrics, lr_test_metrics

# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()