# insurance_sales_prediction.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os  # 添加os模块用于文件夹操作
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix, roc_curve
from sklearn.feature_selection import SelectKBest, f_classif
import lightgbm as lgb
import joblib

# 设置中文字体支持
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号


# 1. 数据加载与预处理
def load_and_preprocess():
    # 加载数据
    train = pd.read_csv('F:/实训资料/实训二/工单/大数据-八维保险数据挖掘-09-保险销售预测数据/train.csv')
    test = pd.read_csv('F:/实训资料/实训二/工单/大数据-八维保险数据挖掘-09-保险销售预测数据/test.csv')

    # 合并数据用于特征工程
    combined = pd.concat([train.drop('Response', axis=1), test])

    # 特征工程
    # 车辆年限映射
    combined['Vehicle_Age'] = combined['Vehicle_Age'].map({
        '< 1 Year': 0,
        '1-2 Year': 1,
        '> 2 Years': 2
    })

    # 分类特征编码
    combined['Vehicle_Damage'] = LabelEncoder().fit_transform(combined['Vehicle_Damage'])
    combined['Gender'] = LabelEncoder().fit_transform(combined['Gender'])

    # 分箱处理
    combined['Age_Bin'] = pd.cut(combined['Age'], bins=[0, 25, 40, 60, 100],
                                 labels=[0, 1, 2, 3]).astype(int)
    combined['Premium_Bin'] = pd.cut(combined['Annual_Premium'],
                                     bins=[0, 20000, 40000, 60000, 1000000],
                                     labels=[0, 1, 2, 3]).astype(int)

    # 拆分回训练集和测试集
    train_processed = combined.iloc[:len(train)]
    test_processed = combined.iloc[len(train):]

    train_processed['Response'] = train['Response']
    return train_processed, test_processed

# 2. 特征选择
def select_features(X, y):
    selector = SelectKBest(f_classif, k=8)
    selector.fit(X, y)
    selected_features = X.columns[selector.get_support()]
    return list(selected_features)

# 创建模型文件夹
def create_model_directory(model_name):
    """为每个模型创建专用的文件夹"""
    dir_path = f"figures/{model_name}"
    os.makedirs(dir_path, exist_ok=True)
    return dir_path

# 绘制特征重要性
def plot_feature_importance(model, feature_names, model_name, directory):
    if hasattr(model, 'feature_importances_'):
        importances = model.feature_importances_
        indices = np.argsort(importances)[::-1]

        plt.figure(figsize=(10, 6))
        plt.title(f"{model_name} - 特征重要性")
        plt.bar(range(len(importances)), importances[indices], align="center")
        plt.xticks(range(len(importances)), [feature_names[i] for i in indices], rotation=45)
        plt.tight_layout()
        plt.savefig(os.path.join(directory, f'{model_name}_feature_importance.png'))
        plt.close()


# 绘制ROC曲线
def plot_roc_curve(y_true, y_pred, model_name, directory):
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    roc_auc = roc_auc_score(y_true, y_pred)

    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC曲线 (AUC = {roc_auc:.2f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title(f'{model_name} - ROC曲线')
    plt.legend(loc="lower right")
    plt.savefig(os.path.join(directory, f'{model_name}_roc_curve.png'))
    plt.close()


# 绘制混淆矩阵
def plot_confusion_matrix(y_true, y_pred, model_name, directory):
    cm = confusion_matrix(y_true, y_pred)

    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['不购买', '购买'],
                yticklabels=['不购买', '购买'])
    plt.title(f'{model_name} - 混淆矩阵')
    plt.ylabel('真实值')
    plt.xlabel('预测值')
    plt.savefig(os.path.join(directory, f'{model_name}_confusion_matrix.png'))
    plt.close()


# 3. 模型训练与评估
def train_and_evaluate(X_train, y_train, X_val, y_val, model_name='rf'):
    # 为模型创建专用文件夹
    model_dir = create_model_directory(model_name)

    if model_name == 'rf':
        model = RandomForestClassifier(
            n_estimators=150,
            max_depth=10,
            min_samples_split=5,
            min_samples_leaf=2,
            random_state=42,
            n_jobs=-1
        )
    elif model_name == 'lgb':
        model = lgb.LGBMClassifier(
            n_estimators=200,
            learning_rate=0.05,
            max_depth=7,
            num_leaves=40,
            subsample=0.8,
            colsample_bytree=0.8,
            random_state=42,
            n_jobs=-1
        )

    model.fit(X_train, y_train)
    val_preds = model.predict_proba(X_val)[:, 1]
    auc = roc_auc_score(y_val, val_preds)

    # 绘制指标图表
    plot_feature_importance(model, X_train.columns, model_name.upper(), model_dir)
    plot_roc_curve(y_val, val_preds, model_name.upper(), model_dir)

    # 绘制混淆矩阵需要二值化预测
    val_preds_binary = np.where(val_preds > 0.5, 1, 0)
    plot_confusion_matrix(y_val, val_preds_binary, model_name.upper(), model_dir)

    print(f"{model_name.upper()}模型验证集AUC: {auc:.4f}")
    print(classification_report(y_val, val_preds_binary))

    return model, auc


# 4. 模型优化流程
def optimization_pipeline():
    # 创建主图目录
    os.makedirs("figures", exist_ok=True)

    print("=" * 50)
    print("开始数据加载与预处理...")
    train_df, test_df = load_and_preprocess()
    print(f"训练集形状: {train_df.shape}, 测试集形状: {test_df.shape}")

    # 绘制目标变量分布
    plt.figure(figsize=(8, 6))
    train_df['Response'].value_counts().plot(kind='bar')
    plt.title('保险购买分布 (0=不购买, 1=购买)')
    plt.xlabel('是否购买保险')
    plt.ylabel('数量')
    plt.savefig('figures/target_distribution.png')
    plt.close()

    # 划分数据集
    X = train_df.drop(['id', 'Response'], axis=1)
    y = train_df['Response']

    # 特征选择
    print("\n进行特征选择...")
    selected_features = select_features(X, y)
    print(f"精选特征({len(selected_features)}个): {selected_features}")

    # 数据标准化
    scaler = StandardScaler()
    X[selected_features] = scaler.fit_transform(X[selected_features])

    # 划分训练集和验证集
    X_train, X_val, y_train, y_val = train_test_split(
        X[selected_features], y, test_size=0.2, random_state=42, stratify=y
    )
    print(f"训练集大小: {X_train.shape[0]}, 验证集大小: {X_val.shape[0]}")

    # 第一轮：基础随机森林
    print("\n" + "=" * 50)
    print("第一轮优化：基础随机森林模型")
    model_rf, auc_rf = train_and_evaluate(X_train, y_train, X_val, y_val, 'rf')

    # 第二轮优化：LightGBM
    print("\n" + "=" * 50)
    print("第二轮优化：LightGBM模型")
    model_lgb, auc_lgb = train_and_evaluate(X_train, y_train, X_val, y_val, 'lgb')

    # 第三轮优化：集成模型
    print("\n" + "=" * 50)
    print("第三轮优化：模型集成 (RF + LightGBM)")
    rf_preds = model_rf.predict_proba(X_val)[:, 1]
    lgb_preds = model_lgb.predict_proba(X_val)[:, 1]
    ensemble_preds = (rf_preds + lgb_preds) / 2
    ensemble_auc = roc_auc_score(y_val, ensemble_preds)

    # 为集成模型创建专用文件夹
    ensemble_dir = create_model_directory("ensemble")

    # 绘制集成模型的ROC曲线
    plot_roc_curve(y_val, ensemble_preds, "集成模型", ensemble_dir)

    # 绘制集成模型的混淆矩阵
    ensemble_preds_binary = np.where(ensemble_preds > 0.5, 1, 0)
    plot_confusion_matrix(y_val, ensemble_preds_binary, "集成模型", ensemble_dir)

    print(f"集成模型验证集AUC: {ensemble_auc:.4f}")

    # 保存最佳模型和标准化器
    joblib.dump(model_lgb, 'insurance_sales_model.pkl')
    joblib.dump(scaler, 'scaler.pkl')
    joblib.dump(selected_features, 'selected_features.pkl')

    print("\n优化结果总结：")
    print(f"第一轮(AUC): {auc_rf:.4f}")
    print(f"第二轮(AUC): {auc_lgb:.4f}")
    print(f"第三轮(AUC): {ensemble_auc:.4f}")

    return model_lgb, scaler, selected_features, auc_rf, auc_lgb, ensemble_auc


# 5. 预测与结果输出
def predict_and_output(model, scaler, selected_features):
    print("\n" + "=" * 50)
    print("开始测试集预测...")
    _, test_df = load_and_preprocess()

    # 预处理测试集
    test_processed = test_df[selected_features]
    test_processed = scaler.transform(test_processed)

    # 预测
    predictions = model.predict_proba(test_processed)[:, 1]

    # 创建提交文件
    submission = pd.DataFrame({
        'id': test_df['id'],
        'Response': np.where(predictions > 0.5, 1, 0)  # 二值化预测
    })

    # 保存结果
    submission.to_csv('insurance_sales_predictions.csv', index=False)
    print("预测结果已保存为 insurance_sales_predictions.csv")

    # 返回前10个预测结果用于展示
    return submission.head(10)


# 6. 模型部署函数
def deploy_model_api():
    print("\n" + "=" * 50)
    print("模型部署说明")
    print("工单编号: INS-DM-09-20230730")
    print("\n部署步骤:")
    print("1. 安装依赖: pip install flask joblib pandas scikit-learn lightgbm")
    print("2. 创建app.py文件，包含以下代码:")

    deploy_code = """
from flask import Flask, request, jsonify
import joblib
import numpy as np
import pandas as pd

app = Flask(__name__)

# 加载模型、标准化器和特征列表
model = joblib.load('insurance_sales_model.pkl')
scaler = joblib.load('scaler.pkl')
selected_features = joblib.load('selected_features.pkl')

# 分类特征编码映射
vehicle_age_map = {'< 1 Year': 0, '1-2 Year': 1, '> 2 Years': 2}
gender_map = {'Male': 1, 'Female': 0}
vehicle_damage_map = {'Yes': 1, 'No': 0}

@app.route('/predict', methods=['POST'])
def predict():
    try:
        # 获取JSON数据
        data = request.json

        # 创建DataFrame并预处理
        input_data = pd.DataFrame([data])

        # 特征转换
        input_data['Vehicle_Age'] = input_data['Vehicle_Age'].map(vehicle_age_map)
        input_data['Gender'] = input_data['Gender'].map(gender_map)
        input_data['Vehicle_Damage'] = input_data['Vehicle_Damage'].map(vehicle_damage_map)

        # 分箱处理
        input_data['Age_Bin'] = pd.cut(input_data['Age'], 
                                     bins=[0, 25, 40, 60, 100], 
                                     labels=[0, 1, 2, 3]).astype(int)
        input_data['Premium_Bin'] = pd.cut(input_data['Annual_Premium'], 
                                         bins=[0, 20000, 40000, 60000, 1000000], 
                                         labels=[0, 1, 2, 3]).astype(int)

        # 选择特征并标准化
        features = input_data[selected_features]
        scaled_features = scaler.transform(features)

        # 预测
        probability = model.predict_proba(scaled_features)[0][1]
        will_buy = 1 if probability > 0.5 else 0

        return jsonify({
            'probability': float(probability),
            'prediction': int(will_buy),
            'status': 'success'
        })

    except Exception as e:
        return jsonify({
            'error': str(e),
            'status': 'error'
        })

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
    """

    print(deploy_code)
    print("\n3. 运行服务: python app.py")
    print("4. 使用以下示例测试API:")
    print("   curl -X POST http://localhost:5000/predict \\")
    print("        -H \"Content-Type: application/json\" \\")
    print("        -d '{\"Gender\": \"Male\", \"Age\": 35, \"Driving_License\": 1, \"Region_Code\": 28, ")
    print("             \"Previously_Insured\": 0, \"Vehicle_Age\": \"1-2 Year\", \"Vehicle_Damage\": \"Yes\", ")
    print("             \"Annual_Premium\": 35000, \"Policy_Sales_Channel\": 26, \"Vintage\": 150}'")

# 主函数
def main():
    # 模型训练与优化
    best_model, scaler, selected_features, auc_rf, auc_lgb, ensemble_auc = optimization_pipeline()

    # 预测并输出结果
    sample_predictions = predict_and_output(best_model, scaler, selected_features)

    # 输出预测示例
    print("\n预测结果示例：")
    print(sample_predictions)

    # 输出模型部署说明
    deploy_model_api()

    # 保存优化结果文档（使用UTF-8编码）
    with open('optimization_report.txt', 'w', encoding='utf-8') as f:
        f.write("保险销售预测模型优化报告\n")
        f.write("=" * 50 + "\n")
        f.write("第一轮优化：基础随机森林模型\n")
        f.write("   - 使用特征工程：分类变量编码、年龄和保费分箱\n")
        f.write(f"   - AUC: {auc_rf:.4f}\n")
        f.write("   - 图表保存在: figures/rf/\n\n")
        f.write("第二轮优化：LightGBM模型\n")
        f.write("   - 使用更高效的梯度提升框架\n")
        f.write("   - 调整关键参数：学习率、树深度、叶子节点数\n")
        f.write(f"   - AUC: {auc_lgb:.4f} (+{((auc_lgb - auc_rf) / auc_rf) * 100:.2f}%)\n")
        f.write("   - 图表保存在: figures/lgb/\n\n")
        f.write("第三轮优化：模型集成 (RF + LightGBM)\n")
        f.write("   - 结合两种模型的预测结果取平均\n")
        f.write(f"   - AUC: {ensemble_auc:.4f} (+{((ensemble_auc - auc_rf) / auc_rf) * 100:.2f}%)\n")
        f.write("   - 图表保存在: figures/ensemble/\n\n")
        f.write("最终模型: LightGBM (AUC最高)\n")
        f.write(f"最终AUC: {auc_lgb:.4f}\n\n")
        f.write("图表保存结构：\n")
        f.write("figures/\n")
        f.write("├── target_distribution.png\n")
        f.write("├── rf/\n")
        f.write("│   ├── RF_feature_importance.png\n")
        f.write("│   ├── RF_roc_curve.png\n")
        f.write("│   └── RF_confusion_matrix.png\n")
        f.write("├── lgb/\n")
        f.write("│   ├── LGB_feature_importance.png\n")
        f.write("│   ├── LGB_roc_curve.png\n")
        f.write("│   └── LGB_confusion_matrix.png\n")
        f.write("└── ensemble/\n")
        f.write("    ├── 集成模型_roc_curve.png\n")
        f.write("    └── 集成模型_confusion_matrix.png\n")

    print("\n优化报告已保存为 optimization_report.txt (UTF-8编码)")
    print("所有图表已按模型分类保存在 figures/ 目录下")

if __name__ == "__main__":
    main()
