import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, confusion_matrix, roc_curve, precision_recall_curve, classification_report
import joblib
import matplotlib.pyplot as plt
import seaborn as sns
import os
from matplotlib import font_manager as fm


# 改进的中文字体支持方案
def setup_chinese_font():
    """Configure matplotlib for Chinese text rendering across operating systems.

    First lists common CJK family names in ``rcParams`` so an installed system
    font can be picked up, then searches well-known per-platform font file
    locations and the script's own directory.

    Returns:
        matplotlib.font_manager.FontProperties: properties of the located
        font (pass via ``fontproperties=`` in plotting calls), or a default
        ``FontProperties`` when no Chinese font could be found.
    """
    try:
        # Preferred CJK family names; matplotlib uses the first one installed.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'STXihei', 'SimSun', 'Arial Unicode MS']
        # Draw the minus sign as ASCII '-' so it is not rendered as a box.
        plt.rcParams['axes.unicode_minus'] = False

        # Well-known font file locations per platform.
        font_paths = [
            'C:/Windows/Fonts/simhei.ttf',  # Windows SimHei
            'C:/Windows/Fonts/msyh.ttc',  # Windows Microsoft YaHei
            '/System/Library/Fonts/PingFang.ttc',  # macOS
            '/Library/Fonts/Arial Unicode.ttf',  # macOS
            '/usr/share/fonts/truetype/droid/DroidSansFallbackFull.ttf'  # Linux
        ]

        # Also look next to this script as a last resort.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        font_paths.append(os.path.join(current_dir, 'SimHei.ttf'))

        # Use the first font file that actually exists on disk.
        for path in font_paths:
            if os.path.exists(path):
                # Register the file with matplotlib's font manager; without
                # this, setting rcParams to the family name cannot resolve
                # back to the font file and the setting is silently ignored.
                fm.fontManager.addfont(path)
                font_prop = fm.FontProperties(fname=path)
                plt.rcParams['font.sans-serif'] = [font_prop.get_name()]
                print(f"使用字体: {font_prop.get_name()}")
                return font_prop

        print("警告: 未找到中文字体文件，图表可能无法正确显示中文")
        return fm.FontProperties()

    except Exception as e:
        # Font setup must never abort the pipeline; fall back to defaults.
        print(f"字体设置错误: {e}")
        return fm.FontProperties()


# Configure fonts once at import time; `font` is reused as the
# `fontproperties=` argument throughout the plotting helpers below.
font = setup_chinese_font()


# 加载数据
def load_data(train_path='F:/实训资料/实训二/工单/大数据-八维保险数据挖掘-10-保险反欺诈预测数据/train.csv',
              test_path='F:/实训资料/实训二/工单/大数据-八维保险数据挖掘-10-保险反欺诈预测数据/test.csv'):
    """Load the train and test CSV files.

    Args:
        train_path: path to the training CSV (defaults to the original
            hard-coded location for backward compatibility).
        test_path: path to the test CSV.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (train, test) frames.
    """
    train = pd.read_csv(train_path)
    test = pd.read_csv(test_path)
    return train, test


# 特征工程
def feature_engineering(df):
    """Derive modelling features from the raw claims DataFrame.

    Adds policy-duration, incident-time-segment, claim-ratio and net-capital
    features, normalizes '?' placeholders, and drops identifier/date columns.

    Args:
        df: raw DataFrame; it is NOT mutated (a copy is taken).

    Returns:
        pd.DataFrame: new frame with engineered features.
    """
    # Work on a copy so callers that keep the original frame are unaffected.
    df = df.copy()

    # Parse date columns.
    df['policy_bind_date'] = pd.to_datetime(df['policy_bind_date'])
    df['incident_date'] = pd.to_datetime(df['incident_date'])

    # New feature: how long the policy was held before the incident (days).
    df['policy_duration_days'] = (df['incident_date'] - df['policy_bind_date']).dt.days

    # New feature: bucket the incident hour into four day segments.
    bins = [-1, 5, 11, 17, 23]
    labels = ['凌晨', '上午', '下午', '晚上']
    df['incident_time_segment'] = pd.cut(df['incident_hour_of_the_day'],
                                         bins=bins, labels=labels)

    # New features: share of each claim component in the total claim.
    # A zero total would produce inf, which the median imputer does not
    # handle and which breaks StandardScaler, so map 0 denominators to NaN.
    total = df['total_claim_amount'].replace(0, np.nan)
    df['injury_claim_ratio'] = df['injury_claim'] / total
    df['property_claim_ratio'] = df['property_claim'] / total
    df['vehicle_claim_ratio'] = df['vehicle_claim'] / total

    # New feature: net capital result (losses are already negative).
    df['capital_net'] = df['capital-gains'] + df['capital-loss']

    # Normalize '?' placeholders to an explicit 'Unknown' category.
    df['property_damage'] = df['property_damage'].replace({'?': 'Unknown'})
    df['police_report_available'] = df['police_report_available'].replace({'?': 'Unknown'})

    # Drop raw dates and the zip identifier; policy_id is deliberately kept.
    cols_to_drop = ['policy_bind_date', 'incident_date', 'insured_zip']
    df = df.drop(columns=[col for col in cols_to_drop if col in df.columns], errors='ignore')

    return df


# 特征选择
def select_features(df):
    # 根据业务理解和相关性分析选择特征
    selected_features = [
        'age', 'customer_months', 'policy_deductable', 'policy_annual_premium',
        'umbrella_limit', 'capital_net', 'incident_hour_of_the_day',
        'number_of_vehicles_involved', 'bodily_injuries', 'witnesses',
        'total_claim_amount', 'policy_duration_days', 'injury_claim_ratio',
        'property_claim_ratio', 'vehicle_claim_ratio',
        'policy_state', 'policy_csl', 'insured_sex', 'insured_education_level',
        'insured_occupation', 'insured_hobbies', 'insured_relationship',
        'incident_type', 'collision_type', 'incident_severity',
        'authorities_contacted', 'incident_state', 'incident_city',
        'incident_time_segment', 'property_damage', 'police_report_available',
        'auto_make', 'auto_model', 'auto_year'
    ]

    # 确保只保留存在的特征
    return df[[col for col in selected_features if col in df.columns]]


# 预处理管道
def create_preprocessor():
    """Build the ColumnTransformer used in every model pipeline.

    Numeric columns: median imputation followed by standard scaling.
    Categorical columns: constant 'missing' imputation followed by one-hot
    encoding that ignores unseen categories at predict time.

    The transformer/step names ('num', 'cat', 'imputer', 'scaler', 'onehot')
    are part of the contract — downstream code looks them up by name.
    """
    num_cols = [
        'age', 'customer_months', 'policy_deductable', 'policy_annual_premium',
        'umbrella_limit', 'capital_net', 'incident_hour_of_the_day',
        'number_of_vehicles_involved', 'bodily_injuries', 'witnesses',
        'total_claim_amount', 'policy_duration_days', 'injury_claim_ratio',
        'property_claim_ratio', 'vehicle_claim_ratio'
    ]

    cat_cols = [
        'policy_state', 'policy_csl', 'insured_sex', 'insured_education_level',
        'insured_occupation', 'insured_hobbies', 'insured_relationship',
        'incident_type', 'collision_type', 'incident_severity',
        'authorities_contacted', 'incident_state', 'incident_city',
        'incident_time_segment', 'property_damage', 'police_report_available',
        'auto_make', 'auto_model'
    ]

    num_pipe = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler()),
    ])

    cat_pipe = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore')),
    ])

    return ColumnTransformer(transformers=[
        ('num', num_pipe, num_cols),
        ('cat', cat_pipe, cat_cols),
    ])


# 数据探索可视化
def plot_data_exploration(train_df):
    """创建数据探索的可视化图表"""
    # 创建输出目录
    output_dir = "数据探索可视化"
    os.makedirs(output_dir, exist_ok=True)

    # 1. 欺诈比例分布
    plt.figure(figsize=(8, 6))
    fraud_counts = train_df['fraud'].value_counts()
    plt.pie(fraud_counts, labels=['非欺诈', '欺诈'], autopct='%1.1f%%',
            colors=['#66b3ff', '#ff9999'], startangle=90)
    plt.title('欺诈与非欺诈比例', fontproperties=font)
    plt.savefig(f"{output_dir}/欺诈比例分布.png")
    plt.close()

    # 2. 数值特征分布
    numeric_cols = ['age', 'policy_annual_premium', 'total_claim_amount', 'policy_duration_days']
    plt.figure(figsize=(14, 10))
    for i, col in enumerate(numeric_cols, 1):
        plt.subplot(2, 2, i)
        sns.histplot(train_df[col], kde=True, bins=30)
        plt.title(f'{col}分布', fontproperties=font)
        plt.xlabel('')
    plt.tight_layout()
    plt.savefig(f"{output_dir}/数值特征分布.png")
    plt.close()

    # 3. 分类特征分布
    categorical_cols = ['incident_type', 'incident_severity', 'property_damage', 'insured_sex']
    plt.figure(figsize=(14, 10))
    for i, col in enumerate(categorical_cols, 1):
        plt.subplot(2, 2, i)
        sns.countplot(y=col, data=train_df, order=train_df[col].value_counts().index)
        plt.title(f'{col}分布', fontproperties=font)
        plt.xlabel('数量')
        plt.ylabel('')
    plt.tight_layout()
    plt.savefig(f"{output_dir}/分类特征分布.png")
    plt.close()

    # 4. 欺诈与特征关系
    plt.figure(figsize=(14, 10))

    # 年龄与欺诈
    plt.subplot(2, 2, 1)
    sns.boxplot(x='fraud', y='age', data=train_df)
    plt.title('年龄与欺诈关系', fontproperties=font)
    plt.xlabel('欺诈')
    plt.ylabel('年龄')

    # 保险金额与欺诈
    plt.subplot(2, 2, 2)
    sns.boxplot(x='fraud', y='policy_annual_premium', data=train_df)
    plt.title('保险金额与欺诈关系', fontproperties=font)
    plt.xlabel('欺诈')
    plt.ylabel('保险金额')

    # 事故严重性与欺诈
    plt.subplot(2, 2, 3)
    incident_severity_fraud = train_df.groupby(['incident_severity', 'fraud']).size().unstack()
    incident_severity_fraud.plot(kind='bar', stacked=True)
    plt.title('事故严重性与欺诈关系', fontproperties=font)
    plt.xlabel('事故严重性')
    plt.ylabel('数量')

    # 财产损失与欺诈
    plt.subplot(2, 2, 4)
    property_damage_fraud = train_df.groupby(['property_damage', 'fraud']).size().unstack()
    property_damage_fraud.plot(kind='bar', stacked=True)
    plt.title('财产损失与欺诈关系', fontproperties=font)
    plt.xlabel('财产损失')
    plt.ylabel('数量')

    plt.tight_layout()
    plt.savefig(f"{output_dir}/欺诈与特征关系.png")
    plt.close()

    # 5. 新特征分析
    plt.figure(figsize=(14, 6))

    # 保险持有时间与欺诈
    plt.subplot(1, 2, 1)
    sns.boxplot(x='fraud', y='policy_duration_days', data=train_df)
    plt.title('保险持有时间与欺诈', fontproperties=font)
    plt.xlabel('欺诈')
    plt.ylabel('持有时间(天)')

    # 事故时间段与欺诈
    plt.subplot(1, 2, 2)
    time_segment_fraud = train_df.groupby(['incident_time_segment', 'fraud']).size().unstack()
    time_segment_fraud.plot(kind='bar', stacked=True)
    plt.title('事故时间段与欺诈', fontproperties=font)
    plt.xlabel('时间段')
    plt.ylabel('数量')

    plt.tight_layout()
    plt.savefig(f"{output_dir}/新特征分析.png")
    plt.close()

    return output_dir


# 模型评估可视化
def plot_model_evaluation(models, X_test, y_test):
    """创建模型评估的可视化图表"""
    # 创建输出目录
    output_dir = "模型评估可视化"
    os.makedirs(output_dir, exist_ok=True)

    # 1. ROC曲线
    plt.figure(figsize=(10, 8))
    for name, model in models.items():
        y_pred_proba = model.predict_proba(X_test)[:, 1]
        fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
        auc_score = roc_auc_score(y_test, y_pred_proba)
        plt.plot(fpr, tpr, label=f'{name} (AUC = {auc_score:.3f})')

    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel('假正率', fontproperties=font)
    plt.ylabel('真正率', fontproperties=font)
    plt.title('ROC曲线比较', fontproperties=font)
    plt.legend(loc='lower right')
    plt.grid(True)
    plt.savefig(f"{output_dir}/ROC曲线比较.png")
    plt.close()

    # 2. 混淆矩阵（最佳模型）
    # 选择最佳模型
    results = {}
    for name, model in models.items():
        y_pred_proba = model.predict_proba(X_test)[:, 1]
        auc = roc_auc_score(y_test, y_pred_proba)
        results[name] = auc

    best_model_name = max(results, key=results.get)
    best_model = models[best_model_name]

    # 预测并绘制混淆矩阵
    y_pred = best_model.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)

    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['非欺诈', '欺诈'],
                yticklabels=['非欺诈', '欺诈'])
    plt.title(f'{best_model_name}模型混淆矩阵', fontproperties=font)
    plt.xlabel('预测标签', fontproperties=font)
    plt.ylabel('真实标签', fontproperties=font)
    plt.savefig(f"{output_dir}/{best_model_name}_混淆矩阵.png")
    plt.close()

    # 3. 特征重要性（如果是树模型）
    if 'RandomForest' in models or 'GradientBoosting' in models:
        # 尝试获取特征重要性
        try:
            # 获取预处理后的特征名称
            preprocessor = best_model.named_steps['preprocessor']

            # 获取数值特征名称
            numeric_features = preprocessor.transformers_[0][2]

            # 获取分类特征名称
            categorical_transformer = preprocessor.transformers_[1][1]
            ohe = categorical_transformer.named_steps['onehot']
            categorical_features = ohe.get_feature_names_out(
                input_features=preprocessor.transformers_[1][2]
            )

            # 合并所有特征名称
            all_features = np.concatenate([numeric_features, categorical_features])

            # 获取特征重要性
            if hasattr(best_model.named_steps['classifier'], 'feature_importances_'):
                importances = best_model.named_steps['classifier'].feature_importances_

                # 创建特征重要性DataFrame
                feature_importance = pd.DataFrame({
                    'Feature': all_features,
                    'Importance': importances
                }).sort_values('Importance', ascending=False).head(20)

                # 绘制特征重要性
                plt.figure(figsize=(12, 8))
                sns.barplot(x='Importance', y='Feature', data=feature_importance)
                plt.title(f'{best_model_name}模型特征重要性', fontproperties=font)
                plt.xlabel('重要性', fontproperties=font)
                plt.ylabel('特征', fontproperties=font)
                plt.tight_layout()
                plt.savefig(f"{output_dir}/{best_model_name}_特征重要性.png")
                plt.close()
        except Exception as e:
            print(f"无法绘制特征重要性: {e}")

    # 4. 精确率-召回率曲线（最佳模型）
    y_pred_proba = best_model.predict_proba(X_test)[:, 1]
    precision, recall, _ = precision_recall_curve(y_test, y_pred_proba)

    plt.figure(figsize=(10, 8))
    plt.plot(recall, precision, marker='.', label=best_model_name)
    plt.xlabel('召回率', fontproperties=font)
    plt.ylabel('精确率', fontproperties=font)
    plt.title(f'{best_model_name}模型精确率-召回率曲线', fontproperties=font)
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{output_dir}/{best_model_name}_精确率-召回率曲线.png")
    plt.close()

    return output_dir


# 模型训练
def train_models(X_train, y_train):
    preprocessor = create_preprocessor()

    # 定义模型
    models = {
        'RandomForest': RandomForestClassifier(n_estimators=200,
                                               max_depth=10,
                                               random_state=42,
                                               class_weight='balanced'),
        'GradientBoosting': GradientBoostingClassifier(n_estimators=150,
                                                       learning_rate=0.1,
                                                       max_depth=5,
                                                       random_state=42),
        'SVM': SVC(probability=True,
                   C=1.0,
                   kernel='rbf',
                   class_weight='balanced',
                   random_state=42)
    }

    trained_models = {}

    for name, model in models.items():
        # 创建完整管道
        pipeline = Pipeline(steps=[
            ('preprocessor', preprocessor),
            ('classifier', model)
        ])

        # 训练模型
        pipeline.fit(X_train, y_train)
        trained_models[name] = pipeline
        print(f"{name} 模型训练完成")

    return trained_models


# 模型评估
def evaluate_models(models, X_test, y_test):
    results = {}
    for name, model in models.items():
        y_pred_proba = model.predict_proba(X_test)[:, 1]
        auc = roc_auc_score(y_test, y_pred_proba)
        results[name] = auc
        print(f"{name} 模型的 AUC: {auc:.4f}")

        # 输出分类报告
        y_pred = model.predict(X_test)
        print(f"\n{name} 模型分类报告:")
        print(classification_report(y_test, y_pred))

    return results


# 主函数
def main():
    # 加载数据
    train, test = load_data()

    # 数据探索可视化
    print("进行数据探索可视化...")
    try:
        exploration_dir = plot_data_exploration(train)
        print(f"数据探索图表已保存到目录: {exploration_dir}")
    except Exception as e:
        print(f"数据探索可视化出错: {e}")

    # 保存原始测试数据的policy_id
    test_policy_ids = test['policy_id'].copy()

    # 特征工程
    train = feature_engineering(train)
    test = feature_engineering(test)

    # 特征选择
    X_train = select_features(train)
    y_train = train['fraud']
    X_test = select_features(test)

    # 划分训练集和验证集
    X_train_split, X_val, y_train_split, y_val = train_test_split(
        X_train, y_train, test_size=0.2, random_state=42, stratify=y_train)

    # 训练模型
    print("开始训练模型...")
    models = train_models(X_train_split, y_train_split)

    # 评估模型
    print("\n模型评估结果:")
    results = evaluate_models(models, X_val, y_val)

    # 模型评估可视化
    print("\n进行模型评估可视化...")
    try:
        evaluation_dir = plot_model_evaluation(models, X_val, y_val)
        print(f"模型评估图表已保存到目录: {evaluation_dir}")
    except Exception as e:
        print(f"模型评估可视化出错: {e}")

    # 选择最佳模型
    best_model_name = max(results, key=results.get)
    best_model = models[best_model_name]
    print(f"\n最佳模型: {best_model_name}, AUC: {results[best_model_name]:.4f}")

    # 在测试集上进行预测
    test_pred_proba = best_model.predict_proba(X_test)[:, 1]

    # 创建输出结果，确保包含policy_id
    output = pd.DataFrame({
        'policy_id': test_policy_ids,
        'fraud_prob': test_pred_proba,
        'fraud_pred': (test_pred_proba > 0.5).astype(int)
    })

    # 保存结果
    output.to_csv('保险反欺诈预测结果.csv', index=False)
    print("预测结果已保存到 '保险反欺诈预测结果.csv'")

    # 保存模型
    joblib.dump(best_model, 'best_insurance_fraud_model.pkl')
    print("模型已保存为 'best_insurance_fraud_model.pkl'")


# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()