import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest, chi2


# 数据探查
def data_exploration():
    """Load the training and test CSV files and print a basic overview of each.

    Returns:
        (train_df, test_df) on success, or (None, None) when either
        ``train.csv`` or ``test.csv`` is missing from the working directory.
    """
    try:
        df_train = pd.read_csv('train.csv')
        df_test = pd.read_csv('test.csv')
    except FileNotFoundError as e:
        print(f"文件读取错误: {e}")
        print("请确保train.csv和test.csv文件在当前目录下")
        return None, None

    # Overview of the training set: dtypes/non-null counts, then a preview.
    print('=== train.csv 数据基本信息 ===')
    df_train.info()
    print('\n=== train.csv 前5行数据 ===')
    print(df_train.head())

    # Same overview for the test set.
    print('\n=== test.csv 数据基本信息 ===')
    df_test.info()
    print('\n=== test.csv 前5行数据 ===')
    print(df_test.head())

    return df_train, df_test


# 数据预处理
def data_preprocessing(train_data, test_data):
    """Clean, feature-engineer, and encode the train/test frames.

    Fixes relative to the original pipeline:
    - date features are extracted BEFORE label encoding (the date columns are
      object dtype, so previously they were label-encoded first and
      ``pd.to_datetime`` then parsed small integers — every extracted
      year/month/day collapsed to the 1970 epoch);
    - test-set missing values are filled with the TRAIN mode, not the test
      set's own mode (avoids using test statistics);
    - unseen test categories map to the genuinely most frequent train category
      (``le.classes_[0]`` is merely the alphabetically first class);
    - input frames are copied, not mutated in place.

    Args:
        train_data: training DataFrame (must contain 'policy_id',
            'authorities_contacted', 'policy_bind_date', 'incident_date').
        test_data: test DataFrame with the same columns.

    Returns:
        (train_processed, test_processed, test_policy_ids)
    """
    # Work on copies so the caller's frames are not mutated in place.
    train_data = train_data.copy()
    test_data = test_data.copy()

    # Keep policy IDs for the final prediction output.
    test_policy_ids = test_data['policy_id'].copy()

    # Fill missing authorities_contacted with the TRAINING-set mode for both
    # frames — the test set must not contribute its own statistics.
    fill_value = train_data['authorities_contacted'].mode()[0]
    train_data['authorities_contacted'] = train_data['authorities_contacted'].fillna(fill_value)
    test_data['authorities_contacted'] = test_data['authorities_contacted'].fillna(fill_value)

    def _extract_date_features(df, col):
        # Parse once instead of four redundant pd.to_datetime calls.
        dates = pd.to_datetime(df[col])
        df[col + '_year'] = dates.dt.year
        df[col + '_month'] = dates.dt.month
        df[col + '_day'] = dates.dt.day
        df[col + '_dayofweek'] = dates.dt.dayofweek
        return df

    # Extract calendar features while the columns still hold date strings,
    # then drop the raw date columns so they are never label-encoded.
    date_cols = ['policy_bind_date', 'incident_date']
    for col in date_cols:
        train_data = _extract_date_features(train_data, col)
        test_data = _extract_date_features(test_data, col)
    train_data = train_data.drop(date_cols, axis=1)
    test_data = test_data.drop(date_cols, axis=1)

    # Label-encode the remaining object (categorical) columns.
    cat_columns = train_data.select_dtypes(include=['object']).columns
    label_encoders = {}

    for col in cat_columns:
        le = LabelEncoder()
        # Capture the truly most frequent raw category before encoding.
        most_common = train_data[col].mode()[0]
        train_data[col] = le.fit_transform(train_data[col])

        # Map test categories unseen during training to the most frequent
        # train category so le.transform cannot raise.
        known = set(le.classes_)
        unseen = [v for v in test_data[col].unique() if v not in known]
        if unseen:
            print(f"警告：在 {col} 列发现训练集中未出现的类别，将其替换为最常见类别")
            test_data[col] = test_data[col].where(~test_data[col].isin(unseen), most_common)

        test_data[col] = le.transform(test_data[col])
        label_encoders[col] = le

    return train_data, test_data, test_policy_ids


# 模型训练与优化
def model_training_and_optimization(train_data, test_data, test_policy_ids):
    """Feature-select, balance, grid-search four classifiers, and persist the
    predictions of the best one (selected by validation F1) to CSV.

    Fixes relative to the original pipeline:
    - the train/validation split happens BEFORE SMOTE, and SMOTE is applied to
      the training fold only — oversampling before the split leaks synthetic
      near-duplicates of training rows into the validation set and inflates
      every reported metric;
    - the split is stratified so the validation fold keeps the original class
      balance;
    - the number of selected features is capped at the column count so
      SelectKBest cannot fail on narrow inputs.

    Args:
        train_data: preprocessed training DataFrame containing a 'fraud' target.
        test_data: preprocessed test DataFrame (same feature columns).
        test_policy_ids: Series of policy IDs aligned with test_data rows.

    Side effects:
        Writes predictions to 'test_fraud_predictions.csv' and prints metrics.
    """
    # Split features from the target.
    X = train_data.drop('fraud', axis=1)
    y = train_data['fraud']

    # chi2 requires non-negative feature values.
    X = X.clip(lower=0)
    test_data = test_data.clip(lower=0)

    # Univariate chi2 feature selection; never request more features than exist.
    selector = SelectKBest(chi2, k=min(30, X.shape[1]))
    X_selected = selector.fit_transform(X, y)
    test_selected = selector.transform(test_data)

    # Hold out a stratified validation set drawn from the ORIGINAL class
    # distribution — before any oversampling.
    X_train, X_val, y_train, y_val = train_test_split(
        X_selected, y, test_size=0.2, random_state=42, stratify=y
    )

    # Oversample the minority class on the training fold only.
    smote = SMOTE(random_state=42)
    X_train, y_train = smote.fit_resample(X_train, y_train)

    # Candidate models with small hyperparameter grids.
    models = {
        'RandomForest': (RandomForestClassifier(random_state=42), {
            'n_estimators': [50, 100],
            'max_depth': [None, 10],
            'min_samples_split': [2, 5]
        }),
        'GradientBoosting': (GradientBoostingClassifier(random_state=42), {
            'n_estimators': [50, 100],
            'learning_rate': [0.01, 0.1],
            'max_depth': [3, 5]
        }),
        'XGBoost': (XGBClassifier(random_state=42), {
            'n_estimators': [50, 100],
            'learning_rate': [0.01, 0.1],
            'max_depth': [3, 5]
        }),
        'LightGBM': (LGBMClassifier(random_state=42), {
            'n_estimators': [50, 100],
            'learning_rate': [0.01, 0.1],
            'max_depth': [-1, 3]
        })
    }

    best_model = None
    best_score = -1
    best_model_name = ""

    # Grid-search each model, evaluating on the untouched validation fold.
    for model_name, (model, param_grid) in models.items():
        print(f"\n正在训练 {model_name} 模型...")
        grid_search = GridSearchCV(model, param_grid, cv=3, scoring='f1', n_jobs=-1)
        grid_search.fit(X_train, y_train)

        y_pred = grid_search.predict(X_val)
        accuracy = accuracy_score(y_val, y_pred)
        precision = precision_score(y_val, y_pred)
        recall = recall_score(y_val, y_pred)
        f1 = f1_score(y_val, y_pred)

        print(f"{model_name} 模型性能:")
        print(f"最佳参数: {grid_search.best_params_}")
        print(f"准确率: {accuracy:.4f}")
        print(f"精确率: {precision:.4f}")
        print(f"召回率: {recall:.4f}")
        print(f"F1得分: {f1:.4f}")

        # Keep the model with the best validation F1.
        if f1 > best_score:
            best_score = f1
            best_model = grid_search.best_estimator_
            best_model_name = model_name

    print(f"\n最佳模型是 {best_model_name}，F1得分: {best_score:.4f}")

    # Predict on the test set and persist alongside the policy IDs.
    test_pred = best_model.predict(test_selected)
    test_pred_df = pd.DataFrame({
        'policy_id': test_policy_ids,
        'fraud_prediction': test_pred
    })
    test_pred_df.to_csv('test_fraud_predictions.csv', index=False)
    print("\n预测结果已保存至 test_fraud_predictions.csv")


if __name__ == '__main__':
    print("===== 保险反欺诈检测流程开始 =====")

    # Stage 1: load and inspect the data.
    print("\n----- 数据探查阶段 -----")
    train_data, test_data = data_exploration()
    if train_data is None or test_data is None:
        print("程序终止")
        # raise SystemExit instead of exit(): exit() is a site-module
        # convenience not guaranteed under all run modes, and a non-zero
        # status correctly signals failure to the caller.
        raise SystemExit(1)

    # Stage 2: clean, encode, and feature-engineer both frames.
    print("\n----- 数据预处理阶段 -----")
    train_processed, test_processed, test_policy_ids = data_preprocessing(train_data, test_data)

    # Stage 3: train, compare, and persist predictions of the best model.
    print("\n----- 模型训练与优化阶段 -----")
    model_training_and_optimization(train_processed, test_processed, test_policy_ids)

    print("\n===== 保险反欺诈检测流程完成 =====")
