import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.feature_selection import SelectKBest, f_classif
import joblib


def feature_engineering(df, current_year=2015):
    """Create derived features for the insurance-fraud dataset.

    Adds (in place, on *df*): policy duration, cyclic hour encodings,
    per-category claim ratios, premium/claim ratio, vehicle age, net
    capital change, and a high-risk-hobby flag.

    Args:
        df: DataFrame with the raw claim columns (``policy_bind_date``,
            ``incident_date``, ``incident_hour_of_the_day``, claim amounts,
            ``auto_year``, ``capital-gains``/``capital-loss``,
            ``insured_hobbies``).
        current_year: Reference year for computing vehicle age; the dataset
            covers incidents up to 2015, hence the default.

    Returns:
        The same DataFrame with the new columns added.
    """
    # Parse date columns.
    df['policy_bind_date'] = pd.to_datetime(df['policy_bind_date'])
    df['incident_date'] = pd.to_datetime(df['incident_date'])

    # Number of days the policy was held before the incident.
    df['policy_duration_days'] = (df['incident_date'] - df['policy_bind_date']).dt.days

    # Cyclic encoding of the incident hour so 23:00 and 00:00 are close.
    df['incident_hour_sin'] = np.sin(2 * np.pi * df['incident_hour_of_the_day'] / 24)
    df['incident_hour_cos'] = np.cos(2 * np.pi * df['incident_hour_of_the_day'] / 24)

    # Share of each claim component in the total. A zero total would yield
    # +/-inf, which the downstream imputer/scaler cannot handle, so map zero
    # denominators to NaN (the median imputer fills those later).
    safe_total = df['total_claim_amount'].replace(0, np.nan)
    df['injury_claim_ratio'] = df['injury_claim'] / safe_total
    df['property_claim_ratio'] = df['property_claim'] / safe_total
    df['vehicle_claim_ratio'] = df['vehicle_claim'] / safe_total

    # Annual premium relative to the claimed total.
    df['premium_claim_ratio'] = df['policy_annual_premium'] / safe_total

    # Vehicle age at the reference year.
    df['vehicle_age'] = current_year - df['auto_year']

    # Net capital change (capital-loss is stored as a negative number,
    # so addition yields gains minus losses).
    df['capital_net'] = df['capital-gains'] + df['capital-loss']

    # Flag hobbies empirically associated with higher fraud risk.
    high_risk_hobbies = {'skydiving', 'base-jumping', 'bungie-jumping', 'polo'}
    df['high_risk_hobby'] = df['insured_hobbies'].isin(high_risk_hobbies).astype(int)

    return df


def load_and_preprocess_data(train_path, test_path):
    """Load train/test CSVs, apply shared feature engineering, and re-split.

    The two files are concatenated so that feature engineering (date parsing,
    derived columns) is applied identically to both, then split back apart
    using the ``fraud`` label: rows with a label are training data, rows
    without one (NaN) are test data.

    Args:
        train_path: Path to the training CSV (must contain a ``fraud`` column).
        test_path: Path to the test CSV (no ``fraud`` values).

    Returns:
        Tuple ``(train_data, test_data)`` of DataFrames.
    """
    # Read both datasets.
    train_data = pd.read_csv(train_path)
    test_data = pd.read_csv(test_path)

    # Concatenate so every derived feature is computed consistently.
    all_data = pd.concat([train_data, test_data], ignore_index=True)

    # Shared feature engineering.
    all_data = feature_engineering(all_data)

    # Re-split on label presence. Copy so callers get independent frames
    # rather than views of all_data (avoids SettingWithCopyWarning and
    # silent chained-assignment failures downstream).
    train_data = all_data[all_data['fraud'].notna()].copy()
    test_data = all_data[all_data['fraud'].isna()].copy()

    return train_data, test_data


def prepare_features(train_data, test_data, k_best=30):
    """Build the model-ready feature matrices.

    Numeric columns are median-imputed and standardized via a
    ``ColumnTransformer``; categorical columns are label-encoded (fit on
    train+test combined so every category is known); finally the best
    ``k_best`` features are kept by ANOVA F-score. The fitted preprocessor,
    selector and encoders are persisted with joblib as a side effect
    (``preprocessor.pkl``, ``feature_selector.pkl``, ``label_encoders.pkl``).

    Args:
        train_data: Training DataFrame including the ``fraud`` label.
        test_data: Test DataFrame (``fraud`` column present but unused).
        k_best: Maximum number of features to keep (capped at the number
            of available features).

    Returns:
        Tuple ``(X_train_selected, y_train, X_test_selected,
        selected_feature_names, test_policy_ids)``.
    """
    # Separate features from the target; drop identifiers and raw dates.
    drop_cols = ['fraud', 'policy_id', 'policy_bind_date', 'incident_date']
    X_train = train_data.drop(drop_cols, axis=1)
    y_train = train_data['fraud']
    X_test = test_data.drop(drop_cols, axis=1)

    # Categorical columns (everything else is treated as numeric).
    categorical_features = ['policy_state', 'policy_csl', 'insured_sex', 'insured_education_level',
                            'insured_occupation', 'insured_hobbies', 'insured_relationship',
                            'incident_type', 'collision_type', 'incident_severity',
                            'authorities_contacted', 'incident_state', 'incident_city',
                            'property_damage', 'police_report_available', 'auto_make', 'auto_model']
    numeric_features = [col for col in X_train.columns if col not in categorical_features]

    # Ratio features may carry +/-inf (zero claim totals upstream). The
    # median imputer only handles NaN, so normalize inf to NaN first.
    X_train = X_train.replace([np.inf, -np.inf], np.nan)
    X_test = X_test.replace([np.inf, -np.inf], np.nan)

    # Numeric pipeline: impute missing values with the median, then scale.
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])
    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features)])

    # Fit on train only; transform test with the same statistics.
    X_train_processed = preprocessor.fit_transform(X_train)
    X_test_processed = preprocessor.transform(X_test)

    # Label-encode categoricals. Fit each encoder on train+test combined so
    # categories that only appear in the test set can still be transformed.
    label_encoders = {}
    for col in categorical_features:
        le = LabelEncoder()
        combined = pd.concat([X_train[col], X_test[col]])
        le.fit(combined)
        X_train_processed = np.column_stack((X_train_processed, le.transform(X_train[col])))
        X_test_processed = np.column_stack((X_test_processed, le.transform(X_test[col])))
        label_encoders[col] = le

    # Keep the k best features by ANOVA F-score; cap k so SelectKBest does
    # not raise when fewer than k_best features are available.
    k = min(k_best, X_train_processed.shape[1])
    selector = SelectKBest(score_func=f_classif, k=k)
    X_train_selected = selector.fit_transform(X_train_processed, y_train)
    X_test_selected = selector.transform(X_test_processed)

    # Recover the names of the selected columns. Column order in the
    # processed matrix is: numeric features first, then the label-encoded
    # categoricals appended in loop order.
    all_feature_names = numeric_features + categorical_features
    selected_mask = selector.get_support()
    selected_feature_names = [feature for feature, mask in zip(all_feature_names, selected_mask) if mask]

    # Persist the fitted objects for inference-time reuse.
    joblib.dump(preprocessor, 'preprocessor.pkl')
    joblib.dump(selector, 'feature_selector.pkl')
    joblib.dump(label_encoders, 'label_encoders.pkl')

    return X_train_selected, y_train, X_test_selected, selected_feature_names, test_data['policy_id']