# -*- coding: utf-8 -*-
"""
保单续保预测模型
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay

# Matplotlib display configuration: SimHei so Chinese titles/labels render,
# and keep the minus sign drawable while a CJK font is active.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],
    'axes.unicode_minus': False,
})

def load_and_preprocess(file_path):
    """Load the policy workbook and derive the modelling features.

    Parameters
    ----------
    file_path : str or path-like
        Path to an ``.xlsx`` workbook readable by the openpyxl engine.

    Returns
    -------
    tuple
        ``(df, numeric_features, categorical_features)`` — the cleaned
        DataFrame plus the column-name lists consumed by
        ``build_model_pipeline``.

    Raises
    ------
    KeyError
        If a required field cannot be matched under any known alias.
    """
    df = pd.read_excel(file_path, engine='openpyxl')

    # --- Field diagnostics --------------------------------------------------
    print("\n=== 原始字段名称 ===")
    print(df.columns.tolist())

    # Normalize headers so the alias matching below is reliable.
    df.columns = df.columns.str.strip().str.lower()

    # --- Required-field validation ------------------------------------------
    # canonical name -> accepted aliases (English or Chinese headers).
    required_fields = {
        'policy_term': ['policy_term', '保险期限'],
        'policy_start_date': ['policy_start_date', '保单起始日'],
        'policy_end_date': ['policy_end_date', '保单到期日'],
        'renewal': ['renewal', '续保状态']
    }

    # Auto-match each required field; the for/else raises when no alias hits.
    column_mapping = {}
    for eng_name, candidates in required_fields.items():
        for candidate in candidates:
            if candidate.lower() in df.columns:
                # Key on the lowered name — df.columns were lowered above.
                column_mapping[candidate.lower()] = eng_name
                break
        else:
            raise KeyError(f"缺失必要字段: {eng_name} (尝试匹配: {candidates})")

    df.rename(columns=column_mapping, inplace=True)

    # --- Date handling ------------------------------------------------------
    for date_col in ['policy_start_date', 'policy_end_date']:
        # Accept mixed '%Y-%m-%d' / '%Y/%m/%d' cells (and native Excel
        # datetimes); unparseable values become NaT instead of aborting.
        # This replaces the earlier bare try/except chain, which also
        # swallowed KeyboardInterrupt/SystemExit.
        df[date_col] = pd.to_datetime(df[date_col], errors='coerce')

        # Drop rows whose date could not be parsed, with a visible warning.
        null_dates = df[date_col].isnull().sum()
        if null_dates > 0:
            print(f"警告：{date_col} 有 {null_dates} 条无效日期记录")
            df = df.dropna(subset=[date_col])

    # --- Derived fields -----------------------------------------------------
    # Term in years: prefer the explicit policy_term field when present.
    if 'policy_term' in df.columns:
        # astype(str) guards against a numeric dtype (``.str`` would raise);
        # expand=False yields a Series rather than a one-column DataFrame.
        df['policy_term_years'] = (
            df['policy_term'].astype(str)
            .str.extract(r'(\d+)', expand=False)
            .astype(float)
        )
    else:
        df['policy_term_years'] = (
            (df['policy_end_date'] - df['policy_start_date']).dt.days / 365.25
        )

    # Elapsed span between start and end, in fractional years.
    df['policy_duration'] = (
        (df['policy_end_date'] - df['policy_start_date']).dt.days / 365.25
    )

    # --- Sanity printout ----------------------------------------------------
    print("\n=== 关键字段验证 ===")
    print("policy_term_years 示例:", df['policy_term_years'].head(3).values)
    print("policy_duration 统计:")
    print(df['policy_duration'].describe())

    # Target encoding: tolerate case/whitespace variants of Yes/No, then
    # drop rows that still fail to map — the original left silent NaNs in
    # the target, which crash LogisticRegression.fit downstream.
    df['renewal'] = (
        df['renewal'].astype(str).str.strip().str.lower().map({'yes': 1, 'no': 0})
    )
    unmapped = df['renewal'].isnull().sum()
    if unmapped > 0:
        print(f"警告：renewal 有 {unmapped} 条无法识别的取值记录")
        df = df.dropna(subset=['renewal'])
    df['renewal'] = df['renewal'].astype(int)

    # Feature lists consumed by build_model_pipeline().
    # NOTE(review): columns like 'age'/'premium_amount' are assumed to
    # exist in the sheet — confirm against the actual workbook schema.
    numeric_features = ['age', 'premium_amount', 'family_members',
                        'policy_term_years', 'policy_duration']
    categorical_features = ['gender', 'birth_region', 'insurance_region',
                            'income_level', 'education_level', 'occupation',
                            'marital_status', 'policy_type']

    return df, numeric_features, categorical_features

def build_model_pipeline(numeric_features, categorical_features):
    """Assemble the preprocessing + classification pipeline.

    Numeric columns are standardized; categorical columns are one-hot
    encoded with the first level dropped and unseen levels ignored at
    transform time. The classifier is a class-weight-balanced logistic
    regression (max_iter raised to 1000 for convergence).
    """
    numeric_step = ('num', StandardScaler(), numeric_features)
    categorical_step = (
        'cat',
        OneHotEncoder(drop='first', handle_unknown='ignore'),
        categorical_features,
    )
    preprocessor = ColumnTransformer(transformers=[numeric_step, categorical_step])

    classifier = LogisticRegression(class_weight='balanced', max_iter=1000)
    return Pipeline(steps=[('preprocessor', preprocessor),
                           ('classifier', classifier)])

def visualize_coefficients(pipeline, numeric_features, categorical_features,
                           top_n=20, output_dir="analysis_figures"):
    """Plot the top-N logistic-regression coefficients by magnitude.

    Parameters
    ----------
    pipeline : sklearn.pipeline.Pipeline
        Fitted pipeline with 'preprocessor' and 'classifier' steps.
    numeric_features, categorical_features : list of str
        Feature names in the same order they were given to the
        ColumnTransformer.
    top_n : int, default 20
        Number of coefficients to display.
    output_dir : str, default "analysis_figures"
        Directory the PNG is written to. This was previously read from a
        module-level global defined only under ``__main__``, which raised
        NameError when the function was imported; it is now an explicit,
        backward-compatible keyword parameter.
    """
    os.makedirs(output_dir, exist_ok=True)  # safe when called standalone

    # Recover post-encoding feature names. The ColumnTransformer emits the
    # numeric block first, then the one-hot columns, matching coef_ order.
    cat_encoder = pipeline.named_steps['preprocessor'].named_transformers_['cat']
    cat_features = cat_encoder.get_feature_names_out(categorical_features)
    all_features = numeric_features + list(cat_features)

    # One coefficient per transformed feature (binary classifier -> row 0).
    coefficients = pipeline.named_steps['classifier'].coef_[0]
    feature_importance = pd.DataFrame({'feature': all_features, 'coef': coefficients})

    # Make long one-hot names easier to read on the axis.
    feature_importance['feature'] = feature_importance['feature'].str.replace('_', ' ')

    # Rank by absolute effect size and keep the strongest top_n.
    feature_importance['abs_coef'] = feature_importance['coef'].abs()
    top_features = feature_importance.sort_values('abs_coef', ascending=False).head(top_n)

    plt.figure(figsize=(12, 8))
    # Green bars push toward renewal, red bars away from it.
    colors = ['#2CA02C' if x > 0 else '#D62728' for x in top_features['coef']]
    plt.barh(top_features['feature'], top_features['coef'], color=colors)

    plt.title(f'Top {top_n} 影响续保的因素')
    plt.xlabel('系数值')
    plt.gca().invert_yaxis()  # largest |coef| at the top
    plt.grid(axis='x', alpha=0.3)

    # Numeric label beside each bar, contrast-adjusted for strong bars.
    for i, v in enumerate(top_features['coef']):
        plt.text(v if v > 0 else v - 0.02, i, f"{v:.2f}",
                 color='white' if abs(v) > 0.5 else 'black',
                 ha='right' if v < 0 else 'left')

    plt.tight_layout()
    plt.savefig(f'{output_dir}/top_coefficients.png', dpi=150, bbox_inches='tight')
    plt.show()

def main(file_path, output_dir="analysis_figures"):
    """End-to-end run: load data, train, evaluate, and save figures.

    Parameters
    ----------
    file_path : str or path-like
        Excel workbook containing the policy data.
    output_dir : str, default "analysis_figures"
        Directory figures are written to (created if missing). This was
        previously a global set only under ``__main__``, which made
        ``main`` fail with NameError when imported; it is now an explicit,
        backward-compatible keyword parameter.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Load and clean the data; also yields the model's feature lists.
    df, numeric_features, categorical_features = load_and_preprocess(file_path)

    # Hold out 20% for evaluation. stratify=y keeps the renewal class
    # ratio identical across the split — important for an imbalanced
    # target (the classifier already uses class_weight='balanced').
    X = df.drop('renewal', axis=1)
    y = df['renewal']
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y)

    # Build and fit the preprocessing + logistic-regression pipeline.
    pipeline = build_model_pipeline(numeric_features, categorical_features)
    pipeline.fit(X_train, y_train)

    # Text metrics on the held-out set.
    y_pred = pipeline.predict(X_test)
    print(classification_report(y_test, y_pred))

    # Confusion-matrix figure.
    cm = confusion_matrix(y_test, y_pred)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm)
    disp.plot(cmap='Blues')
    plt.title('混淆矩阵')
    plt.savefig(f'{output_dir}/confusion_matrix.png', dpi=150)
    plt.show()

    # Coefficient figure.
    visualize_coefficients(pipeline, numeric_features, categorical_features)

if __name__ == "__main__":
    # Output directory for figures. Defined at module (global) scope so
    # functions that reference `output_dir` can see it when the file is
    # run as a script; created up front so plt.savefig() succeeds.
    output_dir = "analysis_figures"
    os.makedirs(output_dir, exist_ok=True)
    # Entry point: expects the workbook next to the script.
    main("policy_data.xlsx") 