



import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from statsmodels.formula.api import ols, glm
from statsmodels.stats.anova import anova_lm
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.discrete.count_model import ZeroInflatedNegativeBinomialP, ZeroInflatedPoisson
from patsy import dmatrix
# import statsmodels.api as sm
from hmmlearn import hmm
from sklearn.preprocessing import StandardScaler


# Import statsmodels, tolerating a known SciPy-compatibility failure mode.
# On success, regression sections run; on the specific '_lazywhere' breakage
# they are skipped; any other import error is re-raised unchanged.
use_statsmodels = True
try:
    import statsmodels.api as sm
except ImportError as err:
    if '_lazywhere' not in str(err):
        raise
    print("警告: statsmodels与当前SciPy版本不兼容，将跳过回归分析部分")
    use_statsmodels = False


# Seed the global RNG so every run reproduces the same simulated portfolio.
np.random.seed(42)

# ========================
# 1. 模拟车险数据集
# ========================
print("生成模拟车险数据...")
n = 5000  # number of policies

# Basic policy attributes. NOTE: the dict-literal order fixes the sequence of
# RNG draws, which keeps the dataset reproducible under the fixed seed.
data = pd.DataFrame({
    'policy_id': range(1, n + 1),
    'age': np.random.randint(18, 70, n),                                  # driver age
    'vehicle_value': np.random.lognormal(mean=3.0, sigma=0.5, size=n),    # vehicle value
    'driving_experience': np.random.randint(1, 50, n),                    # years of driving
    'vehicle_type': np.random.choice(['Sedan', 'SUV', 'Truck', 'Sports'], n, p=[0.4, 0.3, 0.2, 0.1]),
    'vehicle_color': np.random.choice(['Red', 'Blue', 'Black', 'White', 'Silver'], n),
    'area': np.random.choice(['Urban', 'Suburban', 'Rural'], n, p=[0.5, 0.3, 0.2]),
    'annual_mileage': np.random.gamma(shape=2, scale=5000, size=n),       # annual mileage
    'credit_score': np.random.normal(loc=700, scale=50, size=n),          # credit score
})

# Claim counts: negative binomial with multiplicative frequency effects
# (red vehicles +8.2%, rural areas -12.3%), then zeroed out for a random
# subset to mimic zero inflation.
red_effect = np.where(data['vehicle_color'] == 'Red', 1.082, 1.0)
rural_effect = np.where(data['area'] == 'Rural', 0.877, 1.0)
mean_claim = 0.15 * red_effect * rural_effect * (data['annual_mileage'] / 10000)
claim_counts = np.random.negative_binomial(n=2, p=1 / (1 + mean_claim), size=n)
zero_mask = np.random.random(n) < 0.65  # force ~65% of policies to zero claims
claim_counts[zero_mask] = 0
data['claim_count'] = claim_counts

# Claim severities: gamma-distributed, zeroed wherever no claim occurred.
claim_amounts = np.random.gamma(shape=1.5, scale=2000, size=n)
claim_amounts[claim_counts == 0] = 0
data['claim_amount'] = claim_amounts

# Flag the classic high-risk profile: young driver + sports car + heavy mileage.
high_risk_mask = (
    (data['annual_mileage'] > 20000)
    & (data['age'] < 25)
    & (data['vehicle_type'] == 'Sports')
)
data['high_risk'] = np.where(high_risk_mask, 1, 0)

print("数据样例：")
print(data.head())
print("\n数据描述：")
print(data.describe())

# ========================
# 2. 抽样分布分析
# ========================
print("\n分析索赔次数的分布特征...")

# Histogram of claim counts (left panel of a 1x2 figure).
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.histplot(data['claim_count'], kde=False, bins=15)
plt.title('索赔次数分布')
plt.xlabel('索赔次数')

# Zero-inflation check: share of policies with no claims at all.
zero_claim_ratio = (data['claim_count'] == 0).mean()
print(f"零索赔保单比例: {zero_claim_ratio:.2%}")

# BUG FIX: the original called sm.add_constant unconditionally, so when the
# guarded statsmodels import failed (use_statsmodels = False) this crashed
# with a NameError instead of skipping regression as the warning promised.
if use_statsmodels:
    # Fit a zero-inflated negative binomial (ZINB) model.
    print("\n拟合零膨胀负二项分布(ZINB)...")
    exog = data[['age', 'vehicle_value', 'driving_experience', 'annual_mileage', 'credit_score']]
    exog = sm.add_constant(exog)  # add intercept column

    # The same covariates drive both the count process and the logit
    # zero-inflation part.
    zinb_model = ZeroInflatedNegativeBinomialP(
        endog=data['claim_count'],
        exog=exog,
        exog_infl=exog,
        inflation='logit'
    )
    zinb_results = zinb_model.fit(maxiter=100)
    print(zinb_results.summary())

# ========================
# 3. 假设检验
# ========================
print("\n进行假设检验...")

# Test 1: do red vehicles file claims more often than all other colors?
is_red_vehicle = data['vehicle_color'] == 'Red'
red_claims = data.loc[is_red_vehicle, 'claim_count']
other_claims = data.loc[~is_red_vehicle, 'claim_count']

# Welch's t-test — unequal variances are not assumed equal between groups.
t_stat, p_value = stats.ttest_ind(red_claims, other_claims, equal_var=False)
print(f"红色车辆索赔频率检验: t = {t_stat:.3f}, p = {p_value:.4f}")
if p_value < 0.05:
    red_mean = red_claims.mean()
    other_mean = other_claims.mean()
    print(f"红色车辆平均索赔次数: {red_mean:.3f}, 其他颜色: {other_mean:.3f}")
    print(f"红色车辆索赔频率高 {(red_mean/other_mean-1)*100:.1f}%")

# Test 2: one-way ANOVA for claim-frequency differences across areas.
print("\n地区间索赔频率差异分析:")
area_anova = ols('claim_count ~ area', data=data).fit()
print(anova_lm(area_anova))

# Post-hoc Tukey HSD to locate which specific area pairs differ.
print(pairwise_tukeyhsd(data['claim_count'], data['area'], alpha=0.05))

# ========================
# 4. 多因素分析
# ========================
print("\n进行多因素分析...")

# 4.1 索赔频率预测 (GAM模型)
print("构建广义可加模型(GAM)预测索赔频率...")

# BUG FIX: the original assigned the multi-column design matrix returned by
# dmatrix("bs(annual_mileage, df=4, degree=3)") to the SINGLE column
# data['mileage_spline'] — pandas raises on that assignment, and one column
# could never hold a 4-df spline basis anyway. Reference the spline term
# directly in the patsy formula instead; statsmodels expands it into the
# proper basis columns when building the design matrix.
gam_formula = (
    "claim_count ~ age + driving_experience + C(vehicle_type) "
    "+ C(vehicle_color) + C(area) + bs(annual_mileage, df=4, degree=3)"
)

# Negative binomial GLM — appropriate for overdispersed count outcomes.
gam_model = glm(gam_formula, data=data, family=sm.families.NegativeBinomial()).fit()
print(gam_model.summary())

# 4.2 关键因素分析
print("\n关键因素影响分析:")

# Per-coefficient z-scores / p-values used as a crude importance ranking.
coefs = gam_model.params
std_errors = gam_model.bse
z_scores = coefs / std_errors
p_values = gam_model.pvalues

factor_importance = pd.DataFrame({
    'Variable': coefs.index,
    'Coefficient': coefs.values,
    'Std_Error': std_errors.values,
    'z_score': z_scores.values,
    'p_value': p_values.values
})

# Keep significant non-intercept terms, strongest |z| first.
significant_factors = factor_importance[
    (factor_importance['p_value'] < 0.05) &
    (~factor_importance['Variable'].str.contains('Intercept'))
].sort_values('z_score', key=abs, ascending=False)

print("显著影响因素:")
print(significant_factors[['Variable', 'Coefficient', 'p_value']].head(10))

# Visualize the top-10 significant effects.
plt.figure(figsize=(10, 6))
sns.barplot(x='Coefficient', y='Variable', data=significant_factors.head(10), palette='viridis')
plt.title('车险索赔频率的关键影响因素')
plt.xlabel('回归系数')
plt.ylabel('变量')
plt.tight_layout()

# ========================
# 5. 随机过程 (驾驶行为建模)
# ========================
print("\n使用隐马尔可夫模型(HMM)建模驾驶行为...")

# Simulated telematics panel: one observation per driver per day.
n_days = 365  # one year of data
n_drivers = 100

# Latent driving states, ordered from least to most risky.
states = ['Safe', 'Medium', 'High']

# Row i = transition probabilities out of state i (each row sums to 1).
transition_matrix = np.array([
    [0.85, 0.12, 0.03],  # from Safe
    [0.20, 0.70, 0.10],  # from Medium
    [0.05, 0.15, 0.80]   # from High
])

# Poisson rates per state for: hard braking, speeding, night driving.
# Every rate increases with risk, so states are identifiable by event volume.
emission_means = np.array([
    [0.2, 0.1, 0.3],  # Safe
    [1.0, 0.8, 1.5],  # Medium
    [3.0, 2.5, 4.0]   # High
])

# Generate behavior sequences by walking the Markov chain for each driver.
driver_data = []
for driver in range(n_drivers):
    # Uniform random initial state per driver.
    current_state = np.random.choice(3)
    daily_behavior = []

    for day in range(n_days):
        # Emit Poisson counts conditioned on today's latent state.
        daily_behavior.append({
            'driver_id': driver,
            'day': day,
            'state': states[current_state],
            'hard_braking': np.random.poisson(emission_means[current_state, 0]),
            'speeding': np.random.poisson(emission_means[current_state, 1]),
            'night_driving': np.random.poisson(emission_means[current_state, 2])
        })

        # Transition to tomorrow's state.
        current_state = np.random.choice(3, p=transition_matrix[current_state])

    driver_data.extend(daily_behavior)

driver_df = pd.DataFrame(driver_data)

# Train the HMM on standardized observations.
print("训练隐马尔可夫模型...")
observations = driver_df[['hard_braking', 'speeding', 'night_driving']].values

scaler = StandardScaler()
observations_scaled = scaler.fit_transform(observations)

model = hmm.GaussianHMM(n_components=3, covariance_type="diag", n_iter=100)
model.fit(observations_scaled)

# Decode the most likely hidden-state sequence.
hidden_states = model.predict(observations_scaled)

# BUG FIX: HMM component indices are an arbitrary permutation of the true
# state order (label switching), so the original states[int(s)] mapping
# mislabeled components and the accuracy below was meaningless (~33%).
# Align components to labels by sorting on total emission mean: riskier true
# states emit more events on every channel, so the ordering is identifiable.
component_order = np.argsort(model.means_.sum(axis=1))
component_to_label = {int(comp): states[rank] for rank, comp in enumerate(component_order)}
driver_df['predicted_state'] = [component_to_label[int(s)] for s in hidden_states]

# Agreement between simulated and decoded states (after label alignment).
accuracy = (driver_df['state'] == driver_df['predicted_state']).mean()
print(f"状态预测准确率: {accuracy:.2%}")

# Stacked-area chart of decoded state proportions over time.
plt.figure(figsize=(10, 6))
state_counts = driver_df.groupby('day')['predicted_state'].value_counts(normalize=True).unstack()
state_counts.plot.area(stacked=True, colormap='viridis')
plt.title('驾驶行为状态随时间变化')
plt.ylabel('状态比例')
plt.xlabel('天数')
plt.legend(title='驾驶状态')
plt.tight_layout()

# ========================
# 6. 综合应用：动态保费定价
# ========================
print("\n构建综合定价模型...")

# Attach a randomly sampled policy record to each simulated driver, then
# join the daily behavior panel on driver_id.
driver_claim_data = data.sample(n_drivers).reset_index(drop=True)
driver_claim_data['driver_id'] = range(n_drivers)
merged_data = pd.merge(driver_df, driver_claim_data, on='driver_id')

# Frequency model: negative binomial GLM including the decoded driving state.
pricing_model = glm(
    "claim_count ~ predicted_state + age + vehicle_value + annual_mileage",
    data=merged_data,
    family=sm.families.NegativeBinomial()
).fit()

# In-sample predicted claim frequency for every driver-day row.
merged_data['predicted_claim'] = pricing_model.predict()

# Premium = base rate loaded by half the predicted claim frequency.
base_premium = 1000  # base premium
merged_data['premium'] = base_premium * (1 + merged_data['predicted_claim'] * 0.5)

# Behavioral surcharge keyed on the decoded driving state.
risk_surcharge = {'Safe': 0.0, 'Medium': 0.15, 'High': 0.40}
merged_data['risk_surcharge'] = merged_data['predicted_state'].map(risk_surcharge)
merged_data['total_premium'] = merged_data['premium'] * (1 + merged_data['risk_surcharge'])

# Average premium by decoded state.
premium_by_state = merged_data.groupby('predicted_state')['total_premium'].mean()
print("\n不同驾驶状态的平均保费:")
print(premium_by_state)

# Persist the priced dataset.
merged_data.to_csv('auto_insurance_pricing.csv', index=False)
print("分析完成! 结果已保存到 auto_insurance_pricing.csv")

# Show all accumulated figures.
plt.show()