import hashlib
import os

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn
from category_encoders import HashingEncoder
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# 1. Load the raw dataset
df = pd.read_csv("filing_rate/data.csv")


# 2. Column groups used by the preprocessing step
y = df["result"]  # label column

# Low-cardinality categorical columns (their types will not change later)
low_card_cols = ["lbh_label"]

# High-cardinality categorical columns (go through the hashing encoder)
high_card_cols = ["mid", "court", "field", "y_state"]

# Numeric columns (standard-scaled)
num_cols = [
    "amount", "zx_amount",
    "call_yjt_count", "call_wjt_count",
    "add_to_tj_days", "tj_to_lbh_days",
]


# 3. Train/test split, stratified on the label so both splits keep the
# original class balance
X_train, X_test, y_train, y_test = train_test_split(
    df,
    y,
    test_size=0.1,
    random_state=42,
    stratify=y,
)


# 4. Individual transformers
ohe = OneHotEncoder(handle_unknown="ignore")  # low-cardinality categoricals
hasher = HashingEncoder(n_components=2**16, cols=high_card_cols)  # hashing trick
scaler = StandardScaler()  # numeric features


# 5. ColumnTransformer: maps each column group to its transformer
preprocess = ColumnTransformer(
    [
        ("ohe", ohe, low_card_cols),
        ("hash", hasher, high_card_cols),
        ("num", scaler, num_cols),
    ],
    # Drop everything else (including the label column still present in df)
    remainder="drop",
)


# 6. Full pipeline: preprocessing followed by a linear classifier
clf = SGDClassifier(
    loss="log_loss",  # logistic loss -> probabilistic linear model
    random_state=42,  # reproducibility
    class_weight="balanced",  # re-weight classes by inverse frequency
)
pipe = Pipeline([("prep", preprocess), ("clf", clf)])


# 7. Fit on the training split
pipe.fit(X_train, y_train)


# 8. Evaluate on the training set (sanity check for under/over-fitting)
train_accuracy = accuracy_score(y_train, pipe.predict(X_train))
print(f"\n训练集准确率: {train_accuracy:.4f}")


# 9. Evaluate on the held-out test set
y_test_pred = pipe.predict(X_test)
test_accuracy = accuracy_score(y_test, y_test_pred)
print(f"测试集准确率: {test_accuracy:.4f}")


# 10. Per-class precision/recall/F1 report
print("\n详细分类报告:")
print(classification_report(y_test, y_test_pred))


# 11. Confusion matrix rendered as a heatmap and saved to disk
cm = confusion_matrix(y_test, y_test_pred)
plt.figure(figsize=(8, 6))
seaborn.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.xlabel("预测标签")
plt.ylabel("真实标签")
plt.title("混淆矩阵")
plt.savefig("filing_rate/confusion_matrix.png", dpi=300, bbox_inches="tight")
plt.close()


# 12. Weight analysis: linear coefficients with respect to class 1
print("\n" + "=" * 50)
print("模型权重分析 - 对类别1的影响")
print("=" * 50)

# Pull the fitted classifier out of the pipeline
classifier = pipe.named_steps["clf"]
weights = classifier.coef_[0]  # weights of class 1 relative to class 0

# Reconstruct feature names in the same order the ColumnTransformer
# concatenates its outputs: one-hot block, then hash block, then numerics.

# 1. One-hot feature names for the low-cardinality columns
ohe_features = (
    pipe.named_steps["prep"]
    .named_transformers_["ohe"]
    .get_feature_names_out(low_card_cols)
)

# 2. Synthetic names for the hash block (HashingEncoder emits 2^16 columns)
hash_features = [f"hash_{i}" for i in range(2**16)]

# 3. Numeric columns keep their original names
feature_names = [*ohe_features, *hash_features, *num_cols]

# Keep exactly as many names as there are fitted coefficients
feature_names = feature_names[: len(weights)]

print(f"\n总特征数量: {len(weights)}")
print(f"数值特征数量: {len(num_cols)}")
print(f"低基数特征数量: {len(ohe_features)}")
print(f"高基数特征数量: {len(hash_features)}")

# Assemble a weight table for inspection
weight_df = pd.DataFrame(
    {
        "特征": feature_names,
        "权重": weights,
        "影响方向": np.where(weights > 0, "正向", "负向"),
        "权重绝对值": np.abs(weights),
    }
)

# Rank features by absolute weight (most influential first)
weight_df_sorted = weight_df.sort_values("权重绝对值", ascending=False)

print("\n" + "-" * 80)
print("所有特征权重（按重要性排序，前20个）：")
print("-" * 80)
print(weight_df_sorted.head(20).to_string(index=False))

print("\n" + "-" * 80)
print("数值特征权重分析：")
print("-" * 80)
# Restrict the table to the numeric columns only
num_weight_df = weight_df[weight_df["特征"].isin(num_cols)].sort_values(
    "权重绝对值", ascending=False
)
print(num_weight_df.to_string(index=False))

print("\n" + "-" * 80)
print("关键发现 - 数值特征对类别1的影响：")
print("-" * 80)
for _, row in num_weight_df.iterrows():
    direction = "促进" if row["权重"] > 0 else "抑制"
    # Bucket the absolute weight into a coarse importance label
    abs_weight = row["权重绝对值"]
    if abs_weight > 0.1:
        importance = "重要"
    elif abs_weight > 0.05:
        importance = "一般"
    else:
        importance = "轻微"
    print(
        f"- {row['特征']}: {direction}类别1预测 (权重: {row['权重']:.4f}, 重要性: {importance})"
    )

# 13. Reverse lookup: map important hash features back to candidate raw values
print("\n" + "=" * 80)
print("哈希特征反向追踪分析")
print("=" * 80)

# Top-10 hash features ranked by absolute weight
top_hash_features = weight_df_sorted[
    weight_df_sorted["特征"].str.startswith("hash_")
].head(10)

print("最重要的10个哈希特征及其可能的原始特征对应关系：")
print("-" * 80)


def _hash_bucket(value):
    """Return the hash bucket in [0, 2**16) for a raw categorical value.

    Uses the encoder's own hash function when it exposes one; otherwise
    falls back to md5 (category_encoders' default hash method). The md5
    fallback is deterministic across runs — unlike the builtin hash(),
    whose string hashing is salted per process (PYTHONHASHSEED), which
    would make the reported matches irreproducible.

    NOTE(review): this mirrors but may not byte-for-byte reproduce the
    encoder's internal hashing — treat matches as candidates, not proof.
    """
    if hasattr(hasher, "hash_function"):
        return hasher.hash_function(str(value)) % (2**16)
    digest = hashlib.md5(str(value).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2**16)


# For each important hash feature, list raw values that fall in its bucket
for _, row in top_hash_features.iterrows():
    hash_index = int(row["特征"].split("_")[1])
    print(f"\n哈希特征: {row['特征']} (权重: {row['权重']:.4f})")

    # Check each high-cardinality column's values against this bucket
    for col in high_card_cols:
        unique_values = df[col].dropna().unique()

        # Probe only the first 20 values per column to bound the cost
        matching_values = [
            val for val in unique_values[:20] if _hash_bucket(val) == hash_index
        ]

        if matching_values:
            print(f"  - {col}列可能包含: {matching_values[:5]}")  # at most 5 shown

# 14. More precise hash-feature analysis using the transformed training data
print("\n" + "-" * 80)
print("精确哈希特征分析（使用训练数据）")
print("-" * 80)

# Re-run preprocessing so we can inspect the actual hash-feature values
X_train_transformed = pipe.named_steps["prep"].transform(X_train)

# Hash features start immediately after the one-hot block, following the
# ColumnTransformer's output order (ohe -> hash -> num)
hash_start_idx = len(ohe_features)

# Drill into the 3 most important hash features
top_3_hash = top_hash_features.head(3)
for _, row in top_3_hash.iterrows():
    hash_index = int(row["特征"].split("_")[1])
    hash_col_idx = hash_start_idx + hash_index

    print(f"\n分析哈希特征: {row['特征']} (权重: {row['权重']:.4f})")

    # The ColumnTransformer may emit a scipy sparse matrix (OneHotEncoder
    # defaults to sparse output). A sparse column slice is a 2-D matrix, and
    # np.argsort on it would not yield valid row indices — densify first.
    column = X_train_transformed[:, hash_col_idx]
    if hasattr(column, "toarray"):
        column = column.toarray()
    hash_values = np.asarray(column).ravel()

    # Take the 5 samples with the highest value on this hash feature
    top_samples_idx = np.argsort(hash_values)[-5:]

    print("  对应的原始特征值（从高哈希值样本中提取）:")
    for sample_idx in top_samples_idx:
        original_sample = X_train.iloc[sample_idx]
        print(f"  样本 {sample_idx}:")
        for col in high_card_cols:
            print(f"    {col}: {original_sample[col]}")

# 15. Persist the fitted pipeline. joblib.dump raises FileNotFoundError if
# the target directory is missing, so create it first. (Section renumbered
# from the duplicated "13" in the original.)
os.makedirs("models", exist_ok=True)
joblib.dump(pipe, "models/filing_rate.pkl")
print("初始训练完成 → .pkl 文件已生成！")
