# 导入必要库
import warnings
from collections import Counter  # 用于模型融合的投票统计

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from catboost import CatBoostClassifier  # 集成学习模型
from flaml import AutoML  # 自动化机器学习工具
from lightgbm import LGBMClassifier  # 轻量级梯度提升机
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report  # 评估指标
from sklearn.model_selection import KFold  # 交叉验证工具
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline  # 管道工具（当前未直接使用）
from sklearn.preprocessing import LabelEncoder, StandardScaler  # 标签编码和标准化
from xgboost import XGBClassifier  # 极端梯度提升机

warnings.filterwarnings('ignore')  # silence library warnings globally

# Configure flaml's logging (avoid overly verbose training output)
import logging

# Only show WARNING and above from flaml's AutoML logger
logging.getLogger('flaml.automl.logger').setLevel(logging.WARNING)

# Load the data (train set, test set, sample submission)
train = pd.read_csv('./train.csv', index_col='id')  # training data (includes the target)
test = pd.read_csv('./test.csv', index_col='id')    # test data (to be predicted)
sub = pd.read_csv('./sample_submission.csv', index_col='id')  # submission format example


# Step 1: data overview (print basic dataset information)
def load_and_overview_data(train, test):
    """Print a quick overview of both datasets.

    Covers the train/test shapes, the training column list, the
    dtype/non-null summary, per-column missing-value counts, and the
    class distribution of the 'Personality' target.
    """
    print('Train shape:', train.shape)
    print('Test shape:', test.shape)
    print('\nTrain columns:', train.columns.tolist())
    # Each section: a header plus a deferred producer so nothing is
    # evaluated (or printed by pandas) before its header appears.
    sections = (
        ('\nTrain info:', lambda: train.info()),
        ('\nTrain missing values:', lambda: train.isnull().sum()),
        ('\nTrain target value counts:', lambda: train['Personality'].value_counts()),
    )
    for header, produce in sections:
        print(header)
        print(produce())


# Step 2: preprocessing (impute missing values, normalize categorical text)
def preprocess_data(train, test):
    """Impute missing values and normalize the categorical columns.

    Numeric columns are filled with the TRAINING-set median (computed
    from train only, so no test-set information leaks in).  Categorical
    columns are stripped, lower-cased, and missing entries become the
    literal token 'missing'.

    Fix: operates on copies of the inputs — the previous implementation
    mutated the caller's DataFrames in place as a hidden side effect.

    :param train: training DataFrame (includes the 'Personality' target)
    :param test: test DataFrame (no target column)
    :return: (processed train, processed test, fill_info) where fill_info
             records the medians and the categorical fill token so the
             imputation can be reproduced later.
    """
    # Work on copies so the caller's frames are left untouched.
    train = train.copy()
    test = test.copy()

    num_cols = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']  # numeric features
    cat_cols_train = ['Stage_fear', 'Drained_after_socializing', 'Personality']  # categorical (target included)
    cat_cols_test = ['Stage_fear', 'Drained_after_socializing']  # categorical (no target in test)

    # Numeric features: fill with training medians only (avoids leakage).
    num_medians = train[num_cols].median()
    train[num_cols] = train[num_cols].fillna(num_medians)
    test[num_cols] = test[num_cols].fillna(num_medians)

    # Categorical features: unify case/whitespace, then fill NaN with 'missing'.
    # astype(str) turns NaN into the string 'nan'; it is mapped back to a
    # real NaN before fillna so it also receives the 'missing' token.
    for col in cat_cols_train:
        train[col] = train[col].astype(str).str.strip().str.lower()
        train[col] = train[col].replace('nan', np.nan).fillna('missing')
    for col in cat_cols_test:
        test[col] = test[col].astype(str).str.strip().str.lower()
        test[col] = test[col].replace('nan', np.nan).fillna('missing')

    # Record the imputation choices for later verification.
    fill_info = {'num_medians': num_medians.to_dict(), 'cat_fill': 'missing'}
    return train, test, fill_info


# Step 3: feature engineering (scale numerics + one-hot encode categoricals)
def build_features(train, test, target_col='Personality'):
    """Build the model input matrices.

    Numeric columns are z-scored with a StandardScaler fitted on the
    training set only; the two categorical columns are one-hot encoded,
    and the test frame is re-aligned to the training layout (columns
    absent from test are filled with 0, test-only columns are dropped).

    :return: (X_train, y_train, X_test) as numpy arrays.
    """
    numeric = ['Time_spent_Alone', 'Social_event_attendance', 'Going_outside', 'Friends_circle_size', 'Post_frequency']
    categorical = ['Stage_fear', 'Drained_after_socializing']  # target excluded

    # Fit the scaler on train only so test statistics never leak in.
    scaler = StandardScaler().fit(train[numeric])
    scaled_train = scaler.transform(train[numeric])
    scaled_test = scaler.transform(test[numeric])

    # One-hot encode, then force the test columns to mirror train's.
    dummies_train = pd.get_dummies(train[categorical], prefix=categorical)
    dummies_test = pd.get_dummies(test[categorical], prefix=categorical)
    dummies_train, dummies_test = dummies_train.align(dummies_test, join='left', axis=1, fill_value=0)

    # Horizontally concatenate numeric and encoded categorical parts.
    X_train = np.hstack([scaled_train, dummies_train.values])
    X_test = np.hstack([scaled_test, dummies_test.values])
    return X_train, train[target_col].values, X_test


# -------------------- Data processing pipeline --------------------
# 1. Data overview (print basic information)
load_and_overview_data(train, test)

# 2. Preprocess (impute missing values)
train, test, fill_info = preprocess_data(train, test)

# 3. Feature engineering (build the model input matrices)
X_train, y_train, X_test = build_features(train, test, target_col='Personality')

# 4. Encode the target labels (text classes -> integer codes)
le = LabelEncoder()
y_train_enc = le.fit_transform(y_train)  # encoded training target


# -------------------- AutoML automated training --------------------
# Initialize FLAML AutoML (searches models and hyperparameters automatically)
automl = AutoML()
settings = {
    "time_budget": 120,  # search time budget in seconds
    "task": 'classification',  # task type
    "log_file_name": 'flaml.log',  # FLAML log file
    "metric": 'accuracy',  # optimization metric
    "estimator_list": ['lgbm', 'xgboost', 'rf', 'extra_tree'],  # candidate learners
}
automl.fit(X_train=X_train, y_train=y_train_enc, **settings)  # run the search

# Report the AutoML outcome
print('最佳模型：', automl.model)          # best model found
print('最佳参数：', automl.best_config)    # its hyperparameters
print('最佳分数：', automl.best_loss)      # best validation loss reported by FLAML

# Predict on the test set (decode back to the original string labels)
automl_preds_enc = automl.predict(X_test)
automl_preds = le.inverse_transform(automl_preds_enc)

# Training-set evaluation (in-sample score — a rough overfitting check)
automl_train_preds_enc = automl.predict(X_train)
automl_train_preds = le.inverse_transform(automl_train_preds_enc)
print('\n训练集评估：')
print(classification_report(y_train, automl_train_preds))  # precision/recall/F1 report
print('训练集准确率：', accuracy_score(y_train, automl_train_preds))  # accuracy

# Build the AutoML submission frame (capitalize labels to match the sample)
df_automl_pred = pd.DataFrame({'id': test.index, 'Personality': automl_preds})
df_automl_pred['Personality'] = df_automl_pred['Personality'].str.capitalize()
# df_automl_pred.to_csv('submission_automl.csv', index=False)


# -------------------- LGBM model (KFold cross-validation) --------------------
# Hand-tuned LGBM hyperparameters
lgbm_params = {
    'n_estimators': 200,    # number of boosting rounds
    'learning_rate': 0.05,  # learning rate
    'max_depth': 7,         # maximum tree depth
    'random_state': 42      # seed for reproducibility
}
model = LGBMClassifier(**lgbm_params)

# 5-fold cross-validation to estimate generalization
kf = KFold(n_splits=5, shuffle=True, random_state=42)  # 5 folds, shuffled
lgbm_oof_preds = np.zeros(len(X_train), dtype=int)     # out-of-fold class predictions
lgbm_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # out-of-fold probabilities
fold_scores = []  # per-fold accuracy

for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    # Split into this fold's train/validation parts
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    
    # Fit on the fold's train part, predict the held-out part
    model.fit(X_tr, y_tr)
    val_pred = model.predict(X_val)       # validation class predictions
    val_prob = model.predict_proba(X_val) # validation probabilities
    lgbm_oof_preds[val_idx] = val_pred    # store OOF predictions
    lgbm_oof_probs[val_idx] = val_prob    # store OOF probabilities
    acc = accuracy_score(y_val, val_pred) # this fold's accuracy
    print(f"Fold {fold+1} accuracy: {acc:.4f}")
    fold_scores.append(acc)

# Cross-validation summary
print("\n5折交叉验证平均准确率：", np.mean(fold_scores))
print("LGBM OOF分类报告：")
print(classification_report(y_train, le.inverse_transform(lgbm_oof_preds)))  # OOF vs ground truth

# Refit on all training data and predict the test set
model.fit(X_train, y_train_enc)
lgbm_preds_enc = model.predict(X_test)
lgbm_preds = le.inverse_transform(lgbm_preds_enc)  # decode to original labels

# Build the LGBM submission frame
df_lgbm_pred = pd.DataFrame({'id': test.index, 'Personality': lgbm_preds})
df_lgbm_pred['Personality'] = df_lgbm_pred['Personality'].str.capitalize()
# df_lgbm_pred.to_csv('submission_lgbm.csv', index=False)


# -------------------- XGBoost model (KFold cross-validation) --------------------
# XGBoost parameters (sklearn-compatible interface)
xgb_params = {
    'n_estimators': 200,
    'learning_rate': 0.05,
    'max_depth': 7,
    'random_state': 42,
    'use_label_encoder': False,  # NOTE(review): deprecated in recent xgboost releases — confirm against the installed version
    'eval_metric': 'mlogloss'    # multi-class log-loss as evaluation metric
}
xgb_model = XGBClassifier(**xgb_params)
xgb_oof_preds = np.zeros(len(X_train), dtype=int)           # out-of-fold class predictions
xgb_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # out-of-fold probabilities
xgb_fold_scores = []

# 5-fold cross-validation (same `kf` folds as the LGBM section)
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    xgb_model.fit(X_tr, y_tr)
    val_pred = xgb_model.predict(X_val)
    val_prob = xgb_model.predict_proba(X_val)
    xgb_oof_preds[val_idx] = val_pred
    xgb_oof_probs[val_idx] = val_prob
    acc = accuracy_score(y_val, val_pred)
    print(f"[XGB] Fold {fold+1} accuracy: {acc:.4f}")
    xgb_fold_scores.append(acc)

# Cross-validation summary
print("\n[XGB] 5折交叉验证平均准确率：", np.mean(xgb_fold_scores))
print("[XGB] OOF分类报告：")
print(classification_report(y_train, le.inverse_transform(xgb_oof_preds)))

# Refit on all training data and predict the test set
xgb_model.fit(X_train, y_train_enc)
xgb_preds_enc = xgb_model.predict(X_test)
xgb_preds = le.inverse_transform(xgb_preds_enc)

# Build the XGBoost submission frame
df_xgb_pred = pd.DataFrame({'id': test.index, 'Personality': xgb_preds})
df_xgb_pred['Personality'] = df_xgb_pred['Personality'].str.capitalize()
# df_xgb_pred.to_csv('submission_xgb.csv', index=False)


# -------------------- CatBoost model (KFold cross-validation) --------------------
# CatBoost parameters (it can consume raw categoricals natively, but the
# features here were already encoded upstream)
cat_params = {
    'iterations': 200,    # boosting rounds (CatBoost's n_estimators)
    'learning_rate': 0.05,
    'depth': 7,           # tree depth (CatBoost's max_depth)
    'random_seed': 42,    # seed for reproducibility
    'verbose': 0          # silence per-iteration training logs
}
cat_model = CatBoostClassifier(**cat_params)
cat_oof_preds = np.zeros(len(X_train), dtype=int)           # out-of-fold class predictions
cat_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # out-of-fold probabilities
cat_fold_scores = []

# 5-fold cross-validation (same `kf` folds as the LGBM section)
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    cat_model.fit(X_tr, y_tr)
    val_pred = cat_model.predict(X_val)
    val_prob = cat_model.predict_proba(X_val)
    cat_oof_preds[val_idx] = val_pred
    cat_oof_probs[val_idx] = val_prob
    acc = accuracy_score(y_val, val_pred)
    print(f"[CatBoost] Fold {fold+1} accuracy: {acc:.4f}")
    cat_fold_scores.append(acc)

# Cross-validation summary
print("\n[CatBoost] 5折交叉验证平均准确率：", np.mean(cat_fold_scores))
print("[CatBoost] OOF分类报告：")
print(classification_report(y_train, le.inverse_transform(cat_oof_preds)))

# Refit on all training data and predict the test set
cat_model.fit(X_train, y_train_enc)
cat_preds_enc = cat_model.predict(X_test)
cat_preds = le.inverse_transform(cat_preds_enc)

# Build the CatBoost submission frame
df_cat_pred = pd.DataFrame({'id': test.index, 'Personality': cat_preds})
df_cat_pred['Personality'] = df_cat_pred['Personality'].str.capitalize()
# df_cat_pred.to_csv('submission_catboost.csv', index=False)


# -------------------- ANN (MLP) model (KFold cross-validation) --------------------
ann_params = {
    'hidden_layer_sizes': (128, 64, 32, 16),  # four hidden layers, tapering widths
    'activation': 'relu',
    'solver': 'adam',
    'max_iter': 300,  # cap on training iterations
    'random_state': 42
}
ann_model = MLPClassifier(**ann_params)
ann_oof_preds = np.zeros(len(X_train), dtype=int)           # out-of-fold class predictions
ann_oof_probs = np.zeros((len(X_train), len(le.classes_)))  # out-of-fold probabilities
ann_fold_scores = []

# 5-fold cross-validation (same `kf` folds as the LGBM section)
for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
    X_tr, X_val = X_train[train_idx], X_train[val_idx]
    y_tr, y_val = y_train_enc[train_idx], y_train_enc[val_idx]
    ann_model.fit(X_tr, y_tr)
    val_pred = ann_model.predict(X_val)
    val_prob = ann_model.predict_proba(X_val)
    ann_oof_preds[val_idx] = val_pred
    ann_oof_probs[val_idx] = val_prob
    acc = accuracy_score(y_val, val_pred)
    print(f"[ANN] Fold {fold+1} accuracy: {acc:.4f}")
    ann_fold_scores.append(acc)

print("\n[ANN] 5折交叉验证平均准确率：", np.mean(ann_fold_scores))
print("[ANN] OOF分类报告：")
print(classification_report(y_train, le.inverse_transform(ann_oof_preds)))

# Refit on all training data and predict the test set
ann_model.fit(X_train, y_train_enc)
ann_preds_enc = ann_model.predict(X_test)
ann_preds = le.inverse_transform(ann_preds_enc)

# Build the ANN submission frame
df_ann_pred = pd.DataFrame({'id': test.index, 'Personality': ann_preds})
df_ann_pred['Personality'] = df_ann_pred['Personality'].str.capitalize()
# df_ann_pred.to_csv('submission_ann.csv', index=False)

# -------------------- Five-model stacking ensemble --------------------

# Level-1 probability outputs become the meta-model's input features.
# NOTE(review): the AutoML column block is produced by a model fitted on
# the FULL training set (see automl.fit above), so its train-side meta
# features are in-sample predictions rather than OOF — a leakage source
# relative to the four genuine OOF probability blocks; ideally the AutoML
# learner would be refit per fold.
oof_meta = np.column_stack([
    automl.model.predict_proba(X_train),
    lgbm_oof_probs,
    xgb_oof_probs,
    cat_oof_probs,
    ann_oof_probs
])
# Test-side meta features from each model refit on all training data.
meta_X_test = np.column_stack([
    automl.model.predict_proba(X_test),
    model.predict_proba(X_test),
    xgb_model.predict_proba(X_test),
    cat_model.predict_proba(X_test),
    ann_model.predict_proba(X_test)
])

# Level-2 model: logistic regression on the stacked probabilities
meta_model = LogisticRegression(max_iter=1000)
meta_model.fit(oof_meta, y_train_enc)
meta_preds_enc = meta_model.predict(meta_X_test)
meta_preds = le.inverse_transform(meta_preds_enc)

# Final submission — the only file actually written to disk
stacking_df = pd.DataFrame({'id': test.index, 'Personality': meta_preds})
stacking_df['Personality'] = stacking_df['Personality'].str.capitalize()
stacking_df.to_csv('submission.csv', index=False)



# -------------------- Prediction distribution comparison --------------------
# Print the test-set class distribution for every model plus the stacked
# ensemble, to eyeball how much the predictors disagree.
for header, preds in (
    ('\nAutoML测试集预测分布:', automl_preds),
    ('\nLGBM+KFold测试集预测分布:', lgbm_preds),
    ('\nXGBoost+KFold测试集预测分布:', xgb_preds),
    ('\nCatBoost+KFold测试集预测分布:', cat_preds),
    ('\nANN+KFold测试集预测分布:', ann_preds),
    ('\nStacking融合后测试集预测分布:', meta_preds),
):
    print(header)
    print(pd.Series(preds).value_counts())


