# -*- coding: utf-8 -*-
import os
import json
import re
import math
import datetime as dt
from pathlib import Path
from typing import List, Tuple, Optional, Dict

import numpy as np
import pandas as pd
import lightgbm as lgb

from sklearn.model_selection import train_test_split
from sklearn.metrics import (
    accuracy_score, roc_auc_score, classification_report,
    precision_score, recall_score, f1_score, roc_curve, precision_recall_curve
)

import matplotlib.pyplot as plt
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# ================== 可配置区域 ==================
DATA_DIR = Path("/data/gongzhijia/data/processeddata")
MODEL_SAVE_DIR = Path("/data/gongzhijia/data/models/GBDT")
MODEL_SAVE_DIR.mkdir(parents=True, exist_ok=True)

SEED = 42
VAL_SIZE = 0.2

# 仅加载“昨天”的子目录（目录名含日期即可：YYYY-MM-DD / YYYYMMDD / YYYY_MM_DD）
YESTERDAY_ONLY = False

# LightGBM 训练总轮数与早停
NUM_BOOST_ROUND = 5000
EARLY_STOPPING_ROUNDS = 200

# 仅当概率 >= 阈值 判为正类；将会自动根据验证集重新搜索最优值
DEFAULT_THRESHOLD = 0.3

# 对象列转类别列的阈值（基数<=该值则保留为类别，否则剔除）
OBJECT_LOW_CARDINALITY = 64
# =================================================


# ----------------- 工具函数 -----------------
DATE_PATTERNS = (
    r'(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})',
    r'(?P<y>\d{4})_(?P<m>\d{2})_(?P<d>\d{2})',
    r'(?P<y>\d{4})(?P<m>\d{2})(?P<d>\d{2})',
)

def _parse_date_from_name(name: str) -> Optional[dt.date]:
    for pat in DATE_PATTERNS:
        m = re.search(pat, name)
        if m:
            try:
                return dt.date(int(m['y']), int(m['m']), int(m['d']))
            except ValueError:
                return None
    return None

def _yesterday() -> dt.date:
    return (dt.datetime.now()).date() - dt.timedelta(days=1)

def _list_folders(data_dir: Path, yesterday_only: bool) -> List[Path]:
    """Return the sub-directories of *data_dir* to load.

    When *yesterday_only* is set, keep only directories whose name embeds
    yesterday's date; fall back to every directory (with a warning) when
    none qualifies.
    """
    folders = [entry for entry in data_dir.iterdir() if entry.is_dir()]
    if not yesterday_only:
        return folders

    target = _yesterday()
    # None == date is always False, so unparseable names are filtered out here.
    matched = [f for f in folders if _parse_date_from_name(f.name) == target]
    if not matched:
        print(f"[WARN] 未找到“昨天”({target.isoformat()})对应目录，改为加载全部目录。")
        return folders
    print(f"[INFO] 仅加载‘昨天’({target.isoformat()})的目录: {[x.name for x in matched]}")
    return matched

def _memory_optimize_numeric(df: pd.DataFrame) -> pd.DataFrame:
    """对数值列做降精度（int→最小整型，float→float32），减少内存与 IO。"""
    for c in df.select_dtypes(include=["int", "int32", "int64", "uint8", "uint16", "uint32"]).columns:
        df[c] = pd.to_numeric(df[c], downcast="integer")
    for c in df.select_dtypes(include=["float", "float64"]).columns:
        df[c] = pd.to_numeric(df[c], downcast="float")
    for c in df.select_dtypes(include=["bool"]).columns:
        df[c] = df[c].astype("int8")
    return df

def _select_and_encode_features(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[str], List[str], List[str]]:
    """Split features from *df* and encode them for LightGBM.

    - numeric columns: kept as-is (later downcast by _memory_optimize_numeric)
    - object columns: kept as ``category`` when their cardinality is at most
      OBJECT_LOW_CARDINALITY, otherwise dropped
    - missing values: numeric -> 0, categorical -> an explicit '__MISSING__' level

    Returns (X, numeric_cols, categorical_cols, dropped_object_cols).
    Raises ValueError when no 'label' column is present.
    """
    if 'label' not in df.columns:
        raise ValueError("数据集中缺少 label 列。")

    # Split features from the target.
    X = df.drop(columns=['label']).copy()

    # Numeric columns (bool included so flags survive selection).
    numeric_cols = X.select_dtypes(include=['number', 'bool']).columns.tolist()
    # Everything else is treated as an object/string column.
    object_cols = [c for c in X.columns if c not in numeric_cols]

    categorical_cols, dropped_obj_cols = [], []
    for c in object_cols:
        nunique = X[c].nunique(dropna=True)
        if nunique <= OBJECT_LOW_CARDINALITY:
            X[c] = X[c].astype("category")
            categorical_cols.append(c)
        else:
            dropped_obj_cols.append(c)

    if dropped_obj_cols:
        print(f"[INFO] 丢弃高基数对象列({len(dropped_obj_cols)}): {dropped_obj_cols[:10]}{' ...' if len(dropped_obj_cols)>10 else ''}")
        X = X.drop(columns=dropped_obj_cols)

    # Fill missing values: numeric -> 0, categorical -> dedicated missing level.
    for c in X.columns:
        if pd.api.types.is_numeric_dtype(X[c]):
            X[c] = X[c].fillna(0)
        elif isinstance(X[c].dtype, pd.CategoricalDtype):
            # isinstance replaces pd.api.types.is_categorical_dtype, which is
            # deprecated since pandas 2.1 and will be removed.
            X[c] = X[c].cat.add_categories(['__MISSING__']).fillna('__MISSING__')

    X = _memory_optimize_numeric(X)
    return X, numeric_cols, categorical_cols, dropped_obj_cols

def _compute_scale_pos_weight(y: np.ndarray) -> float:
    pos = (y == 1).sum()
    neg = (y == 0).sum()
    if pos == 0:
        return 1.0
    return max(1.0, neg / max(1, pos))

def _find_best_threshold(y_true: np.ndarray, y_prob: np.ndarray) -> Dict[str, float]:
    """Scan 99 evenly spaced cut-offs in [0.01, 0.99] and report the best per
    criterion: F1, Youden's J (sensitivity + specificity - 1) and accuracy."""
    best_f1 = (0.0, DEFAULT_THRESHOLD)
    best_j = (float("-inf"), DEFAULT_THRESHOLD)
    best_acc = (0.0, DEFAULT_THRESHOLD)

    total = max(1, len(y_true))
    for cut in np.linspace(0.01, 0.99, 99):
        pred = (y_prob >= cut).astype(int)
        tp = ((y_true == 1) & (pred == 1)).sum()
        tn = ((y_true == 0) & (pred == 0)).sum()
        fp = ((y_true == 0) & (pred == 1)).sum()
        fn = ((y_true == 1) & (pred == 0)).sum()

        # Epsilon guards against zero denominators.
        precision = tp / (tp + fp + 1e-9)
        recall = tp / (tp + fn + 1e-9)
        f1 = 2 * precision * recall / (precision + recall + 1e-9)
        acc = (tp + tn) / total
        specificity = tn / (tn + fp + 1e-9)
        j = recall + specificity - 1

        if f1 > best_f1[0]:
            best_f1 = (f1, cut)
        if j > best_j[0]:
            best_j = (j, cut)
        if acc > best_acc[0]:
            best_acc = (acc, cut)

    return {
        "best_threshold_f1": float(best_f1[1]),
        "best_f1": float(best_f1[0]),
        "best_threshold_youden": float(best_j[1]),
        "best_youden": float(best_j[0]),
        "best_threshold_acc": float(best_acc[1]),
        "best_acc": float(best_acc[0]),
    }

# ----------------- 数据加载 -----------------
def load_all_csv_files(data_dir: Path, yesterday_only: bool = False) -> pd.DataFrame:
    """
    Load and concatenate every CSV under the selected sub-directories of
    *data_dir* (optionally only "yesterday"'s sub-directory).

    - empty or unreadable files are skipped with a log line
    - frames are aligned on the union of their columns (missing cells -> NaN)

    Raises ValueError when no readable CSV is found.
    """
    folders = _list_folders(data_dir, yesterday_only)
    all_frames: List[pd.DataFrame] = []

    for folder in folders:
        for csv_path in folder.rglob("*.csv"):
            try:
                df = pd.read_csv(csv_path)
                if df is None or df.empty:
                    print(f"[INFO] 跳过空文件: {csv_path}")
                    continue
                all_frames.append(df)
                print(f"[OK] 读取: {csv_path}  rows={len(df)}")
            except Exception as e:
                print(f"[ERR] 读取失败: {csv_path}  error={e}")

    if not all_frames:
        raise ValueError("没有找到任何CSV文件")

    # Union of all columns in first-seen order. A dict preserves insertion
    # order, unlike the previous set-based union whose iteration order varied
    # between runs and made the combined column order non-deterministic.
    col_order: Dict[str, None] = {}
    for df in all_frames:
        for c in df.columns:
            col_order.setdefault(c, None)
    ordered_cols = list(col_order)

    # reindex adds the missing columns as NaN without mutating the originals.
    aligned = [df.reindex(columns=ordered_cols) for df in all_frames]

    combined_df = pd.concat(aligned, ignore_index=True)
    print(f"[INFO] 合并完成，总行数: {len(combined_df)}, 列数: {combined_df.shape[1]}")
    return combined_df


# ----------------- 训练与评估 -----------------
def train_gbdt_model(data_df: pd.DataFrame) -> Tuple[lgb.Booster, Dict]:
    """Train a LightGBM binary classifier on *data_df* and evaluate it.

    Pipeline: replace infs, encode features, stratified train/val split,
    train with early stopping on validation AUC, search the best probability
    threshold on the validation set, and write feature-importance artifacts
    into MODEL_SAVE_DIR.

    Returns (booster, meta) where *meta* is a JSON-serializable summary of
    parameters, metrics, thresholds and feature lists.
    Raises ValueError when the 'label' column is missing.
    """
    if 'label' not in data_df.columns:
        raise ValueError("数据集中缺少 label 列。")

    # Replace +/-inf with NaN so later fillna/encoding can handle them.
    data_df = data_df.replace([np.inf, -np.inf], np.nan)

    # Feature selection and encoding (numeric kept, low-cardinality objects
    # become categories, high-cardinality objects are dropped).
    X, numeric_cols, categorical_cols, dropped_obj_cols = _select_and_encode_features(data_df)
    y = data_df['label'].astype(int).values

    # Stratified train/validation split.
    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=VAL_SIZE, random_state=SEED, stratify=y
    )
    print(f"[INFO] 训练集: {X_train.shape}, 验证集: {X_val.shape}, 正类占比(总体)={y.mean():.4f}")

    # Automatic class weighting for the imbalanced positive class.
    spw = _compute_scale_pos_weight(y_train)
    print(f"[INFO] scale_pos_weight={spw:.3f}")

    # LightGBM parameters.
    params = {
        'objective': 'binary',
        'metric': 'auc',
        'boosting_type': 'gbdt',
        'num_leaves': 63,
        'max_depth': -1,                # let LightGBM decide the depth
        'learning_rate': 0.05,          # faster convergence, paired with early stopping
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 1,
        'verbose': -1,
        'scale_pos_weight': spw,
        'lambda_l1': 0.0,
        'lambda_l2': 0.0,
        'min_data_in_leaf': 20,
        'min_sum_hessian_in_leaf': 1e-3,
        'deterministic': True,
        'force_row_wise': True,         # faster and more stable on small tables
        'seed': SEED,
    }

    # Build datasets, flagging categorical columns by positional index.
    cat_idx = [X_train.columns.get_loc(c) for c in categorical_cols]
    train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=cat_idx or 'auto', free_raw_data=False)
    val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=cat_idx or 'auto', reference=train_data, free_raw_data=False)

    print("[INFO] 开始训练 LightGBM ...")
    model = lgb.train(
        params,
        train_data,
        num_boost_round=NUM_BOOST_ROUND,
        valid_sets=[train_data, val_data],
        valid_names=['train', 'valid'],
        callbacks=[
            lgb.early_stopping(stopping_rounds=EARLY_STOPPING_ROUNDS, verbose=False),
            lgb.log_evaluation(period=100)
        ]
    )

    # best_iteration is None when early stopping never fired; fall back to
    # the last trained iteration.
    best_iter = model.best_iteration or model.current_iteration()
    y_prob = model.predict(X_val, num_iteration=best_iter)
    auc = roc_auc_score(y_val, y_prob)

    # Search the best decision threshold on validation probabilities.
    th_info = _find_best_threshold(y_val, y_prob)
    best_threshold = th_info["best_threshold_f1"]

    y_pred_bin = (y_prob >= best_threshold).astype(int)
    accuracy = accuracy_score(y_val, y_pred_bin)
    precision = precision_score(y_val, y_pred_bin, zero_division=0)
    recall = recall_score(y_val, y_pred_bin, zero_division=0)
    f1 = f1_score(y_val, y_pred_bin, zero_division=0)

    print("\n===== 验证集评估 =====")
    print(f"AUC: {auc:.4f}")
    print(f"使用阈值(最佳F1)={best_threshold:.3f} -> Acc={accuracy:.4f}  Prec={precision:.4f}  Rec={recall:.4f}  F1={f1:.4f}")
    print("分类报告：")
    print(classification_report(y_val, y_pred_bin, digits=4, zero_division=0))

    # Feature importances (both gain and split counts), saved as CSV.
    fi = pd.DataFrame({
        'feature': X_train.columns,
        'gain_importance': model.feature_importance(importance_type='gain', iteration=best_iter),
        'split_importance': model.feature_importance(importance_type='split', iteration=best_iter),
    }).sort_values('gain_importance', ascending=False)
    fi_path_csv = MODEL_SAVE_DIR / "feature_importance.csv"
    fi.to_csv(fi_path_csv, index=False)
    print(f"[INFO] 已保存特征重要性 CSV: {fi_path_csv}")

    # Save the importance plot (best-effort: plotting failures are non-fatal).
    try:
        ax = lgb.plot_importance(model, max_num_features=20, importance_type='gain')
        plt.title("特征重要性（Gain）")
        fig_path = MODEL_SAVE_DIR / "feature_importance.png"
        plt.tight_layout()
        plt.savefig(fig_path, dpi=160)
        plt.close()
        print(f"[INFO] 已保存特征重要性图: {fig_path}")
    except Exception as e:
        print(f"[WARN] 绘制特征重要性失败: {e}")

    # Collect run metadata for the JSON sidecar file.
    meta = {
        "time": dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "seed": SEED,
        "val_size": VAL_SIZE,
        "params": params,
        "num_boost_round": NUM_BOOST_ROUND,
        "early_stopping_rounds": EARLY_STOPPING_ROUNDS,
        "best_iteration": int(best_iter),
        "metrics": {
            "auc": float(auc),
            "accuracy": float(accuracy),
            "precision": float(precision),
            "recall": float(recall),
            "f1": float(f1),
        },
        "thresholds": th_info,  # best thresholds under F1 / Youden / accuracy
        "features": {
            "all": X_train.columns.tolist(),
            "categorical": categorical_cols,
            "dropped_object_cols": dropped_obj_cols,
        },
    }

    return model, meta


# ----------------- 模型保存 -----------------
def save_model_and_meta(model: lgb.Booster, meta: Dict, save_dir: Path) -> Tuple[Path, Path]:
    """Persist the booster and its metadata side by side, timestamped.

    Returns (model_path, meta_path).
    """
    save_dir.mkdir(parents=True, exist_ok=True)
    stamp = dt.datetime.now().strftime("%Y%m%d_%H%M%S")
    model_path = save_dir / f"gbdt_model_{stamp}.txt"
    meta_path = save_dir / f"metadata_{stamp}.json"

    model.save_model(str(model_path), num_iteration=model.best_iteration)
    meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2), encoding="utf-8")

    print(f"[INFO] 模型已保存: {model_path}")
    print(f"[INFO] 元数据已保存: {meta_path}")
    return model_path, meta_path


# ----------------- 主流程 -----------------
if __name__ == "__main__":
    try:
        print("[INFO] 开始加载数据 ...")
        frame = load_all_csv_files(DATA_DIR, yesterday_only=YESTERDAY_ONLY)

        # Sanity checks before training.
        if 'label' not in frame.columns:
            raise ValueError("数据缺少 label 列。")
        label_counts = frame['label'].value_counts(dropna=False)
        print(f"[INFO] 标签分布:\n{label_counts}")

        # Train, then persist both the booster and its metadata.
        booster, meta = train_gbdt_model(frame)
        model_path, meta_path = save_model_and_meta(booster, meta, MODEL_SAVE_DIR)
        print(f"[DONE] 模型训练与保存完成。\nModel: {model_path}\nMeta : {meta_path}")

    except Exception as exc:
        import traceback
        print(f"[FATAL] 程序执行出错: {exc}")
        traceback.print_exc()