# model/trainer.py

import os
import joblib
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
import time

from data.feature_loader import FeatureLoader
from config.features import FEATURE_SETS, TARGET_COLUMN
from config import CONFIG


class ModelTrainer:
    """Train one LightGBM binary classifier per feature set.

    Each training run persists three artifacts under the configured output
    directories: the fitted model (joblib ``.pkl``), the exported feature
    table (CSV, including the target column), and a plain-text metrics report.
    """

    def __init__(self, feature_loader=None, model_output_dir=None, feature_output_dir=None):
        """Set up data access, output directories, and hyperparameters.

        Args:
            feature_loader: Source of training data; defaults to a new FeatureLoader.
            model_output_dir: Directory for saved models; falls back to CONFIG
                ``model.output_path`` or ``"models"``.
            feature_output_dir: Directory for feature CSVs and metric reports;
                falls back to CONFIG ``paths.feature_path`` or ``"features"``.
        """
        self.feature_loader = feature_loader or FeatureLoader()
        self.model_output_dir = model_output_dir or CONFIG.get("model", {}).get("output_path", "models")
        self.feature_output_dir = feature_output_dir or CONFIG.get("paths", {}).get("feature_path", "features")

        # Model hyperparameters, with sensible defaults when absent from CONFIG.
        self.n_estimators = CONFIG.get("model", {}).get("n_estimators", 100)
        self.learning_rate = CONFIG.get("model", {}).get("learning_rate", 0.1)

        # Ensure the model and feature output directories exist.
        os.makedirs(self.model_output_dir, exist_ok=True)
        os.makedirs(self.feature_output_dir, exist_ok=True)

    def train_single_feature_set(self, feature_keys, start_date=None, end_date=None, model_name="model"):
        """Train, evaluate, and persist one model on the given feature columns.

        Args:
            feature_keys: List of feature column names to train on.
            start_date: Optional start of the date range passed to the loader.
            end_date: Optional end of the date range passed to the loader.
            model_name: Basename used for all saved artifacts.

        Returns:
            dict with keys ``"model_path"``, ``"acc"``, ``"f1"``, ``"auc"``
            describing the trained model. (Backward-compatible addition:
            previous callers ignored the implicit ``None`` return.)

        Raises:
            ValueError: If any requested feature column is missing from the
                loaded data, or if no rows remain after dropping NaNs.
        """
        start_time = time.time()
        print(f"\n[Train] 🔧 模型名: {model_name}")
        print(f"[Train] ✅ 使用特征: {feature_keys}")

        # Load the raw training data for the requested date range.
        df = self.feature_loader.get_training_data(start_date, end_date)

        # Validate the requested feature columns (computed once, reused below).
        missing = [col for col in feature_keys if col not in df.columns]
        print(f"👉 原始数据量: {len(df)}")
        print(f"👉 字段列表: {df.columns.tolist()}")
        print(f"👉 缺失字段: {missing}")
        if missing:
            raise ValueError(f"[❌ Error] 缺失特征列: {missing}，请检查 FEATURE_SETS 配置是否正确")

        # Drop rows with NaN in any feature or in the target. Single pass:
        # the original code ran this dropna twice, which was redundant.
        df = df.dropna(subset=feature_keys + [TARGET_COLUMN])
        print("[DEBUG] dropna 之后样本数量:", len(df))
        print("[DEBUG] 正样本占比:", df[TARGET_COLUMN].mean())
        print("[DEBUG] 任意字段空值统计:")
        print(df[feature_keys + [TARGET_COLUMN]].isnull().sum())
        if df.empty:
            raise ValueError(f"[❌ Error] 训练数据为空，无法训练{model_name}模型")

        y = df[TARGET_COLUMN]
        X = df[feature_keys]
        print(f"[Data] 📊 样本总数: {len(df)}, 正样本占比: {y.mean():.2%}")

        # Export the full training data (including the target) for auditability.
        # NOTE(review): assumes "stock_code" and "add_date" always exist in the
        # loader output — a KeyError here means the loader schema changed.
        export_cols = ["stock_code", "add_date"] + feature_keys + [TARGET_COLUMN]
        feature_data_path = os.path.join(self.feature_output_dir, f"{model_name}_features.csv")
        df[export_cols].to_csv(feature_data_path, index=False)
        print(f"[Saved] 💾 特征数据保存至: {feature_data_path}")

        # Hold out 20% for validation; a fixed seed keeps runs reproducible.
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

        model = lgb.LGBMClassifier(n_estimators=self.n_estimators, learning_rate=self.learning_rate)
        model.fit(X_train, y_train)

        # Evaluate on the hold-out split. AUC uses the positive-class
        # probability, not the hard prediction.
        y_pred = model.predict(X_val)
        y_prob = model.predict_proba(X_val)[:, 1]
        acc = accuracy_score(y_val, y_pred)
        f1 = f1_score(y_val, y_pred)
        auc = roc_auc_score(y_val, y_prob)

        print(f"[Metrics] ✅ acc: {acc:.4f}, f1: {f1:.4f}, auc: {auc:.4f}")

        # Persist the metrics report. encoding is explicit because the report
        # contains non-ASCII text and the platform default (e.g. cp1252 on
        # Windows) would raise UnicodeEncodeError.
        metrics_path = os.path.join(self.feature_output_dir, f"{model_name}_metrics.txt")
        with open(metrics_path, "w", encoding="utf-8") as f:
            f.write(f"模型: {model_name}\n")
            f.write(f"样本数量: {len(df)}\n")
            f.write(f"正样本占比: {y.mean():.2%}\n")
            f.write(f"acc: {acc:.4f}\n")
            f.write(f"f1: {f1:.4f}\n")
            f.write(f"auc: {auc:.4f}\n")
        print(f"[Saved] 📄 评估报告保存至: {metrics_path}")

        # Persist the fitted model.
        model_path = os.path.join(self.model_output_dir, f"{model_name}.pkl")
        joblib.dump(model, model_path)
        print(f"[Saved] 💾 模型保存至: {model_path}")
        print(f"[Time] ⏱️ 耗时: {time.time() - start_time:.2f}s")

        return {"model_path": model_path, "acc": acc, "f1": f1, "auc": auc}

    def train_multiple_feature_sets(self, start_date=None, end_date=None):
        """Train one ``lgbm_<key>`` model per entry in FEATURE_SETS.

        Args:
            start_date: Optional start of the date range, forwarded to each run.
            end_date: Optional end of the date range, forwarded to each run.
        """
        for key, feature_keys in FEATURE_SETS.items():
            model_name = f"lgbm_{key}"
            self.train_single_feature_set(feature_keys, start_date, end_date, model_name)