import os
import numpy as np
import pandas as pd
import traceback
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
import trainer.utils as utils
from typing import Dict, Tuple, Optional, Any
from trainer.model_train.common_train import BaseModelTrainer


class FocalLoss(nn.Module):
    """Focal loss for binary classification over sigmoid probabilities.

    Down-weights easy examples via the (1 - p_t) ** gamma modulating factor
    (Lin et al., "Focal Loss for Dense Object Detection").

    Args:
        gamma: Focusing exponent; gamma=0 reduces to plain BCE.
        alpha: Optional per-class weight tensor indexed by the integer class
            label (alpha[0] for negatives, alpha[1] for positives).
        reduction: 'mean', 'sum', or anything else for element-wise output.
    """
    def __init__(self, gamma=2, alpha=None, reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, input, target):
        # Functional BCE avoids constructing a new nn.BCELoss module object
        # on every forward call (the original instantiated one per call).
        ce_loss = nn.functional.binary_cross_entropy(input, target, reduction='none')
        # pt is the predicted probability of the true class: exp(-ce) == p_t.
        pt = torch.exp(-ce_loss)
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        if self.alpha is not None:
            # Class-balance weight selected by the hard 0/1 label.
            alpha_t = self.alpha[target.long()]
            focal_loss = alpha_t * focal_loss
        if self.reduction == 'mean':
            return focal_loss.mean()
        elif self.reduction == 'sum':
            return focal_loss.sum()
        else:
            return focal_loss

class LSTMModel(nn.Module):
    """LSTM core model (extra regularization + feature-extraction head).

    Maps a (batch, seq_len, input_size) tensor to a (batch, 1) probability.
    Submodule attribute names are part of the checkpoint contract (state_dict
    keys) and must not be renamed.
    """
    def __init__(self, input_size: int, hidden_size: int, num_layers: int, dropout: float = 0.3):
        super().__init__()
        # Inter-layer dropout only applies to stacked LSTMs; PyTorch warns otherwise.
        inter_layer_dropout = dropout if num_layers > 1 else 0.0
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=inter_layer_dropout,
        )
        self.layer_norm = nn.LayerNorm(hidden_size)
        self.fc1 = nn.Linear(hidden_size, hidden_size // 2)
        self.fc2 = nn.Linear(hidden_size // 2, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the LSTM and classify from the final time step's representation."""
        sequence_out, _ = self.lstm(x)
        hidden = self.layer_norm(sequence_out[:, -1, :])
        hidden = self.dropout(self.relu(self.fc1(hidden)))
        return self.sigmoid(self.fc2(hidden))

class LSTMModelTrainer(BaseModelTrainer):
    """LSTM模型训练器(支持完全续训+训练过程数据保存)"""    
    def __init__(self, label_col: str, data_dict: Dict,load_version: str = None,run_type: str = 'train'):
        """Set up trainer state, fixed hyperparameters and resume-training slots.

        Args:
            label_col: Target label column this trainer predicts.
            data_dict: Pre-split data bundle (X/y train/val, feature_cols, threshold).
            load_version: Historical version to resume from; None means fresh training.
            run_type: Run mode forwarded to BaseModelTrainer (default 'train').
        """
        super().__init__(label_col, data_dict, model_type="lstm",load_version=load_version,run_type=run_type)
        self.feat_df = None
        self.X_train = None
        self.X_val = None
        self.y_train = None
        self.y_val = None
        self.feature_cols = None
        self.label_threshold = None
        
        self.models = {}
        self.model_metrics = {}
        self.torch_device = utils.init_torch_device()
        
        # Initialize resume-training related storage
        self.train_process_info["training_info"] = self.train_process_info.get("training_info", {})  # intermediate data produced during training
        self.train_process_info["model_type"] = "lstm"  # model-type tag, validated when resuming
        self.train_process_info["training_history"] = self.train_process_info.get("training_history", [])  # per-epoch loss records
        self.train_process_info["checkpoint_info"] = self.train_process_info.get("checkpoint_info", {})  # checkpoint metadata
        
        # Fixed hyperparameters (managed in one place)
        self.fixed_lstm_params = {
            "hidden_size": 128,
            "num_layers": 2,
            "dropout": 0.3,
            "seq_len": 15,
            "batch_size": 64,
            "learning_rate": 0.0003,
            "weight_decay": 1e-4,
            "epochs": 200,
            "early_stopping_rounds": 35
        }
        # Mirror onto instance attributes (kept for backward compatibility)
        self.hidden_size = self.fixed_lstm_params["hidden_size"]
        self.num_layers = self.fixed_lstm_params["num_layers"]
        self.dropout = self.fixed_lstm_params["dropout"]
        self.seq_len = self.fixed_lstm_params["seq_len"]
        self.batch_size = self.fixed_lstm_params["batch_size"]
        self.learning_rate = self.fixed_lstm_params["learning_rate"]
        self.weight_decay = self.fixed_lstm_params["weight_decay"]
        self.epochs = self.fixed_lstm_params["epochs"]
        self.early_stop_rounds = self.fixed_lstm_params["early_stopping_rounds"]
        
        # Resume-training state (populated by _load_model when a checkpoint exists)
        self.loaded_optimizer_state = None
        self.loaded_scheduler_state = None
        self.loaded_epoch = 0
        self.loaded_best_val_loss = float("inf")
        self.loaded_early_stop_counter = 0
        self.loaded_training_history = []
        
        # Log the hyperparameters
        logger.info(f"[{self.label_col}]LSTM固定超参数: seq_len={self.seq_len}, hidden_size={self.hidden_size}, "
                   f"batch_size={self.batch_size}, lr={self.learning_rate}")

    def _load_model(self) -> bool:
        """Load the model (adapted for resume-training; validates parameters and restores optimizer state).

        Returns:
            True when a checkpoint was found and loaded into self.model;
            False when no model file exists or loading failed (fresh training).
        """
        try:
            model_path = self.train_process_info.get("model_path", "")
            if not model_path or not os.path.exists(model_path):
                # Fall back to scanning the process dir for a matching .pth file.
                model_files = [f for f in os.listdir(self.model_process_dir) if f.endswith(".pth") and f.startswith(f"lstm_{self.label_col}")]
                if model_files:
                    model_path = os.path.join(self.model_process_dir, model_files[0])
                else:
                    logger.warning(f"[{self.label_col}]未找到LSTM模型文件, 将重新训练")
                    return False
            
            # Resume-training check: model-type consistency.
            # NOTE(review): this ValueError is caught by the except below and
            # becomes a False return (fresh training) instead of propagating.
            if self.train_process_info.get("model_type") != "lstm":
                raise ValueError(f"历史参数属于{self.train_process_info.get('model_type')}, 与LSTM不兼容")
            
            # Determine input dimension (prefer the historical feature info).
            if self.check_version_exists and "feature_info" in self.train_process_info:
                input_size = len(self.train_process_info["feature_info"].get("feature_columns", []))
            else:
                # NOTE(review): the hasattr check is redundant — self.X_train is
                # already accessed in the same expression; 50 is a fallback guess.
                input_size = self.X_train.shape[1] if (self.X_train is not None and hasattr(self, 'X_train')) else 50
            
            # Initialize the model from historical params (keeps resumed structure identical).
            saved_params = self.train_process_info.get("model_params", self.fixed_lstm_params)
            model = LSTMModel(
                input_size=input_size,
                hidden_size=saved_params["hidden_size"],
                num_layers=saved_params["num_layers"],
                dropout=saved_params["dropout"]
            ).to(self.torch_device)
            
            # Load model weights and optimizer state (supports resume-training).
            # NOTE(review): torch.load without weights_only=True unpickles
            # arbitrary objects — only load checkpoints from trusted paths.
            checkpoint = torch.load(model_path, map_location=self.torch_device)
            if "model_state_dict" in checkpoint:
                model.load_state_dict(checkpoint["model_state_dict"])
            else:
                model.load_state_dict(checkpoint)  # backward-compatible with old model files
            
            self.model = model
            logger.info(f"[{self.label_col}]LSTM模型加载成功: {model_path}")
            
            # Stash optimizer state / training progress for resume-training.
            self.loaded_optimizer_state = checkpoint.get("optimizer_state_dict", None)
            self.loaded_scheduler_state = checkpoint.get("scheduler_state_dict", None)
            self.loaded_epoch = checkpoint.get("epoch", 0)  # epochs already trained
            self.loaded_best_val_loss = checkpoint.get("best_val_loss", float("inf"))
            self.loaded_early_stop_counter = checkpoint.get("early_stop_counter", 0)
            self.loaded_training_history = checkpoint.get("training_history", [])
            
            # Sync training history back into train_process_info.
            if self.loaded_training_history:
                self.train_process_info["training_history"] = self.loaded_training_history
                self._save_train_process_info()
            
            logger.info(f"[{self.label_col}]续训信息: 已训练轮次={self.loaded_epoch}, 最佳验证损失={self.loaded_best_val_loss:.4f}, "
                       f"早停计数器={self.loaded_early_stop_counter}")
            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]加载LSTM模型失败: {str(e)}")
            self.model = None
            return False

    def _init_data(self) -> bool:
        """Initialize data (adds feature-column consistency checks for resume-training).

        Returns:
            True on success, False on any failure (the error is logged).
        """
        try:
            self.X_train = self.data_dict["X_train"]
            self.X_val = self.data_dict["X_val"]
            self.y_train = self.data_dict["y_train"]
            self.y_val = self.data_dict["y_val"]
            self.feature_cols = self.data_dict["feature_cols"]
            self.label_threshold = self.data_dict["threshold"]
            
            # Resume only: validate feature consistency + align to historical order
            # (skipped for fresh training)
            if self.check_version_exists:
                saved_feat_cols = self.train_process_info["feature_info"].get("feature_columns", [])
                logger.info(f'saved_feat_cols:{saved_feat_cols}')
                logger.info(f'当前原始特征列:{self.feature_cols}')

                # Validate count and name consistency (original logic unchanged)
                if len(saved_feat_cols) != len(self.feature_cols):
                    raise ValueError(f"__init_data(): 特征列不一致！历史{len(saved_feat_cols)}个, 当前{len(self.feature_cols)}个")
                
                if set(saved_feat_cols) != set(self.feature_cols):
                    missing_in_current = set(saved_feat_cols) - set(self.feature_cols)
                    missing_in_saved = set(self.feature_cols) - set(saved_feat_cols)
                    raise ValueError(
                        f"特征列名不一致！历史有但当前没有: {missing_in_current}\n"
                        f"当前有但历史没有: {missing_in_saved}"
                    )

                # Resume only: re-order the name list to the historical order.
                # NOTE(review): this reorders only the list of names, not the
                # columns of X_train/X_val themselves — confirm upstream arrays
                # already arrive in historical column order.
                self.feature_cols = [col for col in saved_feat_cols if col in self.feature_cols]
                logger.info(f"续训场景 - 特征列顺序已对齐：按历史顺序排列，共{len(self.feature_cols)}个特征")
            else:
                # Fresh training: keep the original feature columns untouched
                logger.info(f"非续训场景 - 保留原始特征列：共{len(self.feature_cols)}个特征")
            
            logger.info(f"[{self.label_col}]数据形状: X_train={self.X_train.shape}, X_val={self.X_val.shape}")
            logger.info(f"[{self.label_col}]标签阈值: {self.label_threshold}")

            # Compute and persist data statistics (for resume-training traceability)
            train_pos_count = int(np.sum(self.y_train == 1))
            train_neg_count = int(np.sum(self.y_train == 0))
            val_pos_count = int(np.sum(self.y_val == 1))
            val_neg_count = int(np.sum(self.y_val == 0))
            data_stats = {
                "train_set": {
                    "sample_count": len(self.X_train),
                    "feature_count": len(self.feature_cols),
                    "positive_count": train_pos_count,
                    "negative_count": train_neg_count,
                    "positive_ratio": round(train_pos_count / len(self.y_train), 4) if len(self.y_train) else 0.0
                },
                "val_set": {
                    "sample_count": len(self.X_val),
                    "feature_count": len(self.feature_cols),
                    "positive_count": val_pos_count,
                    "negative_count": val_neg_count,
                    "positive_ratio": round(val_pos_count / len(self.y_val), 4) if len(self.y_val) else 0.0
                }
            }
            self._save_data_statistics(data_stats)
            self._save_feature_columns(self.feature_cols)

            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]数据初始化失败: {str(e)}")
            return False

    def _detrend_sequence(self, X: np.ndarray) -> np.ndarray:
        """时序去趋势, 减少噪声"""
        X_detrend = []
        for seq in X:
            seq_mean = seq.mean(axis=0, keepdims=True)
            X_detrend.append(seq - seq_mean)
        return np.array(X_detrend)

    def _create_sequences(self, X: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """构建时序序列"""
        sequences = []
        targets = []
        for i in range(len(X) - self.seq_len):
            seq = X[i:i + self.seq_len]
            target = y[i + self.seq_len]
            sequences.append(seq)
            targets.append(target)
        return np.array(sequences), np.array(targets)

    def _check_label_distribution(self):
        """Log class balance of the train/val labels; warn when a split has only one class."""
        for split_name, labels in (("训练集", self.y_train), ("验证集", self.y_val)):
            counts = np.bincount(labels)
            if len(counts) < 2:
                logger.warning(f"[{self.label_col}]{split_name}标签仅含1个类别！")
            else:
                pos_ratio = counts[1] / len(labels)
                logger.info(f"[{self.label_col}]{split_name}正样本占比: {pos_ratio:.2%}")

    def find_optimal_threshold(self, y_true: np.ndarray, y_pred_proba: np.ndarray) -> Tuple[float, float]:
        """Scan thresholds in [0.3, 0.7) and return the (threshold, F1) pair with the best F1.

        Ties keep the first (lowest) threshold, matching argmax semantics.
        """
        candidates = np.arange(0.3, 0.7, 0.01)
        best_thresh, best_f1 = candidates[0], -1.0
        for thresh in candidates:
            preds = np.where(y_pred_proba >= thresh, 1, 0)
            score = f1_score(y_true, preds, average="binary")
            if score > best_f1:
                best_thresh, best_f1 = thresh, score
        logger.info(f"最优F1阈值: {best_thresh:.4f}(F1分数: {best_f1:.4f})")
        return best_thresh, best_f1

    def evaluate_model(self, y_true: np.ndarray, y_pred: np.ndarray, y_pred_proba: np.ndarray) -> dict:
        """Compute binary-classification metrics (AUC/F1/precision/recall).

        Args:
            y_true: Ground-truth 0/1 labels.
            y_pred: Hard predictions after thresholding.
            y_pred_proba: Predicted probabilities (used for AUC).

        Returns:
            Dict of rounded metrics; AUC falls back to 0.5 when y_true contains
            a single class (ROC-AUC is undefined there). Returns {} on any
            exception — NOTE(review): callers indexing metrics['auc'] (see
            train()) would then raise KeyError; confirm this is intended.
        """
        try:
            auc = roc_auc_score(y_true, y_pred_proba) if len(np.unique(y_true)) > 1 else 0.5
            return {
                "auc": round(auc, 4),
                "f1": round(f1_score(y_true, y_pred, average="binary"), 4),
                "precision": round(precision_score(y_true, y_pred, average="binary"), 4),
                "recall": round(recall_score(y_true, y_pred, average="binary"), 4)
            }
        except Exception as e:
            logger.error(f"[{self.label_col}]评估失败: {str(e)}")
            return {}

    def train(self) -> dict:
        """Core training logic (full resume-training support + saving training-process data).

        Returns:
            dict with "status" ("success"/"fail") and "label_col"; on success
            also metrics, device, version, model_path, total_epochs; on
            failure an "error" message.
        """
        logger.info(f"开始训练[LSTM]- 目标标签: {self.label_col}, 实际设备: {self.torch_device}")
        self.train_process_info["status"] = "running"
        self.train_process_info["device"] = str(self.torch_device)
        self._save_train_process_info()
        
        try:
            # 1. Initialize data
            if not self._init_data():
                self.train_process_info["status"] = "fail"
                self.train_process_info["error"] = "数据初始化失败"
                self._save_train_process_info()
                return {"status": "fail", "label_col": self.label_col, "error": "数据初始化失败"}

            # 2. Label processing: binarize continuous labels at the configured threshold
            self.y_train = np.where(self.y_train >= self.label_threshold, 1, 0)
            self.y_val = np.where(self.y_val >= self.label_threshold, 1, 0)

            # NOTE(review): pos_weight is computed and logged but never passed
            # to the loss (FocalLoss below is built without alpha) — confirm intent.
            pos_count = len(self.y_train[self.y_train == 1])
            neg_count = len(self.y_train[self.y_train == 0])
            pos_weight = neg_count / pos_count if pos_count > 0 else 1.0
            logger.info(f"[{self.label_col}]类别权重: 正样本数={pos_count}, 权重={pos_weight:.2f}")
            
            # 3. Build time-series sequences
            logger.info(f"构建时序序列(seq_len={self.seq_len})...")
            X_train_seq, y_train_seq = self._create_sequences(self.X_train, self.y_train)
            X_val_seq, y_val_seq = self._create_sequences(self.X_val, self.y_val)
            
            logger.info(f"训练集序列形状: {X_train_seq.shape}, 标签形状: {y_train_seq.shape}")
            logger.info(f"验证集序列形状: {X_val_seq.shape}, 标签形状: {y_val_seq.shape}")
            
            # 4. Build tensors
            X_train_tensor = torch.FloatTensor(X_train_seq).to(self.torch_device)
            y_train_tensor = torch.FloatTensor(y_train_seq).unsqueeze(1).to(self.torch_device)
            X_val_tensor = torch.FloatTensor(X_val_seq).to(self.torch_device)
            y_val_tensor = torch.FloatTensor(y_val_seq).unsqueeze(1).to(self.torch_device)

            # 5. Data loaders
            train_loader = torch.utils.data.DataLoader(
                torch.utils.data.TensorDataset(X_train_tensor, y_train_tensor),
                batch_size=self.batch_size,
                shuffle=True,
                drop_last=False
            )
            val_loader = torch.utils.data.DataLoader(
                torch.utils.data.TensorDataset(X_val_tensor, y_val_tensor),
                batch_size=self.batch_size,
                shuffle=False
            )

            # 6. Model parameter configuration (reused when resuming)
            input_size = self.X_train.shape[1]
            if not self.check_version_exists or "model_params" not in self.train_process_info:
                # Fresh training: use the fixed parameter set
                lstm_params = self.fixed_lstm_params.copy()
                lstm_params["input_size"] = input_size
                logger.info(f"[{self.label_col}]使用新训练参数, input_size={input_size}")
            else:
                # Resume: reuse historical parameters
                lstm_params = self.train_process_info["model_params"]
                # Back-fill missing keys (compatibility with older versions)
                for key, val in self.fixed_lstm_params.items():
                    if key not in lstm_params:
                        lstm_params[key] = val
                logger.info(f"[{self.label_col}]复用历史LSTM参数, 续训模式开启")

            # Persist the parameters into the training-process info
            self.train_process_info["model_params"] = lstm_params
            self._save_train_process_info()  # flush to file immediately

            # 7. Model initialization (resume keeps the model loaded by _load_model)
            if self.model is None:
                # Fresh training: initialize from the current parameters
                self.model = LSTMModel(
                    input_size=lstm_params["input_size"],
                    hidden_size=lstm_params["hidden_size"],
                    num_layers=lstm_params["num_layers"],
                    dropout=lstm_params["dropout"]
                ).to(self.torch_device)
            else:
                # Resume: verify current params match the loaded model's structure
                if (lstm_params["input_size"] != self.model.lstm.input_size or
                    lstm_params["hidden_size"] != self.model.lstm.hidden_size or
                    lstm_params["num_layers"] != self.model.lstm.num_layers):
                    raise ValueError(f"模型结构参数不匹配, 无法续训！历史参数与当前参数不一致")

            logger.info(f'LSTMModel parameters: ')
            logger.info(f'input_size: {lstm_params["input_size"]}, 特征列数: {len(self.feature_cols)}')
            logger.info(f'hidden_size: {lstm_params["hidden_size"]}')
            logger.info(f'num_layers: {lstm_params["num_layers"]}')
            logger.info(f'dropout: {lstm_params["dropout"]}')

            # 8. Loss function + optimizer + learning-rate scheduler
            criterion = FocalLoss(gamma=2)
            optimizer = optim.Adam(
                self.model.parameters(),
                lr=lstm_params["learning_rate"],
                weight_decay=lstm_params["weight_decay"]
            )
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, min_lr=1e-6)
            
            # Restore optimizer/scheduler state when resuming
            if self.loaded_optimizer_state is not None:
                optimizer.load_state_dict(self.loaded_optimizer_state)
                logger.info(f"[{self.label_col}]恢复优化器状态")
            if self.loaded_scheduler_state is not None:
                scheduler.load_state_dict(self.loaded_scheduler_state)
                logger.info(f"[{self.label_col}]恢复学习率调度器状态")

            # 9. Initialize training-state variables (resume uses the loaded values)
            # NOTE(review): both conditionals below are no-ops — each branch
            # yields the same value; plain assignments would be equivalent.
            best_val_loss = self.loaded_best_val_loss if self.loaded_best_val_loss != float("inf") else float("inf")
            early_stop_counter = self.loaded_early_stop_counter if self.loaded_early_stop_counter > 0 else 0
            best_model_state = self.model.state_dict()
            start_epoch = self.loaded_epoch  # starting epoch when resuming
            training_history = self.loaded_training_history.copy()  # copy of historical epoch records
            
            logger.info(f"[{self.label_col}]早停配置: {lstm_params['early_stopping_rounds']}轮无提升则停止")
            logger.info(f"[{self.label_col}]训练轮次范围: {start_epoch+1} - {lstm_params['epochs']}")

            # 10. Training loop
            logger.info(f"[{self.label_col}]开始训练(总epochs: {lstm_params['epochs']}, 设备: {self.torch_device})")
            for epoch in range(start_epoch, lstm_params["epochs"]):
                self.model.train()
                train_loss = 0.0
                for batch_x, batch_y in train_loader:
                    optimizer.zero_grad()
                    outputs = self.model(batch_x)
                    loss = criterion(outputs, batch_y)
                    loss.backward()
                    optimizer.step()
                    # Weight by batch size so the epoch average is per-sample
                    train_loss += loss.item() * batch_x.size(0)
                avg_train_loss = train_loss / len(train_loader.dataset)

                # Validation pass (no gradients)
                self.model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for batch_x, batch_y in val_loader:
                        outputs = self.model(batch_x)
                        loss = criterion(outputs, batch_y)
                        val_loss += loss.item() * batch_x.size(0)
                avg_val_loss = val_loss / len(val_loader.dataset)

                scheduler.step(avg_val_loss)

                # Record training history
                history_entry = {
                    "epoch": epoch + 1,
                    "train_loss": round(avg_train_loss, 6),
                    "val_loss": round(avg_val_loss, 6),
                    "lr": optimizer.param_groups[0]["lr"]
                }
                training_history.append(history_entry)
                
                # Flush training-process info every 5 epochs
                if (epoch + 1) % 5 == 0:
                    self.train_process_info["training_history"] = training_history
                    self.train_process_info["training_info"] = {
                        "current_epoch": epoch + 1,
                        "best_val_loss": round(best_val_loss, 6),
                        "early_stop_counter": early_stop_counter
                    }
                    self._save_train_process_info()
                    logger.info(f"[{self.label_col}]轮次[{epoch+1}/{lstm_params['epochs']}] 训练损失: {avg_train_loss:.4f} 验证损失: {avg_val_loss:.4f}")

                # Check whether this is the best model so far
                if avg_val_loss < best_val_loss:
                    best_val_loss = avg_val_loss
                    best_model_state = self.model.state_dict()
                    early_stop_counter = 0
                    
                    # Save an intermediate checkpoint (enables resume-training)
                    checkpoint_path = os.path.join(self.model_process_dir, f"lstm_{self.label_col}_checkpoint.pth")
                    torch.save({
                        "epoch": epoch + 1,
                        "model_state_dict": best_model_state,
                        "optimizer_state_dict": optimizer.state_dict(),
                        "scheduler_state_dict": scheduler.state_dict(),
                        "best_val_loss": best_val_loss,
                        "early_stop_counter": early_stop_counter,
                        "training_history": training_history
                    }, checkpoint_path)
                    self.train_process_info["checkpoint_info"] = {
                        "path": checkpoint_path,
                        "epoch": epoch + 1,
                        "best_val_loss": round(best_val_loss, 6)
                    }
                    self._save_train_process_info()
                else:
                    early_stop_counter += 1
                    if early_stop_counter >= lstm_params["early_stopping_rounds"]:
                        logger.info(f"[{self.label_col}]早停触发, 最佳轮次: {epoch - lstm_params['early_stopping_rounds'] + 1}")
                        break

            # 11. Load the best model + evaluate
            self.model.load_state_dict(best_model_state)
            logger.info(f"[{self.label_col}]训练完成, 最佳验证损失: {best_val_loss:.4f}")

            self.model.eval()
            with torch.no_grad():
                y_pred_proba = self.model(X_val_tensor).cpu().numpy().flatten()
            
            # 12. Find and persist the optimal decision threshold
            threshold, best_f1 = self.find_optimal_threshold(y_val_seq, y_pred_proba)
            logger.info(f"[{self.label_col}]最优F1阈值: {threshold:.4f}(F1: {best_f1:.4f})")
            self._save_threshold({
                "optimal_threshold": round(threshold, 4),
                "best_f1_score": round(best_f1, 4)
            })

            # 13. Evaluation metrics
            # NOTE(review): evaluate_model returns {} on failure, which would
            # make the logging line below raise KeyError — confirm acceptable.
            y_pred = np.where(y_pred_proba >= threshold, 1, 0)
            metrics = self.evaluate_model(y_val_seq, y_pred, y_pred_proba)
            self.train_process_info["evaluation_metrics"] = metrics
            logger.info(f"[{self.label_col}]评估结果: AUC={metrics['auc']}, F1={metrics['f1']}")

            # 14. Save the full model (including training state for resume)
            model_name = f"lstm_{self.label_col}"
            save_path = model_utils.save_model(
                model=self.model,
                model_name=model_name,
                model_type="pytorch",
                feature_cols=self.feature_cols,
                model_params=lstm_params,
                threshold=threshold,
                version=self.train_version,
                extra_data={
                    "epoch": len(training_history),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "scheduler_state_dict": scheduler.state_dict(),
                    "best_val_loss": best_val_loss,
                    "training_history": training_history
                }
            )
            
            if save_path:
                self.models["lstm"] = self.model
                self.model_metrics["lstm"] = metrics
                logger.info(f"[{self.label_col}]模型保存成功: {save_path}")
                self.train_process_info["model_path"] = save_path
                self.train_process_info["training_history"] = training_history
                self.train_process_info["training_info"] = {
                    "total_epochs": len(training_history),
                    "best_val_loss": round(best_val_loss, 6),
                    "final_lr": optimizer.param_groups[0]["lr"]
                }
            else:
                logger.warning(f"[{self.label_col}]模型保存失败")

            # 15. Persist the final training state
            self.train_process_info["status"] = "success"
            self._save_train_process_info()

            logger.info(f"[LSTM-def train()]-[{self.label_col}]训练完成, 返回设备: {str(self.torch_device)}")
            return {
                "status": "success",
                "label_col": self.label_col,
                "metrics": metrics,
                "device": self.torch_device,
                "version": self.train_version,
                "model_path": save_path,
                "total_epochs": len(training_history)
            }

        except Exception as e:
            logger.error(f"[LSTM-def train()]-[{self.label_col}]训练失败: {str(e)}\n{traceback.format_exc()}")
            self.train_process_info["status"] = "fail"
            self.train_process_info["error"] = str(e)
            self.train_process_info["error_traceback"] = traceback.format_exc()
            self._save_train_process_info()
            return {
                "status": "fail",
                "label_col": self.label_col,
                "error": str(e)
            }
    
    @staticmethod
    def print_training_summary(all_results: list):
        """Log and print a summary table of per-label training results.

        Args:
            all_results: List of result dicts as returned by train().
        """
        logger.info("="*100)
        logger.info("                      LSTM 训练结果汇总表格")
        logger.info("="*100)
        
        summary_data = []
        version = "unknown"
        for res in all_results:
            label_col = res["label_col"]
            status = res["status"]
            metrics = res.get("metrics", {})
            train_device = res.get("device", "cpu")
            version = res.get("version", "unknown")
            total_epochs = res.get("total_epochs", 0)
            summary_data.append({
                "标签列": label_col,
                "训练版本": version,
                "训练设备": train_device,
                "总轮次": total_epochs,
                "AUC": metrics.get("auc", 0.0),
                "F1分数": metrics.get("f1", 0.0),
                "精确率": metrics.get("precision", 0.0),
                "召回率": metrics.get("recall", 0.0),
                "训练状态": status
            })
        
        summary_df = pd.DataFrame(summary_data)
        numeric_cols = ["AUC", "F1分数", "精确率", "召回率"]
        for col in numeric_cols:
            summary_df[col] = summary_df[col].round(4)
        
        logger.info("\n" + summary_df.to_string(index=False, col_space=10))
        
        # Aggregate statistics (only successful runs count toward averages)
        total = len(all_results)
        success_count = sum(1 for res in all_results if res["status"] == "success")
        success_rate = (success_count / total) * 100 if total > 0 else 0.0
        avg_auc = summary_df[summary_df["训练状态"] == "success"]["AUC"].mean() if success_count > 0 else 0.0
        avg_f1 = summary_df[summary_df["训练状态"] == "success"]["F1分数"].mean() if success_count > 0 else 0.0
        
        logger.info("-"*100)
        logger.info(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        logger.info(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        logger.info("="*100 + "\n")
        
        # Echo the same summary to stdout
        print("="*100)
        print("                      LSTM 训练结果汇总表格")
        print("="*100)
        print(summary_df.to_string(index=False, col_space=8))
        print("-"*100)
        print(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        print(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        print("="*100 + "\n")


if __name__ == "__main__":
    try:
        # Synthetic data: quick smoke test of fresh training + resume-training.
        sample_data = {
            "X_train": np.random.rand(1000, 50),
            "X_val": np.random.rand(200, 50),
            "y_train": np.random.randint(0, 2, 1000),
            "y_val": np.random.randint(0, 2, 200),
            "feature_cols": [f"feat_{i}" for i in range(50)],
            "threshold": 0.5
        }
        
        # First (fresh) training run
        logger.info("====== 首次训练 ======")
        trainer = LSTMModelTrainer(label_col="r1_next", data_dict=sample_data)
        result = trainer.train()
        # NOTE(review): a failed run returns no "version" key, so this raises
        # KeyError (caught by the except below) — confirm that is acceptable.
        version = result["version"]
        
        # Resume-training (load the version saved by the run above)
        logger.info("\n====== 续训 ======")
        retrainer = LSTMModelTrainer(label_col="r1_next", data_dict=sample_data, load_version=version)
        retrain_result = retrainer.train()
        
        LSTMModelTrainer.print_training_summary([result, retrain_result])
    except Exception as e:
        logger.critical(f"训练示例执行失败: {str(e)}\n{traceback.format_exc()}")
    