"""深度学习模型实现."""

from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

from .base import BaseQuantModel, ModelConfig

logger = logging.getLogger(__name__)


class LSTMModel(nn.Module):
    """LSTM network for binary sequence classification.

    Runs the input through a (possibly stacked) LSTM, takes the hidden
    state of the final time step and maps it through a linear layer to a
    single sigmoid-activated probability per sample.
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int = 64,
        num_layers: int = 2,
        dropout: float = 0.2,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Inter-layer dropout only applies when the LSTM is stacked.
        effective_dropout = dropout if num_layers > 1 else 0
        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=effective_dropout,
        )
        self.fc = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, seq_len, input_size) -> (batch, 1) probabilities."""
        sequence_output, _ = self.lstm(x)
        final_step = sequence_output[:, -1, :]
        return self.sigmoid(self.fc(final_step))


class LSTMModelWrapper(BaseQuantModel):
    """Wrapper exposing an LSTM classifier through the BaseQuantModel API.

    Handles feature normalization, sliding-window sequence construction,
    training with AUC-based early stopping (best weights restored), and
    checkpoint save/load.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Create the wrapper; falls back to default hyper-parameters when
        no config is supplied."""
        if config is None:
            config = ModelConfig(
                name="lstm",
                random_seed=42,
                params={
                    "sequence_len": 30,
                    "hidden_size": 64,
                    "num_layers": 2,
                    "dropout": 0.2,
                    "epochs": 30,
                    "batch_size": 32,
                    "learning_rate": 0.001,
                    "weight_decay": 0.0,
                    "early_stop_patience": 15,
                    "early_stop_min_delta": 0.001,
                },
            )
        super().__init__(config)
        self.model: LSTMModel | None = None
        # Prefer GPU when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Normalization statistics fitted on the training split.
        self._scaler_mean: np.ndarray | None = None
        self._scaler_std: np.ndarray | None = None

    def _set_random_seed(self) -> None:
        """Seed torch (CPU + all CUDA devices) and numpy for reproducibility."""
        torch.manual_seed(self.config.random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(self.config.random_seed)
            torch.cuda.manual_seed_all(self.config.random_seed)
        np.random.seed(self.config.random_seed)

    def _create_sequences(
        self, X: np.ndarray, y: np.ndarray | None = None, sequence_len: int = 30
    ) -> tuple[np.ndarray, np.ndarray | None]:
        """Build sliding windows of length ``sequence_len``.

        Window ending at row ``i`` (exclusive) is paired with label ``y[i]``,
        i.e. each window predicts the step immediately after it. Returns
        ``(X_seq, y_seq)``; ``y_seq`` is None when ``y`` is None.
        """
        X_seq = []
        y_seq = None if y is None else []

        for i in range(sequence_len, len(X)):
            X_seq.append(X[i - sequence_len : i])
            if y is not None:
                y_seq.append(y[i])

        X_seq = np.array(X_seq)
        if y is not None:
            y_seq = np.array(y_seq)

        return X_seq, y_seq

    def _normalize_features(self, X_train: np.ndarray, X_val: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]:
        """Z-score features with train-split statistics (stored for predict).

        Constant columns get std=1 to avoid division by zero.
        """
        self._scaler_mean = np.mean(X_train, axis=0)
        self._scaler_std = np.std(X_train, axis=0)
        self._scaler_std[self._scaler_std == 0] = 1.0

        X_train_norm = (X_train - self._scaler_mean) / self._scaler_std
        X_val_norm = None
        if X_val is not None:
            X_val_norm = (X_val - self._scaler_mean) / self._scaler_std

        return X_train_norm, X_val_norm

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Train the LSTM with AUC-monitored early stopping.

        Returns a summary dict with the train loss history, early-stop
        bookkeeping, and (when a validation set is supplied) validation
        loss/AUC histories plus final AUC/accuracy/F1 metrics computed
        with the best weights.
        """
        self._set_random_seed()
        self._set_feature_names(X_train)

        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        sequence_len = self.config.params.get("sequence_len", 30)

        X_val_array = None
        y_val_array = None
        if X_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val

        # Normalize with statistics computed on the training split only.
        X_train_norm, X_val_norm = self._normalize_features(X_train_array, X_val_array)

        # Build sliding-window sequences.
        X_train_seq, y_train_seq = self._create_sequences(X_train_norm, y_train_array, sequence_len)
        X_val_seq, y_val_seq = None, None
        if X_val_norm is not None and y_val_array is not None:
            X_val_seq, y_val_seq = self._create_sequences(X_val_norm, y_val_array, sequence_len)

        train_dataset = TensorDataset(
            torch.FloatTensor(X_train_seq), torch.FloatTensor(y_train_seq)
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.params.get("batch_size", 32),
            shuffle=True,
        )

        val_loader = None
        if X_val_seq is not None and y_val_seq is not None:
            val_dataset = TensorDataset(
                torch.FloatTensor(X_val_seq), torch.FloatTensor(y_val_seq)
            )
            val_loader = DataLoader(
                val_dataset,
                batch_size=self.config.params.get("batch_size", 32),
                shuffle=False,
            )

        # Build the network.
        input_size = X_train_seq.shape[2]
        self.model = LSTMModel(
            input_size=input_size,
            hidden_size=self.config.params.get("hidden_size", 64),
            num_layers=self.config.params.get("num_layers", 2),
            dropout=self.config.params.get("dropout", 0.2),
        ).to(self.device)

        optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.params.get("learning_rate", 0.001),
            weight_decay=self.config.params.get("weight_decay", 0.0),
        )
        criterion = nn.BCELoss()

        # Local import (hoisted out of the epoch loop) so sklearn stays an
        # optional dependency at module-import time.
        from sklearn.metrics import roc_auc_score

        epochs = self.config.params.get("epochs", 30)
        loss_history = []
        val_loss_history = []
        val_auc_history = []  # per-epoch validation AUC (None when no val set)
        patience = self.config.params.get("early_stop_patience", 15)
        min_delta = self.config.params.get("early_stop_min_delta", 0.001)
        best_val_auc = -float("inf")  # early stopping monitors AUC (higher is better)
        best_epoch: int | None = None  # epoch index of the best AUC, None if never evaluated
        patience_counter = 0
        early_stop_triggered = False
        best_model_state = None  # snapshot of the best weights

        epoch = -1  # so the summary is well-defined even when epochs == 0
        for epoch in range(epochs):
            # --- training pass ---
            self.model.train()
            epoch_loss = 0.0
            for batch_X, batch_y in train_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                optimizer.zero_grad()
                # squeeze(-1) (not squeeze()): a final batch of size 1 must
                # stay shape (1,) to match batch_y for BCELoss.
                outputs = self.model(batch_X).squeeze(-1)
                loss = criterion(outputs, batch_y)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

            avg_loss = epoch_loss / len(train_loader) if len(train_loader) > 0 else 0.0
            loss_history.append(avg_loss)

            # --- validation pass: monitor AUC (not loss) for early stopping ---
            if val_loader is not None and len(val_loader) > 0 and y_val_seq is not None:
                self.model.eval()
                val_loss = 0.0
                y_val_pred_list = []
                y_val_true_list = []

                with torch.no_grad():
                    for batch_X, batch_y in val_loader:
                        batch_X = batch_X.to(self.device)
                        batch_y = batch_y.to(self.device)
                        outputs = self.model(batch_X).squeeze(-1)
                        loss = criterion(outputs, batch_y)
                        val_loss += loss.item()

                        # collect predictions for the AUC computation
                        y_val_pred_list.append(outputs.cpu().numpy())
                        y_val_true_list.append(batch_y.cpu().numpy())

                avg_val_loss = val_loss / len(val_loader)
                val_loss_history.append(avg_val_loss)

                current_val_auc = roc_auc_score(
                    np.concatenate(y_val_true_list), np.concatenate(y_val_pred_list)
                )
                val_auc_history.append(current_val_auc)

                if current_val_auc > best_val_auc + min_delta:
                    best_val_auc = current_val_auc
                    best_epoch = epoch
                    patience_counter = 0
                    # Clone every tensor: state_dict().copy() is a shallow
                    # dict copy whose tensors keep training in place, so the
                    # "best" snapshot would silently track the latest weights.
                    best_model_state = {
                        k: v.detach().clone() for k, v in self.model.state_dict().items()
                    }
                else:
                    patience_counter += 1

                if patience > 0 and patience_counter >= patience:
                    early_stop_triggered = True
                    # restore_best_weights=True semantics
                    if best_model_state is not None:
                        self.model.load_state_dict(best_model_state)
                    logger.info(
                        "LSTM 早停触发：连续 %s 轮验证 AUC 无改进（最佳 AUC=%.4f @ epoch %s）",
                        patience, best_val_auc, (best_epoch or 0) + 1
                    )
                    break
            else:
                # No validation set: keep histories aligned per epoch.
                val_auc_history.append(None)

            if (epoch + 1) % 10 == 0:
                logger.debug(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}")
                if val_auc_history and val_auc_history[-1] is not None:
                    logger.debug(f"Epoch {epoch+1}/{epochs}, Val AUC: {val_auc_history[-1]:.4f}")

        # Also restore the best weights when the epoch budget ran out before
        # early stopping fired, so the metrics below reflect the best model.
        if not early_stop_triggered and best_model_state is not None:
            self.model.load_state_dict(best_model_state)

        self._is_trained = True

        summary: Dict[str, Any] = {
            "loss_history": loss_history,
        }

        # Always record training bookkeeping (used downstream for
        # underfitting detection), even when early stopping never fired.
        summary["early_stop_info"] = {
            "triggered": early_stop_triggered,
            "best_epoch": best_epoch + 1 if best_epoch is not None else None,
            "total_epochs": epoch + 1,  # number of epochs actually run
            "best_val_auc": float(best_val_auc) if best_val_auc > -float("inf") else None,
        }

        if val_loader is not None and len(val_loader) > 0 and y_val_seq is not None:
            summary["val_loss_history"] = val_loss_history
            summary["val_auc_history"] = [float(auc) if auc is not None else None for auc in val_auc_history]

            # Final validation AUC/accuracy/F1, computed with the restored
            # best weights.
            self.model.eval()
            y_pred_proba_list = []
            with torch.no_grad():
                for batch_X, _ in val_loader:
                    batch_X = batch_X.to(self.device)
                    outputs = self.model(batch_X).squeeze(-1)
                    y_pred_proba_list.append(outputs.cpu().numpy())
            y_pred_proba = np.concatenate(y_pred_proba_list)
            y_pred = (y_pred_proba >= 0.5).astype(int)

            from sklearn.metrics import accuracy_score, f1_score

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_seq, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_seq, y_pred)),
                "f1": float(f1_score(y_val_seq, y_pred)),
            }

        logger.debug(f"LSTM 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return one probability per sliding window over ``X``.

        The output has length ``len(X) - sequence_len`` (always >= 1).
        Raises when the model is untrained or ``X`` is too short to form
        at least one window.
        """
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        X_array = X.values if isinstance(X, pd.DataFrame) else X

        # Apply the normalization fitted during training.
        if self._scaler_mean is None or self._scaler_std is None:
            raise RuntimeError("模型未训练，缺少标准化参数")
        X_norm = (X_array - self._scaler_mean) / self._scaler_std

        sequence_len = self.config.params.get("sequence_len", 30)
        # <= (not <): exactly sequence_len rows yields zero windows, which
        # would crash the forward pass on an empty batch.
        if len(X_norm) <= sequence_len:
            raise ValueError(f"输入数据长度 {len(X_norm)} 小于序列长度 {sequence_len}")

        X_seq, _ = self._create_sequences(X_norm, None, sequence_len)

        self.model.eval()
        X_tensor = torch.FloatTensor(X_seq).to(self.device)
        with torch.no_grad():
            outputs = self.model(X_tensor).squeeze(-1).cpu().numpy()

        # Guarantee a 1-D array even for a single window.
        return np.atleast_1d(outputs)

    def save(self, path: str | Path) -> None:
        """Persist weights, config, scaler statistics and architecture params."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        save_dict = {
            "model_state_dict": self.model.state_dict(),
            "config": self.config,
            "scaler_mean": self._scaler_mean,
            "scaler_std": self._scaler_std,
            "feature_names": self._feature_names,
            # Architecture params are stored so load() can rebuild the
            # network without the original config.
            "model_params": {
                "input_size": self.model.lstm.input_size,
                "hidden_size": self.model.hidden_size,
                "num_layers": self.model.num_layers,
                "dropout": self.config.params.get("dropout", 0.2),
            },
        }
        torch.save(save_dict, path)
        logger.info(f"LSTM 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore a checkpoint produced by :meth:`save`."""
        # weights_only=False: the checkpoint contains a pickled ModelConfig,
        # which the newer torch.load default (weights_only=True, torch>=2.6)
        # would refuse to deserialize. Only load trusted checkpoints.
        checkpoint = torch.load(path, map_location=self.device, weights_only=False)

        if "config" in checkpoint:
            self.config = checkpoint["config"]

        # Restore normalization statistics and feature names.
        self._scaler_mean = checkpoint.get("scaler_mean")
        self._scaler_std = checkpoint.get("scaler_std")
        self._feature_names = checkpoint.get("feature_names")

        # Rebuild the network from the stored architecture params.
        model_params = checkpoint["model_params"]
        self.model = LSTMModel(
            input_size=model_params["input_size"],
            hidden_size=model_params["hidden_size"],
            num_layers=model_params["num_layers"],
            dropout=model_params["dropout"],
        ).to(self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()

        self._is_trained = True
        logger.info(f"LSTM 模型已从 {path} 加载")


class GRUModel(nn.Module):
    """GRU network for binary sequence classification.

    Runs the input through stacked GRU layers, reads the hidden state of
    the last time step and projects it to a sigmoid probability.
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int = 64,
        num_layers: int = 2,
        dropout: float = 0.2,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Dropout between recurrent layers only matters when stacked.
        effective_dropout = dropout if num_layers > 1 else 0
        self.gru = nn.GRU(
            input_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=effective_dropout,
        )
        self.fc = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, seq_len, input_size) -> (batch, 1) probabilities."""
        hidden_states, _ = self.gru(x)
        return self.sigmoid(self.fc(hidden_states[:, -1, :]))


class GRUModelWrapper(BaseQuantModel):
    """Wrapper exposing a GRU classifier through the BaseQuantModel API.

    Handles feature normalization, sliding-window sequence construction,
    training with loss-based early stopping (best weights restored), and
    checkpoint save/load.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Create the wrapper; falls back to default hyper-parameters when
        no config is supplied."""
        if config is None:
            config = ModelConfig(
                name="gru",
                random_seed=42,
                params={
                    "sequence_len": 30,
                    "hidden_size": 64,
                    "num_layers": 2,
                    "dropout": 0.2,
                    "epochs": 30,
                    "batch_size": 32,
                    "learning_rate": 0.001,
                    "weight_decay": 0.0,
                    "early_stop_patience": 15,
                    "early_stop_min_delta": 0.001,
                },
            )
        super().__init__(config)
        self.model: GRUModel | None = None
        # Prefer GPU when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Normalization statistics fitted on the training split.
        self._scaler_mean: np.ndarray | None = None
        self._scaler_std: np.ndarray | None = None

    def _set_random_seed(self) -> None:
        """Seed torch (CPU + all CUDA devices) and numpy for reproducibility."""
        torch.manual_seed(self.config.random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(self.config.random_seed)
            torch.cuda.manual_seed_all(self.config.random_seed)
        np.random.seed(self.config.random_seed)

    def _create_sequences(
        self, X: np.ndarray, y: np.ndarray | None = None, sequence_len: int = 30
    ) -> tuple[np.ndarray, np.ndarray | None]:
        """Build sliding windows of length ``sequence_len``.

        Window ending at row ``i`` (exclusive) is paired with label ``y[i]``,
        i.e. each window predicts the step immediately after it. Returns
        ``(X_seq, y_seq)``; ``y_seq`` is None when ``y`` is None.
        """
        X_seq = []
        y_seq = None if y is None else []

        for i in range(sequence_len, len(X)):
            X_seq.append(X[i - sequence_len : i])
            if y is not None:
                y_seq.append(y[i])

        X_seq = np.array(X_seq)
        if y is not None:
            y_seq = np.array(y_seq)

        return X_seq, y_seq

    def _normalize_features(self, X_train: np.ndarray, X_val: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]:
        """Z-score features with train-split statistics (stored for predict).

        Constant columns get std=1 to avoid division by zero.
        """
        self._scaler_mean = np.mean(X_train, axis=0)
        self._scaler_std = np.std(X_train, axis=0)
        self._scaler_std[self._scaler_std == 0] = 1.0

        X_train_norm = (X_train - self._scaler_mean) / self._scaler_std
        X_val_norm = None
        if X_val is not None:
            X_val_norm = (X_val - self._scaler_mean) / self._scaler_std

        return X_train_norm, X_val_norm

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Train the GRU with loss-monitored early stopping.

        Returns a summary dict with the train loss history and (when a
        validation set is supplied) the validation loss history plus final
        AUC/accuracy/F1 metrics computed with the best weights.
        """
        self._set_random_seed()
        self._set_feature_names(X_train)

        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        sequence_len = self.config.params.get("sequence_len", 30)

        X_val_array = None
        y_val_array = None
        if X_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val

        # Normalize with statistics computed on the training split only.
        X_train_norm, X_val_norm = self._normalize_features(X_train_array, X_val_array)

        # Build sliding-window sequences.
        X_train_seq, y_train_seq = self._create_sequences(X_train_norm, y_train_array, sequence_len)
        X_val_seq, y_val_seq = None, None
        if X_val_norm is not None and y_val_array is not None:
            X_val_seq, y_val_seq = self._create_sequences(X_val_norm, y_val_array, sequence_len)

        train_dataset = TensorDataset(
            torch.FloatTensor(X_train_seq), torch.FloatTensor(y_train_seq)
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.params.get("batch_size", 32),
            shuffle=True,
        )

        val_loader = None
        if X_val_seq is not None and y_val_seq is not None:
            val_dataset = TensorDataset(
                torch.FloatTensor(X_val_seq), torch.FloatTensor(y_val_seq)
            )
            val_loader = DataLoader(
                val_dataset,
                batch_size=self.config.params.get("batch_size", 32),
                shuffle=False,
            )

        # Build the network.
        input_size = X_train_seq.shape[2]
        self.model = GRUModel(
            input_size=input_size,
            hidden_size=self.config.params.get("hidden_size", 64),
            num_layers=self.config.params.get("num_layers", 2),
            dropout=self.config.params.get("dropout", 0.2),
        ).to(self.device)

        optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.params.get("learning_rate", 0.001),
            weight_decay=self.config.params.get("weight_decay", 0.0),
        )
        criterion = nn.BCELoss()

        epochs = self.config.params.get("epochs", 30)
        loss_history = []
        val_loss_history = []
        patience = self.config.params.get("early_stop_patience", 15)
        min_delta = self.config.params.get("early_stop_min_delta", 0.001)
        best_val_loss = float("inf")
        patience_counter = 0
        best_model_state = None  # snapshot of the best weights
        best_state_restored = False

        for epoch in range(epochs):
            early_stop = False
            # --- training pass ---
            self.model.train()
            epoch_loss = 0.0
            for batch_X, batch_y in train_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                optimizer.zero_grad()
                # squeeze(-1) (not squeeze()): a final batch of size 1 must
                # stay shape (1,) to match batch_y for BCELoss.
                outputs = self.model(batch_X).squeeze(-1)
                loss = criterion(outputs, batch_y)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

            avg_loss = epoch_loss / len(train_loader) if len(train_loader) > 0 else 0.0
            loss_history.append(avg_loss)

            # --- validation pass ---
            if val_loader is not None and len(val_loader) > 0:
                self.model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for batch_X, batch_y in val_loader:
                        batch_X = batch_X.to(self.device)
                        batch_y = batch_y.to(self.device)
                        outputs = self.model(batch_X).squeeze(-1)
                        loss = criterion(outputs, batch_y)
                        val_loss += loss.item()
                avg_val_loss = val_loss / len(val_loader)
                val_loss_history.append(avg_val_loss)
                if avg_val_loss + min_delta < best_val_loss:
                    best_val_loss = avg_val_loss
                    patience_counter = 0
                    # Clone every tensor: state_dict() returns references
                    # that keep training in place, so a plain copy would
                    # silently track the latest weights. (Matches the
                    # best-weight handling in LSTMModelWrapper.)
                    best_model_state = {
                        k: v.detach().clone() for k, v in self.model.state_dict().items()
                    }
                else:
                    patience_counter += 1
                if patience > 0 and patience_counter >= patience:
                    early_stop = True

            if (epoch + 1) % 10 == 0:
                logger.debug(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}")
            if early_stop:
                # Restore the best weights rather than keeping the overfit
                # final ones (restore_best_weights semantics).
                if best_model_state is not None:
                    self.model.load_state_dict(best_model_state)
                    best_state_restored = True
                logger.info("GRU 早停触发：连续 %s 轮验证损失无改进", patience)
                break

        # Also restore the best weights when the epoch budget ran out before
        # early stopping fired, so the metrics below reflect the best model.
        if best_model_state is not None and not best_state_restored:
            self.model.load_state_dict(best_model_state)

        self._is_trained = True

        summary: Dict[str, Any] = {
            "loss_history": loss_history,
        }
        if val_loader is not None and len(val_loader) > 0 and y_val_seq is not None:
            summary["val_loss_history"] = val_loss_history
            # Final validation AUC/accuracy/F1 with the restored best weights.
            self.model.eval()
            y_pred_proba_list = []
            with torch.no_grad():
                for batch_X, _ in val_loader:
                    batch_X = batch_X.to(self.device)
                    outputs = self.model(batch_X).squeeze(-1)
                    y_pred_proba_list.append(outputs.cpu().numpy())
            y_pred_proba = np.concatenate(y_pred_proba_list)
            y_pred = (y_pred_proba >= 0.5).astype(int)

            from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_seq, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_seq, y_pred)),
                "f1": float(f1_score(y_val_seq, y_pred)),
            }

        logger.debug(f"GRU 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return one probability per sliding window over ``X``.

        The output has length ``len(X) - sequence_len`` (always >= 1).
        Raises when the model is untrained or ``X`` is too short to form
        at least one window.
        """
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        X_array = X.values if isinstance(X, pd.DataFrame) else X

        # Apply the normalization fitted during training.
        if self._scaler_mean is None or self._scaler_std is None:
            raise RuntimeError("模型未训练，缺少标准化参数")
        X_norm = (X_array - self._scaler_mean) / self._scaler_std

        sequence_len = self.config.params.get("sequence_len", 30)
        # <= (not <): exactly sequence_len rows yields zero windows, which
        # would crash the forward pass on an empty batch.
        if len(X_norm) <= sequence_len:
            raise ValueError(f"输入数据长度 {len(X_norm)} 小于序列长度 {sequence_len}")

        X_seq, _ = self._create_sequences(X_norm, None, sequence_len)

        self.model.eval()
        X_tensor = torch.FloatTensor(X_seq).to(self.device)
        with torch.no_grad():
            outputs = self.model(X_tensor).squeeze(-1).cpu().numpy()

        # Guarantee a 1-D array even for a single window.
        return np.atleast_1d(outputs)

    def save(self, path: str | Path) -> None:
        """Persist weights, config, scaler statistics and architecture params."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        save_dict = {
            "model_state_dict": self.model.state_dict(),
            "config": self.config,
            "scaler_mean": self._scaler_mean,
            "scaler_std": self._scaler_std,
            "feature_names": self._feature_names,
            # Architecture params are stored so load() can rebuild the
            # network without the original config.
            "model_params": {
                "input_size": self.model.gru.input_size,
                "hidden_size": self.model.hidden_size,
                "num_layers": self.model.num_layers,
                "dropout": self.config.params.get("dropout", 0.2),
            },
        }
        torch.save(save_dict, path)
        logger.info(f"GRU 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore a checkpoint produced by :meth:`save`."""
        # weights_only=False: the checkpoint contains a pickled ModelConfig,
        # which the newer torch.load default (weights_only=True, torch>=2.6)
        # would refuse to deserialize. Only load trusted checkpoints.
        checkpoint = torch.load(path, map_location=self.device, weights_only=False)

        if "config" in checkpoint:
            self.config = checkpoint["config"]

        # Restore normalization statistics and feature names.
        self._scaler_mean = checkpoint.get("scaler_mean")
        self._scaler_std = checkpoint.get("scaler_std")
        self._feature_names = checkpoint.get("feature_names")

        # Rebuild the network from the stored architecture params.
        model_params = checkpoint["model_params"]
        self.model = GRUModel(
            input_size=model_params["input_size"],
            hidden_size=model_params["hidden_size"],
            num_layers=model_params["num_layers"],
            dropout=model_params["dropout"],
        ).to(self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()

        self._is_trained = True
        logger.info(f"GRU 模型已从 {path} 加载")


class TemporalCNNModel(nn.Module):
    """Temporal CNN for binary time-series classification.

    Two 1-D convolutions along the time axis, global average pooling,
    dropout, then a sigmoid-activated linear head producing one
    probability per sample.
    """

    def __init__(
        self,
        input_size: int,
        num_filters: int = 64,
        kernel_size: int = 3,
        dropout: float = 0.2,
    ) -> None:
        super().__init__()
        # "Same"-style padding derived from the kernel size. The previous
        # hard-coded padding=1 was only correct for kernel_size=3; deriving
        # it keeps the default behavior identical while supporting other
        # (odd) kernel sizes.
        padding = kernel_size // 2
        # Input arrives as (batch, sequence_len, input_size); Conv1d expects
        # (batch, channels, length), so forward() transposes first.
        self.conv1 = nn.Conv1d(
            in_channels=input_size,
            out_channels=num_filters,
            kernel_size=kernel_size,
            padding=padding,
        )
        self.conv2 = nn.Conv1d(
            in_channels=num_filters,
            out_channels=num_filters * 2,
            kernel_size=kernel_size,
            padding=padding,
        )
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(num_filters * 2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, sequence_len, input_size) -> (batch, 1) probabilities."""
        x = x.transpose(1, 2)  # -> (batch, input_size, sequence_len)
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = self.pool(x)   # (batch, num_filters * 2, 1)
        x = x.squeeze(-1)  # (batch, num_filters * 2)
        x = self.dropout(x)
        return self.sigmoid(self.fc(x))


class TemporalCNNModelWrapper(BaseQuantModel):
    """Wrapper that trains and serves a :class:`TemporalCNNModel`.

    Handles feature standardization, sliding-window sequence construction,
    training with optional validation-based early stopping, probability
    prediction and checkpoint save/load.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Build the wrapper; a default ModelConfig is used when none is given."""
        if config is None:
            config = ModelConfig(
                name="temporal_cnn",
                random_seed=42,
                params={
                    "sequence_len": 30,
                    "num_filters": 64,
                    "kernel_size": 3,
                    "dropout": 0.2,
                    "epochs": 30,
                    "batch_size": 32,
                    "learning_rate": 0.001,
                    "weight_decay": 0.0,
                    "early_stop_patience": 15,
                    "early_stop_min_delta": 0.001,
                },
            )
        super().__init__(config)
        self.model: TemporalCNNModel | None = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Per-feature standardization statistics fitted on the training set.
        self._scaler_mean: np.ndarray | None = None
        self._scaler_std: np.ndarray | None = None

    def _set_random_seed(self) -> None:
        """Seed torch (CPU and all CUDA devices) and numpy for reproducibility."""
        torch.manual_seed(self.config.random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(self.config.random_seed)
            torch.cuda.manual_seed_all(self.config.random_seed)
        np.random.seed(self.config.random_seed)

    def _create_sequences(
        self, X: np.ndarray, y: np.ndarray | None = None, sequence_len: int = 30
    ) -> tuple[np.ndarray, np.ndarray | None]:
        """Build sliding-window sequences.

        Each sample i (for i >= sequence_len) becomes the window
        X[i - sequence_len : i] with label y[i], so the label at time i is
        predicted from strictly earlier rows.

        Returns:
            (X_seq, y_seq) where X_seq has shape
            (len(X) - sequence_len, sequence_len, n_features) and y_seq is
            None when y is None.
        """
        X_seq = []
        y_seq = None if y is None else []

        for i in range(sequence_len, len(X)):
            X_seq.append(X[i - sequence_len : i])
            if y is not None:
                y_seq.append(y[i])

        X_seq = np.array(X_seq)
        if y is not None:
            y_seq = np.array(y_seq)

        return X_seq, y_seq

    def _normalize_features(self, X_train: np.ndarray, X_val: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]:
        """Fit per-feature z-score statistics on X_train and apply to both sets.

        Constant features get std forced to 1.0 to avoid division by zero.
        The fitted mean/std are stored for use at prediction time.
        """
        self._scaler_mean = np.mean(X_train, axis=0)
        self._scaler_std = np.std(X_train, axis=0)
        self._scaler_std[self._scaler_std == 0] = 1.0

        X_train_norm = (X_train - self._scaler_mean) / self._scaler_std
        X_val_norm = None
        if X_val is not None:
            X_val_norm = (X_val - self._scaler_mean) / self._scaler_std

        return X_train_norm, X_val_norm

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Train the Temporal CNN.

        Args:
            X_train / y_train: training features and binary labels.
            X_val / y_val: optional validation split; when provided, enables
                early stopping and validation metrics (AUC/accuracy/F1).

        Returns:
            Summary dict with "loss_history" and, when validation data is
            given, "val_loss_history" and "val_metrics".

        Raises:
            ValueError: if the training data is too short to form even one
                sequence of length ``sequence_len``.
        """
        self._set_random_seed()
        self._set_feature_names(X_train)

        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        sequence_len = self.config.params.get("sequence_len", 30)

        # Standardize features (statistics fitted on the training set only).
        X_val_array = None
        y_val_array = None
        if X_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val

        X_train_norm, X_val_norm = self._normalize_features(X_train_array, X_val_array)

        # Build sliding-window sequences.
        X_train_seq, y_train_seq = self._create_sequences(X_train_norm, y_train_array, sequence_len)
        if len(X_train_seq) == 0:
            # Fail with a clear message instead of an opaque IndexError on
            # X_train_seq.shape[2] below.
            raise ValueError(
                f"训练数据长度 {len(X_train_norm)} 小于等于序列长度 {sequence_len}，无法构建序列"
            )
        X_val_seq, y_val_seq = None, None
        if X_val_norm is not None and y_val_array is not None:
            X_val_seq, y_val_seq = self._create_sequences(X_val_norm, y_val_array, sequence_len)

        # Data loaders.
        train_dataset = TensorDataset(
            torch.FloatTensor(X_train_seq), torch.FloatTensor(y_train_seq)
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.params.get("batch_size", 32),
            shuffle=True,
        )

        val_loader = None
        if X_val_seq is not None and y_val_seq is not None:
            val_dataset = TensorDataset(
                torch.FloatTensor(X_val_seq), torch.FloatTensor(y_val_seq)
            )
            val_loader = DataLoader(
                val_dataset,
                batch_size=self.config.params.get("batch_size", 32),
                shuffle=False,
            )

        # Model.
        input_size = X_train_seq.shape[2]
        self.model = TemporalCNNModel(
            input_size=input_size,
            num_filters=self.config.params.get("num_filters", 64),
            kernel_size=self.config.params.get("kernel_size", 3),
            dropout=self.config.params.get("dropout", 0.2),
        ).to(self.device)

        # Optimizer and loss.
        optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.params.get("learning_rate", 0.001),
            weight_decay=self.config.params.get("weight_decay", 0.0),
        )
        criterion = nn.BCELoss()

        # Training loop with optional early stopping on validation loss.
        epochs = self.config.params.get("epochs", 30)
        loss_history = []
        val_loss_history = []
        patience = self.config.params.get("early_stop_patience", 15)
        min_delta = self.config.params.get("early_stop_min_delta", 0.001)
        best_val_loss = float("inf")
        patience_counter = 0

        for epoch in range(epochs):
            early_stop = False
            # Train phase.
            self.model.train()
            epoch_loss = 0.0
            for batch_X, batch_y in train_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                optimizer.zero_grad()
                # squeeze(-1) (not squeeze()) so a batch of size 1 keeps
                # shape (1,); a 0-dim output would break BCELoss's shape
                # match against batch_y of shape (1,).
                outputs = self.model(batch_X).squeeze(-1)
                loss = criterion(outputs, batch_y)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

            avg_loss = epoch_loss / len(train_loader) if len(train_loader) > 0 else 0.0
            loss_history.append(avg_loss)

            # Validation phase.
            if val_loader is not None and len(val_loader) > 0:
                self.model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for batch_X, batch_y in val_loader:
                        batch_X = batch_X.to(self.device)
                        batch_y = batch_y.to(self.device)
                        # Same squeeze(-1) rationale as the training loop.
                        outputs = self.model(batch_X).squeeze(-1)
                        loss = criterion(outputs, batch_y)
                        val_loss += loss.item()
                avg_val_loss = val_loss / len(val_loader)
                val_loss_history.append(avg_val_loss)
                # An epoch only counts as an improvement if it beats the
                # best loss by more than min_delta.
                if avg_val_loss + min_delta < best_val_loss:
                    best_val_loss = avg_val_loss
                    patience_counter = 0
                else:
                    patience_counter += 1
                if patience > 0 and patience_counter >= patience:
                    early_stop = True

            if (epoch + 1) % 10 == 0:
                logger.debug(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}")
            if early_stop:
                logger.info("TemporalCNN 早停触发：连续 %s 轮验证损失无改进", patience)
                break

        self._is_trained = True

        # Validation metrics.
        summary: Dict[str, Any] = {
            "loss_history": loss_history,
        }
        if val_loader is not None and len(val_loader) > 0 and y_val_seq is not None:
            summary["val_loss_history"] = val_loss_history
            # Compute validation AUC, accuracy and F1.
            self.model.eval()
            y_pred_proba_list = []
            with torch.no_grad():
                for batch_X, _ in val_loader:
                    batch_X = batch_X.to(self.device)
                    # squeeze(-1) keeps every batch output 1-D so that
                    # np.concatenate below works even for a size-1 batch.
                    outputs = self.model(batch_X).squeeze(-1)
                    y_pred_proba_list.append(outputs.cpu().numpy())
            y_pred_proba = np.concatenate(y_pred_proba_list)
            y_pred = (y_pred_proba >= 0.5).astype(int)

            from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_seq, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_seq, y_pred)),
                "f1": float(f1_score(y_val_seq, y_pred)),
            }

        logger.debug(f"TemporalCNN 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return one probability per sliding window over X.

        The output has length ``len(X) - sequence_len`` (one prediction per
        window), always as a 1-D array.

        Raises:
            RuntimeError: if the model is untrained or scaler stats are missing.
            ValueError: if X has fewer rows than ``sequence_len``.
        """
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        X_array = X.values if isinstance(X, pd.DataFrame) else X

        # Standardize with the statistics fitted at training time.
        if self._scaler_mean is None or self._scaler_std is None:
            raise RuntimeError("模型未训练，缺少标准化参数")
        X_norm = (X_array - self._scaler_mean) / self._scaler_std

        # Build sequences.
        sequence_len = self.config.params.get("sequence_len", 30)
        if len(X_norm) < sequence_len:
            raise ValueError(f"输入数据长度 {len(X_norm)} 小于序列长度 {sequence_len}")

        X_seq, _ = self._create_sequences(X_norm, None, sequence_len)

        # Predict.
        self.model.eval()
        X_tensor = torch.FloatTensor(X_seq).to(self.device)
        with torch.no_grad():
            # squeeze(-1) keeps the result 1-D even for a single window.
            outputs = self.model(X_tensor).squeeze(-1).cpu().numpy()

        # Defensive: guarantee a 1-D array even if a 0-dim slips through.
        if len(outputs.shape) == 0:
            return np.array([outputs])
        return outputs

    def save(self, path: str | Path) -> None:
        """Persist model weights, config, scaler stats and architecture params."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        save_dict = {
            "model_state_dict": self.model.state_dict(),
            "config": self.config,
            "scaler_mean": self._scaler_mean,
            "scaler_std": self._scaler_std,
            "feature_names": self._feature_names,
            # Architecture params are read back from the layers themselves so
            # the checkpoint is self-describing even if config drifted.
            "model_params": {
                "input_size": self.model.conv1.in_channels,
                "num_filters": self.model.conv1.out_channels,
                "kernel_size": self.model.conv1.kernel_size[0],
                "dropout": self.config.params.get("dropout", 0.2),
            },
        }
        torch.save(save_dict, path)
        logger.info(f"TemporalCNN 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore a checkpoint produced by :meth:`save` and set eval mode."""
        # NOTE(review): the checkpoint contains a pickled ModelConfig; on
        # PyTorch >= 2.6 (weights_only defaults to True) this may need
        # torch.load(..., weights_only=False) — confirm the deployed version.
        checkpoint = torch.load(path, map_location=self.device)

        # Restore config.
        if "config" in checkpoint:
            self.config = checkpoint["config"]

        # Restore scaler statistics.
        self._scaler_mean = checkpoint.get("scaler_mean")
        self._scaler_std = checkpoint.get("scaler_std")
        self._feature_names = checkpoint.get("feature_names")

        # Rebuild the network from the saved architecture params.
        model_params = checkpoint["model_params"]
        self.model = TemporalCNNModel(
            input_size=model_params["input_size"],
            num_filters=model_params["num_filters"],
            kernel_size=model_params["kernel_size"],
            dropout=model_params["dropout"],
        ).to(self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()

        self._is_trained = True
        logger.info(f"TemporalCNN 模型已从 {path} 加载")


# Backward compatibility: older configs may still reference cnn_1d.
CNN1DModelWrapper = TemporalCNNModelWrapper


class MLPClassifierNet(nn.Module):
    """Feed-forward binary classifier.

    Stacks a Linear -> ReLU (-> Dropout) block for every entry in
    ``hidden_layers``, then a single-unit output layer; the forward pass
    squashes the logit through a sigmoid to yield a probability in (0, 1).
    """

    def __init__(self, input_size: int, hidden_layers: list[int], dropout: float) -> None:
        super().__init__()
        modules: list[nn.Module] = []
        in_features = input_size
        for width in hidden_layers:
            block: list[nn.Module] = [nn.Linear(in_features, width), nn.ReLU()]
            # Dropout is only inserted when a positive rate is configured.
            if dropout > 0:
                block.append(nn.Dropout(dropout))
            modules.extend(block)
            in_features = width
        modules.append(nn.Linear(in_features, 1))
        self.network = nn.Sequential(*modules)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        logits = self.network(x)
        return self.sigmoid(logits)


class MLPModelWrapper(BaseQuantModel):
    """Wrapper that trains and serves an :class:`MLPClassifierNet`.

    Handles feature standardization, training with optional validation-based
    early stopping, probability prediction and checkpoint save/load.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Build the wrapper; a default ModelConfig is used when none is given."""
        if config is None:
            config = ModelConfig(
                name="mlp",
                random_seed=42,
                params={
                    "hidden_layers": [128, 64],
                    "dropout": 0.3,
                    "batch_size": 32,
                    "epochs": 50,
                    "learning_rate": 0.001,
                    "weight_decay": 1e-4,
                    "early_stop_patience": 15,
                    "early_stop_min_delta": 0.001,
                },
            )
        super().__init__(config)
        self.model: MLPClassifierNet | None = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Per-feature standardization statistics fitted on the training set.
        self._scaler_mean: np.ndarray | None = None
        self._scaler_std: np.ndarray | None = None

    def _set_random_seed(self) -> None:
        """Seed torch (CPU and all CUDA devices) and numpy for reproducibility."""
        torch.manual_seed(self.config.random_seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(self.config.random_seed)
        np.random.seed(self.config.random_seed)

    def _normalize_features(
        self, X_train: np.ndarray, X_val: np.ndarray | None = None
    ) -> tuple[np.ndarray, np.ndarray | None]:
        """Fit per-feature z-score statistics on X_train and apply to both sets.

        Constant features get std forced to 1.0 to avoid division by zero.
        The fitted mean/std are stored for use at prediction time.
        """
        self._scaler_mean = np.mean(X_train, axis=0)
        self._scaler_std = np.std(X_train, axis=0)
        self._scaler_std[self._scaler_std == 0] = 1.0
        X_train_norm = (X_train - self._scaler_mean) / self._scaler_std
        X_val_norm = None
        if X_val is not None:
            X_val_norm = (X_val - self._scaler_mean) / self._scaler_std
        return X_train_norm, X_val_norm

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Train the MLP.

        Args:
            X_train / y_train: training features and binary labels.
            X_val / y_val: optional validation split; when provided, enables
                early stopping and validation metrics (AUC/accuracy/F1).

        Returns:
            Summary dict with "loss_history" and, when validation data is
            given, "val_loss_history" and "val_metrics".
        """
        self._set_random_seed()
        self._set_feature_names(X_train)

        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        X_val_array = None
        y_val_array = None
        if X_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val

        X_train_norm, X_val_norm = self._normalize_features(X_train_array, X_val_array)

        train_dataset = TensorDataset(
            torch.FloatTensor(X_train_norm), torch.FloatTensor(y_train_array)
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.params.get("batch_size", 32),
            shuffle=True,
        )

        val_loader = None
        if X_val_norm is not None and y_val_array is not None:
            val_dataset = TensorDataset(
                torch.FloatTensor(X_val_norm), torch.FloatTensor(y_val_array)
            )
            val_loader = DataLoader(
                val_dataset,
                batch_size=self.config.params.get("batch_size", 32),
                shuffle=False,
            )

        input_size = X_train_norm.shape[1]
        hidden_layers = self.config.params.get("hidden_layers", [128, 64])
        dropout = self.config.params.get("dropout", 0.3)
        self.model = MLPClassifierNet(input_size, hidden_layers, dropout).to(self.device)

        optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config.params.get("learning_rate", 0.001),
            weight_decay=self.config.params.get("weight_decay", 1e-4),
        )
        criterion = nn.BCELoss()

        epochs = self.config.params.get("epochs", 50)
        patience = self.config.params.get("early_stop_patience", 15)
        min_delta = self.config.params.get("early_stop_min_delta", 0.001)

        loss_history = []
        val_loss_history = []
        best_val_loss = float("inf")
        patience_counter = 0

        for epoch in range(epochs):
            self.model.train()
            epoch_loss = 0.0
            for batch_X, batch_y in train_loader:
                batch_X = batch_X.to(self.device)
                batch_y = batch_y.to(self.device)

                optimizer.zero_grad()
                # squeeze(-1) (not squeeze()) so a batch of size 1 keeps
                # shape (1,); a 0-dim output would break BCELoss's shape
                # match against batch_y of shape (1,).
                outputs = self.model(batch_X).squeeze(-1)
                loss = criterion(outputs, batch_y)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

            avg_loss = epoch_loss / len(train_loader) if len(train_loader) > 0 else 0.0
            loss_history.append(avg_loss)

            if val_loader is not None:
                self.model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for batch_X, batch_y in val_loader:
                        batch_X = batch_X.to(self.device)
                        batch_y = batch_y.to(self.device)
                        # Same squeeze(-1) rationale as the training loop.
                        outputs = self.model(batch_X).squeeze(-1)
                        loss = criterion(outputs, batch_y)
                        val_loss += loss.item()
                avg_val_loss = val_loss / len(val_loader)
                val_loss_history.append(avg_val_loss)

                # An epoch only counts as an improvement if it beats the
                # best loss by more than min_delta.
                if avg_val_loss + min_delta < best_val_loss:
                    best_val_loss = avg_val_loss
                    patience_counter = 0
                else:
                    patience_counter += 1

                if patience > 0 and patience_counter >= patience:
                    logger.info("MLP 早停触发：连续 %s 轮验证损失无改进", patience)
                    break

        self._is_trained = True

        summary: Dict[str, Any] = {"loss_history": loss_history}
        if val_loader is not None and y_val_array is not None:
            summary["val_loss_history"] = val_loss_history
            self.model.eval()
            y_pred_proba_list = []
            with torch.no_grad():
                for batch_X, _ in val_loader:
                    batch_X = batch_X.to(self.device)
                    # squeeze(-1) keeps every batch output 1-D so that
                    # np.concatenate below works even for a size-1 batch.
                    outputs = self.model(batch_X).squeeze(-1)
                    y_pred_proba_list.append(outputs.cpu().numpy())
            y_pred_proba = np.concatenate(y_pred_proba_list)
            y_pred = (y_pred_proba >= 0.5).astype(int)

            from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_array, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_array, y_pred)),
                "f1": float(f1_score(y_val_array, y_pred)),
            }

        logger.debug(f"MLP 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return one probability per row of X, always as a 1-D array.

        Raises:
            RuntimeError: if the model is untrained or scaler stats are missing.
        """
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        X_array = X.values if isinstance(X, pd.DataFrame) else X
        if self._scaler_mean is None or self._scaler_std is None:
            raise RuntimeError("模型未训练，缺少标准化参数")
        X_norm = (X_array - self._scaler_mean) / self._scaler_std

        self.model.eval()
        X_tensor = torch.FloatTensor(X_norm).to(self.device)
        with torch.no_grad():
            # squeeze(-1) keeps the result 1-D even for a single-row input.
            outputs = self.model(X_tensor).squeeze(-1).cpu().numpy()

        # Defensive: guarantee a 1-D array even if a 0-dim slips through.
        if len(outputs.shape) == 0:
            return np.array([outputs])
        return outputs

    def save(self, path: str | Path) -> None:
        """Persist model weights, config, scaler stats and feature names."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")

        save_dict = {
            "model_state_dict": self.model.state_dict(),
            "config": self.config,
            "scaler_mean": self._scaler_mean,
            "scaler_std": self._scaler_std,
            "feature_names": self._feature_names,
        }
        torch.save(save_dict, path)
        logger.info(f"MLP 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore a checkpoint produced by :meth:`save` and set eval mode.

        The network is rebuilt from the restored config's hidden_layers and
        dropout, with input_size inferred from the saved scaler statistics.
        """
        # NOTE(review): the checkpoint contains a pickled ModelConfig; on
        # PyTorch >= 2.6 (weights_only defaults to True) this may need
        # torch.load(..., weights_only=False) — confirm the deployed version.
        checkpoint = torch.load(path, map_location=self.device)
        if "config" in checkpoint:
            self.config = checkpoint["config"]
        self._scaler_mean = checkpoint.get("scaler_mean")
        self._scaler_std = checkpoint.get("scaler_std")
        self._feature_names = checkpoint.get("feature_names")

        # Scaler mean has one entry per feature, so its length is input_size.
        input_size = len(self._scaler_mean) if self._scaler_mean is not None else 0
        hidden_layers = self.config.params.get("hidden_layers", [128, 64])
        dropout = self.config.params.get("dropout", 0.3)
        self.model = MLPClassifierNet(input_size, hidden_layers, dropout).to(self.device)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.model.eval()
        self._is_trained = True
        logger.info(f"MLP 模型已从 {path} 加载")
