import gc
import sys
from datetime import datetime
from typing import Literal

import joblib
import numpy as np
import seaborn as sns
import torch
from loguru import logger
from matplotlib import pyplot as plt
from numpy.random import SeedSequence
from pandas import DataFrame
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch import nn
from torch.utils.data import TensorDataset
from tqdm import tqdm

from .StreamLogHandler import StreamLogHandler
from .DataLoader import DataLoader
from .Detector import DDoSDetector
from .ModelCreator import ModelCreator, ModelCreatorConfig
from .ModelPlotSaver import ModelPlotSaver


def create_dataloader(x: np.ndarray, y: np.ndarray, batch_size: int = 64, shuffle: bool = False) -> torch.utils.data.DataLoader:
    """
    Build a torch DataLoader over paired feature/label arrays.

    :param x: input features, shape (n_samples, ...); converted to float32
    :param y: labels, shape (n_samples,); converted to float32 (BCE-style targets)
    :param batch_size: number of samples per batch
    :param shuffle: whether to reshuffle the data every epoch
    :return: torch.utils.data.DataLoader yielding (features, labels) batches
    """
    # Fix: the original annotated the return as the project's own `DataLoader`
    # class while actually returning torch's. Alias the local import so it can
    # no longer shadow / be confused with the module-level project class.
    from torch.utils.data import DataLoader as TorchDataLoader

    dataset = TensorDataset(
        torch.FloatTensor(x),
        torch.FloatTensor(y)
    )
    return TorchDataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


def create_sequences(data: np.ndarray,
                     labels: np.ndarray,
                     window_size: int = 30,
                     step: int = 5
                     ) -> tuple[np.ndarray, np.ndarray]:
    """
    Slice feature rows into overlapping time-series windows.

    Args:
        data (np.ndarray): input features, shape (n_samples, n_features)
        labels (np.ndarray): per-row labels, shape (n_samples,)
        window_size (int, optional): sliding-window length. Defaults to 30.
        step (int, optional): stride between window starts. Defaults to 5.
    Returns:
        tuple[np.ndarray, np.ndarray]:
            - sequences, shape (n_sequences, window_size, n_features)
            - labels, shape (n_sequences,); each window takes the label of
              its last row
    """
    logger.info("生成时间序列样本，窗口大小：{}，步长：{}", window_size, step)
    sequences, target = [], []
    # Fix: `+ 1` so the last full window (starting at len(data) - window_size)
    # is included; the original `range(0, len(data) - window_size, step)`
    # dropped it (off-by-one).
    for i in range(0, len(data) - window_size + 1, step):
        sequences.append(data[i:i + window_size])
        # label the window with its final row's label
        target.append(labels[i + window_size - 1])
    logger.info("生成{}条序列样本", len(sequences))
    return np.array(sequences), np.array(target)


class ModelTrainer:

    def __init__(self,
                 device: Literal['cpu', 'cuda'] = 'cuda',
                 seed: int = -1,
                 epochs: int = 50,
                 batch_size: int = 64,
                 lr: float = 0.001,
                 window_size: int = 30,
                 step: int = 5,
                 is_debug: bool = False,
                 model_creator_config: ModelCreatorConfig | None = None,
                 model_plot_saver: ModelPlotSaver | None = None,
                 classify_name: str = "Class",
                 classify_loader: list[str] | None | str = None,
                 *args, **kwargs):
        """
        Trainer for the 1D-CNN-GRN-Attention based DDoS detection model.
        :param device: training device, 'cpu' or 'cuda'; silently falls back to CPU when CUDA is unavailable
        :param seed: random seed; -1 (the default) draws a fresh 9-digit seed, pass a fixed value to reproduce runs
        :param epochs: number of training epochs
        :param batch_size: batch size; determines GPU/CPU memory usage
        :param lr: initial learning rate
        :param window_size: sliding-window length for time-series samples, default 30; larger => longer sequences, harder to predict
        :param step: sliding stride, default 5; larger => fewer, more spread-out sequences
        :param is_debug: enable DEBUG-level console logging
        :param model_creator_config: save-path / feature-column configuration; required in practice —
            it is dereferenced unconditionally below despite the Optional annotation
        :param model_plot_saver: plot save-path configuration; likewise dereferenced unconditionally
        :param classify_name: label column name; spaces are stripped and one leading space is prepended
            (CICIDS-style headers such as " Label") — NOTE(review): confirm this matches the dataset headers
        :param classify_loader: class-label mapping; a str names the positive (attack) class paired with
            'Benign', a list supplies [negative, positive], None defaults to ['Benign', 'Attack']
        :param args: extra positional arguments (stored as-is, unused here)
        :param kwargs: keyword options; recognised keys:
            - features (list): custom feature-column list
            - detector (nn.Module): custom detector class
            - weight_decay (float): optimizer weight-decay value
            - data_loader (DataLoader): custom data-loading helper
        :raises ValueError: when classify_loader is neither str, list nor None
        """
        # prefer CUDA only when both requested and actually available
        self.device = torch.device("cuda" if torch.cuda.is_available() and device == 'cuda' else "cpu")
        # seed < 0 means "fresh run": take the last 9 digits of OS entropy
        self.seed = seed if seed >= 0 else int(str(SeedSequence().entropy)[-9:])
        self.epochs = epochs
        self.batch_size = batch_size
        self.lr = lr
        self.window_size = window_size
        self.step = step
        self.is_debug = is_debug
        self.model_config = model_creator_config
        self.scaler_save_path = self.model_config.pkl_path
        self.model_save_path = self.model_config.pth_path
        self.model_static_path = self.model_config.static_pth_path
        self.features_columns = self.model_config.features_columns
        self.plot_train_save_path = model_plot_saver.plot_train_save_path
        self.plot_cm_save_path = model_plot_saver.plot_cm_save_path
        self.plot_pr_save_path = model_plot_saver.plot_pr_save_path
        # normalise to a leading-space column name (e.g. " Label") as used by CIC CSVs
        self.classify_name = " " + classify_name.replace(" ", "")
        self.classify_loader = None
        if isinstance(classify_loader, str):
            self.classify_loader = ['Benign', classify_loader]
        elif isinstance(classify_loader, list):
            self.classify_loader = classify_loader
        elif classify_loader is None:
            self.classify_loader = ['Benign', 'Attack']
        else:
            raise ValueError("classify_loader参数类型错误")
        self.args = args
        self.kwargs = kwargs

        self.features = [
            # basic volume features
            'Flow Duration',
            'Total Fwd Packets',
            'Total Backward Packets',
            'Total Length of Fwd Packets',
            'Total Length of Bwd Packets',

            # traffic-rate features
            'Flow Bytes/s',
            'Flow Packets/s',

            # inter-arrival-time features
            'Flow IAT Mean',
            'Flow IAT Std',
            'Fwd IAT Mean',
            'Bwd IAT Mean',

            # protocol-flag features
            'Fwd PSH Flags',
            'Bwd PSH Flags',
            'SYN Flag Count',
            'ACK Flag Count',
            'URG Flag Count',

            # window / buffer features
            'Init_Win_bytes_forward',  # a.k.a. "Init Fwd Win Bytes"
            'Init_Win_bytes_backward',  # a.k.a. "Init Bwd Win Bytes"

            # additional effective features
            'Packet Length Mean',  # mean packet length
            'Packet Length Std',  # packet-length standard deviation
            'Active Mean',  # active-time statistic
            'Idle Mean',  # idle-time statistic
            'Down/Up Ratio'  # downlink/uplink traffic ratio
        ] if not self.kwargs.get('features') else self.kwargs.get('features')

        self.detector = self.kwargs.get('detector', DDoSDetector)

        # raw data and derived sequence arrays
        self.df: DataFrame | None = None
        self.X_seq: np.ndarray | None = None
        self.Y_seq: np.ndarray | None = None

        # dataset splits [X_train, X_test, X_val, Y_train, Y_test, Y_val]
        self.X_train: np.ndarray | None = None
        self.X_test: np.ndarray | None = None
        self.X_val: np.ndarray | None = None
        self.Y_train: np.ndarray | None = None
        self.Y_test: np.ndarray | None = None
        self.Y_val: np.ndarray | None = None

        # model and training machinery (built in init_model())
        self.model: nn.Module | None = None
        self.criterion: nn.Module | None = None
        self.optimizer: torch.optim.Optimizer | None = None
        self.scheduler: torch.optim.lr_scheduler.ReduceLROnPlateau | None = None
        self.pos_weight: torch.Tensor | None = None

        # torch data loaders (built in init_model())
        self.train_loader: torch.utils.data.DataLoader | None = None
        self.val_loader: torch.utils.data.DataLoader | None = None
        self.test_loader: torch.utils.data.DataLoader | None = None

        # training history / best-checkpoint tracking
        self.best_val_loss: float = float('inf')
        self.history = {'train_loss': [], 'val_loss': []}

        # project-level data-loading helper (overridable via kwargs)
        self.data_loader: DataLoader | None = kwargs.get('data_loader', DataLoader())

        # built lazily on the first save_model() call
        self.model_creator: ModelCreator | None = None

        self._init_model_()

    def _init_model_(self):
        """
        One-time setup run from __init__: seeds the RNGs, installs the loguru
        handlers, configures matplotlib for CJK text and logs the full
        training configuration.
        :return: None
        """
        # seed torch and numpy for reproducibility
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)
        logger_format = "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <6}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
        # drop loguru's default handler before installing ours
        # (fix: the original called logger.remove() twice in a row)
        logger.remove()
        # console handler
        logger.add(sys.stderr, format=logger_format, level="DEBUG" if self.is_debug else "INFO")
        # stream handler feeding the application's log view
        from ShieldNet import stream_log_handler
        logger.add(stream_log_handler.write, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <7}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>")

        # matplotlib: render Chinese labels and minus signs correctly
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False

        # report the resolved device
        # (fix: re-using double quotes inside a double-quoted f-string is only
        # valid on Python 3.12+ / PEP 701; single quotes keep 3.10/3.11 working)
        logger.info(f"训练使用设备: {'NVIDIA GPU' if self.device.type == 'cuda' else 'CPU'}")
        logger.info("初始化训练模型完毕")

        logger.info("本次训练使用全部参数：")
        logger.info(f"\t随机种子: {self.seed}")
        logger.info(f"\t训练轮数: {self.epochs}")
        logger.info(f"\t批处理大小: {self.batch_size}")
        logger.info(f"\t初始学习率: {self.lr}")
        logger.info(f"\t滑动窗口大小: {self.window_size}")
        logger.info(f"\t滑动步长: {self.step}")
        logger.info(f"\t预应用特征列: {self.features}")
        logger.info(f"\t分类标签列名: {self.classify_name}")
        logger.info(f"\t分类标签映射: {self.classify_loader}")
        logger.info(f"\t模型保存路径: {self.model_save_path}")
        logger.info(f"\t模型静态图保存路径: {self.model_static_path}")
        logger.info(f"\t训练过程记录保存路径: {self.plot_train_save_path}")
        logger.info(f"\t混淆矩阵保存路径: {self.plot_cm_save_path}")
        logger.info(f"\tPR曲线保存路径: {self.plot_pr_save_path}")
        logger.info(f"\t其他参数: {self.kwargs}")

    def _predict_single(self, model, features, scaler, window_size=30) -> np.ndarray:
        """
        预测单条流量（修正维度问题）
        Args:
            model (nn.Module): 训练好的模型
            features (np.ndarray): 单条流量特征，形状为(n_features,)或(1, n_features)
            scaler (StandardScaler): 已拟合的标准化器
            window_size (int, optional): 与训练时一致的窗口大小. Defaults to 30.
        Returns:
            np.ndarray: 预测概率值，范围[0,1]
        Raises:
            ValueError: 输入特征维度不正确时抛出
        """
        logger.info("开始预测单条流量")
        # 1. 预处理
        features = np.array(features).reshape(1, -1)
        scaled = scaler.transform(features)  # [1, features]

        # 2. 构建时序数据（模拟滑动窗口）
        # 复制相同特征模拟历史数据（实际应用需真实历史窗口）
        seq = np.tile(scaled, (window_size, 1))  # [window_size, features]
        seq = seq[np.newaxis, ...]  # 添加batch维度 -> [1, window_size, features]

        # 3. 预测
        model.eval()
        with torch.no_grad():
            input_tensor = torch.FloatTensor(seq).to(self.device)
            print(f"输入张量形状: {input_tensor.shape}")  # 调试用
            prob = model(input_tensor).item()
        logger.info("预测结果：{}", prob)
        return prob

    def _evaluate(self, model: nn.Module, dataloader: torch.utils.data.DataLoader, criterion: nn.Module) -> dict[str, object]:
        """
        Evaluate the model over a full dataset.
        :param model: model to evaluate (switched to eval mode)
        :param dataloader: evaluation batches of (features, labels)
        :param criterion: loss function
        :return: dict with 'loss' (mean batch loss, float), 'report'
            (sklearn classification_report string) and 'cm' (confusion-matrix
            ndarray)
        """
        logger.info("开始评估模型")
        start_time = datetime.now()
        model.eval()
        y_true, y_pred, total_loss = [], [], 0
        with torch.no_grad():
            for X_batch, y_batch in tqdm(dataloader, desc="模型评估"):
                X_batch, y_batch = X_batch.to(self.device), y_batch.to(self.device)
                outputs = model(X_batch).squeeze()
                loss = criterion(outputs, y_batch)
                total_loss += loss.item()
                y_true.extend(y_batch.cpu().numpy())
                # NOTE(review): the criterion built in init_model() is
                # BCEWithLogitsLoss, which implies `outputs` are raw logits;
                # thresholding at 0.5 then assumes the detector applies a
                # sigmoid internally — confirm, otherwise the cut-off should
                # be 0 (or sigmoid should be applied here first).
                y_pred.extend((outputs.cpu().numpy() > 0.5).astype(int))
        # mean loss per batch
        loss = total_loss / len(dataloader)
        end_time = datetime.now()
        logger.info("验证模型用时：{}", end_time - start_time)
        logger.info("验证集平均损失：{}", loss)
        return {
            'loss': loss,
            'report': classification_report(y_true, y_pred, digits=4),
            'cm': confusion_matrix(y_true, y_pred)
        }

    def _train_epoch(self, model: nn.Module, dataloader: torch.utils.data.DataLoader, optimizer: torch.optim.Optimizer,
                     criterion: nn.Module, now_epoch: int) -> float:
        """
        训练一个epoch
        :param model: 模型
        :param dataloader: 数据集
        :param optimizer: 优化器
        :param criterion: 损失函数
        :return: 平均损失
        """
        logger.info("Epoch分支: 开始训练")
        start_time = datetime.now()
        model.train()
        total_loss = 0
        for X_batch, y_batch in tqdm(dataloader, desc=f"模型训练 {now_epoch} - {self.epochs}"):
            X_batch, y_batch = X_batch.to(self.device), y_batch.to(self.device)
            optimizer.zero_grad()
            outputs = model(X_batch).squeeze()
            loss = criterion(outputs, y_batch)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        end_time = datetime.now()
        result = total_loss / len(dataloader)
        logger.info("Epoch分支: 训练用时：{}", end_time - start_time)
        logger.info("Epoch分支: 训练集平均损失：{}", result)
        return result

    def load_data(self, dataset: np.ndarray | DataFrame | str) -> 'ModelTrainer':
        """
        Load a dataset and derive the binary 'target' column from the
        configured label column.
        :param dataset: DataFrame, ndarray or path; non-DataFrame inputs are
            converted by the project DataLoader
        :return: ModelTrainer (for chaining)
        :raises ValueError: when the configured label column is missing
        """
        logger.info("开始加载数据")
        self.df = self.data_loader.load_data(dataset)

        logger.info("数据加载完毕")
        logger.debug("检测表头是否存在")
        # fail fast when the configured label column is absent
        if self.classify_name not in self.df.columns:
            logger.error(f"列 '{self.classify_name}' 不存在！可用列：{self.df.columns.tolist()}")
            raise ValueError("目标列缺失")

        logger.debug(f"原始类别分布:\n{self.df[self.classify_name].value_counts()}")

        # binary mapping: classify_loader[0] -> 0, classify_loader[1] -> 1;
        # any unmapped label (and NA) silently becomes 0, i.e. the benign
        # class — the warning further down only reports them after the fact
        self.df['target'] = (
            self.df[self.classify_name]
            .map({self.classify_loader[0]: 0, self.classify_loader[1]: 1})
            .fillna(0)  # fills unmapped values and original NAs
            .astype(np.int8)
        )

        logger.debug(f"处理后标签分布:\n{self.df['target'].value_counts()}")
        # keep only the configured features that actually exist in this dataset
        logger.info("更新特征列")
        self.features = [col for col in self.features if col in self.df.columns]

        # report any labels the mapping did not cover
        original_labels = self.df[self.classify_name].unique()
        if set(original_labels) - set(self.classify_loader):
            logger.warning(f"发现未映射标签: {set(original_labels) - set(self.classify_loader)}")

        logger.info("映射后标签分布:\n{}", self.df['target'].value_counts())
        logger.info("数据加载完毕")

        return self

    def deal_with(self) -> 'ModelTrainer':
        """
        加载文件后数据处理
        :return: ModelTrainer
        """
        logger.info("开始处理数据")
        start_time = datetime.now()
        # 处理特殊值
        X = self.df[self.features].replace([np.inf, -np.inf], np.nan).fillna(0).values
        y = self.df['target'].values
        # 标准化
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)
        # 保存标准化参数
        joblib.dump(scaler, self.scaler_save_path)  # 保存标准化器
        logger.debug("特征矩阵形状:", X_scaled.shape)
        # 保存特征名
        self.features_columns = self.model_config.features_columns = self.features
        logger.debug("特征列保存完毕")
        # 生成序列数据
        self.X_seq, self.Y_seq = create_sequences(X_scaled, y)
        logger.debug("序列数据形状:", self.X_seq.shape, self.Y_seq.shape)

        # 新增标签验证
        unique_labels = np.unique(y)
        if len(unique_labels) < 2:
            missing_label = 1 if 0 in unique_labels else 0
            logger.error(f"严重错误：数据中缺少类别 {missing_label}，当前标签：{unique_labels}")
            raise ValueError("数据缺失必要类别")
        end_time = datetime.now()
        logger.info("数据处理完毕，用时：{}", end_time - start_time)
        logger.info("数据处理完毕")
        return self

    def divide_dataset(self) -> 'ModelTrainer':
        """
        划分数据集
        :return: ModelTrainer
        """
        logger.info("开始划分数据集")
        try:
            self.X_train, self.X_test, self.Y_train, self.Y_test = train_test_split(
                self.X_seq, self.Y_seq,
                test_size=0.2,
                random_state=self.seed,
                stratify=self.Y_seq  # 确保分层有效
            )
            self.X_train, self.X_val, self.Y_train, self.Y_val = train_test_split(
                self.X_train, self.Y_train,
                test_size=0.25,
                random_state=self.seed,
                stratify=self.Y_train
            )
            logger.info(f"数据集划分结果:")
            logger.info(f"训练集: {self.X_train.shape[0]} samples")
            logger.info(f"验证集: {self.X_val.shape[0]} samples")
            logger.info(f"测试集: {self.X_test.shape[0]} samples")
            logger.info("数据集划分完毕")
            return self
        except ValueError as e:
            logger.error("分层抽样失败，请检查数据分布")
            logger.error(f"详细错误: {str(e)}")
            raise ValueError("数据划分失败")

    def init_model(self, mode: Literal['min', 'max'] = 'min', patience: int = 2, factor: float = 0.5) -> 'ModelTrainer':
        """
        Build the detector, loss, optimizer, LR scheduler and data loaders.
        :param mode: ReduceLROnPlateau mode ('min' = watched metric should decrease)
        :param patience: epochs without improvement before the LR is reduced
        :param factor: multiplicative LR reduction factor
        :return: ModelTrainer (for chaining)
        :raises ValueError: when the training split lacks one of the two classes
        """
        logger.info("开始初始化模型")
        start_time = datetime.now()

        self.model = self.detector(input_dim=len(self.features)).to(self.device)

        # class weighting: up-weight positives by the negative/positive ratio
        logger.info("计算类别权重")
        class_0 = np.sum(self.Y_train == 0)
        class_1 = np.sum(self.Y_train == 1)

        if class_1 == 0 or class_0 == 0:
            logger.error("训练集缺少必要类别，请检查数据划分或预处理")
            logger.error(f"类别分布 - 0: {class_0}, 1: {class_1}")
            raise ValueError("无效的类别分布")

        self.pos_weight = torch.tensor([class_0 / class_1]).to(self.device)
        # NOTE(review): BCEWithLogitsLoss expects raw logits from the model,
        # yet _evaluate() thresholds the same outputs at 0.5 as if they were
        # probabilities — confirm whether the detector ends with a sigmoid.
        self.criterion = nn.BCEWithLogitsLoss(pos_weight=self.pos_weight)
        # optimizer
        logger.info("初始化优化器")
        self.optimizer = torch.optim.AdamW(self.model.parameters(),
                                           lr=self.lr,
                                           weight_decay=self.kwargs.get('weight_decay', 1e-5))
        logger.info("初始化优化器完毕")
        logger.info("初始化学习率下降策略")
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode=mode, patience=patience, factor=factor
        )
        logger.info("初始化学习率下降策略完毕")

        # only the training loader shuffles; val/test keep their order
        self.train_loader = create_dataloader(self.X_train, self.Y_train, batch_size=self.batch_size, shuffle=True)
        self.val_loader = create_dataloader(self.X_val, self.Y_val, batch_size=self.batch_size)
        self.test_loader = create_dataloader(self.X_test, self.Y_test, batch_size=self.batch_size)
        end_time = datetime.now()
        logger.info("模型初始化完毕，用时：{}", end_time - start_time)
        logger.info("数据加载器初始化完毕")
        return self

    def train(self) -> 'ModelTrainer':
        """
        训练模型
        :return: ModelTrainer
        """
        try:
            logger.info("开始训练模型")
            start_time = datetime.now()
            for epoch in range(self.epochs):
                # 训练
                train_loss = self._train_epoch(self.model, self.train_loader, self.optimizer, self.criterion, epoch + 1)
                self.history['train_loss'].append(train_loss)

                # 验证
                val_results = self._evaluate(self.model, self.val_loader, self.criterion)
                val_loss = val_results['loss']
                self.history['val_loss'].append(val_loss)
                self.scheduler.step(val_loss)

                # 打印信息
                logger.info(f"训练轮数: {epoch + 1}/{self.epochs}")
                logger.info(f"训练损失: {train_loss:.8f} | 验证损失: {val_loss:.8f}")
                logger.info(f"验证集结果:\n{val_results['report']}")

                # 保存最佳模型
                if val_loss < self.best_val_loss:
                    self.best_val_loss = val_loss
                    self.save_model()
                    logger.info("更新保存最佳模型")
            logger.info("模型训练完毕")
            end_time = datetime.now()
            logger.info("训练用时：{}", end_time - start_time)
            return self
        except KeyboardInterrupt:
            logger.warning("手动停止训练")
            return self

    def save_model(self) -> None:
        """
        保存模型
        :return: None
        """
        torch.save(self.model, self.model_save_path)
        torch.save(self.model.state_dict(), self.model_static_path)
        logger.info(f"模型保存成功: {self.model_save_path}")
        logger.info(f"权重模型保存成功: {self.model_static_path}")

        # 开始保存合并模型文件，包括pth, pkl, features_columns
        self.model_creator = ModelCreator(self.model_config)
        self.model_creator.create()
        logger.info(f"模型配置文件保存成功: {self.model_config.model_save_path}")

    def show_train_plot(self) -> 'ModelTrainer':
        """
        可视化并保存训练过程损失曲线
        Returns:
            ModelTrainer: 返回self以支持链式调用
        Note:
            生成的图片会保存到model_save配置的plot_train路径
            图片格式为PNG，分辨率300dpi
        """
        plt.figure(figsize=(10, 5))
        plt.plot(self.history['train_loss'], label='训练集损失')
        plt.plot(self.history['val_loss'], label='验证集损失')
        plt.title("训练过程损失")
        plt.xlabel("训练轮数")
        plt.ylabel("损失值")
        plt.legend()  # 显示图例
        plt.savefig(self.plot_train_save_path, dpi=300, bbox_inches='tight')
        plt.close()  # 修复：关闭当前figure释放内存
        return self

    def model_evaluate(self) -> 'ModelTrainer':
        """
        在测试集上评估模型性能并保存混淆矩阵
        Returns:
            ModelTrainer: 返回self以支持链式调用
        Note:
            生成的混淆矩阵会保存到model_save配置的plot_cm路径
            图片格式为PNG，分辨率300dpi
        """
        self.model.load_state_dict(torch.load(self.model_static_path))
        test_results = self._evaluate(self.model, self.test_loader, self.criterion)
        logger.info(f"测试结果:\n{test_results['report']}")
        # 混淆矩阵
        plt.figure(figsize=(6, 6))
        sns.heatmap(test_results['cm'], annot=True, fmt='d',
                    xticklabels=['正常流量', '攻击流量'],
                    yticklabels=['正常流量', '攻击流量'])
        plt.title("混淆矩阵")
        plt.savefig(self.plot_cm_save_path, dpi=300, bbox_inches='tight')
        plt.close()  # 修复：关闭当前figure释放内存
        return self

    def plot_pr_curve(self) -> 'ModelTrainer':
        """
        绘制并保存精确率-召回率曲线(PR Curve)
        Returns:
            ModelTrainer: 返回self以支持链式调用
        Note:
            生成的图片会保存到model_save配置的plot_pr_curve路径
            图片格式为PNG，分辨率300dpi
        """
        self.model.eval()
        y_true, y_probs = [], []
        with torch.no_grad():
            for X_batch, y_batch in self.test_loader:
                outputs = self.model(X_batch.to(self.device)).squeeze()
                y_true.extend(y_batch.numpy())
                y_probs.extend(outputs.cpu().numpy())

        precision, recall, _ = precision_recall_curve(y_true, y_probs)
        auprc = auc(recall, precision)

        plt.figure(figsize=(8, 6))
        plt.plot(recall, precision, label=f'AUC = {auprc:.4f}')
        plt.xlabel('回归召回率')
        plt.ylabel('回归精确率')
        plt.title('PR曲线')
        plt.legend()
        plt.savefig(self.plot_pr_save_path, dpi=300, bbox_inches='tight')
        plt.close()  # 修复：关闭当前figure释放内存
        return self

    def clear_model(self, is_clear_pth_and_pkl: bool = True):
        """
        用于训练结束回收内存，防止内存泄漏，清除模型
        """
        del self.model
        if is_clear_pth_and_pkl:
            self.model_creator.remove_temp()
        self.__dict__.clear()
        gc.collect()
        torch.cuda.empty_cache()
        logger.info("模型训练结束，回收内存")
