"""
本文件修改自 perfd 项目中的 oracle/learn/mdn.py
[开源代码地址](https://github.com/perfd/perfd)

参考文献：
[1] Silvery Fu等. 机器学习在黑盒系统性能预测中的应用. NSDI 2021
"""
from typing import List
from typing import Tuple

import numpy as np
import torch
from torch import distributions as D  # PyTorch概率分布模块
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from torch.utils.data import random_split
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping

from .base import GMM
from .base import GMMPredictor


class _MDN(nn.Module):
    """
    混合密度网络的核心模型实现
    
    网络结构：
    1. 基础网络：两层全连接层+ReLU激活
    2. 输出层：分别预测混合权重(alphas)、均值(mus)和标准差(sigmas)
    """
    def __init__(self, num_input, neurons: int = 50, components: int = 2, **kwargs):
        """
        初始化网络结构
        
        Args:
            num_input: 输入特征维度
            neurons: 隐藏层神经元数量
            components: 高斯混合模型的组件数量
        """
        super().__init__(**kwargs)
        # 基础网络：特征提取器
        self._base = nn.Sequential(
            nn.Linear(num_input, neurons),
            nn.ReLU(),
            nn.Linear(neurons, neurons),
            nn.ReLU(),
        )
        # 三个输出头，分别预测混合权重、均值和标准差
        self._alphas = nn.Linear(neurons, components)  # 混合权重
        self._mus = nn.Linear(neurons, components)     # 均值
        self._sigmas = nn.Linear(neurons, components)  # 标准差

    def forward(
        self, data: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        前向传播计算
        
        Returns:
            (alphas, mus, sigmas): 混合权重、均值和标准差
        """
        latent = self._base(data)  # 特征提取
        # 混合权重通过softmax确保和为1
        alphas = F.softmax(self._alphas(latent), dim=-1)
        # 均值直接输出
        mus = self._mus(latent)
        # 标准差通过elu+1确保为正
        sigmas = F.elu(self._sigmas(latent)) + 1 + 1e-6
        return alphas, mus, sigmas


class MDN(pl.LightningModule):
    """PyTorch Lightning wrapper around the mixture density network.

    Delegates the forward pass to an internal ``_MDN`` and implements
    the negative-log-likelihood training/validation loop plus an Adam
    optimizer.
    """

    def __init__(self, learning_rate: float = 1e-3, **kwargs):
        """Create the underlying model.

        Args:
            learning_rate: Adam learning rate.
            **kwargs: forwarded to ``_MDN`` (e.g. num_input, neurons,
                components).
        """
        super().__init__()
        self._model = _MDN(**kwargs)
        self._learning_rate = learning_rate

    def forward(
        self, data: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Delegate to the wrapped ``_MDN``."""
        return self._model(data)

    def loss(self, data: torch.Tensor, target: torch.Tensor):
        """Negative log-likelihood of ``target`` under the predicted GMM.

        Builds a ``MixtureSameFamily`` distribution from the network's
        outputs and returns the mean negative log-probability.
        """
        weights, means, scales = self.forward(data)
        mixture = D.MixtureSameFamily(
            D.Categorical(weights),
            D.Normal(loc=means, scale=scales),
        )
        return -mixture.log_prob(target).mean()

    def _shared_step(self, batch: Tuple[torch.Tensor, torch.Tensor], tag: str):
        # Compute the NLL loss for one batch and log it under `tag`.
        inputs, targets = batch
        nll = self.loss(data=inputs, target=targets)
        self.log(tag, nll)
        return nll

    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], *args):
        """One training batch."""
        return self._shared_step(batch, "train_loss")

    def validation_step(self, batch: Tuple[torch.Tensor, torch.Tensor], *args):
        """One validation batch."""
        return self._shared_step(batch, "val_loss")

    def configure_optimizers(self):
        """Adam over the wrapped model's parameters."""
        return torch.optim.Adam(self._model.parameters(), lr=self._learning_rate)


class TorchGMM(GMM):
    """Gaussian mixture model backed by ``torch.distributions``.

    Wraps a ``MixtureSameFamily`` distribution and exposes a sampling
    interface returning NumPy arrays.
    """

    def __init__(
        self,
        alphas: torch.Tensor,  # mixing weights
        mus: torch.Tensor,     # component means
        sigmas: torch.Tensor,  # component standard deviations
    ):
        """Assemble the mixture distribution from its parameters."""
        components = D.Normal(loc=mus, scale=sigmas)
        weights = D.Categorical(alphas)
        self._model = D.MixtureSameFamily(weights, components)

    def sample(self, n_samples: int = 1) -> np.ndarray:
        """Draw ``n_samples`` values from the mixture as a NumPy array."""
        draws = self._model.sample((n_samples,))
        return draws.numpy()


class MDNPredictor(GMMPredictor):
    """Predictor based on a mixture density network.

    ``train`` fits a Lightning ``MDN`` with early stopping on the
    validation loss; ``predict`` returns one ``TorchGMM`` per test
    sample.
    """

    # Default pl.Trainer parameters; user-supplied train_params override these.
    DEFAULT_TRAIN_PARAMS = dict(
        logger=False,                # no logger
        enable_checkpointing=False,  # no checkpoints
        enable_progress_bar=False,   # no progress bar
        max_epochs=1000,             # upper bound; early stopping usually ends sooner
    )

    def __init__(
        self,
        batch_size: int = 128,
        train_ratio: float = 0.8,
        train_params: dict = None,
        model_params: dict = None,
        **kwargs
    ):
        """Initialize the predictor.

        Args:
            batch_size: batch size for all DataLoaders (train/val/test).
            train_ratio: fraction of samples used for training; the rest
                forms the validation set.
            train_params: extra ``pl.Trainer`` kwargs, merged on top of
                ``DEFAULT_TRAIN_PARAMS``.
            model_params: kwargs forwarded to ``MDN`` (e.g. neurons,
                components, learning_rate).
            **kwargs: forwarded to ``GMMPredictor``. NOTE(review): this
                class reads ``self._seed``, presumably set by the base
                class — confirm against GMMPredictor.
        """
        super().__init__(**kwargs)
        self._batch_size = batch_size
        self._train_ratio = train_ratio
        # Merge defaults with user overrides (user wins on conflicts).
        self._train_params = {**self.DEFAULT_TRAIN_PARAMS, **(train_params or {})}
        # Copy to avoid aliasing the caller's dict.
        self._model_params = dict(model_params) if model_params else {}
        self._model: MDN = None  # set by train()

    def train(self, train_x: np.ndarray, train_y: np.ndarray):
        """Fit the MDN on ``(train_x, train_y)``.

        Workflow:
        1. Split samples into train/validation sets by ``train_ratio``.
        2. Build the Lightning model.
        3. Train with early stopping on ``val_loss``.
        """
        num_samples, num_input = train_x.shape
        num_train = int(num_samples * self._train_ratio)
        data = TensorDataset(torch.FloatTensor(train_x), torch.FloatTensor(train_y))
        # Use `is not None` so that seed=0 still yields a reproducible split
        # (the original truthiness test silently ignored a zero seed).
        generator = (
            torch.Generator().manual_seed(self._seed)
            if self._seed is not None
            else None
        )
        data_train, data_val = random_split(
            data,
            lengths=[num_train, num_samples - num_train],
            generator=generator,
        )

        self._model = MDN(num_input=num_input, **self._model_params)
        trainer = pl.Trainer(
            callbacks=[
                # Early stopping guards against overfitting.
                EarlyStopping(monitor="val_loss", min_delta=0, patience=5, mode="min")
            ],
            **self._train_params,
        )
        # Bug fix: the original built these DataLoaders without batch_size,
        # so training always ran with batch size 1 regardless of the
        # configured self._batch_size. Shuffle the training set each epoch.
        trainer.fit(
            self._model,
            train_dataloaders=DataLoader(
                data_train, batch_size=self._batch_size, shuffle=True
            ),
            val_dataloaders=DataLoader(data_val, batch_size=self._batch_size),
        )

    def predict(self, test_x: np.ndarray) -> List[GMM]:
        """Predict a Gaussian mixture for each row of ``test_x``.

        Returns:
            List[GMM]: one ``TorchGMM`` per test sample, in input order.
        """
        loader = DataLoader(
            TensorDataset(torch.FloatTensor(test_x)), batch_size=self._batch_size
        )
        gmms: List[GMM] = []
        # Inference mode: disable training-mode behavior and skip building
        # the autograd graph (the original tracked gradients needlessly).
        self._model.eval()
        with torch.no_grad():
            for (batch,) in loader:
                output = self._model.forward(batch)
                for alphas, mus, sigmas in zip(*[item.cpu() for item in output]):
                    gmms.append(TorchGMM(alphas=alphas, mus=mus, sigmas=sigmas))
        return gmms
