import os
import json
import tensorflow as tf
import numpy as np
from tqdm import tqdm

from tensorflow.keras.layers import Input, AveragePooling1D, MaxPooling1D
from tensorflow.keras.models import Model, model_from_json, Sequential
from tensorflow.data import Dataset
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import Callback, LearningRateScheduler
from normal_distribution import Normal
from utils import flatten_to_ndims, unflatten_from_ndims
import pywt as pwt

from conv1d import conv1d, deconv1d
from feature_fusion import CFM

class MSTVAEModel(Model):
    """Multi-scale time-frequency VAE (MST-VAE) for time-series modelling.

    The encoder extracts multi-scale features in the time domain (TFM) and
    the frequency domain (FFM), fuses them with a cross-attention module
    (CFM) and maps the fusion to the mean/log-std of the latent posterior.
    The decoder reconstructs the input distribution from latent samples.
    Anomaly scores come from the reconstruction log-probability, optionally
    refined by MCMC-based imputation of the last time step.
    """

    def __init__(self, cfg):
        """Create the model.

        cfg: configuration dict; keys used here include window_size, x_dim,
        z_dim, hidden_size, pool_size, strides, l2_reg, logstd_min/max,
        n_samples, n_mc_chain, mcmc_iter and the TFM/FFM channel settings.
        """
        super().__init__()

        # Load configuration.
        self.cfg = cfg

        # Cross-attention feature-fusion module.
        self.cfm = CFM(self.cfg['hidden_size'])

        # Build encoder and decoder.  NOTE: the builder methods are
        # intentionally shadowed by the resulting Keras models, so after
        # __init__ ``self.encoder``/``self.decoder`` are Model instances.
        self.encoder = self.encoder()
        self.decoder = self.decoder()

        # Per-epoch averaged evaluation losses (filled by AverageLossCallback).
        self.average_epoch_eval = dict()

        # Trigger weight creation with the expected input shape.
        self.build(input_shape=(1, self.cfg['window_size'], self.cfg['x_dim']))

    def encoder(self):
        """Build the encoder model: x -> (qz_mean, qz_logstd)."""
        # Encoder input layer.
        inputs = Input(shape=(self.cfg['window_size'], self.cfg['x_dim']))
        # Feature extraction + fusion produces the shared representation.
        feature = self.feature_extraction_fusion(inputs)
        # Inference network: mean and log-std of the latent posterior.
        qz_mean, qz_logstd = self.qnet(feature)
        # Encoder model: inputs -> (qz_mean, qz_logstd).
        model = Model(inputs, (qz_mean, qz_logstd))

        return model

    def decoder(self):
        """Build the decoder model: z -> (px_mean, px_logstd)."""
        # Decoder input layer (latent samples).
        inputs = Input(shape=(self.cfg['z_dim'], self.cfg['x_dim']))
        # Generative network: mean and log-std of the reconstruction.
        px_mean, px_logstd = self.pnet(inputs)
        # Decoder model: inputs -> (px_mean, px_logstd).
        model = Model(inputs, (px_mean, px_logstd))

        return model

    def feature_extraction_fusion(self, x):
        '''
        Extract time-domain and frequency-domain features and fuse them.

        Ablation variants (for experiments):
            drop TFM + CFM
            drop FFM + CFM
            drop CFM (use plain concat instead)
        '''

        # Time-domain feature extraction module (TFM).
        print(f"时域特征提取输入 x:{x.shape}")
        time_feature = self.time_feature_extraction(x)
        print(f"时域特征提取输出 time_feature:{time_feature.shape}")

        # Frequency-domain feature extraction module (FFM).
        print(f"频域特征提取输入 x:{x.shape}")
        frequency_feature = self.frequency_feature_extraction(x)
        print(f"频域特征提取输出 frequency_feature:{frequency_feature.shape}")

        # Cross-domain feature-fusion module (CFM).
        feature_fusion = self.cfm(time_feature, frequency_feature)
        print(f"跨域特征融合输出 feature_fusion:{feature_fusion.shape}")

        return feature_fusion

    def qnet(self, feature_fusion):
        '''
        Inference network: fused features -> (qz_mean, qz_logstd).
        '''

        # Dimensionality-reduction stage.
        print(f"特征降维输入 feature_fusion:{feature_fusion.shape}")
        # Reduce the channel count back to x_dim.
        x = conv1d(feature_fusion, kernel_size=1, strides=1,
                             filters=self.cfg['x_dim'], activation='relu',
                             kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))
        # AveragePooling1D shortens the time axis; mirrored by the
        # transposed convolution in the generative network.
        x = AveragePooling1D(pool_size=self.cfg['pool_size'], strides=self.cfg['strides'])(x)
        print(f"特征降维输出 x:{x.shape}")

        # Mapping stage: the mean and log-std are *learned* by the model
        # rather than computed from fixed statistical formulas.
        print(f"特征映射层输入 x:{x.shape}")
        qz_mean = conv1d(x, kernel_size=1, filters=self.cfg['x_dim'])
        qz_logstd = conv1d(x, kernel_size=1, filters=self.cfg['x_dim'])
        # Clip log-std to a configured range for numerical stability.
        qz_logstd = tf.clip_by_value(qz_logstd,
                                     clip_value_min=self.cfg['logstd_min'],
                                     clip_value_max=self.cfg['logstd_max'])
        print(f"特征映射层输出 qz_mean:{qz_mean.shape},qz_logstd:{qz_logstd.shape}")

        # Mean and log-std of the latent posterior.
        return qz_mean, qz_logstd

    def pnet(self, z):
        '''
        Generative network: latent samples -> (px_mean, px_logstd).
        '''

        # Transposed convolution upsamples the time axis back.
        print(f"反卷积层输入 z:{z.shape}")
        x = deconv1d(
            z,
            filters=self.cfg['x_dim'],
            kernel_size=self.cfg['pool_size'],  # mirrors the encoder pooling window
            strides=self.cfg['strides'],        # mirrors the encoder pooling stride
            padding='valid',
            activation='relu',
            kernel_regularizer=regularizers.l2(self.cfg['l2_reg'])
        )
        print(f"反卷积层输出 x:{x.shape}")

        # Final channel adjustment (match the original input channel count).
        px_mean = conv1d(x,
                        kernel_size=1,
                        filters=self.cfg['x_dim'],
                        activation='linear',  # no activation: raw mean output
                        kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))
        px_logstd = conv1d(x,
                        kernel_size=1,
                        filters=self.cfg['x_dim'],
                        activation='linear',
                        kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))
        # Clip log-std to a configured range for numerical stability.
        px_logstd = tf.clip_by_value(px_logstd,
                                    clip_value_min=self.cfg['logstd_min'],
                                    clip_value_max=self.cfg['logstd_max'])

        return px_mean, px_logstd

    def time_feature_extraction(self, x):
        '''
        Time-domain feature extraction (TFM).
        '''

        residual = x  # keep the raw input for the residual connection

        # BatchNorm.
        x = tf.keras.layers.BatchNormalization()(x)

        # 1x1 convolution to adjust the channel count.
        x = conv1d(x,
                kernel_size=1,
                filters=self.cfg['adjusted_channels'],
                strides=1,
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Multi-scale feature extraction.
        # Short-scale features (kernel_size=3).
        t_short = x
        for _ in range(self.cfg['short_scale_layers']):
            t_short = conv1d(t_short,
                            kernel_size=3,
                            filters=self.cfg['short_scale_filters'],
                            strides=1,
                            activation='relu',
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Long-scale features (kernel_size=15).
        t_long = x
        for _ in range(self.cfg['long_scale_layers']):
            t_long = conv1d(t_long,
                            kernel_size=15,
                            filters=self.cfg['long_scale_filters'],
                            strides=1,
                            activation='relu',
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Concatenate the multi-scale features.
        concat = tf.concat([t_short, t_long], axis=-1)

        # 1x1 convolution to integrate the features.
        concat = conv1d(concat,
                        kernel_size=1,
                        filters=self.cfg['final_channels'],
                        strides=1,
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Residual connection (project the skip path if channels differ).
        if residual.shape[-1] != self.cfg['final_channels']:
            residual = conv1d(residual,
                            kernel_size=1,
                            filters=self.cfg['final_channels'],
                            strides=1,
                            activation='linear',  # no activation
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))
        output = tf.keras.layers.Add()([residual, concat])

        return output

    def frequency_feature_extraction(self, x):
        '''
        Frequency-domain feature extraction (FFM).
        '''

        # Apply the Fourier transform to each time series.
        # tf.signal.fft requires complex input, so cast the data first.
        complex_data = tf.complex(x, tf.zeros_like(x))

        # Fourier transform.
        frequency_domain_data = tf.signal.fft(complex_data)

        # Extract real and imaginary parts.
        real_part = tf.math.real(frequency_domain_data)
        imag_part = tf.math.imag(frequency_domain_data)
        print(f"频域实部维度 real_part:{real_part.shape}")
        print(f"频域虚部维度 imag_part:{imag_part.shape}")

        # Concatenate real and imaginary parts along the feature axis.
        x = tf.concat([real_part, imag_part], axis=-1)
        print(f"拼接后的维度 concat:{x.shape}")

        residual = x  # keep this input for the residual connection

        # BatchNorm.
        x = tf.keras.layers.BatchNormalization()(x)

        # 1x1 convolution to adjust the channel count.
        x = conv1d(x,
                kernel_size=1,
                filters=self.cfg['adjusted_channels'],
                strides=1,
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Multi-scale feature extraction.
        # Short-scale features (kernel_size=3).
        t_short = x
        for _ in range(self.cfg['short_scale_layers']):
            t_short = conv1d(t_short,
                            kernel_size=3,
                            filters=self.cfg['short_scale_filters'],
                            strides=1,
                            activation='relu',
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Long-scale features (kernel_size=15).
        t_long = x
        for _ in range(self.cfg['long_scale_layers']):
            t_long = conv1d(t_long,
                            kernel_size=15,
                            filters=self.cfg['long_scale_filters'],
                            strides=1,
                            activation='relu',
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Concatenate the multi-scale features.
        concat = tf.concat([t_short, t_long], axis=-1)

        # 1x1 convolution to integrate the features.
        concat = conv1d(concat,
                        kernel_size=1,
                        filters=self.cfg['final_channels'],
                        strides=1,
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))

        # Residual connection (project the skip path if channels differ).
        if residual.shape[-1] != self.cfg['final_channels']:
            residual = conv1d(residual,
                            kernel_size=1,
                            filters=self.cfg['final_channels'],
                            strides=1,
                            activation='linear',  # no activation
                            padding='same',
                            kernel_regularizer=regularizers.l2(self.cfg['l2_reg']))
        output = tf.keras.layers.Add()([residual, concat])

        return output

    def sgvb_loss(self, inputs, px_dist, qz_samples, prior_dist, posterior_dist):
        """
        SGVB objective: maximise ELBO = E_q(z|x)[logpx_z + logpz - logqz_x].

        inputs: ground-truth input data
        px_dist: generative distribution, used for the reconstruction term
        qz_samples: latent samples drawn from the posterior
        prior_dist: latent prior distribution
        posterior_dist: latent posterior distribution

        Returns (negative weighted ELBO, mean reconstruction term, mean KL).
        """
        # Reconstruction term: log-likelihood of ``inputs`` given latent z.
        logpx_z = px_dist.log_prob(inputs)
        # Log-likelihood of the latent samples under the prior.
        logpz = prior_dist.log_prob(qz_samples)
        # Log-likelihood of the latent samples under the posterior.
        logqz_x = posterior_dist.log_prob(qz_samples)
        # Mean reconstruction term.
        recons_term = tf.reduce_mean(logpx_z)
        # KL divergence between the posterior and the prior.
        kl_term = tf.reduce_mean(logqz_x - logpz)
        # Return the negative beta-weighted ELBO (minimising it maximises
        # the ELBO) plus the mean reconstruction and KL terms.
        # KL weight was originally 0.2.
        return -tf.reduce_mean(logpx_z + 0.2 * (logpz - logqz_x)), recons_term, kl_term

    def get_config(self):
        # Current model configuration (for Keras serialization).
        config = {"cfg": self.cfg}
        return config

    @classmethod
    def from_config(cls, config):
        # Alternate constructor used by Keras deserialization.
        return cls(**config)

    @tf.function
    def train_step(self, inputs):
        '''
        One training step for the VAE (custom ``Model.train_step``).
        '''
        with tf.GradientTape() as encoder_tape, tf.GradientTape() as decoder_tape:
            # Encoder produces the latent mean and log-std.
            qz_mean, qz_logstd = self.encoder(inputs, training=True)

            # Build the posterior and draw latent samples from it.
            posterior_dist = Normal(mean=qz_mean, logstd=qz_logstd, group_ndims=2)
            qz_samples = posterior_dist.sample()

            # Standard-normal prior (zero mean, zero log-std).
            prior_dist = Normal(mean=tf.zeros([self.cfg['z_dim'], self.cfg['x_dim']]),
                                logstd=tf.zeros([self.cfg['z_dim'], self.cfg['x_dim']]),
                                group_ndims=2)

            # Decode the latent samples into reconstruction mean/log-std.
            px_mean, px_logstd = self.decoder(qz_samples, training=True)

            # Reconstruction distribution of x.
            px_dist = Normal(mean=px_mean, logstd=px_logstd, group_ndims=2)

            # Compute the SGVB loss.
            loss, recons_term, kl_term = self.sgvb_loss(inputs, px_dist, qz_samples, prior_dist, posterior_dist)

            # Add encoder/decoder regularization losses (against overfitting).
            loss += tf.add_n(self.encoder.losses)
            loss += tf.add_n(self.decoder.losses)

        # Compute gradients.
        gradients_of_enc = encoder_tape.gradient(loss, self.encoder.trainable_variables)
        gradients_of_dec = decoder_tape.gradient(loss, self.decoder.trainable_variables)
        # Clip gradient norms to 10 to prevent exploding gradients.
        grads_of_enc = [tf.clip_by_norm(g, 10) for g in gradients_of_enc]
        grads_of_dec = [tf.clip_by_norm(g, 10) for g in gradients_of_dec]
        # Apply the clipped gradients to encoder and decoder variables.
        self.optimizer.apply_gradients(zip(grads_of_enc, self.encoder.trainable_variables))
        self.optimizer.apply_gradients(zip(grads_of_dec, self.decoder.trainable_variables))
        # Report loss components.
        return {'loss': loss, 'recons': recons_term, 'kl': kl_term}

    @tf.function
    def test_step(self, inputs):
        '''
        One evaluation step for the VAE (custom ``Model.test_step``).
        '''

        # Sample-count sanity check (trace-time).
        if self.cfg['n_samples'] <= 1:
            raise ValueError('Number of samples drawing from latent '
                             'representation must be larger than 1 '
                             'current is {}'.format(self.cfg['n_samples']))

        # Latent mean and log-std from the encoder.
        qz_mean, qz_logstd = self.encoder(inputs, training=False)

        # Build the posterior and draw n_samples latent samples.
        posterior_dist = Normal(mean=qz_mean, logstd=qz_logstd, group_ndims=2)
        qz_samples = posterior_dist.sample(self.cfg['n_samples'])

        # Standard-normal prior (zero mean, zero log-std).
        prior_dist = Normal(mean=tf.zeros([self.cfg['z_dim'], self.cfg['x_dim']]),
                            logstd=tf.zeros([self.cfg['z_dim'], self.cfg['x_dim']]),
                            group_ndims=2)

        # Flatten qz_samples to 3-D for the decoder.
        hidden, static_front_shape, front_shape = flatten_to_ndims(qz_samples, 3)
        # Decode, then restore the original leading dimensions.
        px_mean, px_logstd = self.decoder(hidden, training=False)
        px_mean = unflatten_from_ndims(px_mean, static_front_shape, front_shape)
        px_logstd = unflatten_from_ndims(px_logstd, static_front_shape, front_shape)

        # Reconstruction distribution of x.
        px_dist = Normal(mean=px_mean, logstd=px_logstd, group_ndims=2)

        # Compute the SGVB loss.
        loss, recons_term, kl_term = self.sgvb_loss(inputs, px_dist, qz_samples, prior_dist, posterior_dist)
        # Report loss components.
        return {'loss': loss, 'recons': recons_term, 'kl': kl_term}

    @tf.function
    def call(self, inputs):
        """
        Predict for a batch of data (used by ``predict()``).

        Returns the mean reconstruction and the mean reconstruction
        log-probability of the last time step.
        """
        # Parameter check (trace-time).
        if self.cfg['n_samples'] <= 1:
            raise ValueError('Number of samples drawing from latent '
                             'representation must be larger than 1 '
                             'current is {}'.format(self.cfg['n_samples']))

        # Latent mean qz_mean and log-std qz_logstd from the encoder.
        qz_mean, qz_logstd = self.encoder(inputs, training=False)

        # Build the posterior and draw n_samples latent samples.
        posterior_dist = Normal(mean=qz_mean, logstd=qz_logstd, group_ndims=2)
        qz_samples = posterior_dist.sample(self.cfg['n_samples'])

        # Decode the samples into reconstruction mean/log-std
        # (flatten to 3-D for the decoder, then restore the shape).
        hidden, static_front_shape, front_shape = flatten_to_ndims(qz_samples, 3)
        px_mean, px_logstd = self.decoder(hidden, training=False)
        px_mean = unflatten_from_ndims(px_mean, static_front_shape, front_shape)
        px_logstd = unflatten_from_ndims(px_logstd, static_front_shape, front_shape)

        # Reconstruction distribution.
        px_dist = Normal(mean=px_mean, logstd=px_logstd, group_ndims=2)
        # Log-probability of the inputs under the reconstruction
        # distribution (use_group_ndims=False: no group reduction).
        px_log_prob = px_dist.log_prob(inputs, use_group_ndims=False)

        # Average mean and log-probability over the sample dimension.
        px_mean = tf.reduce_mean(px_mean, axis=0)
        px_log_prob = tf.reduce_mean(px_log_prob, axis=0)

        # Return both, restricted to the last time step.
        return px_mean[:, -1, :], px_log_prob[:, -1, :]

    def evaluate(self, *args, **kwargs):
        """Evaluate the model, reporting epoch-*averaged* losses.

        Keras' default ``evaluate`` reflects the last batch; when
        AverageLossCallback has populated ``average_epoch_eval`` those
        averaged values are returned instead.  Accepts positional
        arguments too (the original ``**kwargs``-only signature rejected
        the common ``model.evaluate(x, ...)`` call form).
        """
        val_logs = super().evaluate(*args, **kwargs)

        if self.average_epoch_eval:
            val_logs = self.average_epoch_eval.copy()
        return val_logs

    @tf.function
    def _sampling_from_x(self, batch_inputs):
        '''
        Reconstruct the input by one encode-sample-decode-sample pass.
        '''
        # Latent mean and log-std from the encoder.
        qz_mean, qz_logstd = self.encoder(batch_inputs, training=False)

        # Build the posterior and draw a single latent sample.
        posterior_dist = Normal(mean=qz_mean, logstd=qz_logstd, group_ndims=2)
        qz_samples = posterior_dist.sample()  # n_samples=1

        # Reconstruction mean and log-std from the decoder.
        px_mean, px_logstd = self.decoder(qz_samples, training=False)

        # Reconstruction distribution.
        px_dist = Normal(mean=px_mean, logstd=px_logstd, group_ndims=2)

        # Draw reconstructed samples x_samples from px_dist.
        x_samples = px_dist.sample()

        # Return the reconstructed samples.
        return x_samples

    @tf.function
    def _reconstruct_x_with_mcmc_recons(self, batch_inputs, recons):
        '''
        Score the original inputs against the MCMC-reconstructed samples.

        batch_inputs: the original input batch
        recons: the MCMC-reconstructed samples
        '''
        # Encode the reconstructed samples into qz_mean / qz_logstd.
        qz_mean, qz_logstd = self.encoder(recons, training=False)

        # Build the posterior and draw n_samples latent samples.
        posterior_dist = Normal(mean=qz_mean, logstd=qz_logstd, group_ndims=2)
        qz_samples = posterior_dist.sample(self.cfg['n_samples'])

        # Flatten qz_samples to 3-D for the decoder.
        hidden, static_front_shape, front_shape = flatten_to_ndims(qz_samples, 3)
        # Decode into reconstruction mean px_mean and log-std px_logstd.
        px_mean, px_logstd = self.decoder(hidden, training=False)
        # Restore the original leading dimensions.
        px_mean = unflatten_from_ndims(px_mean, static_front_shape, front_shape)
        px_logstd = unflatten_from_ndims(px_logstd, static_front_shape, front_shape)

        # Distribution over the input data.
        px_dist = Normal(mean=px_mean, logstd=px_logstd, group_ndims=2)
        # Log-probability of batch_inputs under px_dist.
        px_log_prob = px_dist.log_prob(batch_inputs, use_group_ndims=False)

        # Average mean and log-probability over the sample dimension.
        px_mean = tf.reduce_mean(px_mean, axis=0)
        px_log_prob = tf.reduce_mean(px_log_prob, axis=0)
        # Return the averaged reconstruction and log-probability.
        return px_mean, px_log_prob

    def mcmc_reconstruct(self, inputs, n_mc_chain=10, mcmc_iter=10, get_last_obser=True):
        '''
        MCMC-based reconstruction.

        inputs: batched input data
        n_mc_chain: number of MCMC chains (default 10)
        mcmc_iter: number of MCMC iterations (default 10)
        get_last_obser: only keep the last time step's results (default True)
        '''
        # Result accumulators.
        px_means = []
        px_log_probs = []

        # Iterate over batches with a tqdm progress bar.
        for batch_inputs in tqdm(inputs):
            # Mask with the same shape as batch_inputs: ones at the last
            # time step only (that point is the MCMC-resampled target).
            mask = np.zeros(batch_inputs.shape)
            mask[:, -1, :] = 1
            # With more than one chain, tile the inputs per chain.
            if n_mc_chain > 1:
                expand_inputs = tf.expand_dims(batch_inputs, 1)
                tiled_inputs = tf.tile(expand_inputs, [1, n_mc_chain, 1, 1])
                flatten_inputs, static_front_shape, front_shape = flatten_to_ndims(tiled_inputs, 3)
                expand_mask = tf.expand_dims(mask, 1)
                tiled_mask = tf.tile(expand_mask, [1, n_mc_chain, 1, 1])
                flatten_mask, _, _ = flatten_to_ndims(tiled_mask, 3)
                # MCMC sampling loop.
                flatten_x_recons = None
                for i in range(mcmc_iter):
                    # First iteration samples from the inputs; subsequent
                    # iterations resample from the previous reconstruction.
                    if flatten_x_recons is None:
                        flatten_x_recons = self._sampling_from_x(flatten_inputs)
                    else:
                        flatten_x_recons = self._sampling_from_x(flatten_x_recons)
                    # Keep the sampled value only where the mask is set
                    # (the last time step); restore every other position to
                    # the original observed input.
                    flatten_x_recons = tf.where(tf.cast(flatten_mask, dtype=tf.bool), flatten_x_recons, flatten_inputs)
                # Restore the flattened result to its multi-chain shape and
                # average over the chains.
                x_mcmc = unflatten_from_ndims(flatten_x_recons, static_front_shape, front_shape)
                x_mcmc = tf.reduce_mean(x_mcmc, axis=1)
            else:
                # Single chain: sample directly and apply the mask.
                x_mcmc = self._sampling_from_x(batch_inputs)
                x_mcmc = tf.where(tf.cast(mask, dtype=tf.bool), x_mcmc, batch_inputs)

            # Score the original batch against the MCMC reconstruction.
            px_mean, px_log_prob = self._reconstruct_x_with_mcmc_recons(batch_inputs, x_mcmc)

            # Keep either the last time step only or the full results,
            # depending on get_last_obser.
            if get_last_obser:
                px_means.append(px_mean.numpy()[:, -1, :])
                px_log_probs.append(px_log_prob.numpy()[:, -1, :])
            else:
                px_means.append(px_mean.numpy())
                px_log_probs.append(px_log_prob.numpy())

        # Concatenate all batch results and return them.
        px_means = np.concatenate(px_means, axis=0)
        px_log_probs = np.concatenate(px_log_probs, axis=0)

        return px_means, px_log_probs

    def compile(self, optimizer, **kwargs):
        '''
        Compile the model and store the optimizer used by train_step.
        '''
        super().compile(**kwargs)
        # Set the optimizer.
        self.optimizer = optimizer

    def calculate_anomaly_scores(self, inputs, get_last_obser=True, batch_size=50):
        '''
        Compute anomaly scores.

        inputs: input data
        get_last_obser: keep only the last observation (default True)
        batch_size: batch size used when batching raw inputs (default 50)
        '''
        # NOTE(review): this string-based type check is fragile -- an
        # unbatched tf.data.Dataset would be re-sliced incorrectly here.
        if 'BatchDataset' not in str(type(inputs)):
            # Not already a batched dataset: wrap and batch the raw inputs.
            inputs = Dataset.from_tensor_slices(inputs).batch(batch_size)
        # MCMC reconstruction with the configured chain/iteration counts.
        px_means, px_log_probs = self.mcmc_reconstruct(inputs, self.cfg['n_mc_chain'], self.cfg['mcmc_iter'], get_last_obser)
        # Anomaly score: sum of log-probabilities over the feature axis.
        anomaly_scores = np.sum(px_log_probs, axis=-1)
        return px_means, anomaly_scores


class AverageLossCallback(Callback):
    '''
    Callback that records every test batch's metrics and, when testing
    ends, stores the per-metric averages in ``model.average_epoch_eval``
    so the model can report epoch-averaged losses instead of the last
    batch's values.
    '''

    def __init__(self):
        super().__init__()
        # Metric name -> list of per-batch values.  Initialised here (and
        # reset in on_test_begin) so a standalone ``model.evaluate(...)``
        # call -- which never fires on_epoch_begin -- does not crash with
        # AttributeError in on_test_batch_end.
        self.epoch_losses_eval = {}

    def on_test_batch_end(self, epoch, logs=None):
        '''
        Called after each test batch.

        epoch: batch index supplied by Keras (name kept for compatibility)
        logs: dict of metric name -> value for this batch
        '''
        if logs is not None:
            # Append each metric's value to its running list.
            for metric, value in logs.items():
                self.epoch_losses_eval.setdefault(metric, []).append(value)

    def on_epoch_begin(self, epoch, logs=None):
        '''
        Reset the accumulated losses at the start of each training epoch.
        '''
        self.epoch_losses_eval = {}

    def on_test_begin(self, logs=None):
        '''
        Reset at the start of every evaluation run as well, so repeated
        standalone evaluate() calls do not accumulate across runs.
        '''
        self.epoch_losses_eval = {}

    def on_test_end(self, logs=None):
        '''
        Called when testing finishes: average each metric's values and
        publish the result on ``self.model.average_epoch_eval``.
        '''
        avg_losses_eval = dict()
        if self.epoch_losses_eval:
            for metric, values in self.epoch_losses_eval.items():
                avg_losses_eval[metric] = np.mean(values)
            self.model.average_epoch_eval = avg_losses_eval