# This file is used to build the DeepMTT model by using tensorflow 2.17
import numpy as np
import scipy.io as scio
import tensorflow as tf
import random as rd
from tensorflow.keras import layers, callbacks, Input, initializers, regularizers, models, utils, optimizers
from tensorflow.keras.metrics import RootMeanSquaredError

import os
import datetime
from batchdata_derive3 import *
from custome_activations import noisy_activation

def piecewise(Xt):
    """Piecewise-linear activation that clamps Xt elementwise into [-1, 1].

    Implemented as relu(Xt + 1) - relu(Xt - 1) - 1, which equals
    max(-1, min(1, Xt)).
    """
    upper = tf.nn.relu(Xt + 1)
    lower = tf.nn.relu(Xt - 1)
    return upper - lower - 1

@tf.function
def tf_preprocess_data(data):
    """Normalize each sample by its global maximum absolute value (TF ops, batched).

    Args:
        data: tensor of shape [batch_size, time_steps, features].

    Returns:
        Tuple (normalized_data, weights) where weights holds the per-sample
        max |value| reshaped to [batch_size, 1, 1] for broadcasting.
    """
    data = tf.cast(data, tf.float64)

    # Per-sample maximum absolute value across time and feature axes,
    # reshaped so it broadcasts against [batch, time, features].
    scale = tf.reduce_max(tf.abs(data), axis=[1, 2])
    scale = tf.reshape(scale, (-1, 1, 1))

    return data / scale, scale

def preprocess_data(data):
    """Normalize trajectories by the max |value| of each sample's first time step.

    NOTE(review): unlike tf_preprocess_data, the scale here comes from time
    step 0 only — confirm this asymmetry is intentional.

    Args:
        data: array of shape [batch_size, time_steps, features].

    Returns:
        Tuple (normalized_data, weights); weights has shape [batch_size, 1, 1].
    """
    first_step_max = np.abs(data[:, 0, :]).max(axis=1)
    weights = first_step_max.reshape(-1, 1, 1)
    return data / weights, weights


class maxout_activation(layers.Layer):
    """Maxout activation layer: splits the channel axis into `num_units`
    groups and keeps the maximum within each group.

    For an input with C channels, C must be a multiple of num_units and the
    output has num_units channels. In this file the layer is applied to a
    Dense(64) output with num_units=64, i.e. group size 1, which makes it
    the identity — same as the previous hard-coded [*, *, 64, 1] reshape.
    """

    def __init__(self, num_units=64, axis=None, **kwargs):
        super(maxout_activation, self).__init__(**kwargs)
        self.num_units = num_units
        self.axis = axis  # kept for config compatibility; channels assumed last

    def call(self, inputs):
        # Batch and time dimensions are dynamic; the group size is inferred
        # with -1 so the layer works for any channel count that is a
        # multiple of num_units (the original hard-coded 64 channels with
        # a group size of 1, ignoring self.num_units).
        input_shape = tf.shape(inputs)
        batch_size = input_shape[0]
        time_steps = input_shape[1]
        grouped = tf.reshape(inputs, [batch_size, time_steps, self.num_units, -1])
        return tf.reduce_max(grouped, -1)

    def get_config(self):
        config = super(maxout_activation, self).get_config()
        config.update({'num_units': self.num_units, 'axis': self.axis})
        return config


# Build the DeepMTT network model

# Configure GPU/CPU mode
def device_config():
    """Configure compute devices: enable memory growth on every visible GPU
    (or fall back to CPU) and turn on XLA JIT compilation."""
    print("设置GPU/CPU模式...")
    gpus = tf.config.experimental.list_physical_devices('GPU')  # get gpus list
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            # Print once after the loop — the original printed this message
            # once per GPU.
            print(f"找到{len(gpus)}个GPU设备并启用内存增长模式")
        except RuntimeError as e:
            # Memory growth must be set before the GPUs are initialized.
            print(f"GPU内存增长模式设置失败: {e}")
    else:
        print("没有找到GPU设备，使用CPU进行训练")
    print("启用XLA加速...")
    tf.config.optimizer.set_jit(True)  # enable XLA acceleration
    print("设备配置已全部完成")

# Hyperparameter settings
# RNN network parameters
my_learning_rate = 1e-5  # learning rate
input_size = 4  # input feature dimension: each time step of a trajectory is 4-D, i.e. (batch_size, time_size, input_size), one row being (px, py, vx, vy)
time_step_size = 50  # number of time steps per input sample (rows per batch sample)
hidden_size = 64  # hidden layer size
layer_num = 3  # number of LSTM layers
hidden_size_1 = 128  # hidden units of the 1st LSTM layer
hidden_size_2 = 256  # hidden units of the 2nd LSTM layer
hidden_size_3 = 256  # hidden units of the 3rd LSTM layer
max_out_size = 64  # output size of the max-out layer
output_size = 4     # output vector dimension; same shape as the input, but the network outputs residuals
lambda1 = 0.003  # L2 regularization coefficient
fir_size = 5  # FIR filtering layer order (kernel size)
EPOCHS = 100  # number of training epochs
INIT_POSITION_NOISE = 30
INIT_VELOCITY_NOISE = 3
# Build the DeepMTT model
# Define the input function

def deepmtt_build():
    """Assemble the DeepMTT network with the Keras functional API.

    Architecture: FIR filtering layer (depthwise 1-D conv initialised as a
    moving average) -> three bidirectional LSTM layers -> max-out layer ->
    linear output producing a 4-D residual per time step.

    The freshly built (untrained) model is also saved to
    'my_deepMTT_model.keras' before being returned.

    Returns:
        The uncompiled keras Model.
    """
    traj_input = Input(shape=(time_step_size, input_size))

    # FIR filtering layer: per-channel moving-average initialisation.
    x = layers.DepthwiseConv1D(
        kernel_size=fir_size,
        strides=1,
        padding='same',
        depth_multiplier=1,
        depthwise_initializer=initializers.Constant(value=1.0 / fir_size),
        data_format='channels_last',
        activation=None,
        use_bias=False,
    )(traj_input)

    # Three stacked bidirectional LSTM layers (concat merge); the first two
    # use the custom noisy activation, the third a plain tanh.
    lstm_configs = (
        (hidden_size_1, noisy_activation),
        (hidden_size_2, noisy_activation),
        (hidden_size_3, 'tanh'),
    )
    for units, act in lstm_configs:
        x = layers.Bidirectional(
            layers.LSTM(units=units,
                        return_sequences=True,
                        activation=act,
                        unit_forget_bias=True),
            merge_mode='concat')(x)

    # Max-out layer.
    x = layers.Dense(
        units=max_out_size,
        activation=maxout_activation(num_units=max_out_size, axis=2),
        kernel_regularizer=regularizers.l2(lambda1),
        use_bias=True,
        kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1, seed=None),
        bias_initializer=initializers.Constant(value=0.1),
    )(x)

    # Linear output layer: per-step residuals with the input's dimensionality.
    residuals = layers.Dense(
        units=output_size,
        activation=None,
        kernel_regularizer=regularizers.l2(lambda1),
        use_bias=True,
        kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1, seed=None),
        bias_initializer=initializers.Constant(value=0.1),
    )(x)

    # Explicitly bind inputs and outputs into the model.
    DeepMTT_model = models.Model(inputs=traj_input, outputs=residuals)
    DeepMTT_model.save('my_deepMTT_model.keras')
    return DeepMTT_model

# Optimizer note: the original paper uses minibatch gradient descent (MBGD);
# Adam and RMSprop are essentially refined minibatch methods, so either can be chosen here.
# The original paper uses RMSE as the loss function; MSE is used here instead.
# TensorBoard is used to record the training process.
# log_dir = "logs/fit/" + time.strftime("%Y%m%d-%H%M%S")
# tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)


class TrajectoryDataGenerator(utils.Sequence):
    """Keras Sequence serving normalized trajectory batches for DeepMTT.

    Inputs are estimated trajectories normalized per sample by the maximum
    absolute value over all time steps and features; targets are the
    corresponding error trajectories. A "training" generator regenerates a
    fresh data pool every `update_frequency` epochs; a "validation"
    generator only reshuffles its fixed pool.
    """

    def __init__(self, pos_noise=30, vel_noise=3, update_frequency=5, batch_size=256, batch_num=1000, generator_type="training"):
        # Keras 3 (bundled with TF 2.17) requires Sequence/PyDataset
        # subclasses to call the parent constructor.
        super().__init__()
        self.pos_noise = pos_noise
        self.vel_noise = vel_noise
        self.update_frequency = update_frequency    # regenerate data every update_frequency epochs
        self.epoch_counter = 0
        self.batch_size = batch_size
        self.batch_num = batch_num
        self.generator_type = generator_type  # distinguishes training vs validation generators
        self.last_epoch = -1    # epoch count of the last processed on_epoch_end call
        self._num_samples = batch_num * batch_size  # batch_num batches of batch_size trajectories each

        # Declare data holders before the first generation pass.
        self.ori_traj = None
        self.est_traj = None
        self.error_traj = None
        self.processed_inputs = None
        self.indices = None

        # First call: generate and preprocess the initial data pool.
        print(f"正在初始化{self.generator_type}数据生成器...")
        self._generate_data()
        self._preprocess_all_data()

        # Index shuffling already happened inside _preprocess_all_data().
        print(f"{self.generator_type}数据生成器初始化完成（__init__函数运行结束）")

    def _generate_data(self):
        """Generate a fresh (un-normalized) pool of training trajectories."""
        # Jitter the noise levels within 80%-120% of their nominal values.
        current_pos_noise = self.pos_noise * (0.8 + 0.4 * np.random.random())
        current_vel_noise = self.vel_noise * (0.8 + 0.4 * np.random.random())

        print(f"正在生成源数据（未归一化）：pos_noise={current_pos_noise:.2f}, vel_noise={current_vel_noise:.2f}")

        self.ori_traj, _, self.est_traj, self.error_traj = create_all_data(batch_size=self.batch_size,
                                                                           data_len=50,
                                                                           ini_pos_noise=current_pos_noise,
                                                                           ini_vel_noise=current_vel_noise,
                                                                           batch_num=self.batch_num)
        print(f"数据生成完成，数据集大小：{self._num_samples}个样本")
        print("-------------------------------------------------------------")

    def _preprocess_all_data(self):
        """Normalize all samples once and shuffle the access indices."""
        print("正在预处理所有数据...")
        if self.est_traj is None or self.error_traj is None:
            raise ValueError("数据还未生成，没有数据可供预处理")

        # float32 halves memory use versus float64 and speeds up training.
        self.est_traj = self.est_traj.astype(np.float32)
        self.error_traj = self.error_traj.astype(np.float32)

        # Per-sample weight: max |value| over ALL time steps and features,
        # reshaped to [num_samples, 1, 1] for broadcasting.
        weights = np.max(np.abs(self.est_traj), axis=(1, 2))
        weights = weights.reshape(-1, 1, 1)

        # Normalize every trajectory by its own weight.
        self.processed_inputs = self.est_traj / weights

        # Shuffle sample order.
        self.indices = np.arange(len(self.processed_inputs))
        np.random.shuffle(self.indices)

        print(f"数据预处理（归一化+打乱顺序）完成，数据集大小: {len(self.processed_inputs)}个样本")
        print("##################################################################################")

    def __len__(self):
        """Number of batches per epoch."""
        return self.batch_num

    def __getitem__(self, idx):
        """Return one (inputs, targets) batch.

        The original implementation gathered the batch twice (once into
        batch_x/batch_y, then again in the return statement); gather once.
        """
        start_idx = idx * self.batch_size
        end_idx = min(start_idx + self.batch_size, len(self.processed_inputs))
        batch_indices = self.indices[start_idx:end_idx]
        return self.processed_inputs[batch_indices], self.error_traj[batch_indices]

    def on_epoch_end(self):
        """Invoked by Keras at the end of every epoch."""
        # Validation generators never regenerate data; only reshuffle.
        if self.generator_type == "validation":
            np.random.shuffle(self.indices)
            print(f"\n当前时刻为每一epoch训练结束后的验证阶段，仅打乱数据顺序")
            return
        if self.generator_type == "training":
            current_epoch = self.epoch_counter
            self.epoch_counter += 1

            # NOTE(review): this duplicate-call guard is ineffective —
            # epoch_counter is incremented on every call, so current_epoch
            # can never equal last_epoch; a duplicate invocation cannot be
            # detected from an internal counter alone. Left as-is to
            # preserve behavior.
            if current_epoch == self.last_epoch:
                print(f"检测到重复调用 on_epoch_end （epoch={current_epoch}），忽略...")
                return

            self.last_epoch = current_epoch

            print(f"\n完成第 {self.epoch_counter} 个epoch训练")
            # Reshuffle the sample order for the next epoch.
            np.random.shuffle(self.indices)

            # Regenerate the data pool every update_frequency epochs.
            if self.epoch_counter % self.update_frequency == 0:
                print(f"达到更新周期（{self.update_frequency}个epoch），生成新数据")
                self._generate_data()
                self._preprocess_all_data()
            else:
                print(f"当前epoch={self.epoch_counter}，未达到更新更新周期（{self.update_frequency}个epoch），仅打乱数据顺序，为下一epoch训练做准备")

def create_tf_dataset(batch_size=64, buffer_size=10000, update_frequency=5):
    """Build a tf.data pipeline that streams normalized trajectory samples.

    The underlying Python generator regenerates a fresh pool of trajectories
    on the first epoch and on every epoch divisible by `update_frequency`,
    normalizing each sample by its max |value| over all time steps.

    Returns:
        A shuffled, batched, prefetched tf.data.Dataset of
        (input, error) pairs, each element shaped (50, 4) float32.
    """

    def sample_stream():
        """Yield (input, target) pairs indefinitely, refreshing the pool periodically."""
        epoch = 0
        pool = None

        while True:  # produce samples forever
            epoch += 1

            # Regenerate the pool on the first epoch and every
            # update_frequency-th epoch thereafter.
            if epoch == 1 or epoch % update_frequency == 0:
                # Jitter the noise levels within 80%-120% of nominal.
                pos_noise = 30 * (0.8 + 0.4 * np.random.random())
                vel_noise = 3 * (0.8 + 0.4 * np.random.random())

                # Always generate a fixed 1000 batches of trajectories.
                ori_traj, _, est_traj, error_traj = create_all_data(
                    batch_size=batch_size,
                    data_len=50,
                    ini_pos_noise=pos_noise,
                    ini_vel_noise=vel_noise,
                    batch_num=1000)

                est_traj = est_traj.astype(np.float32)
                error_traj = error_traj.astype(np.float32)

                # Per-sample normalization by the max |value| over all
                # time steps and features.
                scale = np.max(np.abs(est_traj), axis=(1, 2)).reshape(-1, 1, 1)
                est_traj = est_traj / scale

                # Pair inputs with targets and shuffle in place.
                pool = list(zip(est_traj, error_traj))
                np.random.shuffle(pool)

            # Emit every sample of the current pool for this epoch.
            yield from pool

    dataset = tf.data.Dataset.from_generator(
        sample_stream,
        output_signature=(
            tf.TensorSpec(shape=(50, 4), dtype=tf.float32),  # normalized inputs
            tf.TensorSpec(shape=(50, 4), dtype=tf.float32)   # error targets
        )
    )

    # Pipeline optimizations: shuffle, batch, prefetch.
    return (dataset
            .shuffle(buffer_size)
            .batch(batch_size)
            .prefetch(tf.data.experimental.AUTOTUNE))


def train():
    """Resume training the DeepMTT model from a saved checkpoint.

    NOTE(review): the checkpoint path and initial_epoch=87 are hard-coded to
    a specific earlier run — update both (or switch to the commented-out
    fresh-build path) when training from scratch or resuming elsewhere.
    """
    device_config()
    # Load the previously saved model; the custom maxout layer must be passed
    # so deserialization can reconstruct it.
    DeepMTT_model = models.load_model('/root/lanyun-tmp/MTT419/tmp/ckpt/DeepMTT_086-455.93-425.00.keras', custom_objects={'maxout_activation': maxout_activation})
    # Alternative: build and compile a fresh model instead of resuming.
    # DeepMTT_model = deepmtt_build()
    # DeepMTT_model.summary()
    # DeepMTT_model.compile(optimizer=optimizers.Adam(learning_rate=my_learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
    #                       loss='mse',  # minimizing MSE also minimizes RMSE, but their gradients differ; MSE's gradient is simpler and more stable, especially near zero error.
    #                       metrics=[RootMeanSquaredError(name='rmse')]
    #                       )
    train_generator = TrajectoryDataGenerator()
    val_generator = TrajectoryDataGenerator(update_frequency=9999, generator_type="validation")  # the validation generator never regenerates its data
    checkpoint_filepath = '/root/lanyun-tmp/MTT419/tmp/ckpt/DeepMTT_{epoch:03d}-{loss:.2f}-{val_loss:.2f}.keras'    # full-model checkpoint path pattern
    os.makedirs(os.path.dirname(checkpoint_filepath), exist_ok=True)  # create the checkpoint directory
    log_dir = "/root/lanyun-tmp/MTT419/logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    print(f"开始训练，模型会被保存到： {os.path.dirname(checkpoint_filepath)}")
    print(f"TensorBoard日志文件会被保存到: {os.path.dirname(log_dir)}")
    callbacks_list = [
                # model checkpoint callback
                callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
                                          monitor='val_loss',
                                          verbose=1,  # print a message when the callback saves
                                          save_best_only=False,  # keep every epoch's model, not just the best
                                          mode='min',  # lower monitored loss is better
                                          save_weights_only=False,  # save the full model, not weights only
                                          save_freq='epoch'),
                # TensorBoard callback
                callbacks.TensorBoard(
                          log_dir=log_dir,
                          histogram_freq=0,  # skip weight/activation histograms
                          write_graph=False,  # do not write the computation graph to TensorBoard
                          write_images=False,
                          write_steps_per_second=False,
                          update_freq='epoch',  # log training metrics and losses once per epoch
                          profile_batch='100,120',  # profile performance on batches 100-120
                          embeddings_freq=0
                      ),
                # early stopping
                callbacks.EarlyStopping(
                    monitor='val_loss',  # watch validation loss
                    min_delta=0,  # minimum change that counts as an improvement; smaller absolute changes are treated as no improvement
                    patience=10,  # stop after this many epochs without improvement
                    verbose=1,  # print a message when training stops
                    mode='min',  # the monitored metric should decrease
                    restore_best_weights=True,  # restore the best weights observed during training
                    ),
                # reduce learning rate on plateau
                callbacks.ReduceLROnPlateau(
                    monitor='val_loss',  # watch validation loss
                    factor=0.1,  # learning-rate decay factor (new_lr = lr * factor)
                    patience=5,  # reduce after this many epochs without improvement
                    verbose=1,  # print a message when the rate is reduced
                    mode='min',  # the monitored metric should decrease
                    cooldown=0,  # epochs to wait after a reduction before resuming monitoring
                    min_lr=1e-6  # lower bound on the learning rate
                )
                      ]

    my_history = DeepMTT_model.fit(
                      train_generator,
                      initial_epoch=87,
                      epochs=100,
                      verbose=1,
                      callbacks=callbacks_list,
                      validation_data=val_generator,
                      validation_steps=50,  # validate on 50 batches drawn from the validation generator
                      # workers=4,  # number of parallel workers for data generation
                      # use_multiprocessing=True,  # use multiprocessing for data generation
                      # max_queue_size=10,  # max prepared batches queued for the model to consume
                      )


if __name__ == '__main__':
    # Register the custom activation globally so saved models that reference
    # 'noisy_activation' can be deserialized during load_model().
    utils.get_custom_objects().update({'noisy_activation': noisy_activation})
    train()