import argparse
import csv
import pandas as pd
import numpy as np

import tensorflow as tf
from tensorflow.image import ssim
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import TimeDistributed, Conv2D, MaxPooling2D, Reshape, Bidirectional, LSTM, Dense, \
    Dropout, ConvLSTM2D, BatchNormalization, Flatten, Attention, Input, LayerNormalization, Add
from tensorflow.keras.regularizers import l2

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_2x2, plot_images_2x2, save_images_3x2, plot_images_3x2


import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# ============================================= Loss functions =======================================
def ssim_loss(y_true, y_pred):
    """
    Structural similarity (SSIM) loss.

    Measures structural similarity between target and prediction rather
    than pure pixel-wise error, so reconstructions better match human
    visual perception; computationally heavier than plain MSE.

    :param y_true: ground-truth tensor (values expected in [0, 1])
    :param y_pred: predicted tensor
    :return: scalar loss, 1 - mean(SSIM)
    """
    mean_ssim = tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return 1 - mean_ssim


def mixed_loss(y_true, y_pred):
    """
    Mixed loss: MSE + (1 - SSIM).

    Combines pixel-level accuracy (MSE) with structural similarity (SSIM)
    so the reconstruction matches the target in both detail and structure.

    :param y_true: ground-truth tensor (values expected in [0, 1])
    :param y_pred: predicted tensor
    :return: scalar loss value
    """
    pixel_term = tf.reduce_mean(tf.square(y_true - y_pred))
    structure_term = 1 - tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return pixel_term + structure_term


def smooth_loss(y_true, y_pred):
    """
    MSE loss with an added smoothness penalty.

    The penalty compares second-order differences along axis 1 of the
    target and the prediction, discouraging predictions whose local
    curvature deviates from the ground truth.

    :param y_true: ground-truth tensor
    :param y_pred: predicted tensor
    :return: scalar loss = MSE + 0.1 * smoothness penalty
    """
    def _second_diff(t):
        # First-order difference along axis 1, applied twice.
        first = t[:, 1:] - t[:, :-1]
        return first[:, 1:] - first[:, :-1]

    reconstruction = tf.reduce_mean(tf.square(y_true - y_pred))
    curvature_gap = tf.reduce_mean(tf.square(_second_diff(y_true) - _second_diff(y_pred)))
    # 0.1 weights the smoothness constraint relative to the MSE term.
    return reconstruction + 0.1 * curvature_gap


# ============================================= Autoencoder loading =======================================
def load_models(path_encoder, path_decoder):
    """Load a pre-trained encoder/decoder pair from disk.

    :param path_encoder: path to the saved encoder model (.h5)
    :param path_decoder: path to the saved decoder model (.h5)
    :return: (encoder, decoder) Keras models
    """
    return load_model(path_encoder), load_model(path_decoder)

# ============================================= LSTM / sequence models =======================================
def prepare_sequences(phaseBits, phaseBitEncs, traces, sequence_length=10):
    """
    Convert aligned phase-code sequences into LSTM-ready training pairs.

    Every window of `sequence_length` consecutive encoded codes becomes an
    input sample; the element right after the window is the target.

    :param phaseBits: list of raw phase codes, aligned with phaseBitEncs
    :param phaseBitEncs: list of encoded phase codes
    :param traces: list of actual beam-pointing angles, aligned with phaseBits
    :param sequence_length: number of history steps per input sample
    :return: (X_enc, y_enc, y_phaseBit, y_trace) as numpy arrays —
        input windows (encoded), next encoded code, next raw code, next trace
    """
    n_samples = len(phaseBits) - sequence_length
    windows = [phaseBitEncs[start:start + sequence_length] for start in range(n_samples)]
    next_enc = [phaseBitEncs[start + sequence_length] for start in range(n_samples)]
    next_raw = [phaseBits[start + sequence_length] for start in range(n_samples)]
    next_trace = [traces[start + sequence_length] for start in range(n_samples)]
    return np.array(windows), np.array(next_enc), np.array(next_raw), np.array(next_trace)


def build_lstm_model(input_shape):
    """
    Build an LSTM model that predicts the next 16x16 encoded frame.

    :param input_shape: input shape, e.g. (sequence_length, 16, 16)
        or (sequence_length, 16, 16, 1)
    :return: compiled Keras model
    """
    model = Sequential([
        # Per-timestep dense projection over the last axis.
        TimeDistributed(Dense(64, activation='relu'), input_shape=input_shape),
        # BUG FIX: LSTM requires rank-3 input (batch, time, features), but
        # TimeDistributed(Dense) keeps the spatial axes, producing
        # (batch, time, 16, 64[, ...]). Flatten each timestep first.
        TimeDistributed(Flatten()),
        LSTM(128, return_sequences=False),  # only the final state is needed
        # Dense head producing the flat 16x16 output frame.
        Dense(16 * 16, activation='relu'),
        Reshape((16, 16))  # back to a 16x16 map
    ])
    # Compile for regression. 'accuracy' is kept for backward-compatible
    # history keys, though it is not a meaningful regression metric.
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',  # regression task
                  metrics=['accuracy'])
    return model


def build_convlstm_model(input_shape):
    """
    Build a single-layer ConvLSTM model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled ConvLSTM model
    """
    # Recurrent convolutional encoder followed by a 1x1 projection
    # back to a single channel, i.e. a (16, 16, 1) output frame.
    layers = [
        ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same',
                   return_sequences=False, input_shape=input_shape),
        BatchNormalization(),
        Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='relu'),
    ]
    model = Sequential(layers)
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


def build_convlstm_attention_model(input_shape):
    """
    Build a two-layer ConvLSTM model with a self-attention stage.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras Model
    """
    # Model input
    inputs = Input(shape=input_shape)
    # First ConvLSTM layer (keeps the time axis)
    x = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=True)(inputs)
    x = BatchNormalization()(x)
    # Second ConvLSTM layer (collapses the time axis)
    x = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=False)(x)
    x = BatchNormalization()(x)
    # Self-attention: query and value are both x.
    # NOTE(review): x is rank-4 here (batch, 16, 16, 32); keras Attention is
    # documented for rank-3 (batch, steps, dim) inputs — confirm this layer
    # behaves as intended on spatial feature maps.
    x = Attention()([x, x])  # functional-API Attention
    # Project back to a single channel: (16, 16, 1)
    outputs = Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='linear')(x)
    # Assemble the model
    model = Model(inputs, outputs)
    # Earlier experiment kept for reference:
    # model.compile(optimizer=Adam(learning_rate=0.001),
    #               loss=smooth_loss,  # custom loss function
    #               metrics=['accuracy'])
    model.compile(optimizer=RMSprop(learning_rate=1e-4),
                  loss=mixed_loss,
                  metrics=['accuracy'])
    return model


def build_cnn_lstm_model(input_shape):
    """
    Build a hybrid CNN + LSTM model.

    Per-timestep CNN features are flattened and fed to an LSTM; a dense
    head then regresses the next 16x16x1 frame.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled hybrid model
    """
    model = Sequential()
    # Per-timestep convolutional feature extraction.
    model.add(TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                              input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))  # shrink spatial dims
    model.add(TimeDistributed(Dropout(0.3)))
    # Flatten each timestep into a feature vector.
    model.add(TimeDistributed(Reshape((-1,))))
    # Temporal modelling; only the final state is kept.
    model.add(LSTM(128, return_sequences=False))
    model.add(Dropout(0.3))
    # Dense regression head.
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(16 * 16))  # flat 16x16 output
    model.add(Reshape((16, 16, 1)))  # back to (16, 16, 1)
    # Lowered learning rate for stability.
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


def build_cnn_blstm_model(input_shape):
    """
    Build an improved CNN + bidirectional-LSTM hybrid model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled hybrid model
    """
    feature_layers = [
        # Input: (batch_size, sequence_length, 16, 16, 1)
        TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                        input_shape=input_shape),
        TimeDistributed(MaxPooling2D(pool_size=(2, 2))),
        TimeDistributed(Dropout(0.5)),
        # Flatten each timestep to (batch_size, sequence_length, features).
        TimeDistributed(Reshape((-1,))),
    ]
    head_layers = [
        Bidirectional(LSTM(128, return_sequences=False)),  # bidirectional temporal model
        Dropout(0.5),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(16 * 16),          # flat 16x16 output
        Reshape((16, 16, 1)),    # back to (16, 16, 1)
    ]
    model = Sequential(feature_layers + head_layers)
    # Mixed MSE+SSIM loss keeps both detail and structure close to the target.
    model.compile(optimizer='adam', loss=mixed_loss)
    return model


####################################### Improvements over build_convlstm_model() -- begin ###########################################
def build_improved_convlstm_model(input_shape):
    """
    Build a deeper ConvLSTM model aimed at better loss convergence.

    Three ConvLSTM stages with decreasing filter counts, each followed by
    LayerNormalization and Dropout, ending in a linear 1x1 projection.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled ConvLSTM model
    """
    model = Sequential()
    # Stage 1: widest recurrent-convolutional layer.
    model.add(ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same',
                         return_sequences=True, input_shape=input_shape,
                         kernel_regularizer=l2(1e-4)))
    model.add(LayerNormalization())
    model.add(Dropout(0.3))
    # Stage 2: medium width, still sequence-to-sequence.
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same',
                         return_sequences=True, kernel_regularizer=l2(1e-4)))
    model.add(LayerNormalization())
    model.add(Dropout(0.3))
    # Stage 3: narrow layer that collapses the time axis.
    model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                         return_sequences=False, kernel_regularizer=l2(1e-4)))
    model.add(LayerNormalization())
    model.add(Dropout(0.3))
    # Linear 1x1 projection to a single output channel.
    model.add(Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='linear'))

    # Tuned optimizer settings.
    model.compile(
        optimizer=Adam(learning_rate=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
        loss='mean_squared_error',
        metrics=['mae', 'mse']
    )
    return model


def exponential_decay(epoch):
    """
    Exponential learning-rate decay: lr(epoch) = 0.0005 * 0.95 ** epoch.

    Keeps the single-argument signature used by LearningRateScheduler
    in train_improved_model().

    :param epoch: zero-based epoch index
    :return: learning rate for the given epoch
    """
    base_lr = 0.0005
    per_epoch_factor = 0.95
    return base_lr * per_epoch_factor ** epoch


def train_improved_model(model, X_train, y_train, path_model_checkpoint, epochs=200, batch_size=16):
    """
    Improved training routine with extra anti-overfitting/optimization strategies.

    :param model: compiled Keras model to train
    :param X_train: training inputs
    :param y_train: training targets
    :param path_model_checkpoint: path where the best model is saved
    :param epochs: number of training epochs
    :param batch_size: batch size
    :return: Keras training History
    """
    callbacks = [
        # Early stopping with a larger patience window.
        EarlyStopping(
            patience=20,
            monitor='val_loss',
            restore_best_weights=True,
            min_delta=0.001
        ),

        # Checkpoint only the best model (lowest val_loss).
        ModelCheckpoint(
            path_model_checkpoint,
            save_best_only=True,
            monitor='val_loss',
            mode='min'
        ),

        # Plateau-based learning-rate reduction (gentler decay).
        # NOTE(review): the LearningRateScheduler below recomputes the LR from
        # the epoch index every epoch, which effectively overrides whatever
        # ReduceLROnPlateau set — confirm both are intended together.
        ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=8,
            min_lr=1e-7,
            verbose=1
        ),

        # Fixed exponential learning-rate decay.
        LearningRateScheduler(exponential_decay)
    ]

    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=callbacks,
        verbose=1,
        shuffle=True  # shuffle training data each epoch
    )
    return history


def build_residual_convlstm_model(input_shape):
    """
    Build a ConvLSTM model with a residual connection to mitigate
    vanishing gradients.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras Model
    """
    inputs = Input(shape=input_shape)

    # Main path: sequence-to-sequence ConvLSTM.
    x = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same',
                   return_sequences=True, kernel_regularizer=l2(1e-4))(inputs)
    x = LayerNormalization()(x)
    x = Dropout(0.3)(x)

    # Residual path: 1x1 ConvLSTM projection of the input.
    # BUG FIX: this branch must also return the full sequence. With the
    # default return_sequences=False its output is (batch, 16, 16, 64)
    # while the main path is (batch, time, 16, 16, 64), so the Add below
    # would rely on implicit broadcasting (or fail outright, depending on
    # the Keras version) instead of forming a true per-timestep skip.
    residual = ConvLSTM2D(filters=64, kernel_size=(1, 1), padding='same',
                          return_sequences=True)(inputs)

    # Per-timestep skip connection, then collapse the time axis.
    x = Add()([x, residual])
    x = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                   return_sequences=False, kernel_regularizer=l2(1e-4))(x)
    x = LayerNormalization()(x)
    x = Dropout(0.3)(x)

    # Linear 1x1 projection to a single output channel.
    outputs = Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='linear')(x)

    model = Model(inputs=inputs, outputs=outputs)

    optimizer = Adam(learning_rate=0.0003, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['mae'])

    return model


def train_with_different_loss(model, X_train, y_train, path_model_checkpoint, epochs=150, batch_size=16):
    """
    Re-compile the model with a Huber loss and train it.

    :param model: Keras model (re-compiled in place)
    :param X_train: training inputs
    :param y_train: training targets
    :param path_model_checkpoint: path where the best model is saved
    :param epochs: number of training epochs
    :param batch_size: batch size
    :return: Keras training History
    """
    # Try an alternative loss function.
    model.compile(
        optimizer=Adam(learning_rate=0.0005),
        loss='huber',  # Huber loss is more robust to outliers
        metrics=['mae', 'mse']
    )

    training_callbacks = [
        EarlyStopping(patience=15, monitor='val_loss', restore_best_weights=True),
        ModelCheckpoint(path_model_checkpoint, save_best_only=True),
        ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, min_lr=1e-6),
    ]

    return model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=training_callbacks,
        verbose=1
    )
####################################### Improvements over build_convlstm_model() -- end ############################################



def train_model(model, X_train, y_train, path_model_checkpoint, epochs=100, batch_size=32):
    """
    Train the model with early stopping, checkpointing and LR scheduling.

    :param model: compiled Keras model
    :param X_train: training inputs
    :param y_train: training targets
    :param path_model_checkpoint: path where the best model is saved
    :param epochs: number of training epochs
    :param batch_size: batch size
    :return: Keras training History
    """
    stop_early = EarlyStopping(patience=10, monitor='val_loss', restore_best_weights=True)
    keep_best = ModelCheckpoint(path_model_checkpoint, save_best_only=True)
    lower_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-6)  # LR scheduler
    return model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=[stop_early, keep_best, lower_lr],
        verbose=1
    )


# ============================================= Results / reporting =======================================
def save_results(dir_compare, y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays, bit_num, name,
                 sample_every=1):
    """
    Score predicted phase codes against the ground truth and persist
    comparison artifacts (images + CSV summaries).

    NOTE: uses the module-level `logger` configured in the __main__ block.

    :param dir_compare: output path prefix for comparison artifacts
    :param y_enc_arrays: decoded ground-truth encodings (64x64 bit maps)
    :param y_out_arrays: decoded model predictions (64x64 bit maps)
    :param y_phaseBit_arrays: raw ground-truth phase-bit codes
    :param y_trace_arrays: beam-pointing angles per sample
    :param bit_num: phase quantization bit count
    :param name: dataset tag; 'test' additionally dumps each prediction as CSV
    :param sample_every: emit detailed artifacts for every N-th sample.
        Default 1 keeps the original behavior (every sample); replaces a
        hard-coded `idx % 1 == 0` check that was always true.
    :return: dict with 'max', 'min', 'mean', 'std' of per-sample accuracies
    """
    total_correct = 0
    total_elements = 0
    accuracies = []
    res = []
    for idx, (y_enc, y_out, y_phaseBit, y_trace) \
            in enumerate(zip(y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays)):
        # Element-wise bit accuracy against the raw ground truth.
        correct = np.sum(y_phaseBit == y_out)
        total_correct += correct
        total_elements += y_phaseBit.size

        accuracy = correct / y_phaseBit.size
        accuracies.append(accuracy)

        if idx % sample_every == 0:
            # Radiation pattern, pointing direction and PSLL for each variant.
            pattern_enc, point_enc, psll_enc = phaseBit_2_pattern_point(y_enc, bit_num)
            pattern_out, point_out, psll_out = phaseBit_2_pattern_point(y_out, bit_num)
            pattern_phaseBit, point_phaseBit, psll_phaseBit = phaseBit_2_pattern_point(y_phaseBit, bit_num)

            # Save the 3x2 comparison figure (codes on top, patterns below).
            save_images_3x2(dir_compare + str(idx) + ".jpg",
                            y_phaseBit, f"phaseBit(real):[{point_phaseBit[1]}]-[{psll_phaseBit[0]:.2f},{psll_phaseBit[1]}]",
                            y_enc, f"phaseBit(enc):[{point_enc[1]}]-[{psll_enc[0]:.2f},{psll_enc[1]}]",
                            y_out, f"phaseBit(out):[{point_out[1]}]-[{psll_out[0]:.2f},{psll_out[1]}]",
                            pattern_phaseBit, "pattern(real)",
                            pattern_enc, "pattern(enc)",
                            pattern_out, "pattern(out)",
                            f"trace = {y_trace}, Accuracy = {accuracy:.2%}")

            # Record idx, pointing, PSLL and accuracy for the summary CSV.
            res.append([str(idx),
                        point_phaseBit[1], point_enc[1], point_out[1],
                        psll_phaseBit[0], psll_enc[0], psll_out[0],
                        psll_phaseBit[1], psll_enc[1], psll_out[1],
                        accuracy,
                        y_trace, y_trace[0], y_trace[1]])

            if name == "test":
                # Dump the decoded test prediction for offline inspection.
                np.savetxt(os.path.join(dir_compare, f'y_out_{idx}.csv'), y_out, delimiter=',')

    # Persist the pointing / PSLL summary as a CSV.
    df = pd.DataFrame(res, columns=['idx', 'point_ori', 'point_enc', 'point_out', 'psll_ori', 'psll_enc', 'psll_out',
                                    'pos_sl_ori', 'pos_sl_enc', 'pos_sl_out', 'accuracy',
                                    'trace', 'trace_th_smooth', 'trace_ph_smooth'])
    df.to_csv(os.path.join(dir_compare, 'y_out_psll_point.csv'), index=False)

    # Aggregate statistics for this split.
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write each statistic as a 'Key: value' line to a text file.

    :param statistics: mapping of metric name -> value (insertion order kept)
    :param file_path: destination text file (overwritten)
    """
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


# ============================================= Main pipeline =======================================
def get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and their phase codes via theta scans.

    For every integer phi in [phi_start, phi_end) a theta-direction scan
    trace is generated, converted to phase values, and quantized to bits.
    NOTE: uses the module-level `logger` configured in the __main__ block.

    :param bit_num: phase quantization bit count
    :param theta_start: theta scan start
    :param theta_end: theta scan end
    :param phi_start: first phi (inclusive)
    :param phi_end: last phi (exclusive)
    :return: (traces, phaseRads, phaseBits) flat lists over all phi values
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end, 1):
        # Scan trace along theta at this phi.
        scan = create_scan_traces_theta(theta_start, theta_end, phi)
        # Phase values for the trace.
        rads = traces_2_phaseRads(scan)
        # Quantize the phases to bit codes.
        bits = phases_rad_2_bit(rads, bit_num)
        traces.extend(scan)
        phaseRads.extend(rads)
        phaseBits.extend(bits)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits

def get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and their phase codes via phi scans.

    For every integer theta in [theta_start, theta_end) a phi-direction
    scan trace is generated, converted to phase values, and quantized.
    NOTE: uses the module-level `logger` configured in the __main__ block.

    :param bit_num: phase quantization bit count
    :param theta_start: first theta (inclusive)
    :param theta_end: last theta (exclusive)
    :param phi_start: phi scan start
    :param phi_end: phi scan end
    :return: (traces, phaseRads, phaseBits) flat lists over all theta values
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for theta in range(theta_start, theta_end, 1):
        # Scan trace along phi at this theta.
        scan = create_scan_traces_phi(theta=theta, phi_start=phi_start, phi_end=phi_end)
        # Phase values for the trace.
        rads = traces_2_phaseRads(scan)
        # Quantize the phases to bit codes.
        bits = phases_rad_2_bit(rads, bit_num)
        traces.extend(scan)
        phaseRads.extend(rads)
        phaseBits.extend(bits)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def read_csv_to_traces(file_path, key_th='th_smooth', key_ph='ph_smooth'):
    """Read (theta, phi) pointing pairs from a CSV file with a header row.

    :param file_path: path to the CSV file
    :param key_th: column name holding the theta values
    :param key_ph: column name holding the phi values
    :return: list of [theta, phi] float pairs, in file order
    """
    with open(file_path, mode='r', newline='') as csvfile:
        return [[float(row[key_th]), float(row[key_ph])]
                for row in csv.DictReader(csvfile)]


def get_traces_phaseBits_by_uav_dataset(bit_num, path_csv, key_th='th_smooth', key_ph='ph_smooth',
                                        max_samples=3000):
    """Load beam-pointing traces from a UAV dataset CSV and derive phase codes.

    NOTE: uses the module-level `logger` configured in the __main__ block.

    :param bit_num: phase quantization bit count
    :param path_csv: CSV file with the trajectory pointing angles
    :param key_th: column name for theta
    :param key_ph: column name for phi
    :param max_samples: keep at most this many leading samples; None keeps all.
        (Generalizes the previously hard-coded 3000-sample cap.)
    :return: (traces, phaseRads, phaseBits)
    """
    logger.info(f"path_csv={path_csv}")
    # Trajectory pointing angles from the dataset.
    traces = read_csv_to_traces(path_csv, key_th, key_ph)
    if max_samples is not None:
        traces = traces[:max_samples]
    # Phase values for each pointing.
    phaseRads = traces_2_phaseRads(traces)
    # Quantize the phases to bit codes.
    phaseBits = phases_rad_2_bit(phaseRads, bit_num)
    return traces, phaseRads, phaseBits


# Prediction step, generic for any time offset t = t + item
def predict_t_item(path_dir, name, model, decoder, X, y_real, y_real_phaseBit, y_real_trace):
    """
    Run one prediction step, decode prediction and ground truth back to
    64x64 bit maps, and save/score the comparison.

    :param path_dir: output directory for comparison artifacts
    :param name: dataset tag ('train' or 'test')
    :param model: trained sequence model (predicts the next encoded frame)
    :param decoder: autoencoder decoder (encoded frame -> flat 64x64 bits)
    :param X: input sequences for the model
    :param y_real: ground-truth encoded targets
    :param y_real_phaseBit: ground-truth raw phase-bit codes
    :param y_real_trace: ground-truth beam-pointing angles
    :return: (y_out, stats) — raw model predictions and accuracy statistics
    """
    # Sequence-model prediction in the encoded (16x16) space.
    y_out = model.predict(X)

    # Decode both prediction and ground truth back to the flat bit space.
    y_out_dec = decoder.predict(y_out)
    y_real_dec = decoder.predict(y_real)
    # Threshold probabilities to 0/1 bits.
    y_out_dec_bit = (y_out_dec > 0.5).astype(np.int32)
    y_real_dec_bit = (y_real_dec > 0.5).astype(np.int32)
    # Restore the original 64x64 layout.
    y_out_dec_bit_2d = [arr.reshape(64, 64) for arr in y_out_dec_bit]
    y_real_dec_bit_2d = [arr.reshape(64, 64) for arr in y_real_dec_bit]

    # Save comparison artifacts and collect accuracy statistics.
    # NOTE(review): `bit_num` is the module-level global set in the __main__
    # block, not a parameter — this function only works when the file is run
    # as a script; consider passing bit_num explicitly.
    stats = save_results(path_dir,
                         y_real_dec_bit_2d, y_out_dec_bit_2d, y_real_phaseBit, y_real_trace,
                         bit_num, name)
    return y_out, stats


# Build the next prediction round's inputs; predict_t_item() consumes X, y_real, y_real_phaseBit, y_real_trace
def format_predict_t_item(encoder, decoder, X, y_real, y_real_phaseBit, y_real_trace, y_out):
    """
    Slide the prediction window one step forward: append the (re-encoded)
    prediction as the newest timestep of every input sequence and drop the
    first sample everywhere so shapes stay aligned with the remaining
    ground truth.

    :param encoder: autoencoder encoder (64x64x1 frame -> encoded frame)
    :param decoder: autoencoder decoder (encoded frame -> flat 64x64 bits)
    :param X: input sequences, e.g. shape (N, seq, 16, 16, 1)
    :param y_real: encoded ground-truth targets, e.g. (N, 16, 16, 1)
    :param y_real_phaseBit: raw ground-truth codes, e.g. (N, 64, 64)
    :param y_real_trace: ground-truth pointings, e.g. (N, 2)
    :param y_out: model predictions, e.g. (N, 16, 16, 1)
    :return: (X_new, y_real_new, y_real_phaseBit_new, y_real_trace_new),
        each with the first sample removed (N-1 leading dimension)
    """
    # Round-trip the prediction through decoder -> threshold -> encoder so
    # the appended frame lives in the same encoded space as the real inputs.
    decoded = decoder.predict(y_out)
    decoded_bits = (decoded > 0.5).astype(np.int32)
    frames_2d = [arr.reshape(64, 64) for arr in decoded_bits]
    frames_4d = np.array([arr.reshape(64, 64, 1) for arr in frames_2d])
    y_out = encoder.predict(frames_4d)

    # 1. Drop the oldest timestep: (N, seq, ...) -> (N, seq-1, ...)
    X_delete = X[:, 1:, :, :, :]
    print(f"X_delete shape: {X_delete.shape}")
    # 2. Append the re-encoded prediction as the newest timestep.
    y_out_expanded = y_out[:, np.newaxis, :, :, :]
    X_tmp = np.concatenate([X_delete, y_out_expanded], axis=1)
    print(f"X_tmp shape: {X_tmp.shape}")
    # 3.-6. Drop the first sample of every array so the shifted inputs stay
    # aligned with the next-step ground truth.
    X_new = X_tmp[1:, :, :, :, :]
    print(f"X_new shape: {X_new.shape}")
    y_real_new = y_real[1:, :, :, :]
    print(f"y_real_new shape: {y_real_new.shape}")
    y_real_phaseBit_new = y_real_phaseBit[1:, :, :]
    print(f"y_real_phaseBit_new shape: {y_real_phaseBit_new.shape}")
    y_real_trace_new = y_real_trace[1:, :]
    print(f"y_real_trace_new shape: {y_real_trace_new.shape}")
    return X_new, y_real_new, y_real_phaseBit_new, y_real_trace_new


def main_seq_repeat(path_dir, bit_num, mode, ae_enc_path, ae_dec_path,
                    theta_start, theta_end, phi_start, phi_end):
    """
    End-to-end pipeline: build the dataset, train the sequence predictor,
    and evaluate multi-step predictions (t+0 .. t+4) on train/test splits.

    NOTE: relies on the module-level `logger` configured in the __main__ block.

    :param path_dir: output directory for models, images and statistics
    :param bit_num: phase quantization bit count
    :param mode: dataset source — 1: theta scans, 2: phi scans, 3: UAV CSV;
        anything else falls back to phi scans
    :param ae_enc_path: path to the pre-trained autoencoder encoder
    :param ae_dec_path: path to the pre-trained autoencoder decoder
    :param theta_start: theta range start (modes 1/2)
    :param theta_end: theta range end (modes 1/2)
    :param phi_start: phi range start (modes 1/2)
    :param phi_end: phi range end (modes 1/2)
    """
    # Load the pre-trained encoder/decoder pair
    encoder, decoder = load_models(ae_enc_path, ae_dec_path)

    # Build the dataset
    if mode == 1:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 2:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 3:
        # path_dataset = "./files/archive/uav_dataset_thph_interp.csv"           # finer-grained interpolation result
        # key_th, key_ph = 'th_smooth', 'ph_smooth'
        path_dataset = "./files/archive/uav_dataset_interpl_2025-10-21.csv"    # dynamic-t, three curves interpolated 10x
        key_th, key_ph = 'theta_smooth', 'phi_smooth'
        logger.info(f"mode=3, path_dataset: {path_dataset}, key_th:{key_th}, key_ph:{key_ph}")
        traces, phaseRads, phaseBits = get_traces_phaseBits_by_uav_dataset(bit_num, path_dataset, key_th, key_ph)
    else:
        # Fallback: same as mode 2 (phi scan)
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)

    # Reshape phase codes for the convolutional autoencoder
    X = np.array([arr.reshape(64, 64, 1) for arr in phaseBits])  # keep the 2-D layout, add a channel axis
    X = X.astype('float32')  # keep the 0/1 values
    # Compress with the encoder
    X_encoded = encoder.predict(X)

    # Windowing parameters
    sequence_length_input = 10  # input (history) window length
    sequence_length_output = 5  # prediction steps; must match the t+0..t+4 stages below
    X_enc, y_enc, y_phaseBit, y_trace = prepare_sequences(phaseBits, X_encoded, traces, sequence_length_input)

    # Chronological 80/20 train/test split
    split = int(0.8 * len(X_enc))
    X_enc_train, X_enc_test = X_enc[:split], X_enc[split:]
    y_enc_train, y_enc_test = y_enc[:split], y_enc[split:]
    y_phaseBit_train, y_phaseBit_test = y_phaseBit[:split], y_phaseBit[split:]
    y_trace_train, y_trace_test = y_trace[:split], y_trace[split:]

    # Sanity-check shapes
    print("y_train shape:", y_enc_train.shape)
    print("X_train shape:", X_enc_train.shape)

    # Build the model
    input_shape = (sequence_length_input, 16, 16, 1)  # i.e. (10, 16, 16, 1)
    # model = build_lstm_model(input_shape)
    # model = build_cnn_lstm_model(input_shape)
    # model = build_cnn_blstm_model(input_shape)
    # model = build_convlstm_model(input_shape)                   # previous best
    # model = build_convlstm_attention_model(input_shape)       # worse than build_convlstm_model()
    # model.summary()

    # Train the model
    # history = train_model(model, X_enc_train, y_enc_train, path_dir+'/best_lstm_model.h5', epochs=100, batch_size=32)

    # model = build_improved_convlstm_model(input_shape)
    model = build_residual_convlstm_model(input_shape)
    model.summary()
    history = train_improved_model(model, X_enc_train, y_enc_train,
                                   path_dir + '/best_lstm_model.h5', epochs=100, batch_size=32)

    # Evaluate on both the training and the test split
    datasets = {
        'train': {'X_enc': X_enc_train, 'y_enc': y_enc_train, 'y_phaseBit': y_phaseBit_train, 'y_trace': y_trace_train},
        'test': {'X_enc': X_enc_test, 'y_enc': y_enc_test, 'y_phaseBit': y_phaseBit_test, 'y_trace': y_trace_test}
    }
    # Statistics per prediction step (t+0 .. t+4), keyed by step then split
    all_accuracies = {
        0: {}, 1: {}, 2: {}, 3: {}, 4: {}
    }

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        X_enc = dataset['X_enc']
        y_enc = dataset['y_enc']
        y_phaseBit = dataset['y_phaseBit']
        y_trace = dataset['y_trace']

        # Predict t=t+0
        y_out_t0, stats_t0 = predict_t_item(path_dir + f'/dataset_{name}' + "/compare/0/", name, model, decoder,
                                            X_enc, y_enc, y_phaseBit, y_trace)
        all_accuracies[0][name] = stats_t0   # record statistics

        # Predict t=t+1 (feed the t+0 prediction back in)
        X_t1, y_real_t1, y_real_phaseBit_t1, y_real_trace_t1 \
            = format_predict_t_item(encoder, decoder, X_enc, y_enc, y_phaseBit, y_trace, y_out_t0)
        y_out_t1, stats_t1 \
            = predict_t_item(path_dir + f'/dataset_{name}' + "/compare/1/", name, model, decoder,
                             X_t1, y_real_t1, y_real_phaseBit_t1, y_real_trace_t1)
        all_accuracies[1][name] = stats_t1   # record statistics

        # Predict t=t+2
        X_t2, y_real_t2, y_real_phaseBit_t2, y_real_trace_t2 \
            = format_predict_t_item(encoder, decoder, X_t1, y_real_t1, y_real_phaseBit_t1, y_real_trace_t1, y_out_t1)
        y_out_t2, stats_t2 \
            = predict_t_item(path_dir + f'/dataset_{name}' + "/compare/2/", name, model, decoder,
                             X_t2, y_real_t2, y_real_phaseBit_t2, y_real_trace_t2)
        all_accuracies[2][name] = stats_t2  # record statistics

        # Predict t=t+3
        X_t3, y_real_t3, y_real_phaseBit_t3, y_real_trace_t3 \
            = format_predict_t_item(encoder, decoder, X_t2, y_real_t2, y_real_phaseBit_t2, y_real_trace_t2, y_out_t2)
        y_out_t3, stats_t3 \
            = predict_t_item(path_dir + f'/dataset_{name}' + "/compare/3/", name, model, decoder,
                             X_t3, y_real_t3, y_real_phaseBit_t3, y_real_trace_t3)
        all_accuracies[3][name] = stats_t3  # record statistics

        # Predict t=t+4 (the original comment said t+3 — copy/paste slip)
        X_t4, y_real_t4, y_real_phaseBit_t4, y_real_trace_t4 \
            = format_predict_t_item(encoder, decoder, X_t3, y_real_t3, y_real_phaseBit_t3, y_real_trace_t3, y_out_t3)
        y_out_t4, stats_t4 \
            = predict_t_item(path_dir + f'/dataset_{name}' + "/compare/4/", name, model, decoder,
                             X_t4, y_real_t4, y_real_phaseBit_t4, y_real_trace_t4)
        all_accuracies[4][name] = stats_t4  # record statistics

    # Persist per-step statistics
    for i in range(sequence_length_output):
        save_statistics({
            'train_max': all_accuracies[i]['train']['max'],
            'train_min': all_accuracies[i]['train']['min'],
            'train_mean': all_accuracies[i]['train']['mean'],
            'train_std': all_accuracies[i]['train']['std'],
            'test_max': all_accuracies[i]['test']['max'],
            'test_min': all_accuracies[i]['test']['min'],
            'test_mean': all_accuracies[i]['test']['mean'],
            'test_std': all_accuracies[i]['test']['std']
        }, path_dir + "/result_" + str(i) + ".txt")

    # Save the final model
    model.save(path_dir+'/lstm_phaseBits_predictor.h5')


if __name__ == "__main__":
    # Command-line interface
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90)",
                        help="Base directory path (default: ../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90))")
    parser.add_argument("--ae_enc_path", type=str,
                        default="./files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5",
                        help="autocoder encoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5")
    parser.add_argument("--ae_dec_path", type=str,
                        default="./files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5",
                        help="autocoder decoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5")
    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--mode", type=int, default=2, help="1: by theta, 2: by phi (default: 2)")
    parser.add_argument("--theta_start", type=int, default=1, help="theta_start (default: 1)")
    parser.add_argument("--theta_end", type=int, default=60, help="theta_end (default: 60)")
    parser.add_argument("--phi_start", type=int, default=0, help="phi_start (default: 0)")
    parser.add_argument("--phi_end", type=int, default=360, help="phi_end (default: 360)")

    args = parser.parse_args()

    base_path = args.base_path
    ae_enc_path, ae_dec_path = args.ae_enc_path, args.ae_dec_path
    mode = args.mode
    # NOTE: `bit_num` (and `logger` below) become module-level globals that
    # predict_t_item() and the dataset helpers read directly.
    bit_num = args.bit_num
    theta_start, theta_end, phi_start, phi_end = args.theta_start, args.theta_end, args.phi_start, args.phi_end

    # Initialize logging
    logger = setup_logging(base_path + "/trace.txt")
    # Log the run configuration
    logger.info(f"Starting execution with base_path: {base_path}")
    logger.info(f"ae_enc_path: {ae_enc_path}, ae_dec_path: {ae_dec_path}")
    logger.info(f"Using bit_num: {bit_num}")
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")

    main_seq_repeat(path_dir=base_path, bit_num=bit_num, mode=mode,
                    ae_enc_path=ae_enc_path, ae_dec_path=ae_dec_path,
                    theta_start=theta_start, theta_end=theta_end, phi_start=phi_start, phi_end=phi_end)
