import argparse
import csv
import pandas as pd
import numpy as np
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import ConvLSTM2D, BatchNormalization, Conv2D, Attention, Input, RepeatVector, TimeDistributed
from tensorflow.keras.layers import MaxPooling2D, Reshape, Bidirectional, LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_2x2, plot_images_2x2, save_images_3x2, plot_images_3x2


import os
# Allow duplicate OpenMP runtimes to coexist (works around the common
# libiomp5 "already initialized" crash when TF/NumPy/MKL each ship their own).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# ============================================= Loss functions =======================================
from tensorflow.image import ssim
import tensorflow as tf

def ssim_loss(y_true, y_pred):
    """Structural-similarity (SSIM) loss.

    Measures structural similarity between target and prediction instead of
    raw pixel-wise differences, so reconstructions better match human visual
    perception, at the cost of a more expensive computation.

    :param y_true: ground-truth images, values expected in [0, 1]
    :param y_pred: reconstructed images, values expected in [0, 1]
    :return: scalar loss, 1 - mean SSIM over the batch
    """
    mean_ssim = tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return 1 - mean_ssim


def mixed_loss(y_true, y_pred):
    """Mixed MSE + SSIM loss.

    Combines pixel-level fidelity (MSE) with structural similarity (SSIM) so
    the reconstruction stays close to the target in both detail and structure.

    :param y_true: ground-truth images, values expected in [0, 1]
    :param y_pred: reconstructed images, values expected in [0, 1]
    :return: scalar loss, MSE + (1 - mean SSIM)
    """
    pixel_term = tf.reduce_mean(tf.square(y_true - y_pred))
    structure_term = 1 - tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return pixel_term + structure_term


def smooth_loss(y_true, y_pred):
    """MSE loss with an added second-derivative smoothness penalty.

    :param y_true: ground truth
    :param y_pred: prediction
    :return: scalar loss, MSE + 0.1 * curvature-mismatch term
    """
    mse = tf.reduce_mean(tf.square(y_true - y_pred))
    # First differences along axis 1.
    d1_true = y_true[:, 1:] - y_true[:, :-1]
    d1_pred = y_pred[:, 1:] - y_pred[:, :-1]
    # Second differences (discrete curvature) — the smoothness constraint.
    d2_true = d1_true[:, 1:] - d1_true[:, :-1]
    d2_pred = d1_pred[:, 1:] - d1_pred[:, :-1]
    curvature_mismatch = tf.reduce_mean(tf.square(d2_true - d2_pred))
    # 0.1 weights the smoothness constraint against the MSE term.
    return mse + 0.1 * curvature_mismatch


# ============================================= Autoencoder loading =======================================
def load_models(path_encoder, path_decoder):
    """Load a pre-trained encoder/decoder pair from disk.

    :param path_encoder: path to the saved encoder model (.h5)
    :param path_decoder: path to the saved decoder model (.h5)
    :return: (encoder, decoder) Keras models
    """
    return load_model(path_encoder), load_model(path_decoder)

# ============================================= LSTM helpers =======================================
def prepare_sequences(phaseBits, phaseBitEncs, traces, sequence_length_input=10, sequence_length_output=5):
    """Slice the code-matrix series into sliding input/target windows for the LSTM.

    :param phaseBits: raw code matrices (one per time step)
    :param phaseBitEncs: encoded code matrices, aligned with *phaseBits*
    :param traces: actual beam-pointing angles, aligned with *phaseBits*
    :param sequence_length_input: number of steps fed to the model
    :param sequence_length_output: number of steps the model must predict
    :return: (X_enc, y_enc, y_phaseBit, y_trace) as numpy arrays — encoded
             inputs, encoded targets, raw-bit targets, and pointing targets
    """
    X_enc, y_enc, y_phaseBit, y_trace = [], [], [], []
    n_windows = len(phaseBits) - sequence_length_input - sequence_length_output + 1
    for start in range(n_windows):
        mid = start + sequence_length_input        # end of the input window
        end = mid + sequence_length_output         # end of the target window
        X_enc.append(phaseBitEncs[start:mid])      # encoded input sequence
        y_enc.append(phaseBitEncs[mid:end])        # encoded target sequence
        y_phaseBit.append(phaseBits[mid:end])      # raw-bit target sequence
        y_trace.append(traces[mid:end])            # true pointing for the targets
    return np.array(X_enc), np.array(y_enc), np.array(y_phaseBit), np.array(y_trace)


def build_lstm_model(input_shape):
    """Build an LSTM model that predicts the next 16x16 encoded frame.

    :param input_shape: input shape (sequence_length, 16, 16)
    :return: compiled Keras model
    """
    model = Sequential()
    # Per-time-step dense projection of each 16x16 frame.
    model.add(TimeDistributed(Dense(64, activation='relu'), input_shape=input_shape))
    # Collapse the sequence to a single hidden state.
    model.add(LSTM(128, return_sequences=False))
    # Fully connected head producing one flat 16x16 frame.
    model.add(Dense(16 * 16, activation='relu'))
    model.add(Reshape((16, 16)))
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',  # regression objective
                  metrics=['accuracy'])
    return model


def build_convlstm_model(input_shape, sequence_length_output=5, nrow=64):
    """Build an encoder-decoder ConvLSTM model.

    :param input_shape: input shape (sequence_length_input, 16, 16, 1)
    :param sequence_length_output: number of predicted time steps
    :param nrow: ConvLSTM filter count (channel depth fed to the output head)
    :return: compiled Keras model
    """
    model = Sequential()
    # Encode the whole input sequence into one spatial feature map.
    model.add(ConvLSTM2D(filters=nrow, kernel_size=(3, 3), padding='same',
                         return_sequences=False, input_shape=input_shape))
    model.add(BatchNormalization())
    # Flatten so RepeatVector can tile the encoding across the output steps.
    model.add(Reshape((nrow * 16 * 16,)))
    model.add(RepeatVector(sequence_length_output))
    # Restore the spatial layout for the per-step convolutional head.
    model.add(Reshape((sequence_length_output, 16, 16, nrow)))
    # 1x1 convolution per time step -> single-channel output frames.
    model.add(TimeDistributed(Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='relu')))
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


def build_convlstm_attention_model(input_shape):
    """Build a stacked ConvLSTM model with a self-attention stage.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras model
    """
    seq_in = Input(shape=input_shape)
    # Two ConvLSTM stages; only the final state of the second is kept.
    hidden = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=True)(seq_in)
    hidden = BatchNormalization()(hidden)
    hidden = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=False)(hidden)
    hidden = BatchNormalization()(hidden)
    # Self-attention over the feature map (query and value are the same tensor).
    hidden = Attention()([hidden, hidden])
    # Project down to a single-channel (16, 16, 1) output.
    seq_out = Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='linear')(hidden)
    model = Model(seq_in, seq_out)
    model.compile(optimizer=RMSprop(learning_rate=1e-4),
                  loss=mixed_loss,  # balance pixel detail (MSE) and structure (SSIM)
                  metrics=['accuracy'])
    return model


def build_cnn_lstm_model(input_shape):
    """Build a hybrid CNN + LSTM model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras model
    """
    model = Sequential()
    # Per-time-step convolutional feature extraction.
    model.add(TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'), input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))  # halves the spatial size
    model.add(TimeDistributed(Dropout(0.3)))
    # Flatten each time step's feature map into one vector.
    model.add(TimeDistributed(Reshape((-1,))))
    # Temporal modelling; only the final state is used.
    model.add(LSTM(128, return_sequences=False))
    model.add(Dropout(0.3))
    # Fully connected head producing one flat 16x16 frame.
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(16 * 16))
    model.add(Reshape((16, 16, 1)))
    # Reduced learning rate for training stability.
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='mean_squared_error',  # regression objective
                  metrics=['accuracy'])
    return model


def build_cnn_blstm_model(input_shape):
    """Build an improved CNN + bidirectional-LSTM hybrid model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras model
    """
    model = Sequential()
    # Per-time-step convolutional feature extraction with heavy dropout.
    model.add(TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'), input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.5)))
    # Flatten each time step's feature map into one vector.
    model.add(TimeDistributed(Reshape((-1,))))
    # Bidirectional LSTM; only the final state is used.
    model.add(Bidirectional(LSTM(128, return_sequences=False)))
    model.add(Dropout(0.5))
    # Fully connected head producing one flat 16x16 frame.
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(16 * 16))
    model.add(Reshape((16, 16, 1)))
    # Mixed loss keeps the reconstruction close in both detail and structure.
    model.compile(optimizer='adam', loss=mixed_loss)
    return model




def train_model(model, X_train, y_train, path_model_checkpoint, epochs=100, batch_size=32):
    """Fit the model with early stopping, checkpointing and LR scheduling.

    :param model: compiled Keras model
    :param X_train: training inputs
    :param y_train: training targets
    :param path_model_checkpoint: where the best (lowest val_loss) model is saved
    :param epochs: maximum number of epochs
    :param batch_size: mini-batch size
    :return: Keras training History
    """
    stop_early = EarlyStopping(patience=10, monitor='val_loss', restore_best_weights=True)
    checkpoint = ModelCheckpoint(path_model_checkpoint, save_best_only=True)
    # Learning-rate scheduler: shrink LR when validation loss plateaus.
    lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-6)
    return model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=[stop_early, checkpoint, lr_schedule],
        verbose=1
    )


# ============================================= Result saving =======================================
def save_results_seq(base_dir, seq_idx,
                     y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays, bit_num, name):
    """Compare predicted vs. real code matrices for one prediction step and save results.

    Writes per-sample comparison images, a summary CSV of pointing/PSLL/accuracy
    metrics, and (for the test split) the raw predicted matrices; logs and
    returns accuracy statistics.

    NOTE(review): relies on the module-level `logger` created in __main__ —
    calling this from another entry point requires setting up that logger first.

    :param base_dir: output directory for this dataset split
    :param seq_idx: prediction-step index (t+seq_idx), used in the output path
    :param y_enc_arrays: autoencoder round-trip matrices (encoder-quality reference)
    :param y_out_arrays: decoded sequence-model predictions
    :param y_phaseBit_arrays: ground-truth bit matrices
    :param y_trace_arrays: true beam-pointing angles per sample
    :param bit_num: phase quantization bit count
    :param name: dataset split name ("train" or "test")
    :return: dict with 'max'/'min'/'mean'/'std' of the per-sample accuracies
    """
    dir_compare = base_dir + "/compare/" + str(seq_idx) + "/"
    # Fix: ensure the output directory exists before np.savetxt / df.to_csv
    # write into it (previously crashed on a fresh output tree).
    os.makedirs(dir_compare, exist_ok=True)
    total_correct = 0
    total_elements = 0
    accuracies = []
    res = []
    for idx, (y_enc, y_out, y_phaseBit, y_trace) \
            in enumerate(zip(y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays)):
        # Bitwise accuracy of the prediction against the ground-truth matrix.
        correct = np.sum(y_phaseBit == y_out)
        total_correct += correct
        total_elements += y_phaseBit.size

        accuracy = correct / y_phaseBit.size
        accuracies.append(accuracy)

        if idx % 1 == 0:  # sampling-stride knob; 1 means every sample is plotted
            # Radiation pattern, beam pointing and PSLL for each matrix variant.
            pattern_enc, point_enc, psll_enc = phaseBit_2_pattern_point(y_enc, bit_num)
            pattern_out, point_out, psll_out = phaseBit_2_pattern_point(y_out, bit_num)
            pattern_phaseBit, point_phaseBit, psll_phaseBit = phaseBit_2_pattern_point(y_phaseBit, bit_num)

            # Save a 3x2 comparison image: bit matrices on top, patterns below.
            save_images_3x2(dir_compare + str(idx) + ".jpg",
                            y_phaseBit, f"phaseBit(real):[{point_phaseBit[1]}]-[{psll_phaseBit[0]:.2f},{psll_phaseBit[1]}]",
                            y_enc, f"phaseBit(enc):[{point_enc[1]}]-[{psll_enc[0]:.2f},{psll_enc[1]}]",
                            y_out, f"phaseBit(out):[{point_out[1]}]-[{psll_out[0]:.2f},{psll_out[1]}]",
                            pattern_phaseBit, "pattern(real)",
                            pattern_enc, "pattern(enc)",
                            pattern_out, "pattern(out)",
                            f"trace = {y_trace}, Accuracy = {accuracy:.2%}")

            # Record idx, pointing, PSLL and accuracy for the summary CSV.
            res.append([str(idx),
                        point_phaseBit[1], point_enc[1], point_out[1],
                        psll_phaseBit[0], psll_enc[0], psll_out[0],
                        psll_phaseBit[1], psll_enc[1], psll_out[1],
                        accuracy,
                        y_trace, y_trace[0], y_trace[1]])

            if name == "test":
                # Persist the raw decoded prediction for the test split.
                np.savetxt(os.path.join(dir_compare, f'y_out_{idx}.csv'), y_out, delimiter=',')

    # Summary CSV of pointing / PSLL / accuracy per sampled index.
    df = pd.DataFrame(res, columns=['idx', 'point_ori', 'point_enc', 'point_out', 'psll_ori', 'psll_enc', 'psll_out',
                                    'pos_sl_ori', 'pos_sl_enc', 'pos_sl_out', 'accuracy',
                                    'trace', 'trace_th_smooth', 'trace_ph_smooth'])
    df.to_csv(os.path.join(dir_compare, 'y_out_psll_point.csv'), index=False)

    # Aggregate accuracy statistics for this split/step.
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write one 'Key: value' line per statistic to *file_path* (overwrites)."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


# ============================================= Main pipeline =======================================
def get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Build beam-pointing traces and matching bit matrices by sweeping theta at each phi.

    :param bit_num: phase quantization bit count
    :param theta_start: theta sweep start
    :param theta_end: theta sweep end
    :param phi_start: first phi value (inclusive)
    :param phi_end: last phi value (exclusive)
    :return: (traces, phaseRads, phaseBits) accumulated over all phi values
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end, 1):
        scan = create_scan_traces_theta(theta_start, theta_end, phi)  # theta sweep at fixed phi
        rads = traces_2_phaseRads(scan)          # traces -> phase matrices (radians)
        bits = phases_rad_2_bit(rads, bit_num)   # quantize phases to bit matrices
        traces.extend(scan)
        phaseRads.extend(rads)
        phaseBits.extend(bits)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits

def get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Build beam-pointing traces and matching bit matrices by sweeping phi at each theta.

    :param bit_num: phase quantization bit count
    :param theta_start: first theta value (inclusive)
    :param theta_end: last theta value (exclusive)
    :param phi_start: phi sweep start
    :param phi_end: phi sweep end
    :return: (traces, phaseRads, phaseBits) accumulated over all theta values
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for theta in range(theta_start, theta_end, 1):
        scan = create_scan_traces_phi(theta=theta, phi_start=phi_start, phi_end=phi_end)  # phi sweep at fixed theta
        rads = traces_2_phaseRads(scan)          # traces -> phase matrices (radians)
        bits = phases_rad_2_bit(rads, bit_num)   # quantize phases to bit matrices
        traces.extend(scan)
        phaseRads.extend(rads)
        phaseBits.extend(bits)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def read_csv_to_traces(file_path, key_th='th_smooth', key_ph='ph_smooth'):
    """Read [theta, phi] pointing pairs from a CSV file with a header row.

    :param file_path: path to the CSV file
    :param key_th: column name holding the theta value
    :param key_ph: column name holding the phi value
    :return: list of [theta, phi] float pairs, in file order
    """
    with open(file_path, mode='r', newline='') as csvfile:
        return [[float(row[key_th]), float(row[key_ph])]
                for row in csv.DictReader(csvfile)]


def get_traces_phaseBits_by_uav_dataset(bit_num, path_csv, key_th='th_smooth', key_ph='ph_smooth',
                                        max_samples=3000):
    """Load beam-pointing traces from a UAV dataset CSV and build matching bit matrices.

    :param bit_num: phase quantization bit count
    :param path_csv: path to the dataset CSV (header row with theta/phi columns)
    :param key_th: column name holding the theta value
    :param key_ph: column name holding the phi value
    :param max_samples: cap on the number of traces used; default 3000 preserves
                        the previously hard-coded truncation
    :return: (traces, phaseRads, phaseBits)
    """
    logger.info(f"path_csv={path_csv}")
    # Read the pointing trajectory from the dataset.
    traces = read_csv_to_traces(path_csv, key_th, key_ph)
    # Limit the dataset size to keep downstream training tractable
    # (generalized: was a hard-coded traces[:3000]).
    traces = traces[:max_samples]
    # Traces -> phase matrices (radians).
    phaseRads = traces_2_phaseRads(traces)
    # Quantize phases to bit matrices.
    phaseBits = phases_rad_2_bit(phaseRads, bit_num)
    return traces, phaseRads, phaseBits


def main(path_dir, bit_num, mode, ae_enc_path, ae_dec_path,
         theta_start, theta_end, phi_start, phi_end):
    """End-to-end pipeline: build the dataset, encode the bit matrices, train
    the sequence predictor, decode its predictions, and save per-step
    comparison results, statistics and the trained model.

    :param path_dir: output directory for models, images and statistics
    :param bit_num: phase quantization bit count
    :param mode: dataset source; 1 = theta sweep, 2 = phi sweep, 3 = UAV CSV
                 dataset, anything else falls back to the phi sweep
    :param ae_enc_path: path to the pre-trained autoencoder encoder (.h5)
    :param ae_dec_path: path to the pre-trained autoencoder decoder (.h5)
    :param theta_start: theta range start (modes 1/2)
    :param theta_end: theta range end (modes 1/2)
    :param phi_start: phi range start (modes 1/2)
    :param phi_end: phi range end (modes 1/2)
    """
    # Load the pre-trained encoder/decoder pair
    encoder, decoder = load_models(ae_enc_path, ae_dec_path)

    # Build the dataset for the selected mode
    if mode == 1:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 2:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 3:
        # path_dataset = "./files/archive/uav_dataset_thph_interp.csv"           # finer interpolation result
        # key_th, key_ph = 'th_smooth', 'ph_smooth'
        path_dataset = "./files/archive/uav_dataset_interpl_2025-11-11.csv"    # dynamic-t, three tracks interpolated 50x
        key_th, key_ph = 'theta_smooth', 'phi_smooth'
        logger.info(f"mode=3, path_dataset: {path_dataset}, key_th:{key_th}, key_ph:{key_ph}")
        traces, phaseRads, phaseBits = get_traces_phaseBits_by_uav_dataset(bit_num, path_dataset, key_th, key_ph)
    else:
        # Fallback: same as mode 2 (phi sweep)
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)

    nRow = 48  # element count along x (theta direction)
    mCol = 48  # element count along y (phi direction)

    # Shape phaseBits for the convolutional autoencoder: keep the 2D layout, add a channel axis
    X = np.array([arr.reshape(nRow, mCol, 1) for arr in phaseBits])
    X = X.astype('float32')  # keep the 0/1 values
    # Compress with the encoder
    X_encoded = encoder.predict(X)

    # Windowing parameters
    sequence_length_input = 10  # input sequence length
    sequence_length_output = 5  # output sequence length
    X_enc, y_enc, y_phaseBit, y_trace = prepare_sequences(phaseBits, X_encoded, traces,
                                                          sequence_length_input, sequence_length_output)

    # Chronological 80/20 train/test split
    split = int(0.8 * len(X_enc))
    X_enc_train, X_enc_test = X_enc[:split], X_enc[split:]
    y_enc_train, y_enc_test = y_enc[:split], y_enc[split:]
    y_phaseBit_train, y_phaseBit_test = y_phaseBit[:split], y_phaseBit[split:]
    y_trace_train, y_trace_test = y_trace[:split], y_trace[split:]

    # Sanity-check shapes
    print("y_train shape:", y_enc_train.shape)
    print("X_train shape:", X_enc_train.shape)

    # Build the model; assumes the encoder emits 16x16x1 frames — TODO confirm against the encoder
    input_shape = (sequence_length_input, 16, 16, 1)
    # model = build_lstm_model(input_shape)
    # model = build_lstms_model(input_shape)
    # model = build_cnn_lstm_model(input_shape)
    # model = build_cnn_blstm_model(input_shape)
    model = build_convlstm_model(input_shape, sequence_length_output, nRow)
    # model = build_convlstm_attention_model(input_shape)
    model.summary()

    # Train the model (best val_loss checkpoint is written alongside the results)
    history = train_model(model, X_enc_train, y_enc_train, path_dir+'/best_lstm_model.h5', epochs=100, batch_size=32)

    # Predict on both the training and the test split
    datasets = {
        'train': {'X_enc': X_enc_train, 'y_enc': y_enc_train, 'y_phaseBit': y_phaseBit_train, 'y_trace': y_trace_train},
        'test': {'X_enc': X_enc_test, 'y_enc': y_enc_test, 'y_phaseBit': y_phaseBit_test, 'y_trace': y_trace_test}
    }
    # NOTE(review): keys 0..4 hard-code sequence_length_output = 5
    all_accuracies = {
        0: {}, 1: {}, 2: {}, 3: {}, 4: {}
    }

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        X_enc = dataset['X_enc']
        y_enc = dataset['y_enc']
        y_phaseBit = dataset['y_phaseBit']
        y_trace = dataset['y_trace']

        # Sequence-model predictions (still in the encoded space)
        y_out = model.predict(X_enc)

        # todo: the per-step dicts below should be converted to lists
        # (keys 0..4 again hard-code sequence_length_output = 5)
        y_out_seq_dec = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        y_enc_seq_dec = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        y_phase_seq = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        y_trace_seq = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        y_out_seq_dec_bit_2d = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        y_enc_seq_dec_bit_2d = {
            0: [], 1: [], 2: [], 3: [], 4: []
        }
        # Decompress the encoded results
        for i in range(len(y_out)):
            y_phaseBit_item, y_trace_item = y_phaseBit[i], y_trace[i]
            y_out_item = y_out[i]
            y_enc_item = y_enc[i]
            # Decode with the autoencoder decoder
            y_out_dec_item = decoder.predict(y_out_item)
            y_enc_dec_item = decoder.predict(y_enc_item)
            # Group results by prediction offset t+j
            for j in range(sequence_length_output):
                y_out_seq_dec[j].append(y_out_dec_item[j])
                y_enc_seq_dec[j].append(y_enc_dec_item[j])
                y_phase_seq[j].append(y_phaseBit_item[j])
                y_trace_seq[j].append(y_trace_item[j])

        # Post-process each prediction offset t+i
        for i in range(sequence_length_output):
            # Threshold probabilities into 0/1 binary values
            y_out_seq_dec_item = np.array(y_out_seq_dec[i])
            y_enc_seq_dec_item = np.array(y_enc_seq_dec[i])
            y_out_seq_dec_bit_item = (y_out_seq_dec_item > 0.5).astype(np.int32)
            y_enc_seq_dec_bit_item = (y_enc_seq_dec_item > 0.5).astype(np.int32)
            # Restore the decoded results to the original matrix shape
            y_out_seq_dec_bit_2d_item = [arr.reshape(nRow, mCol) for arr in y_out_seq_dec_bit_item]
            y_enc_seq_dec_bit_2d_item = [arr.reshape(nRow, mCol) for arr in y_enc_seq_dec_bit_item]
            # Record the results
            y_out_seq_dec_bit_2d[i] = y_out_seq_dec_bit_2d_item
            y_enc_seq_dec_bit_2d[i] = y_enc_seq_dec_bit_2d_item

        # Save per-offset results and collect accuracy statistics
        for i in range(sequence_length_output):
            stats = save_results_seq(path_dir + f'/dataset_{name}', i,
                                 y_enc_seq_dec_bit_2d[i], y_out_seq_dec_bit_2d[i], y_phase_seq[i], y_trace_seq[i],
                                 bit_num, name)
            all_accuracies[i][name] = stats

    # Write the train/test statistics for each prediction offset
    for i in range(sequence_length_output):
        save_statistics({
            'train_max': all_accuracies[i]['train']['max'],
            'train_min': all_accuracies[i]['train']['min'],
            'train_mean': all_accuracies[i]['train']['mean'],
            'train_std': all_accuracies[i]['train']['std'],
            'test_max': all_accuracies[i]['test']['max'],
            'test_min': all_accuracies[i]['test']['min'],
            'test_mean': all_accuracies[i]['test']['mean'],
            'test_std': all_accuracies[i]['test']['std']
        }, path_dir + "/result_" + str(i) + ".txt")

    # Save the final model
    model.save(path_dir+'/lstm_phaseBits_predictor.h5')



if __name__ == "__main__":
    # Build the command-line argument parser
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90)",
                        help="Base directory path (default: ../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90))")
    # Original set of autoencoder weights (kept for reference)
    # parser.add_argument("--ae_enc_path", type=str,
    #                     default="./files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5",
    #                     help="autocoder encoder path. "
    #                          "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5")
    # parser.add_argument("--ae_dec_path", type=str,
    #                     default="./files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5",
    #                     help="autocoder decoder path. "
    #                          "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5")
    # RES-Conv2D 64x64 encoder/decoder (kept for reference)
    # parser.add_argument("--ae_enc_path", type=str,
    #                     default="./files/feature/[enc2d]cnn2d-attention-res/[ae-2d-cnn-res][loss-3part]64x64-2025-11-17/encoder.h5",
    #                     help="autocoder encoder path. "
    #                          "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5")
    # parser.add_argument("--ae_dec_path", type=str,
    #                     default="./files/feature/[enc2d]cnn2d-attention-res/[ae-2d-cnn-res][loss-3part]64x64-2025-11-17/decoder.h5",
    #                     help="autocoder decoder path. "
    #                          "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5")
    # RES-Conv2D 48x48 encoder/decoder (active configuration)
    parser.add_argument("--ae_enc_path", type=str,
                        default="./files/feature/[enc2d]cnn2d-attention-res/[ae-2d-cnn-res][loss-3part]48x48-2025-11-14/encoder.h5",
                        help="autocoder encoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5")
    parser.add_argument("--ae_dec_path", type=str,
                        default="./files/feature/[enc2d]cnn2d-attention-res/[ae-2d-cnn-res][loss-3part]48x48-2025-11-14/decoder.h5",
                        help="autocoder decoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5")
    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--mode", type=int, default=2, help="1: by theta, 2: by phi (default: 2)")
    parser.add_argument("--theta_start", type=int, default=1, help="theta_start (default: 1)")
    parser.add_argument("--theta_end", type=int, default=60, help="theta_end (default: 60)")
    parser.add_argument("--phi_start", type=int, default=0, help="phi_start (default: 0)")
    parser.add_argument("--phi_end", type=int, default=360, help="phi_end (default: 360)")

    args = parser.parse_args()

    base_path = args.base_path
    ae_enc_path, ae_dec_path = args.ae_enc_path, args.ae_dec_path
    mode = args.mode
    bit_num = args.bit_num
    theta_start, theta_end, phi_start, phi_end = args.theta_start, args.theta_end, args.phi_start, args.phi_end

    # Initialize logging (module-level `logger` is used by the functions above)
    logger = setup_logging(base_path + "/trace.txt")
    # Log the run configuration
    logger.info(f"Starting execution with base_path: {base_path}")
    logger.info(f"ae_enc_path: {ae_enc_path}, ae_dec_path: {ae_dec_path}")
    logger.info(f"Using bit_num: {bit_num}")
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")

    main(path_dir=base_path, bit_num=bit_num, mode=mode, ae_enc_path=ae_enc_path, ae_dec_path=ae_dec_path,
         theta_start=theta_start, theta_end=theta_end, phi_start=phi_start, phi_end=phi_end)
