import argparse
import csv
import pandas as pd
import numpy as np
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, LSTM, Dense, Dropout, Flatten, Bidirectional
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_2x2, plot_images_2x2, save_images_3x2, plot_images_3x2


import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# ============================================= 损失函数相关 =======================================
from tensorflow.image import ssim
import tensorflow as tf

def ssim_loss(y_true, y_pred):
    """Structural-similarity (SSIM) loss.

    Measures structural similarity between target and prediction rather
    than raw pixel differences, so reconstructions better match human
    perception; the trade-off is a higher compute cost than plain MSE.
    Returns 1 - mean(SSIM): a perfect reconstruction yields 0.
    """
    similarity = tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return 1 - similarity


def mixed_loss(y_true, y_pred):
    """Combined MSE + SSIM loss.

    Adds a pixel-wise mean-squared-error term to a structural term
    (1 - mean SSIM), so the model is pushed to match both fine detail
    and overall structure of the target.
    """
    pixel_term = tf.reduce_mean(tf.square(y_true - y_pred))
    structure_term = 1 - tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return pixel_term + structure_term


def smooth_loss(y_true, y_pred):
    """MSE loss with a second-difference smoothness penalty.

    :param y_true: ground-truth tensor, differenced along axis 1
    :param y_pred: predicted tensor of the same shape
    :return: scalar loss = MSE + 0.1 * curvature-mismatch penalty
    """
    reconstruction = tf.reduce_mean(tf.square(y_true - y_pred))  # mean squared error
    # First differences along axis 1 of both tensors.
    first_true = y_true[:, 1:] - y_true[:, :-1]
    first_pred = y_pred[:, 1:] - y_pred[:, :-1]
    # Second differences (discrete curvature); penalising the mismatch
    # between true and predicted curvature discourages jagged outputs.
    second_true = first_true[:, 1:] - first_true[:, :-1]
    second_pred = first_pred[:, 1:] - first_pred[:, :-1]
    penalty = tf.reduce_mean(tf.square(second_true - second_pred))
    return reconstruction + 0.1 * penalty  # smoothness weight


# ============================================= 读取自编码器相关 =======================================
def load_models(path_encoder, path_decoder):
    """Load the pre-trained autoencoder halves (encoder, decoder) from disk."""
    return load_model(path_encoder), load_model(path_decoder)

# ============================================= LSTM相关 =======================================
def prepare_sequences(phaseBits, phaseBitEncs, traces, sequence_length=10):
    """Convert the code-matrix series into sliding-window LSTM samples.

    A window of ``sequence_length`` encoded matrices forms one input; the
    matrix that immediately follows the window is its target.

    :param phaseBits: raw phase-bit matrices (original encoding)
    :param phaseBitEncs: encoder-compressed matrices, same ordering
    :param traces: beam-pointing angles, kept for comparison plots
    :param sequence_length: number of history steps per input window
    :return: (X_enc, y_enc, y_phaseBit, y_trace) as numpy arrays
    """
    n_samples = len(phaseBits) - sequence_length
    windows = [phaseBitEncs[start:start + sequence_length] for start in range(n_samples)]
    targets_enc = [phaseBitEncs[start + sequence_length] for start in range(n_samples)]
    targets_bit = [phaseBits[start + sequence_length] for start in range(n_samples)]
    targets_trace = [traces[start + sequence_length] for start in range(n_samples)]
    return (np.array(windows), np.array(targets_enc),
            np.array(targets_bit), np.array(targets_trace))


def build_lstm_model(input_shape):
    """Build an LSTM model that predicts the next 16x16 encoded frame.

    :param input_shape: input shape, e.g. (sequence_length, 16, 16)
    :return: compiled Keras Sequential model
    """
    # Fix: TimeDistributed and Reshape are not in the top-of-file imports;
    # the original relied on import statements that appear ~80 lines further
    # down the module. Import them locally so this function is self-contained
    # and survives refactors of the later import blocks.
    from tensorflow.keras.layers import TimeDistributed, Reshape

    model = Sequential([
        # Per-time-step dense projection of each 16x16 frame.
        TimeDistributed(Dense(64, activation='relu'), input_shape=input_shape),
        LSTM(128, return_sequences=False),  # keep only the final state
        # Regression head producing a flattened 16x16 frame.
        Dense(16 * 16, activation='relu'),
        Reshape((16, 16))
    ])
    # NOTE(review): 'accuracy' is not a meaningful metric for this regression
    # task; kept only so the training logs stay backward compatible.
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


from tensorflow.keras.layers import ConvLSTM2D, BatchNormalization, Dense, Reshape, Flatten, Conv2D

def build_convlstm_model(input_shape):
    """Build a ConvLSTM model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled ConvLSTM model
    """
    model = Sequential()
    # Spatio-temporal feature extraction; only the final state is kept.
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same',
                         return_sequences=False, input_shape=input_shape))
    model.add(BatchNormalization())
    # Collapse the 64 feature maps into a single-channel (16, 16, 1) output.
    model.add(Conv2D(filters=1, kernel_size=(1, 1), padding='same', activation='relu'))
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


from tensorflow.keras.layers import ConvLSTM2D, BatchNormalization, Conv2D, Attention, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import RMSprop


def build_convlstm_attention_model(input_shape):
    """Build a two-layer ConvLSTM model with self-attention.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled Keras functional Model
    """
    frames = Input(shape=input_shape)
    # Stacked ConvLSTM layers: the first keeps the time axis, the second
    # reduces the sequence to a single spatial feature map.
    hidden = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                        return_sequences=True)(frames)
    hidden = BatchNormalization()(hidden)
    hidden = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                        return_sequences=False)(hidden)
    hidden = BatchNormalization()(hidden)
    # Self-attention over the feature map (query and value are the same).
    attended = Attention()([hidden, hidden])
    # Project down to a single-channel (16, 16, 1) output.
    prediction = Conv2D(filters=1, kernel_size=(1, 1), padding='same',
                        activation='linear')(attended)
    model = Model(frames, prediction)
    # mixed_loss trades off per-pixel error against structural similarity.
    model.compile(optimizer=RMSprop(learning_rate=1e-4),
                  loss=mixed_loss,
                  metrics=['accuracy'])
    return model


from tensorflow.keras.layers import TimeDistributed, Conv2D, MaxPooling2D, Reshape, Bidirectional, LSTM, Dense, Dropout


def build_cnn_lstm_model(input_shape):
    """Build a hybrid CNN + LSTM model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled hybrid model
    """
    model = Sequential()
    # Per-frame CNN feature extraction.
    model.add(TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                              input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))  # shrink spatial dims
    model.add(TimeDistributed(Dropout(0.3)))
    # Flatten every time step to a vector before the recurrent layer.
    model.add(TimeDistributed(Reshape((-1,))))
    model.add(LSTM(128, return_sequences=False))  # keep final state only
    model.add(Dropout(0.3))
    # Regression head producing a 16x16 single-channel frame.
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(16 * 16))
    model.add(Reshape((16, 16, 1)))
    # Lower learning rate for stability.
    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model


def build_cnn_blstm_model(input_shape):
    """Build an improved CNN + bidirectional LSTM hybrid model.

    :param input_shape: input shape (sequence_length, 16, 16, 1)
    :return: compiled hybrid model
    """
    model = Sequential()
    # Per-frame CNN feature extraction with heavy dropout.
    model.add(TimeDistributed(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                              input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.5)))
    # Flatten every time step to a vector before the recurrent layer.
    model.add(TimeDistributed(Reshape((-1,))))
    # Bidirectional recurrence over the flattened frame features.
    model.add(Bidirectional(LSTM(128, return_sequences=False)))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(16 * 16))
    model.add(Reshape((16, 16, 1)))
    # mixed_loss keeps both per-pixel detail and structural similarity.
    model.compile(optimizer='adam', loss=mixed_loss)
    return model




def train_model(model, X_train, y_train, path_model_checkpoint, epochs=100, batch_size=32):
    """Train the model with early stopping, checkpointing and LR decay.

    :param model: compiled Keras model
    :param X_train: training inputs
    :param y_train: training targets
    :param path_model_checkpoint: file path where the best weights are saved
    :param epochs: maximum number of training epochs
    :param batch_size: mini-batch size
    :return: the Keras training History object
    """
    early_stop = EarlyStopping(patience=10, monitor='val_loss', restore_best_weights=True)
    checkpoint = ModelCheckpoint(path_model_checkpoint, save_best_only=True)
    # Halve-ish the LR (factor 0.2) whenever validation loss plateaus.
    lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-6)
    return model.fit(X_train, y_train,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_split=0.2,
                     callbacks=[early_stop, checkpoint, lr_schedule],
                     verbose=1)


# ============================================= 结果相关 =======================================
def save_results(base_dir, y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays, bit_num, name):
    """Evaluate predictions against ground truth and save comparison artefacts.

    For each sample: computes element-wise accuracy between the predicted
    and ground-truth phase-bit matrices, renders a 3x2 comparison image
    (matrices + radiation patterns) into ``base_dir``/compare, and collects
    beam-pointing / PSLL metrics into a summary CSV. Logs overall and
    per-sample accuracy statistics.

    :param base_dir: output directory for this dataset split
    :param y_enc_arrays: matrices decoded back from the encoder targets
    :param y_out_arrays: matrices decoded back from the model predictions
    :param y_phaseBit_arrays: original ground-truth phase-bit matrices
    :param y_trace_arrays: beam-pointing angle per sample
    :param bit_num: quantisation bit depth used by the pattern computation
    :param name: split name; "test" additionally saves per-sample CSVs
    :return: dict with max/min/mean/std of the per-sample accuracies

    NOTE(review): uses the module-level ``logger`` created in the
    ``__main__`` block; calling this from another entry point without that
    setup raises NameError.
    """
    dir_compare = base_dir + "/compare/"
    total_correct = 0
    total_elements = 0
    accuracies = []
    res = []
    for idx, (y_enc, y_out, y_phaseBit, y_trace) \
            in enumerate(zip(y_enc_arrays, y_out_arrays, y_phaseBit_arrays, y_trace_arrays)):
        # Element-wise accuracy of prediction vs. ground truth.
        correct = np.sum(y_phaseBit == y_out)
        total_correct += correct
        total_elements += y_phaseBit.size

        accuracy = correct / y_phaseBit.size
        accuracies.append(accuracy)

        # NOTE(review): `idx % 1 == 0` is always true — looks like a sampling
        # stride left at 1, so every sample is rendered and recorded.
        if idx % 1 == 0:
            # Radiation pattern, beam pointing and PSLL for each matrix.
            pattern_enc, point_enc, psll_enc = phaseBit_2_pattern_point(y_enc, bit_num)
            pattern_out, point_out, psll_out = phaseBit_2_pattern_point(y_out, bit_num)
            pattern_phaseBit, point_phaseBit, psll_phaseBit = phaseBit_2_pattern_point(y_phaseBit, bit_num)

            # Save the 3x2 comparison image (real / encoded / predicted).
            # plot_images_3x2(y_phaseBit, y_enc, y_out,
            #                 pattern_phaseBit, pattern_enc, pattern_out,
            #                 f"Accuracy = {accuracy:.2%}, "
            #                 f"phaseBit(real):[{point_phaseBit[1]}]-[{psll_phaseBit[0]:.2f},{psll_phaseBit[1]}], "
            #                 f"phaseBit(enc):[{point_enc[1]}]-[{psll_enc[0]:.2f},{psll_enc[1]}], "
            #                 f"phaseBit(out):[{point_out[1]}]-[{psll_out[0]:.2f},{psll_out[1]}]")
            save_images_3x2(dir_compare + str(idx) + ".jpg",
                            y_phaseBit, f"phaseBit(real):[{point_phaseBit[1]}]-[{psll_phaseBit[0]:.2f},{psll_phaseBit[1]}]",
                            y_enc, f"phaseBit(enc):[{point_enc[1]}]-[{psll_enc[0]:.2f},{psll_enc[1]}]",
                            y_out, f"phaseBit(out):[{point_out[1]}]-[{psll_out[0]:.2f},{psll_out[1]}]",
                            pattern_phaseBit, "pattern(real)",
                            pattern_enc, "pattern(enc)",
                            pattern_out, "pattern(out)",
                            f"trace = {y_trace}, Accuracy = {accuracy:.2%}")

            # Record idx, beam pointing, PSLL and accuracy for the summary CSV.
            res.append([str(idx),
                        point_phaseBit[1], point_enc[1], point_out[1],
                        psll_phaseBit[0], psll_enc[0], psll_out[0],
                        psll_phaseBit[1], psll_enc[1], psll_out[1],
                        accuracy,
                        y_trace, y_trace[0], y_trace[1]])

            if name == "test":
                # Dump the decoded test-set prediction for this sample.
                np.savetxt(os.path.join(dir_compare, f'y_out_{idx}.csv'), y_out, delimiter=',')

        # Per-sample CSV dump (disabled).
        # np.savetxt(os.path.join(base_dir, f'y_real_{idx}.csv'), y_real, delimiter=',')
        # np.savetxt(os.path.join(base_dir, f'y_out_{idx}.csv'), y_out, delimiter=',')

    # Save the PSLL / beam-pointing summary as a DataFrame.
    df = pd.DataFrame(res, columns=['idx', 'point_ori', 'point_enc', 'point_out', 'psll_ori', 'psll_enc', 'psll_out',
                                    'pos_sl_ori', 'pos_sl_enc', 'pos_sl_out', 'accuracy',
                                    'trace', 'trace_th_smooth', 'trace_ph_smooth'])
    # Write the summary CSV.
    df.to_csv(os.path.join(dir_compare, f'y_out_psll_point.csv'), index=False)

    # Overall statistics for this dataset split.
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write each statistic as a 'Key: value' line to *file_path*."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


# ============================================= 主流程相关 =======================================
def get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and matching phase-bit matrices.

    Runs a theta sweep once for every integer phi in [phi_start, phi_end)
    and accumulates the traces, phase matrices (radians) and their
    bit-quantised versions.
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end, 1):
        scan = create_scan_traces_theta(theta_start, theta_end, phi)  # sweep along theta
        rads = traces_2_phaseRads(scan)            # trace -> phase matrices (radians)
        bits = phases_rad_2_bit(rads, bit_num)     # quantise to bit_num bits
        traces += scan
        phaseRads += rads
        phaseBits += bits
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits

def get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and matching phase-bit matrices.

    Runs a phi sweep once for every integer theta in [theta_start,
    theta_end) and accumulates the traces, phase matrices (radians) and
    their bit-quantised versions.
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for theta in range(theta_start, theta_end, 1):
        scan = create_scan_traces_phi(theta=theta, phi_start=phi_start, phi_end=phi_end)  # sweep along phi
        rads = traces_2_phaseRads(scan)            # trace -> phase matrices (radians)
        bits = phases_rad_2_bit(rads, bit_num)     # quantise to bit_num bits
        traces += scan
        phaseRads += rads
        phaseBits += bits
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def read_csv_to_traces(file_path, key_th='th_smooth', key_ph='ph_smooth'):
    """Read [theta, phi] pairs from the named columns of a CSV file."""
    with open(file_path, mode='r', newline='') as csvfile:
        return [[float(row[key_th]), float(row[key_ph])]
                for row in csv.DictReader(csvfile)]


def get_traces_phaseBits_by_uav_dataset(bit_num, path_csv, key_th='th_smooth', key_ph='ph_smooth',
                                        max_samples=3000):
    """Load beam-pointing traces from a UAV dataset CSV and build the
    corresponding phase-bit matrices.

    :param bit_num: quantisation bit depth for the phase matrices
    :param path_csv: CSV file containing the smoothed trace columns
    :param key_th: column name for the theta values
    :param key_ph: column name for the phi values
    :param max_samples: cap on the number of traces used. Generalises the
        previously hard-coded ``traces[:3000]``; pass ``None`` to keep the
        full dataset. Default preserves the original behaviour.
    :return: (traces, phaseRads, phaseBits)
    """
    logger.info(f"path_csv={path_csv}")
    # Read the trace headings from the dataset.
    traces = read_csv_to_traces(path_csv, key_th, key_ph)
    if max_samples is not None:
        traces = traces[:max_samples]
    # Trace -> phase matrices (radians).
    phaseRads = traces_2_phaseRads(traces)
    # Quantise to bit_num bits.
    phaseBits = phases_rad_2_bit(phaseRads, bit_num)
    return traces, phaseRads, phaseBits


def main(path_dir, bit_num, mode, ae_enc_path, ae_dec_path,
         theta_start, theta_end, phi_start, phi_end):
    """Full pipeline: build the dataset, encode it, train the sequence
    model, evaluate on both splits and persist results.

    :param path_dir: output directory for model files, images and stats
    :param bit_num: quantisation bit depth for the phase matrices
    :param mode: 1 = theta sweep, 2 = phi sweep, 3 = UAV dataset CSV;
        any other value falls back to the phi sweep
    :param ae_enc_path: path to the pre-trained encoder .h5
    :param ae_dec_path: path to the pre-trained decoder .h5
    :param theta_start: theta sweep start (modes 1/2)
    :param theta_end: theta sweep end (modes 1/2)
    :param phi_start: phi sweep start (modes 1/2)
    :param phi_end: phi sweep end (modes 1/2)
    """
    # Load the pre-trained autoencoder (encoder + decoder).
    encoder, decoder = load_models(ae_enc_path, ae_dec_path)

    # Build (trace, phase matrix) data according to the selected mode.
    if mode == 1:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_theta(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 2:
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)
    elif mode == 3:
        # path_dataset = "../files/archive/uav_dataset_thph_interp.csv"           # finer interpolation result
        # key_th, key_ph = 'th_smooth', 'ph_smooth'
        path_dataset = "../files/archive/uav_dataset_interpl_2025-10-21.csv"    # dynamic-t, 3-curve, 10x interpolation result
        key_th, key_ph = 'theta_smooth', 'phi_smooth'
        logger.info(f"mode=3, path_dataset: {path_dataset}, key_th:{key_th}, key_ph:{key_ph}")
        traces, phaseRads, phaseBits = get_traces_phaseBits_by_uav_dataset(bit_num, path_dataset, key_th, key_ph)
    else:
        # Fallback: behave like mode 2.
        traces, phaseRads, phaseBits \
            = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end, phi_start, phi_end)

    # Reshape phaseBits for the 2-D convolutional autoencoder
    # (keep the 2-D structure, add a channel dimension).
    X = np.array([arr.reshape(64, 64, 1) for arr in phaseBits])
    X = X.astype('float32')  # keep 0/1 values
    # Compress with the encoder.
    X_encoded = encoder.predict(X)

    # Build sliding-window training samples.
    sequence_length = 10  # history window length
    X_enc, y_enc, y_phaseBit, y_trace = prepare_sequences(phaseBits, X_encoded, traces, sequence_length)

    # Chronological 80/20 train/test split (no shuffling).
    split = int(0.8 * len(X_enc))
    X_enc_train, X_enc_test = X_enc[:split], X_enc[split:]
    y_enc_train, y_enc_test = y_enc[:split], y_enc[split:]
    y_phaseBit_train, y_phaseBit_test = y_phaseBit[:split], y_phaseBit[split:]
    y_trace_train, y_trace_test = y_trace[:split], y_trace[split:]

    # Sanity-check shapes.
    print("y_train shape:", y_enc_train.shape)
    print("X_train shape:", X_enc_train.shape)

    # Build the model.
    # NOTE(review): assumes the encoder emits 16x16(x1) codes — confirm
    # against the autoencoder actually loaded.
    input_shape = (sequence_length, 16, 16, 1)
    # model = build_lstm_model(input_shape)
    # model = build_lstms_model(input_shape)
    # model = build_cnn_lstm_model(input_shape)
    # model = build_cnn_blstm_model(input_shape)
    model = build_convlstm_model(input_shape)
    # model = build_convlstm_attention_model(input_shape)
    model.summary()

    # Train the model.
    history = train_model(model, X_enc_train, y_enc_train, path_dir+'/best_lstm_model.h5', epochs=50, batch_size=32)

    # Predict on both the training and the test split.
    datasets = {
        'train': {'X_enc': X_enc_train, 'y_enc': y_enc_train, 'y_phaseBit': y_phaseBit_train, 'y_trace': y_trace_train},
        'test': {'X_enc': X_enc_test, 'y_enc': y_enc_test, 'y_phaseBit': y_phaseBit_test, 'y_trace': y_trace_test}
    }
    all_accuracies = {}

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        X_enc = dataset['X_enc']
        y_enc = dataset['y_enc']
        y_phaseBit = dataset['y_phaseBit']
        y_trace = dataset['y_trace']

        # Sequence-model prediction in the encoded space.
        y_out = model.predict(X_enc)

        # Decompress with the decoder.
        y_out_dec = decoder.predict(y_out)
        y_enc_dec = decoder.predict(y_enc)
        # Threshold probabilities back to 0/1 bits.
        y_out_dec_bit = (y_out_dec > 0.5).astype(np.int32)
        y_enc_dec_bit = (y_enc_dec > 0.5).astype(np.int32)
        # Restore the original 64x64 shape.
        y_out_dec_bit_2d = [arr.reshape(64, 64) for arr in y_out_dec_bit]
        y_enc_dec_bit_2d = [arr.reshape(64, 64) for arr in y_enc_dec_bit]

        # Save per-sample comparisons and collect the split's statistics.
        stats = save_results(path_dir + f'/dataset_{name}', y_enc_dec_bit_2d, y_out_dec_bit_2d, y_phaseBit, y_trace,
                             bit_num, name)
        all_accuracies[name] = stats

    # Persist the summary statistics.
    save_statistics({
        'train_max': all_accuracies['train']['max'],
        'train_min': all_accuracies['train']['min'],
        'train_mean': all_accuracies['train']['mean'],
        'train_std': all_accuracies['train']['std'],
        'test_max': all_accuracies['test']['max'],
        'test_min': all_accuracies['test']['min'],
        'test_mean': all_accuracies['test']['mean'],
        'test_std': all_accuracies['test']['std']
    }, path_dir + "/result.txt")

    # Persist the trained model.
    model.save(path_dir+'/lstm_phaseBits_predictor.h5')


# ================================================= 测试代码 ======================================================
def test_autocoder(bit_num=1, theta_start=1, theta_end=60, phi_start=0, phi_end=360):
    """Smoke-test the autoencoder round trip on a few generated matrices.

    Fixes a crash in the original: ``get_traces_phaseBits_by_phi`` takes
    four range arguments with no defaults, so calling it with ``bit_num``
    alone raised TypeError. The ranges are now parameters whose defaults
    match the CLI defaults, keeping the zero-argument call working.

    :param bit_num: quantisation bit depth
    :param theta_start: theta sweep start
    :param theta_end: theta sweep end
    :param phi_start: phi sweep start
    :param phi_end: phi sweep end
    """
    encoder, decoder = load_models("../files/dissertation/chapter_3/tf-ae-bit-cnn2d-cbam/encoder.h5",
                                   "../files/dissertation/chapter_3/tf-ae-bit-cnn2d-cbam/decoder.h5")
    traces, phaseRads, phaseBits = get_traces_phaseBits_by_phi(bit_num, theta_start, theta_end,
                                                               phi_start, phi_end)

    # Reshape for the 2-D convolutional autoencoder (add channel dim).
    X = np.array([arr.reshape(64, 64, 1) for arr in phaseBits])
    X = X.astype('float32')  # keep 0/1 values

    # Only round-trip the first few samples.
    X = X[:5]
    X_enc = encoder.predict(X)
    # fixme: insert the LSTM prediction step here
    X_dec = decoder.predict(X_enc)
    # Threshold probabilities back to 0/1 bits.
    X_dec_bit = (X_dec > 0.5).astype(np.int32)
    # Restore original shapes (64x64 matrices; 16x16 encoded view).
    phase_dec = [arr.reshape(64, 64) for arr in X_dec_bit]
    phase_enc = [arr.reshape(16, 16) for arr in X_enc]
    phase_ori = [arr.reshape(64, 64) for arr in X]

    total_correct = 0
    total_elements = 0
    accuracies = []
    for idx, (y_real, x_enc, y_dec) in enumerate(zip(phase_ori, phase_enc, phase_dec)):
        # Element-wise reconstruction accuracy.
        correct = np.sum(y_real == y_dec)
        total_correct += correct
        total_elements += y_real.size

        accuracy = correct / y_real.size
        accuracies.append(accuracy)

        pattern_real, point_real, psll_real = phaseBit_2_pattern_point(y_real, bit_num)
        pattern_dec, point_dec, psll_dec = phaseBit_2_pattern_point(y_dec, bit_num)

        # plot_images_2x2(y_real, y_dec, pattern_real, pattern_dec,
        #                 f"Accuracy = {accuracy:.2%}, "
        #                 f"phaseBit(real):[{point_real[1]}]-[{psll_real[0]:.2f},{psll_real[1]}], "
        #                 f"phaseBit(dec):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]")
        plot_images_3x2(y_real, x_enc, y_dec, pattern_real, x_enc, pattern_dec,
                        f"Accuracy = {accuracy:.2%}, "
                        f"phaseBit(real):[{point_real[1]}]-[{psll_real[0]:.2f},{psll_real[1]}], "
                        f"phaseBit(dec):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]")



if __name__ == "__main__":
    # Build the command-line argument parser.
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90)",
                        help="Base directory path (default: ../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90))")
    parser.add_argument("--ae_enc_path", type=str,
                        default="../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5",
                        help="autocoder encoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/encoder.h5")
    parser.add_argument("--ae_dec_path", type=str,
                        default="../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5",
                        help="autocoder decoder path. "
                             "default: ../files/feature/[enc2d]cnn2d-attention/[enc-16x16][loss-ML][dataset-theta(1,60)-phi(0,360)]/decoder.h5")
    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--mode", type=int, default=2, help="1: by theta, 2: by phi (default: 2)")
    parser.add_argument("--theta_start", type=int, default=1, help="theta_start (default: 1)")
    parser.add_argument("--theta_end", type=int, default=60, help="theta_end (default: 60)")
    parser.add_argument("--phi_start", type=int, default=0, help="phi_start (default: 0)")
    parser.add_argument("--phi_end", type=int, default=360, help="phi_end (default: 360)")

    args = parser.parse_args()

    base_path = args.base_path
    ae_enc_path, ae_dec_path = args.ae_enc_path, args.ae_dec_path
    mode = args.mode
    bit_num = args.bit_num
    theta_start, theta_end, phi_start, phi_end = args.theta_start, args.theta_end, args.phi_start, args.phi_end

    # Initialise logging. NOTE: this creates the module-level `logger` that
    # save_results / get_traces_* / main rely on, so it must run before main().
    logger = setup_logging(base_path + "/trace.txt")
    # Record the run configuration.
    logger.info(f"Starting execution with base_path: {base_path}")
    logger.info(f"ae_enc_path: {ae_enc_path}, ae_dec_path: {ae_dec_path}")
    logger.info(f"Using bit_num: {bit_num}")
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")

    # test_autocoder()
    main(path_dir=base_path, bit_num=bit_num, mode=mode, ae_enc_path=ae_enc_path, ae_dec_path=ae_dec_path,
         theta_start=theta_start, theta_end=theta_end, phi_start=phi_start, phi_end=phi_end)
