import argparse
import numpy as np
import os
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Dense, Reshape, Dropout, BatchNormalization
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_3x2, plot_images_3x2


# ============================================= Attention modules =======================================
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, GlobalAveragePooling2D, Dense, Multiply, Reshape, Concatenate, Lambda
from tensorflow.keras.models import Model
import tensorflow as tf

def se_block(input_tensor, ratio=16):
    """Squeeze-and-Excitation (SE) channel-attention block.

    Learns per-channel gates from globally pooled features and rescales
    the input feature map channel-wise.
    """
    num_channels = input_tensor.shape[-1]
    # Squeeze: collapse spatial dimensions into one descriptor per channel.
    squeezed = GlobalAveragePooling2D()(input_tensor)
    squeezed = Reshape((1, 1, num_channels))(squeezed)
    # Excitation: bottleneck MLP producing per-channel gates in (0, 1).
    gates = Dense(num_channels // ratio, activation='relu', use_bias=False)(squeezed)
    gates = Dense(num_channels, activation='sigmoid', use_bias=False)(gates)
    # Scale: reweight the original channels by the learned gates.
    return Multiply()([input_tensor, gates])

def cbam_block(input_tensor, ratio=16):
    """CBAM attention block: channel attention (SE) followed by spatial attention."""
    # Channel attention first.
    refined = se_block(input_tensor, ratio)
    # Spatial attention: pool across channels, then learn a 7x7 spatial mask.
    channel_axis = 1 if tf.keras.backend.image_data_format() == "channels_first" else -1
    avg_pool = Lambda(lambda t: tf.reduce_mean(t, axis=channel_axis, keepdims=True))(refined)
    max_pool = Lambda(lambda t: tf.reduce_max(t, axis=channel_axis, keepdims=True))(refined)
    pooled = Concatenate(axis=channel_axis)([avg_pool, max_pool])
    spatial_mask = Conv2D(1, (7, 7), padding='same', activation='sigmoid')(pooled)
    return Multiply()([refined, spatial_mask])


# ============================================= Residual blocks =========================================
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, UpSampling2D,
                                   BatchNormalization, Activation, Add, Reshape,
                                   Lambda, Dense, Flatten, Dropout)

def residual_block(x, filters):
    """Basic residual block: two 3x3 conv+BN layers with an identity skip.

    When the incoming channel count differs from `filters`, the skip path is
    projected with a 1x1 convolution so the two branches can be added.
    """
    skip = x
    out = Conv2D(filters, (3, 3), padding='same')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(filters, (3, 3), padding='same')(out)
    out = BatchNormalization()(out)
    # Project the skip path only when the channel counts do not match.
    if skip.shape[-1] != filters:
        skip = Conv2D(filters, (1, 1), padding='same')(skip)
    merged = Add()([skip, out])
    return Activation('relu')(merged)


def build_conv2d_res_autoencoder_16x16(input_shape=(64, 64, 1)):
    """Build a convolutional autoencoder with residual blocks.

    Encoder: two conv + residual stages each followed by 2x2 max pooling
    (spatial size HxW -> H/4 x W/4), a third conv + residual stage, then a
    1-channel bottleneck convolution. The decoder mirrors the encoder with
    upsampling and ends in a sigmoid conv so outputs lie in [0, 1].

    Args:
        input_shape: shape of a single sample, (H, W, C).

    Returns:
        (autoencoder, encoder, decoder): three Keras Models sharing layers —
        full input->output, input->bottleneck, and bottleneck->output.
    """
    # FIX: this log line previously reported "build_conv2d_cbam_autoencoder_16x16",
    # the name of a different builder in this file.
    logger.info("build_conv2d_res_autoencoder_16x16.")

    inputs = Input(shape=input_shape)

    # Encoder
    x = Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)
    x = residual_block(x, 32)
    x = MaxPooling2D((2, 2))(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    x = residual_block(x, 64)
    x = MaxPooling2D((2, 2))(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)
    x = residual_block(x, 128)

    # 1-channel bottleneck feature map (H/4 x W/4 x 1)
    encoded = Conv2D(1, (3, 3), activation='relu', padding='same')(x)

    # Decoder
    x = Conv2D(128, (3, 3), padding='same', activation='relu')(encoded)
    x = residual_block(x, 128)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    x = residual_block(x, 64)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), padding='same', activation='relu')(x)
    x = residual_block(x, 32)

    # Sigmoid keeps the reconstruction in [0, 1], matching the 0/1 phase bits.
    outputs = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    # Full autoencoder plus the two shared-layer sub-models.
    autoencoder = Model(inputs, outputs)
    encoder = Model(inputs, encoded)
    decoder = Model(encoded, outputs)

    return autoencoder, encoder, decoder



# ============================================= Fitness / loss functions =======================================
from tensorflow.image import ssim
import tensorflow as tf

def ssim_loss(y_true, y_pred):
    """Structural-similarity (SSIM) loss: 1 - mean SSIM.

    Measures structural similarity between input and reconstruction instead
    of raw pixel-wise differences, so minimizing it preserves image structure
    (closer to human perception), at higher compute cost than plain MSE.
    Use when the reconstruction should be structurally similar to the input.
    """
    mean_ssim = tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return 1 - mean_ssim


def mixed_loss(y_true, y_pred):
    """Mixed loss: pixel-wise MSE plus (1 - SSIM).

    Combines the strengths of MSE (detail fidelity) and SSIM (structural
    similarity); suitable when the reconstruction should match the original
    both in fine detail and overall structure.
    """
    mse_term = tf.reduce_mean(tf.square(y_true - y_pred))
    ssim_term = 1 - tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))
    return mse_term + ssim_term


from beam_trace.util_fitness_pattern_32x32 import pattern_loss, pattern_loss_ssim, pattern_loss_correlation

def loss_phase_mse_ssim_pattern_ssim(y_true, y_pred):
    """Triple loss: phase MSE + phase SSIM + radiation-pattern SSIM.

    Weighted combination of pixel-level MSE, structural similarity on the
    phase matrices, and an SSIM-based loss on the resulting radiation
    patterns (via `pattern_loss_ssim`).
    """
    # Phase-domain terms.
    mse = tf.reduce_mean(tf.square(y_true - y_pred))
    ssim_val = tf.reduce_mean(ssim(y_true, y_pred, max_val=1.0))

    # FIX: renamed from `pattern_loss`, which shadowed the imported
    # `pattern_loss` function from util_fitness_pattern_32x32.
    pattern_term = pattern_loss_ssim(y_true, y_pred)

    # NOTE(review): under tf.function tracing this logs symbolic tensors once
    # at trace time, not per-batch values — use tf.print if per-step values
    # are actually needed.
    logger.info(f"MSE:{mse}, ssim_val:{ssim_val}, pattern_loss:{pattern_term}")

    # Weighted combination (weights tunable per task).
    alpha = 0.2  # MSE weight
    beta = 0.6  # SSIM weight
    gamma = 0.2  # Pattern weight
    total_loss = alpha * mse + beta * (1 - ssim_val) + gamma * pattern_term

    return total_loss


import math

def loss_phase_ssim_pattern_ssim(y_true, y_pred):
    """Adaptive blend of pattern-SSIM loss and pattern-MSE loss.

    todo: [test this part][metric may need changing to NCC] adaptive alpha,
    alpha * pattern-SSIM + (1 - alpha) * pattern-MSE
    """
    # Pattern-domain MSE and SSIM losses.
    pattern_mse = pattern_loss(y_true, y_pred)
    pattern_ssim = pattern_loss_ssim(y_true, y_pred)
    # Inverse-S weighting curve y = cos(90deg * ssim), computed with tensors.
    half_pi = tf.constant(math.pi / 2)
    weight = tf.cos(half_pi * pattern_ssim)
    # Adaptive weighted combination of the two terms.
    return weight * pattern_ssim + (1 - weight) * pattern_mse


# ============================================= Autoencoder builders =======================================
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

def build_conv2d_se_autoencoder_16x16(input_shape=(64, 64, 1)):
    """Convolutional autoencoder with an SE attention block after every conv stage.

    Encoder downsamples twice (HxW -> H/4 x W/4) into a 1-channel bottleneck;
    the decoder mirrors it with upsampling and a sigmoid output.

    Returns:
        (autoencoder, encoder, decoder): Keras Models sharing the same layers.
    """
    logger.info("build_conv2d_se_autoencoder.")

    input_layer = Input(shape=input_shape)

    # Encoder: conv + SE at each stage, pooling after the first two stages.
    x = input_layer
    for filters, do_pool in ((32, True), (64, True), (128, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = se_block(x)
        if do_pool:
            x = MaxPooling2D((2, 2), padding='same')(x)
    encoded = Conv2D(1, (3, 3), activation='relu', padding='same')(x)

    # Decoder: mirror of the encoder, upsampling after the first two stages.
    x = encoded
    for filters, do_upsample in ((128, True), (64, True), (32, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = se_block(x)
        if do_upsample:
            x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    # Full autoencoder plus shared-layer encoder/decoder sub-models.
    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    decoder = Model(encoded, decoded)
    return autoencoder, encoder, decoder


def build_conv2d_cbam_autoencoder_16x16(input_shape=(64, 64, 1)):
    """Convolutional autoencoder with a CBAM attention block after every conv stage.

    Same topology as the SE variant but using combined channel + spatial
    (CBAM) attention. Encoder downsamples twice into a 1-channel bottleneck;
    the decoder mirrors it with upsampling and a sigmoid output.

    Returns:
        (autoencoder, encoder, decoder): Keras Models sharing the same layers.
    """
    logger.info("build_conv2d_cbam_autoencoder_16x16.")

    input_layer = Input(shape=input_shape)

    # Encoder: conv + CBAM at each stage, pooling after the first two stages.
    x = input_layer
    for filters, do_pool in ((32, True), (64, True), (128, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = cbam_block(x)
        if do_pool:
            x = MaxPooling2D((2, 2), padding='same')(x)
    encoded = Conv2D(1, (3, 3), activation='relu', padding='same')(x)

    # Decoder: mirror of the encoder, upsampling after the first two stages.
    x = encoded
    for filters, do_upsample in ((128, True), (64, True), (32, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = cbam_block(x)
        if do_upsample:
            x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    # Full autoencoder plus shared-layer encoder/decoder sub-models.
    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    decoder = Model(encoded, decoded)
    return autoencoder, encoder, decoder


def build_conv2d_se_cbam_autoencoder_16x16(input_shape=(64, 64, 1)):
    """Convolutional autoencoder combining CBAM and SE attention at every stage.

    Note: the encoder applies CBAM then SE, while the decoder applies SE then
    CBAM — this asymmetric ordering is preserved from the original design.

    Returns:
        (autoencoder, encoder, decoder): Keras Models sharing the same layers.
    """
    logger.info("build_conv2d_se_cbam_autoencoder.")

    input_layer = Input(shape=input_shape)

    # Encoder: conv -> CBAM -> SE, pooling after the first two stages.
    x = input_layer
    for filters, do_pool in ((32, True), (64, True), (128, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = cbam_block(x)
        x = se_block(x)
        if do_pool:
            x = MaxPooling2D((2, 2), padding='same')(x)
    encoded = Conv2D(1, (3, 3), activation='relu', padding='same')(x)

    # Decoder: conv -> SE -> CBAM, upsampling after the first two stages.
    x = encoded
    for filters, do_upsample in ((128, True), (64, True), (32, False)):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = se_block(x)
        x = cbam_block(x)
        if do_upsample:
            x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    # Full autoencoder plus shared-layer encoder/decoder sub-models.
    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    decoder = Model(encoded, decoded)
    return autoencoder, encoder, decoder

# ============================================= Main pipeline =======================================
# Matrix marking the positions where array1 and array2 differ
def compare_arrays(array1, array2):
    """Return an integer mask: 1 where array1 and array2 differ, 0 where equal.

    Raises:
        ValueError: if the two arrays have different shapes.
    """
    # Shape check up front — element-wise comparison requires identical shapes.
    if array1.shape != array2.shape:
        raise ValueError("两个数组的形状必须相同")
    # Boolean difference mask converted to 0/1 integers.
    return np.not_equal(array1, array2).astype(int)


def get_traces_phaseBits(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and their phase matrices for every phi.

    For each integer phi in [phi_start, phi_end), builds a theta scan trace,
    converts it to phase matrices in radians, then quantizes to bit matrices.

    Returns:
        (traces, phaseRads, phaseBits): three parallel lists.
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end):
        # Theta-direction scan at this phi: (theta_start, phi) -> (theta_end, phi).
        trace_batch = create_scan_traces_theta(theta_start, theta_end, phi)
        # Directions -> phase matrices (radians) -> quantized bit matrices.
        rad_batch = traces_2_phaseRads(trace_batch)
        bit_batch = phases_rad_2_bit(rad_batch, bit_num)
        # Accumulate results across all phi values.
        traces.extend(trace_batch)
        phaseRads.extend(rad_batch)
        phaseBits.extend(bit_batch)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def evaluate_performance(originals, predictions, original_shape):
    """Compute per-sample element-wise accuracy statistics.

    Each prediction is compared element-by-element against its original;
    accuracy is the fraction of matching elements.

    Returns:
        dict with 'max', 'min', 'mean', 'std' over per-sample accuracies.
    """
    accuracies = []
    for orig, pred in zip(originals, predictions):
        orig_flat = orig.flatten()
        pred_flat = pred if pred.ndim <= 1 else pred.flatten()
        # NOTE(review): if lengths still disagree this reshape only succeeds
        # when pred has exactly prod(original_shape) elements — confirm callers.
        if len(orig_flat) != len(pred_flat):
            pred_flat = pred.reshape(original_shape).flatten()
        accuracies.append(np.mean(orig_flat == pred_flat))

    return {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }


def save_results(base_dir, original_arrays, encoded_phaseBits, decoded_arrays, bit_num, name):
    """Compare original vs. decoded phase-bit matrices, save comparison images
    for every 10th sample, and return per-sample accuracy statistics.

    Args:
        base_dir: output directory for this dataset split.
        original_arrays: ground-truth phase-bit matrices.
        encoded_phaseBits: encoder bottleneck outputs (for visualization only).
        decoded_arrays: decoder reconstructions (binarized).
        bit_num: phase quantization bit count, forwarded to pattern computation.
        name: split label; when "test", also dumps decoded matrices as CSV.

    Returns:
        dict with 'max', 'min', 'mean', 'std' over per-sample accuracies.
    """
    # NOTE(review): this directory is never created here — presumably
    # save_images_3x2 creates it on first write; verify.
    dir_compare = base_dir + "/compare/"

    total_correct = 0
    total_elements = 0
    accuracies = []

    for idx, (orig, enc, dec) in enumerate(zip(original_arrays, encoded_phaseBits, decoded_arrays)):
        # Per-sample accuracy: fraction of matching elements.
        correct = np.sum(orig == dec)
        total_correct += correct
        total_elements += orig.size

        accuracy = correct / orig.size
        accuracies.append(accuracy)

        if idx % 10 == 0:
            # Radiation pattern, beam pointing, and PSLL for original vs. decoded.
            pattern_orig, point_orig, psll_orig = phaseBit_2_pattern_point(orig, bit_num)
            pattern_dec, point_dec, psll_dec = phaseBit_2_pattern_point(dec, bit_num)

            # Element-wise difference mask between original and decoded.
            diff = compare_arrays(orig, dec)

            # Save the 3x2 comparison figure for this sample.
            # plot_images_3x2(orig, enc, dec,
            #                 pattern_orig, diff, pattern_dec,
            #                 f"Accuracy = {accuracy:.2%}, count(diff) = {np.sum(orig != dec)}, "
            #                 f"phaseBit(origin):[{point_orig[1]}]-[{psll_orig[0]:.2f},{psll_orig[1]}], "
            #                 f"phaseBit(decoder):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]")
            save_images_3x2(path_img=dir_compare + str(idx) + ".jpg",
                            data1=orig, text1=f"phaseBit(origin):[{point_orig[1]}]-[{psll_orig[0]:.2f},{psll_orig[1]}]",
                            data2=enc, text2="encoded",
                            data3=dec, text3=f"phaseBit(decoder):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]",
                            data4=pattern_orig, text4="pattern(origin)",
                            data5=diff, text5=f"count(diff): {np.sum(orig != dec)}",
                            data6=pattern_dec, text6="pattern(decoder)",
                            img_text=f"Accuracy = {accuracy:.2%}")

            if name == "test":
                # Dump test-split decoded matrices as CSV for later inspection.
                np.savetxt(os.path.join(dir_compare, f'decoded_{idx}.csv'), dec, delimiter=',')

        # Optional per-sample CSV dumps (disabled).
        # np.savetxt(os.path.join(base_dir, f'original_{idx}.csv'), orig, delimiter=',')
        # np.savetxt(os.path.join(base_dir, f'encoded_{idx}.csv'), enc, delimiter=',')
        # np.savetxt(os.path.join(base_dir, f'decoded_{idx}.csv'), dec, delimiter=',')

    # Aggregate statistics over the whole split.
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write statistics to file_path, one 'Key: value' line per entry."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


def save_models(encoder, decoder, model_dir):
    """Persist the encoder and decoder as HDF5 files under model_dir (created if absent)."""
    os.makedirs(model_dir, exist_ok=True)
    for model, filename in ((encoder, 'encoder.h5'), (decoder, 'decoder.h5')):
        model.save(os.path.join(model_dir, filename))


def load_models(model_dir='/files'):
    """Load the encoder and decoder HDF5 models from model_dir.

    NOTE(review): the default '/files' is an absolute path — it looks like it
    was meant to be a relative project path; confirm against callers.
    """
    paths = {stem: os.path.join(model_dir, f'{stem}.h5') for stem in ('encoder', 'decoder')}
    encoder = load_model(paths['encoder'])
    decoder = load_model(paths['decoder'])
    return encoder, decoder


def main(path_dir: str, bit_num: int, theta_start: int, theta_end: int, phi_start: int, phi_end: int) -> None:
    """End-to-end pipeline: generate phase-bit data, train the residual
    convolutional autoencoder, evaluate on train/test splits, and persist
    comparison results, statistics, and the trained encoder/decoder.
    """
    # Generate the training data (beam traces and quantized phase matrices).
    traces, phaseRads, phaseBits = get_traces_phaseBits(bit_num, theta_start, theta_end, phi_start, phi_end)

    # Reshape phaseBits for the 2-D convolutional autoencoder.
    # NOTE(review): assumes each phase-bit matrix has exactly 32*32 elements — confirm upstream.
    X = np.array([arr.reshape(32, 32, 1) for arr in phaseBits])  # keep 2-D structure, add a channel dim
    X = X.astype('float32')  # preserve the 0/1 values

    # Split into train/test sets (80% train, 20% test).
    X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
    logger.info(f"训练集大小: {X_train.shape[0]}, 测试集大小: {X_test.shape[0]}")

    # Build the convolutional-residual autoencoder.
    autoencoder, encoder, decoder = build_conv2d_res_autoencoder_16x16(input_shape=(32, 32, 1))
    # Print model architectures.
    autoencoder.summary()
    encoder.summary()
    decoder.summary()

    # NOTE(review): learning_rate is logged but not passed to the optimizer —
    # 'adam' below uses the Keras default; confirm this is intentional.
    learning_rate = 0.001
    epochs = 100
    batch_size = 32

    logger.info(f"learning_rate={learning_rate}, epochs={epochs}, batch_size={batch_size}")

    # Compile the autoencoder with the combined phase/pattern loss.
    # autoencoder.compile(optimizer='adam', loss=mixed_loss)  # use when detail + structure fidelity is enough
    autoencoder.compile(optimizer='adam', loss=loss_phase_mse_ssim_pattern_ssim)     # loss also rewards main-lobe pointing and sidelobe suppression

    # Learning-rate scheduling and early stopping on validation loss.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    # Train the autoencoder (input == target, as usual for autoencoders).
    autoencoder.fit(X_train, X_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_data=(X_test, X_test),  # validation on the held-out split
                    callbacks=[reduce_lr, early_stopping])

    # Run predictions on both splits.
    datasets = {'train': X_train, 'test': X_test}
    all_accuracies = {}

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        # Encode to the bottleneck, then decode back.
        encoded_phaseBits = encoder.predict(dataset)
        decoded_probs = decoder.predict(encoded_phaseBits)

        # Binarize the sigmoid outputs to 0/1 phase bits.
        decoded_phaseBits = (decoded_probs > 0.5).astype(np.int32)

        # Restore original 32x32 matrix shape for comparison.
        decoded_arrays = [arr.reshape(32, 32) for arr in decoded_phaseBits]
        original_arrays = [arr.reshape(32, 32) for arr in dataset]

        # Restore the bottleneck maps for visualization.
        # NOTE(review): 8x8 matches a 32x32 input after two 2x2 poolings — confirm if the builder changes.
        # encoded_arrays = [arr.reshape(16, 16) for arr in encoded_phaseBits]      # restore as 16x16
        encoded_arrays = [arr.reshape(8, 8) for arr in encoded_phaseBits]     # restore as 8x8

        # Save comparison images / CSVs and collect accuracy statistics.
        stats = save_results(path_dir + f'/dataset_{name}', original_arrays, encoded_arrays, decoded_arrays,
                             bit_num, name)
        all_accuracies[name] = stats

    # Persist the accuracy statistics for both splits.
    save_statistics({
        'train_max': all_accuracies['train']['max'],
        'train_min': all_accuracies['train']['min'],
        'train_mean': all_accuracies['train']['mean'],
        'train_std': all_accuracies['train']['std'],
        'test_max': all_accuracies['test']['max'],
        'test_min': all_accuracies['test']['min'],
        'test_mean': all_accuracies['test']['mean'],
        'test_std': all_accuracies['test']['std']
    }, path_dir + "/result.txt")

    # Persist the trained encoder and decoder.
    save_models(encoder, decoder, path_dir)


if __name__ == "__main__":
    # Command-line interface for the training pipeline.
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90)",
                        help="Base directory path (default: ../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90))")
    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--theta_start", type=int, default=1, help="theta_start (default: 1)")
    parser.add_argument("--theta_end", type=int, default=60, help="theta_end (default: 60)")
    parser.add_argument("--phi_start", type=int, default=0, help="phi_start (default: 0)")
    parser.add_argument("--phi_end", type=int, default=360, help="phi_end (default: 360)")
    args = parser.parse_args()

    # Module-level logger used by every function in this file.
    logger = setup_logging(args.base_path + "/trace.txt")
    logger.info(f"Starting execution with base_path: {args.base_path}")
    logger.info(f"Using bit_num: {args.bit_num}")
    logger.info(f"theta_start={args.theta_start}, theta_end={args.theta_end}, "
                f"phi_start={args.phi_start}, phi_end={args.phi_end}")

    main(path_dir=args.base_path, bit_num=args.bit_num,
         theta_start=args.theta_start, theta_end=args.theta_end,
         phi_start=args.phi_start, phi_end=args.phi_end)