import argparse
import logging
import numpy as np

from tensorflow.keras.models import Model, load_model

from sklearn.model_selection import train_test_split

from util.util_log import setup_logging
from util.util_ris_pattern import point_2_phase, phase_2_pattern, phase_2_pattern_xyz, eps, \
    phase_2_bit, phaseBit_2_deg, phase_2_pattern_xyz_fft
from util.util_analysis_plane import get_peaks, get_peak_nth
from util.util_image import draw_img

from dissertation.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern, pattern_2_peaks
from dissertation.util_plot import save_images_2x2, plot_images_2x2, save_images_3x2, plot_images_3x2

from multi_beam.multi_beam_PS_complex import psm_complex_beam_n


import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# ============================================= Autoencoder loading =======================================
def load_models(path_encoder, path_decoder):
    """Load a saved encoder/decoder model pair from disk.

    :param path_encoder: path to the saved encoder model file (.h5)
    :param path_decoder: path to the saved decoder model file (.h5)
    :return: (encoder, decoder) Keras models
    """
    return load_model(path_encoder), load_model(path_decoder)


# ============================================= Main pipeline helpers =======================================
# Matrix marking the positions where array1 and array2 differ
def compare_arrays(array1, array2):
    """Element-wise difference mask between two equally-shaped arrays.

    :param array1: first array
    :param array2: second array, same shape as array1
    :return: int array with 1 where the arrays differ and 0 where they match
    :raises ValueError: if the two arrays do not have the same shape
    """
    if array1.shape != array2.shape:
        raise ValueError("两个数组的形状必须相同")
    # not_equal yields booleans; cast so callers get a 0/1 integer matrix.
    return np.not_equal(array1, array2).astype(int)

# =============================================== Prediction network (TensorFlow) ================================================
# NOTE(review): numpy, Model and train_test_split are re-imported here — they are
# already imported at the top of the file. Harmless but redundant.
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, ReLU
from sklearn.model_selection import train_test_split

# Fix the random seeds so results are reproducible
np.random.seed(42)
tf.random.set_seed(42)

def create_cnn_model():
    """
    Build and compile the prediction CNN.

    Input is an (8, 8, 2) stack of two encoded phase maps; output is a single
    (8, 8, 1) encoded map.
    :return: compiled Keras model (Adam optimizer, MSE loss)
    """
    inputs = Input(shape=(8, 8, 2))
    x = inputs
    # Three same-padded 3x3 convolution + ReLU stages with widening channels.
    for filters in (32, 64, 128):
        x = Conv2D(filters, kernel_size=(3, 3), padding='same')(x)
        x = ReLU()(x)
    # A final 1x1 convolution collapses the features to one output channel.
    outputs = Conv2D(1, kernel_size=(1, 1), padding='same')(x)

    model = Model(inputs, outputs)
    model.compile(optimizer='adam', loss='mse')
    return model


def train(X_train, Y_train, epochs=50, batch_size=8):
    """
    Build and train the prediction CNN.

    :param X_train: training inputs — expected (N, 8, 8, 2) to match create_cnn_model
    :param Y_train: training targets — expected (N, 8, 8, 1)
    :param epochs: number of training epochs
    :param batch_size: mini-batch size
    :return: the trained Keras model
    """
    model = create_cnn_model()
    # 20% of the training data is held out for per-epoch validation.
    # (The unused `history` local from the original was dropped.)
    model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2)
    return model


def predict(model, X_test):
    """
    Run inference with a trained model.

    :param model: trained Keras model
    :param X_test: test inputs
    :return: predictions with the trailing (channel) axis removed
    """
    raw = model.predict(X_test)
    # Drop the single output channel so callers get (N, H, W) arrays.
    return raw.squeeze(axis=-1)

# =============================================== 预测网络 (torch) ================================================
# import numpy as np
# import torch
# import torch.nn as nn
# import torch.optim as optim
# from torch.utils.data import DataLoader, Dataset
#
# # 设置随机种子以保证结果可复现
# np.random.seed(42)
# torch.manual_seed(42)
#
#
# # 数据集类
# class CustomDataset(Dataset):
#     def __init__(self, X_list, Y_list):
#         self.X = np.array(X_list)
#         self.Y = np.array(Y_list)
#
#     def __len__(self):
#         return len(self.X)
#
#     def __getitem__(self, idx):
#         x = self.X[idx]
#         y = self.Y[idx]
#         # return torch.tensor(x, dtype=torch.float32), torch.tensor(y, dtype=torch.float32)
#         # 将 x 从 (2, 16, 16) 转换为 (2, 16, 16)
#         x_tensor = torch.tensor(x, dtype=torch.float32).permute(0, 2, 1)
#         y_tensor = torch.tensor(y, dtype=torch.float32)
#         return x_tensor, y_tensor
#
#
# # 定义CNN模型
# class CNNSupervised(nn.Module):
#     def __init__(self):
#         super(CNNSupervised, self).__init__()
#         self.conv1 = nn.Conv2d(in_channels=2, out_channels=32, kernel_size=3, padding=1)
#         self.relu = nn.ReLU()
#         self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
#         self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
#         self.conv4 = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1)
#
#     def forward(self, x):
#         x = self.conv1(x)
#         x = self.relu(x)
#         x = self.conv2(x)
#         x = self.relu(x)
#         x = self.conv3(x)
#         x = self.relu(x)
#         x = self.conv4(x)
#         return x
#
#
# def train(X_train_list, Y_train_list, epochs=50, batch_size=8, learning_rate=1e-3):
#     """
#     训练CNN模型
#     :param X_train_list: 输入数据列表，每个元素是形状为 (2, 16, 16) 的numpy数组
#     :param Y_train_list: 目标数据列表，每个元素是形状为 (16, 16) 的numpy数组
#     :param epochs: 训练轮数
#     :param batch_size: 批处理大小
#     :param learning_rate: 学习率
#     :return: 训练好的模型
#     """
#     # 创建数据集和数据加载器
#     dataset = CustomDataset(X_train_list, Y_train_list)
#     dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
#
#     # 初始化模型、损失函数和优化器
#     model = CNNSupervised()
#     criterion = nn.MSELoss()
#     optimizer = optim.Adam(model.parameters(), lr=learning_rate)
#
#     # 训练循环
#     for epoch in range(epochs):
#         model.train()
#         running_loss = 0.0
#         for inputs, targets in dataloader:
#             # 前向传播
#             outputs = model(inputs)
#             loss = criterion(outputs.squeeze(1), targets)
#
#             # 反向传播和优化
#             optimizer.zero_grad()
#             loss.backward()
#             optimizer.step()
#
#             running_loss += loss.item()
#
#         print(f"Epoch {epoch + 1}, Loss: {running_loss / len(dataloader):.4f}")
#
#     return model
#
#
# def predict(model, X_test_list):
#     """
#     使用训练好的模型进行预测
#     :param model: 训练好的模型
#     :param X_test_list: 测试输入数据列表，每个元素是形状为 (2, 16, 16) 的numpy数组
#     :return: 预测的目标数据列表，每个元素是形状为 (16, 16) 的numpy数组
#     """
#     model.eval()
#     Y_pred_list = []
#     with torch.no_grad():
#         for x in X_test_list:
#             test_input = torch.tensor(x, dtype=torch.float32).unsqueeze(0)
#             prediction = model(test_input).squeeze().numpy()
#             Y_pred_list.append(prediction)
#
#     return Y_pred_list
#
#
# # todo:示例使用, 不能正确执行，需要把(2,16,16)改[(16,16),(16,16)]不行删了
# def test_torch_cnn():
#     # 生成示例数据
#     num_samples = 100
#     X_train_list = [np.random.rand(2, 16, 16) for _ in range(num_samples)]
#     Y_train_list = [np.random.rand(16, 16) for _ in range(num_samples)]
#     X_test_list = [np.random.rand(2, 16, 16) for _ in range(10)]
#
#     # 训练网络
#     model = train(X_train_list, Y_train_list)
#
#     # 测试网络
#     Y_test_list = predict(model, X_test_list)
#     print("Predictions:")
#     for i, pred in enumerate(Y_test_list):
#         print(f"Prediction {i + 1}:\n", pred)


# ================================================= Experiment / driver code ======================================================
def get_traces_phaseBits(bit_num, theta_start, theta_step, theta_end, phi_start, phi_step, phi_end):
    """Generate beam pointings and the matching quantized phase code matrices.

    For every phi in [phi_start, phi_end) a theta sweep over
    [theta_start, theta_end] (theta_end inclusive) is generated; each pointing
    is converted to a phase matrix and then quantized to bit_num bits.

    :return: (traces, phaseRads, phaseBits) as flat, parallel lists
    """
    logger.info(f"theta_start={theta_start}, theta_step={theta_step}, theta_end={theta_end}, "
                f"phi_start={phi_start}, phi_step={phi_step}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end, phi_step):
        # Pointings for this phi (note: theta_end inclusive, phi_end exclusive).
        traces_of_phi = [[theta, phi] for theta in range(theta_start, theta_end + 1, theta_step)]
        # Phase matrices for the pointings, then bit quantization.
        rads_of_phi = traces_2_phaseRads(traces_of_phi)
        bits_of_phi = phases_rad_2_bit(rads_of_phi, bit_num)
        traces.extend(traces_of_phi)
        phaseRads.extend(rads_of_phi)
        phaseBits.extend(bits_of_phi)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def create_multibeam_dataset(bit_num, phaseRads1, phaseRads2, phaseBits1, phaseBits2):
    """Pair single-beam phase codes with their PS-complex dual-beam synthesis.

    X[i] holds the two single-beam bit matrices; Y[i] is the mixed bit matrix
    produced by psm_complex_beam_n from the corresponding phase matrices.

    :return: (X, Y) lists of equal length
    """
    X, Y = [], []
    for idx, rad1 in enumerate(phaseRads1):
        _, bit_mix, _ = psm_complex_beam_n(phases=[rad1, phaseRads2[idx]], bit_num=bit_num)
        X.append([phaseBits1[idx], phaseBits2[idx]])
        Y.append(bit_mix)
    return X, Y


def preprocess_data(encoder_beam1, encoder_beam2, phaseBits12, phaseBitsMix):
    """
    Encode raw phase-bit matrices into the compact format the CNN consumes.

    (The original docstring claimed (64, 64) inputs; the code actually reshapes
    to (1, 32, 32, 1) and produces (8, 8) latents.)

    :param encoder_beam1: single-beam encoder; maps a (1, 32, 32, 1) bit matrix
        to a latent that is reshaped to (8, 8)
    :param encoder_beam2: dual-beam encoder with the same latent geometry
    :param phaseBits12: list of input pairs; each element holds two (32, 32) bit matrices
    :param phaseBitsMix: list of (32, 32) target bit matrices, parallel to phaseBits12
    :return: (X_list, Y_list); each X is a list of two (8, 8) latents, each Y one (8, 8) latent
    """
    X_list = []
    Y_list = []
    for phaseBit_X, phaseBit_Y in zip(phaseBits12, phaseBitsMix):
        # Encode each of the two single-beam matrices independently.
        X = [encoder_beam1.predict(pb.reshape(1, 32, 32, 1)).reshape(8, 8) for pb in phaseBit_X]
        # Encode the mixed (dual-beam) target with the dual-beam encoder.
        Y = encoder_beam2.predict(phaseBit_Y.reshape(1, 32, 32, 1)).reshape(8, 8)
        X_list.append(X)
        Y_list.append(Y)
        # draw_X_Y(decoder_beam1, decoder_beam2, X, Y)        # visual sanity check of generated data
    return X_list, Y_list


def recover_phase(decoder_beam2, y):
    """Reconstruct the (32, 32) binary phase matrix from an (8, 8) latent.

    :param decoder_beam2: dual-beam decoder model
    :param y: (8, 8) latent array
    :return: (32, 32) int32 array of 0/1 phase bits
    """
    # The decoder expects a single-sample NHWC batch.
    probabilities = decoder_beam2.predict(y.reshape(1, 8, 8, 1))
    # Threshold the probabilities into hard 0/1 bits and restore the grid shape.
    # # Visual sanity check of the decoded matrix:
    # # draw_img(...)
    return (probabilities > 0.5).astype(np.int32).reshape(32, 32)


def split_data(X_list, Y_list, test_size=0.2, random_state=42):
    """
    Split the data into training and test sets and shape it for the CNN.
    :param X_list: list of inputs; each element is two (8, 8) latent arrays (shape (2, 8, 8))
    :param Y_list: list of targets; each element is one (8, 8) latent array
    :param test_size: fraction of samples held out for testing
    :param random_state: random seed for a reproducible split
    :return: X_train, X_test, Y_train, Y_test
    """
    X_train_list, X_test_list, Y_train_list, Y_test_list = train_test_split(
        X_list, Y_list, test_size=test_size, random_state=random_state
    )

    # Convert to numpy arrays and move the channel axis last (NHWC).
    # (The original comments said (num_samples, 16, 16, 2); the actual shapes
    # here are 8x8, matching the (8, 8, 2) model input and the reshapes below.)
    X_train = np.array(X_train_list).transpose(0, 2, 3, 1)  # becomes (num_samples, 8, 8, 2)
    X_test = np.array(X_test_list).transpose(0, 2, 3, 1)     # becomes (num_samples, 8, 8, 2)
    Y_train = np.array(Y_train_list).reshape(-1, 8, 8, 1)
    Y_test = np.array(Y_test_list).reshape(-1, 8, 8, 1)

    return X_train, X_test, Y_train, Y_test


def draw_X_Y(decoder_beam1, decoder_beam2, X, Y):
    """Debug helper: plot the latent arrays and their decoded matrices.

    NOTE(review): the reshapes here assume (16, 16) latents / (64, 64) codes,
    unlike the (8, 8)/(32, 32) sizes used elsewhere in this file — confirm
    before re-enabling this helper.
    """
    print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
    # Raw latents first, then each decoded back to the full code matrix.
    draw_img(X[0])
    draw_img(X[1])
    draw_img(Y)
    for decoder, latent in ((decoder_beam1, X[0]), (decoder_beam1, X[1]), (decoder_beam2, Y)):
        decoded = decoder.predict(latent.reshape(1, 16, 16, 1))
        draw_img(decoded.reshape(64, 64))


def phaseBit_2_pattern_point_psll(phaseBit, bit_num, beam_num):
    """Compute the radiation pattern, its beam_num strongest peaks, and the PSLL.

    :param phaseBit: quantized phase-bit matrix
    :param bit_num: quantization bit depth
    :param beam_num: number of intended beams
    :return: (pattern, points, psll) — points are the first beam_num peaks,
        psll is the (beam_num+1)-th peak level
    """
    pattern, _, _ = phaseBit_2_pattern(phaseBit, bit_num)
    peaks = pattern_2_peaks(pattern)
    return pattern, peaks[:beam_num], get_peak_nth(peaks, beam_num)


def save_statistics(statistics, file_path):
    """Write each statistic as a 'Key: value' line (key capitalized) to file_path."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


def save_results(base_dir, bit_num, beam_num, decoder_beam2, Y_pred_list, Y_true_list):
    """Decode predicted and true latents, compare them, save comparison images,
    and return per-sample accuracy statistics.

    :param base_dir: output directory; images go to base_dir + "/compare/"
    :param bit_num: phase quantization bit depth
    :param beam_num: number of beams (used for peak/PSLL extraction)
    :param decoder_beam2: dual-beam decoder used to recover (32, 32) bit matrices
    :param Y_pred_list: predicted (8, 8) latents
    :param Y_true_list: ground-truth (8, 8) latents, parallel to Y_pred_list
    :return: dict with 'max', 'min', 'mean', 'std' of per-sample bit accuracies
    """
    dir_compare = base_dir + "/compare/"
    total_correct = 0
    total_elements = 0
    accuracies = []
    for i, pred in enumerate(Y_pred_list):
        # Recover the (32, 32) bit matrices via the dual-beam decoder
        Y_true = recover_phase(decoder_beam2, Y_true_list[i])
        Y_pred = recover_phase(decoder_beam2, pred)
        # Radiation pattern, beam pointings and PSLL for both matrices
        pattern_true, point_true, psll_true = phaseBit_2_pattern_point_psll(Y_true, bit_num, beam_num)
        pattern_pred, point_pred, psll_pred = phaseBit_2_pattern_point_psll(Y_pred, bit_num, beam_num)
        diff = compare_arrays(Y_true, Y_pred)

        # Per-sample bit accuracy (fraction of matching entries)
        correct = np.sum(Y_true == Y_pred)
        total_correct += correct
        total_elements += Y_true.size
        accuracy = correct / Y_true.size
        accuracies.append(accuracy)

        # Compare the encoder/decoder superposition with the PS-complex synthesis
        # plot_images_3x2(Y_true, pred, Y_pred, pattern_true, diff, pattern_pred,
        #                 f"true:[{point_true[0][1]}, {point_true[1][1]}]-[{psll_true[0]:.2f},{psll_true[1]}], "
        #                 f"pred:[{point_pred[0][1]}, {point_pred[1][1]}]-[{psll_pred[0]:.2f},{psll_pred[1]}")
        # NOTE(review): the title string below ends "{psll_pred[1]}" without a
        # closing ']' — likely a typo in the label text; confirm before fixing.
        save_images_3x2(dir_compare + str(i) + ".jpg", Y_true, "phaseBitTrue", pred, "pred", Y_pred, "phaseBitPred",
                        pattern_true, "patternTrue", diff, f"diff(accuracy={accuracy:.2f})", pattern_pred, "patternPred",
                        f"true:[{point_true[0][1]}, {point_true[1][1]}]-[{psll_true[0]:.2f},{psll_true[1]}], "
                        f"pred:[{point_pred[0][1]}, {point_pred[1][1]}]-[{psll_pred[0]:.2f},{psll_pred[1]}")
    # Aggregate statistics for this dataset
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def main(path_pre, bit_num, beam_num,
         path_beam1_encoder, path_beam1_decoder, path_beam2_encoder, path_beam2_decoder,
         theta1_start, theta1_step, theta1_end, phi1_start, phi1_step, phi1_end,
         theta2_start, theta2_step, theta2_end, phi2_start, phi2_step, phi2_end):
    """End-to-end pipeline: build the dual-beam dataset, encode it, train the
    prediction CNN, evaluate on train/test splits, and save results + model.

    :param path_pre: output directory prefix for results and the saved model
    :param bit_num: phase quantization bit depth
    :param beam_num: number of beams
    :param path_beam1_encoder/decoder: single-beam autoencoder model paths
    :param path_beam2_encoder/decoder: dual-beam autoencoder model paths
    :param theta*/phi*: sweep ranges (degrees) for the two beam pointings
    """
    # Echo the parameters for the log
    # NOTE(review): labels say "theta_end"/"phi_end" though the values are the
    # beam-1/beam-2 variants — cosmetic log inconsistency only.
    logger.info(f"path_pre={path_pre}, bit_num={bit_num}, beam_num={beam_num}")
    logger.info(f"path_beam1_encoder={path_beam1_encoder}")
    logger.info(f"path_beam1_decoder={path_beam1_decoder}")
    logger.info(f"path_beam2_encoder={path_beam2_encoder}")
    logger.info(f"path_beam2_decoder={path_beam2_decoder}")
    logger.info(f"theta1_start={theta1_start}, theta1_step={theta1_step}, theta_end={theta1_end}, "
                f"phi1_start={phi1_start}, phi1_step={phi1_step}, phi_end={phi1_end}")
    logger.info(f"theta2_start={theta2_start}, theta2_step={theta2_step}, theta_end={theta2_end}, "
                f"phi2_start={phi2_start}, phi2_step={phi2_step}, phi_end={phi2_end}")
    # Load the single-beam encoder/decoder
    encoder_beam1, decoder_beam1 = load_models(path_encoder=path_beam1_encoder, path_decoder=path_beam1_decoder)
    # Load the dual-beam encoder/decoder
    encoder_beam2, decoder_beam2 = load_models(path_encoder=path_beam2_encoder, path_decoder=path_beam2_decoder)

    # Generate the raw training data (pointings + phase codes) for both beams
    traces1, phaseRads1, phaseBits1 = get_traces_phaseBits(bit_num, theta1_start, theta1_step, theta1_end,
                                                           phi1_start, phi1_step, phi1_end)
    traces2, phaseRads2, phaseBits2 = get_traces_phaseBits(bit_num, theta2_start, theta2_step, theta2_end,
                                                           phi2_start, phi2_step, phi2_end)
    # PS-complex synthesis produces the mixed-code dataset
    phaseBits12, phaseBitsMix = create_multibeam_dataset(bit_num, phaseRads1, phaseRads2, phaseBits1, phaseBits2)

    # Encode everything into (8, 8) latents for the CNN
    X_list, Y_list = preprocess_data(encoder_beam1, encoder_beam2, phaseBits12, phaseBitsMix)

    # Train/test split
    X_train, X_test, Y_train, Y_test = split_data(X_list, Y_list)

    # Train the prediction network
    model = train(X_train, Y_train)

    # Evaluate the network
    # Y_pred_list = predict(model, X_test)

    # Predict on both the training and the test set
    datasets = {
        'train': {'X': X_train, 'Y_true': Y_train},
        'test': {'X': X_test, 'Y_true': Y_test}
    }
    all_accuracies = {}
    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        X = dataset['X']
        Y_true_list = dataset['Y_true']
        # Model predictions for this split
        Y_pred_list = predict(model, X)
        # Save comparison images and collect accuracy statistics
        stats = save_results(path_pre + f'/dataset_{name}', bit_num, beam_num, decoder_beam2, Y_pred_list, Y_true_list)
        all_accuracies[name] = stats

    # Persist the summary statistics
    save_statistics({
        'train_max': all_accuracies['train']['max'],
        'train_min': all_accuracies['train']['min'],
        'train_mean': all_accuracies['train']['mean'],
        'train_std': all_accuracies['train']['std'],
        'test_max': all_accuracies['test']['max'],
        'test_min': all_accuracies['test']['min'],
        'test_mean': all_accuracies['test']['mean'],
        'test_std': all_accuracies['test']['std']
    }, path_pre + "/result.txt")

    # Persist the trained model
    model.save(path_pre + '/multi_beam_predictor.h5')



if __name__ == "__main__":
    # Build the command-line argument parser
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_4/test",
                        help="Base directory path (default: ../files/dissertation/chapter_4/test")

    parser.add_argument("--beam1_ae_enc_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae2d-bit-cnn2d-cbam-(5,30)/encoder.h5",
                        help="beam1 autocoder encoder path. "
                             "default: ../files/dissertation/chapter_3/tf-ae2d-bit-cnn2d-cbam-(5,30)/encoder.h5")

    parser.add_argument("--beam1_ae_dec_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae2d-bit-cnn2d-cbam-(5,30)/decoder.h5",
                        help="beam1 autocoder decoder path. "
                             "default: ../files/dissertation/chapter_3/tf-ae2d-bit-cnn2d-cbam-(5,30)/decoder.h5")

    parser.add_argument("--beam2_ae_enc_path", type=str,
                        default="../files/dissertation/chapter_4/beam2-ae2d-cnnres-t(5,5,30)-p1(0,5,45)-p2(180,5,225)/encoder.h5",
                        help="beam2 autocoder encoder path. "
                             "default: ../files/dissertation/chapter_4/beam2-ae2d-cnnres-t(5,5,30)-p1(0,5,45)-p2(180,5,225)/encoder.h5")

    parser.add_argument("--beam2_ae_dec_path", type=str,
                        default="../files/dissertation/chapter_4/beam2-ae2d-cnnres-t(5,5,30)-p1(0,5,45)-p2(180,5,225)/decoder.h5",
                        help="beam2 autocoder decoder path. "
                             "default: ../files/dissertation/chapter_4/beam2-ae2d-cnnres-t(5,5,30)-p1(0,5,45)-p2(180,5,225)/decoder.h5")

    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--beam_num", type=int, default=2, help="Number of beams (default: 2)")

    # Beam-1 pointing sweep (degrees)
    parser.add_argument("--theta1_start", type=int, default=5, help="theta1_start (default: 5)")
    parser.add_argument("--theta1_step", type=int, default=1, help="theta1_step (default: 1)")
    parser.add_argument("--theta1_end", type=int, default=30, help="theta1_end (default: 30)")
    parser.add_argument("--phi1_start", type=int, default=0, help="phi1_start (default: 0)")
    parser.add_argument("--phi1_step", type=int, default=1, help="phi1_step (default: 1)")
    parser.add_argument("--phi1_end", type=int, default=45, help="phi1_end (default: 45)")

    # Beam-2 pointing sweep (degrees)
    parser.add_argument("--theta2_start", type=int, default=5, help="theta2_start (default: 5)")
    parser.add_argument("--theta2_step", type=int, default=1, help="theta2_step (default: 1)")
    parser.add_argument("--theta2_end", type=int, default=30, help="theta2_end (default: 30)")
    parser.add_argument("--phi2_start", type=int, default=180, help="phi2_start (default: 180)")
    parser.add_argument("--phi2_step", type=int, default=1, help="phi2_step (default: 1)")
    parser.add_argument("--phi2_end", type=int, default=225, help="phi2_end (default: 225)")

    args = parser.parse_args()

    base_path = args.base_path

    beam1_ae_enc_path, beam1_ae_dec_path = args.beam1_ae_enc_path, args.beam1_ae_dec_path
    beam2_ae_enc_path, beam2_ae_dec_path = args.beam2_ae_enc_path, args.beam2_ae_dec_path

    bit_num = args.bit_num
    beam_num = args.beam_num

    theta1_start, theta1_step, theta1_end = args.theta1_start, args.theta1_step, args.theta1_end
    phi1_start, phi1_step, phi1_end = args.phi1_start, args.phi1_step, args.phi1_end
    theta2_start, theta2_step, theta2_end = args.theta2_start, args.theta2_step, args.theta2_end
    phi2_start, phi2_step, phi2_end = args.phi2_start, args.phi2_step, args.phi2_end

    # Initialize logging (module-level `logger` used by the functions above)
    logger = setup_logging(base_path + "/trace.txt")
    # Log the run configuration
    logger.info(f"Starting execution with base_path: {base_path}")
    logger.info(f"beam1_ae_enc_path: {beam1_ae_enc_path}, beam1_ae_dec_path: {beam1_ae_dec_path}")
    logger.info(f"beam2_ae_enc_path: {beam2_ae_enc_path}, beam2_ae_dec_path: {beam2_ae_dec_path}")
    logger.info(f"Using bit_num: {bit_num}")
    logger.info(f"Using beam_num: {beam_num}")
    logger.info(f"theta1_start={theta1_start}, theta1_step={theta1_step}, theta_end={theta1_end}, "
                f"phi1_start={phi1_start}, phi1_step={phi1_step}, phi_end={phi1_end}")
    logger.info(f"theta2_start={theta2_start}, theta2_step={theta2_step}, theta_end={theta2_end}, "
                f"phi2_start={phi2_start}, phi2_step={phi2_step}, phi_end={phi2_end}")

    # Dual-beam synthesis: CNN predicts the encoded mixed phase matrix
    main(base_path, bit_num, beam_num,
         beam1_ae_enc_path, beam1_ae_dec_path, beam2_ae_enc_path, beam2_ae_dec_path,
         theta1_start, theta1_step, theta1_end, phi1_start, phi1_step, phi1_end,
         theta2_start, theta2_step, theta2_end, phi2_start, phi2_step, phi2_end)