from tensorflow.keras.layers import Input, Dense,LSTM, Masking, Conv2D, MaxPooling2D, Flatten
from tempo import *
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.layers import Input, Dense,Activation,BatchNormalization,InputLayer,LayerNormalization,LeakyReLU
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import Callback
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, accuracy_score
import logging
import preprocess_2_data_distribution_compute
import json
import os
import ast

# Configure logging: DEBUG level, timestamped records, appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Module-level cancellation flag: long-running tasks poll it and abort
# with an error when it is True.
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Signal cancellation of the current task.

    Sets the module-level flag polled by the long-running routines so
    they abort with an error.
    """
    # Without the `global` declaration the original assignment only bound
    # a function-local name, so the module flag was never set to True.
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

# Helper for parsing list-shaped strings read from data files.
def convert_list_str(lst_str):
    """Safely parse a Python list literal (e.g. "[1, 2]") into a list."""
    parsed = ast.literal_eval(lst_str)
    return parsed

# Pad every kind of data: rows shorter than L are zero-filled.
def pad_data(data, length):
    """Right-pad (or truncate) each row of a 2-D array to `length` columns.

    Parameters
    ----------
    data : np.ndarray
        2-D array whose rows are copied into the result.
    length : int
        Target row length; longer rows are truncated, shorter ones padded
        with zeros.

    Returns
    -------
    np.ndarray of shape (data.shape[0], length), same dtype as `data`.

    Raises
    ------
    RuntimeError
        If the user cancelled the current operation.
    """
    if __Cannel_TASK_WAIT_HANDLE__:
        # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
        raise RuntimeError("用户已取消当前操作")
    padded_data = np.zeros((data.shape[0], length), dtype=data.dtype)
    for i, row in enumerate(data):
        n = min(length, row.size)
        padded_data[i, :n] = row[:n]
    return padded_data
# Load the payload datasets from disk.
def load_data_payload(path, n_text_pain=0, n_text_encr=0, n_acars_pain=0, n_acars_encr=0, n_ais_pain=0, n_ais_encr=0,
                      n_ais1_pain=0, n_ais1_encr=0,
                      n_ais4_pain=0, n_ais4_encr=0, n_http=0, n_ssh=0, n_dns=0, n_ftp=0, n_smtp=0, L=10):
    """Load payload corpora and assemble plaintext/ciphertext sample pools.

    Each n_* argument selects how many rows of the corresponding corpus are
    included in the output; L is the per-sample length every row is padded
    or truncated to. Returns (pain_data, encr_data) as stacked arrays.

    Raises RuntimeError if the user cancels the operation mid-load.
    """
    def _ensure_not_cancelled():
        # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
        if __Cannel_TASK_WAIT_HANDLE__:
            raise RuntimeError("用户已取消当前操作")

    def _load(fname):
        # All corpora live under <path>/dataset/.
        return np.load(os.path.join(path, 'dataset', fname))

    '''Text'''
    _ensure_not_cancelled()
    pain_text = _load("painText_dec_all.npy")  # 96194 rows, 1024 bytes
    Blowfishcipher_text = _load("BlowfishcipherText_dec_all.npy")  # 46580 rows, 1024 bytes
    AEScipher_text = _load("AEScipherText_dec_all.npy")  # 46580 rows, 1024 bytes
    encr_text = np.vstack((Blowfishcipher_text, AEScipher_text))  # 93160 rows, 1024 bytes

    '''ACARS'''
    _ensure_not_cancelled()
    pain_acars = _load("acars_pain_dec_9930.npy")  # 9930 rows, 48 bytes
    encr_acars = _load("acars_aes_dec_9930.npy")  # 9930 rows, 48 bytes

    '''AIS (mixed messages plus message types 1 and 4)'''
    _ensure_not_cancelled()
    pain_ais_all = _load("AIS_pain_dec_51678.npy")  # 51678 rows, 16 bytes
    encr_ais_all = _load("AIS_aes_dec_51678.npy")  # 51678 rows, 16 bytes
    pain_ais1 = _load("AIS_pain_dec_xiaoxi1.npy")  # 150460 rows, 16 bytes
    encr_ais1 = _load("AIS_aes_dec_xiaoxi1.npy")  # 150460 rows, 16 bytes
    pain_ais4 = _load("AIS_pain_dec_xiaoxi4.npy")  # 6637 rows, 16 bytes
    encr_ais4 = _load("AIS_aes_dec_xiaoxi4.npy")  # 6637 rows, 16 bytes

    '''HTTP'''
    _ensure_not_cancelled()
    http = _load("http_whole_fram_dec_payload(40B).npy")  # 153831 rows, 40 bytes
    '''SSH'''
    _ensure_not_cancelled()
    ssh = _load("ssh_dec_payload(40B).npy")  # 11608 rows, 40 bytes
    '''DNS'''
    _ensure_not_cancelled()
    dns = _load("dns_maccdc_dec.npy")  # 16137 rows, 16 bytes
    '''FTP'''
    _ensure_not_cancelled()
    ftp = _load("ftp_maccdc_dec.npy")  # 1569 rows, 16 bytes
    '''SMTP'''
    _ensure_not_cancelled()
    smtp = _load("smtp_maccdc_dec.npy")  # 181 rows, 16 bytes

    # Pad (or truncate) every corpus to L bytes per row.
    pain_text = pad_data(pain_text, L)
    encr_text = pad_data(encr_text, L)
    pain_acars = pad_data(pain_acars, L)
    encr_acars = pad_data(encr_acars, L)
    pain_ais_all = pad_data(pain_ais_all, L)
    encr_ais_all = pad_data(encr_ais_all, L)
    pain_ais1 = pad_data(pain_ais1, L)
    encr_ais1 = pad_data(encr_ais1, L)
    pain_ais4 = pad_data(pain_ais4, L)
    encr_ais4 = pad_data(encr_ais4, L)
    http = pad_data(http, L)
    ssh = pad_data(ssh, L)
    dns = pad_data(dns, L)
    ftp = pad_data(ftp, L)
    smtp = pad_data(smtp, L)

    # Assemble the requested number of rows from each corpus.
    # NOTE(review): ssh appears only in the encrypted pool and the
    # encrypted pool omits http/dns/ftp/smtp — confirm that is intended.
    _ensure_not_cancelled()
    pain_data = np.vstack((pain_text[0:n_text_pain, 0:L], pain_acars[0:n_acars_pain, 0:L],
                           pain_ais_all[0:n_ais_pain, 0:L], pain_ais1[0:n_ais1_pain, 0:L],
                           pain_ais4[0:n_ais4_pain, 0:L], http[0:n_http, 0:L], dns[0:n_dns, 0:L], ftp[0:n_ftp, 0:L],
                           smtp[0:n_smtp, 0:L]))
    encr_data = np.vstack((encr_text[0:n_text_encr, 0:L], encr_acars[0:n_acars_encr, 0:L],
                           encr_ais_all[0:n_ais_encr, 0:L], encr_ais1[0:n_ais1_encr, 0:L],
                           encr_ais4[0:n_ais4_encr, 0:L], ssh[0:n_ssh, 0:L]))

    return pain_data, encr_data


def lstm_process_source_data(data_pain, encr_data, ratio=0.9, L=10):
    '''
    L: per-sample length after truncation/padding, default 10.
    ratio: fraction of samples used for training, default 0.9.
    Returns (X_train, Y_train one-hot, X_test, Y_test raw, Y_test1 one-hot).
    '''
    test_frac = 1 - ratio

    # Plaintext samples carry label 0, ciphertext samples carry label 1;
    # each class is split separately with its own fixed random seed.
    Xp_tr, Xp_te, yp_tr, yp_te = train_test_split(
        data_pain, np.zeros(len(data_pain)), test_size=test_frac, random_state=42)
    Xe_tr, Xe_te, ye_tr, ye_te = train_test_split(
        encr_data, np.ones(len(encr_data)), test_size=test_frac, random_state=16)

    # Recombine the per-class splits.
    X_train = np.vstack((Xp_tr, Xe_tr))
    Y_train = np.vstack((yp_tr[:, np.newaxis], ye_tr[:, np.newaxis]))
    X_test = np.vstack((Xp_te, Xe_te))
    Y_test = np.vstack((yp_te[:, np.newaxis], ye_te[:, np.newaxis]))

    # Scale byte values into [0, 1] and shape for an LSTM: (samples, L, 1).
    X_train = (X_train / 255).reshape(-1, L, 1)
    X_test = (X_test / 255).reshape(-1, L, 1)
    Y_train = Y_train.reshape(-1, 1)
    Y_test = Y_test.reshape(-1, 1)

    # One-hot encode labels as dense matrices for the downstream model.
    Y_train = OneHotEncoder(categories='auto').fit_transform(Y_train).todense()
    Y_test1 = OneHotEncoder(categories='auto').fit_transform(Y_test).todense()
    return X_train, Y_train, X_test, Y_test, Y_test1

class CustomCallback(Callback):
    """Keras callback that records loss/accuracy at the end of every epoch.

    Also honors the module-level cancellation flag: when it is set,
    training is aborted by raising RuntimeError.
    """

    def on_epoch_end(self, epoch, logs=None):
        """Log epoch metrics (log file: loss/accuracy; console: loss/val_accuracy)."""
        if __Cannel_TASK_WAIT_HANDLE__:
            # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
            raise RuntimeError("用户已取消当前操作")
        # NOTE(review): the console line prints val_accuracy while the log
        # file records accuracy — confirm the asymmetry is intentional.
        logging.info(f'{epoch}   {logs["loss"]:.8f}   {logs["accuracy"]:.8f}')
        print(f'{epoch}   {logs["loss"]:.8f}   {logs["val_accuracy"]:.8f} ')


def lstm(model, epochs, batch_size, X_train, Y_train, X_test, Y_test, Y_test1, L):
    """Train `model` on the payload data and report test-set metrics.

    Parameters
    ----------
    model : compiled Keras model.
    epochs, batch_size : training hyper-parameters.
    X_train, Y_train : training samples and one-hot labels.
    X_test : test samples.
    Y_test : raw 0/1 test labels (unused here; kept for interface parity).
    Y_test1 : one-hot test labels used for validation and evaluation.
    L : sample length (unused here; kept for interface parity).

    Returns
    -------
    (test_acc, recall, precision, f1) computed on the test set.
    """
    # Removed unused locals from the original: data_L, epsilon, history.
    with tf.device('/gpu:0'):
        print('Epoch    Loss        Accuracy')
        logging.info('Epoch     Loss        Accuracy')
        custom_callback = CustomCallback()

        # verbose=0: the custom callback handles all per-epoch output.
        model.fit(X_train, Y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(X_test, Y_test1),
                  callbacks=[custom_callback],
                  verbose=0)

    # Final evaluation on the held-out test set.
    test_loss, test_acc = model.evaluate(X_test, Y_test1, verbose=0)
    print(f'Test Accuracy: {test_acc:.3f}')
    logging.info(f'Test Accuracy: {test_acc:.3f}')

    # Collapse probability outputs / one-hot labels to class indices.
    predictions = np.asarray(np.argmax(model.predict(X_test), axis=-1))
    true_labels = np.asarray(np.argmax(Y_test1, axis=1))

    tn, fp, fn, tp = confusion_matrix(true_labels, predictions).ravel()
    precision_sklearn = precision_score(true_labels, predictions)
    recall_sklearn = recall_score(true_labels, predictions)
    f1_sklearn = f1_score(true_labels, predictions)

    # Report to console first, then to the log file (same order as before).
    for emit in (print, logging.info):
        emit('训练样本数：%s' % len(X_train))
        emit('测试样本数：%s' % len(true_labels))
        emit('TP:%s' % tp)
        emit('FP:%s' % fp)
        emit('FN:%s' % fn)
        emit('TN:%s' % tn)
        emit('Recall:%s' % recall_sklearn)
        emit('Precision:%s' % precision_sklearn)
        emit("测试集F1值：%s" % f1_sklearn)
        emit('训练结束！')

    return test_acc, recall_sklearn, precision_sklearn, f1_sklearn

def lstm_2(model, epochs, batch_size, X_train, Y_train, X_test, Y_test, Y_test1, L):
    """Train `model` and report test metrics; returns the Keras History.

    Identical reporting to `lstm`, but the fit History object is returned
    instead of the metric tuple (callers use it to inspect the curves).

    Parameters
    ----------
    model : compiled Keras model.
    epochs, batch_size : training hyper-parameters.
    X_train, Y_train : training samples and one-hot labels.
    X_test : test samples.
    Y_test : raw 0/1 test labels (unused here; kept for interface parity).
    Y_test1 : one-hot test labels used for validation and evaluation.
    L : sample length (unused here; kept for interface parity).
    """
    # Removed unused locals from the original: data_L, epsilon.
    with tf.device('/gpu:0'):
        print('Epoch    Loss        Accuracy')
        logging.info('Epoch     Loss        Accuracy')
        custom_callback = CustomCallback()

        # verbose=0: the custom callback handles all per-epoch output.
        history = model.fit(X_train, Y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_data=(X_test, Y_test1),
                            callbacks=[custom_callback],
                            verbose=0)

    # Final evaluation on the held-out test set.
    test_loss, test_acc = model.evaluate(X_test, Y_test1, verbose=0)
    print(f'Test Accuracy: {test_acc:.3f}')
    logging.info(f'Test Accuracy: {test_acc:.3f}')

    # Collapse probability outputs / one-hot labels to class indices.
    predictions = np.asarray(np.argmax(model.predict(X_test), axis=-1))
    true_labels = np.asarray(np.argmax(Y_test1, axis=1))

    tn, fp, fn, tp = confusion_matrix(true_labels, predictions).ravel()
    precision_sklearn = precision_score(true_labels, predictions)
    recall_sklearn = recall_score(true_labels, predictions)
    f1_sklearn = f1_score(true_labels, predictions)

    # Report to console first, then to the log file (same order as before).
    for emit in (print, logging.info):
        emit('训练样本数：%s' % len(X_train))
        emit('测试样本数：%s' % len(true_labels))
        emit('TP:%s' % tp)
        emit('FP:%s' % fp)
        emit('FN:%s' % fn)
        emit('TN:%s' % tn)
        emit('Recall:%s' % recall_sklearn)
        emit('Precision:%s' % precision_sklearn)
        emit("测试集F1值：%s" % f1_sklearn)
        emit('训练结束！')

    return history

def lstm_TL(X_test, Y_test, file_name, L):
    """Run a saved LSTM model over X_test in batches and report metrics.

    Parameters
    ----------
    X_test : samples shaped (-1, L, 1).
    Y_test : 0/1 column labels; may be empty, in which case placeholder
        labels are fabricated just so the encoding step runs.
    file_name : path of the saved Keras model to load.
    L : sample length (kept for interface parity).

    Predictions are written to '无监督分类结果.txt'.
    Returns (recall, precision, f1, accuracy).

    NOTE(review): when Y_test is empty, the metric calls at the end still
    receive the empty Y_test — confirm whether that path is ever taken.
    """
    if len(Y_test) != 0:
        Y_test1 = OneHotEncoder(categories='auto').fit_transform(Y_test).todense()  # one-hot encoding
    else:
        # Fabricated labels containing both classes, with no real meaning —
        # they only let the encoder and downstream code run.
        temp1 = np.asarray([0] + [1] * (len(X_test) - 1)).reshape([-1, 1])
        Y_test1 = OneHotEncoder(categories='auto').fit_transform(temp1).todense()

    batch_size = 64
    model = tf.keras.models.load_model(file_name)

    # Batched prediction over the test set.
    res_ypred = []
    test_accs = []
    for batch_xs, batch_ys in tf.data.Dataset.from_tensor_slices((X_test, Y_test)).batch(batch_size):
        if __Cannel_TASK_WAIT_HANDLE__:
            # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
            raise RuntimeError("用户已取消当前操作")
        batch_preds = model.predict(batch_xs)
        res_ypred.extend(tf.argmax(batch_preds, axis=1).numpy())

        if len(Y_test) != 0:
            # NOTE(review): batch_ys holds raw 0/1 labels, not one-hot rows —
            # confirm categorical_accuracy is the intended metric here.
            test_accs.extend(tf.keras.metrics.categorical_accuracy(batch_ys, batch_preds).numpy())

    # Persist the predictions as "index,prediction" lines.
    temp = np.arange(1, len(res_ypred) + 1, 1)
    res_ypred = np.array(res_ypred)
    res_out = np.hstack((temp.reshape(-1, 1), res_ypred.reshape(-1, 1)))
    np.savetxt('无监督分类结果.txt', res_out, fmt='%0.0f', delimiter=',')

    print('分类结束，结果已保存至文本文件')
    logging.info('分类结束，结果已保存至文本文件')

    # Confusion matrix and per-class metrics against the true labels.
    tn, fp, fn, tp = confusion_matrix(Y_test, res_ypred).ravel()
    accuracy = accuracy_score(Y_test, res_ypred)
    precision_sklearn = precision_score(Y_test, res_ypred)
    recall_sklearn = recall_score(Y_test, res_ypred)
    f1_sklearn = f1_score(Y_test, res_ypred)

    for emit in (print, logging.info):
        emit('TP:%s' % tp)
        emit('FP:%s' % fp)
        emit('FN:%s' % fn)
        emit('TN:%s' % tn)
        emit('Recall:%s' % recall_sklearn)
        emit('Precision:%s' % precision_sklearn)
        emit("测试集F1值：%s" % f1_sklearn)
        emit("测试集准确度：%s" % accuracy)
    return recall_sklearn, precision_sklearn, f1_sklearn, accuracy



def lstm_process_target_data(data_pain, data_encr, L=10):
    '''
    L: per-sample length after truncation/padding, default 10.
    Stacks plaintext then ciphertext rows, normalizes bytes to [0, 1] and
    shapes them for an LSTM. Returns (X_test, Y_test) where X_test has
    shape (-1, L, 1) and Y_test is 0 for plaintext rows, 1 for ciphertext.
    '''
    features = np.concatenate((data_pain, data_encr), axis=0) / 255
    labels = np.concatenate(
        (np.zeros([len(data_pain), 1], float),
         np.ones([len(data_encr), 1], float)),
        axis=0,
    )
    return features.reshape(-1, L, 1), labels.reshape(-1, 1)

def load_cnn_data(path, n_text_pain=0, n_text_encr=0, n_acars_pain=0, n_tls=0, n_http=0, n_ssh=0, Len=8):
    """Load and assemble plaintext/ciphertext frames for the CNN.

    Len is the side of the square CNN input; each sample uses Len**2 bytes.
    The n_* arguments select how many rows of each corpus are included.
    Returns (pain, encr): stacked plaintext and ciphertext arrays.

    Raises RuntimeError if the user cancels the operation mid-load.
    """
    def _ensure_not_cancelled():
        # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
        if __Cannel_TASK_WAIT_HANDLE__:
            raise RuntimeError("用户已取消当前操作")

    # Removed the unused `path2` local from the original.
    ltemp = str(Len ** 2)

    '''Novel-text data'''
    _ensure_not_cancelled()
    text_pain = np.load(os.path.join(path, 'dataset', "painText_dec_all.npy"))  # 96194,1024
    text_encr = np.load(os.path.join(path, 'dataset', "AEScipherText_dec_all.npy"))  # 46580,1024
    '''HTTP data'''
    _ensure_not_cancelled()
    http = np.load(os.path.join(path, 'dataset', r"http_whole_fram_dec_payload(" + ltemp + r"B).npy"))  # 128249,64
    '''ACARS data'''
    _ensure_not_cancelled()
    acars = np.load(os.path.join(path, 'dataset', r"acars_pain_dec_withhead(" + ltemp + r"B).npy"))  # 29472,64
    '''SSH data (first three rows dropped, matching the original preprocessing)'''
    ssh = np.load(os.path.join(path, 'dataset', r"ssh_dec_payload(" + ltemp + r"B).npy"))  # 3173,64
    ssh = np.delete(ssh, [0, 1, 2], axis=0)
    '''TLS data'''
    _ensure_not_cancelled()
    tls = np.load(os.path.join(path, 'dataset', r"tls_maccdc_nohead_dec(" + ltemp + r"B).npy"))  # 126887,64

    # Build constant-valued rows (all-0, all-1, ..., all-255), 10 copies
    # each — treated as plaintext. Note: currently left out of `pain` below.
    ltemp = int(ltemp)
    d_same = []
    for value in range(256):
        row = [value] * ltemp
        for _ in range(10):  # the original shadowed the outer loop index here
            d_same.append(row)
    # np.asfarray was removed in NumPy 2.0; asarray(dtype=float) is equivalent.
    d_same = np.asarray(d_same, dtype=float)

    """""""""""Assemble the training pools"""""""""""
    pain = np.vstack(
        (text_pain[0:n_text_pain, 0:ltemp], http[0:n_http, 0:ltemp], acars[0:n_acars_pain, 0:ltemp]))  # , d_same
    encr = np.vstack((text_encr[0:n_text_encr, 0:ltemp], tls[0:n_tls, 0:ltemp], ssh[0:n_ssh, 0:ltemp]))
    return pain, encr
# Build the CNN train/test tensors from preselected sample pools.
def cnn_data_process(path, pain, encr, Len=8):
    """Build shuffled CNN training tensors and a fixed test split.

    Len is the side of the square CNN input (each sample uses Len**2
    bytes). Returns (X_train, Y_train, X_test, Y_test): inputs shaped
    (-1, Len, Len, 1), labels shaped (-1, 1) with 0 = plaintext and
    1 = ciphertext.
    """
    ltemp = str(Len ** 2)

    # The test split is drawn from the tails of the text and HTTP corpora.
    text_pain = np.load(os.path.join(path, 'dataset', "painText_dec_all.npy"))  # 96194,1024
    text_encr = np.load(os.path.join(path, 'dataset', "AEScipherText_dec_all.npy"))  # 46580,1024
    http = np.load(os.path.join(path, 'dataset', r"http_whole_fram_dec_payload(" + ltemp + r"B).npy"))  # 128249,64
    ltemp = int(ltemp)

    # Training data: attach labels as a final column, then shuffle rows so
    # features and labels stay aligned.
    stacked = np.hstack((
        np.vstack((pain, encr)),
        np.vstack((np.zeros([len(pain), 1], float), np.ones([len(encr), 1], float))),
    ))
    np.random.shuffle(stacked)
    X_train = stacked[:, 0:ltemp]
    Y_train = stacked[:, -1]

    # Test data: last rows of each corpus, plaintext first.
    test_pain = np.vstack((text_pain[-1000:, 0:ltemp], http[-1000:, 0:ltemp]))
    test_encr = text_encr[-2000:, 0:ltemp]
    X_test = np.vstack((test_pain, test_encr))
    Y_test = np.vstack((np.zeros([len(test_pain), 1], float), np.ones([len(test_encr), 1], float)))

    # Reshape for the CNN: square single-channel images, column labels.
    X_train = np.reshape(X_train, [-1, Len, Len, 1])
    X_test = np.reshape(X_test, [-1, Len, Len, 1])
    Y_train = np.reshape(Y_train, [-1, 1])
    Y_test = np.reshape(Y_test, [-1, 1])

    return X_train, Y_train, X_test, Y_test


def build_CNN(input_shape, output_shape):
    """Build the small two-conv CNN used for cipher/plaintext classification.

    input_shape: shape of one sample, e.g. (Len, Len, 1).
    output_shape: number of output classes (softmax head).
    """
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(5, 5), activation='relu',
                     input_shape=input_shape, padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(24, kernel_size=(5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Flatten())
    model.add(Dense(24, activation='relu'))
    model.add(Dense(output_shape, activation='softmax'))
    return model

def binary_df(df, L):
    """Convert each row's 'data' binary string into a length-L int row.

    Rows longer than L are truncated; shorter ones are zero-padded.
    Returns a 2-D numpy array with one row per DataFrame row.
    """
    rows = []
    for _, record in df.iterrows():
        # Decode the binary string into integers.
        # NOTE(review): assumes binary_strings_to_integers returns a list —
        # confirm against its definition.
        ints = binary_strings_to_integers(record['data'])
        # Force every row to exactly L entries.
        if len(ints) >= L:
            fixed = ints[:L]
        else:
            fixed = ints + [0] * (L - len(ints))
        rows.append(fixed)
    return np.array(rows)

def process_ziduan_divide(df, L_mesg=40):
    """Split the field DataFrame by message name.

    Returns (data, file_out, sel_data, label):
      data     — all rows converted to fixed-length byte arrays (L_mesg)
      file_out — concatenated 'name_out' values, one per message name
      sel_data — {name: rows of `data` belonging to that name}
      label    — {name: list of 'field' values for that name}

    Raises RuntimeError if the user cancels the operation.
    """
    data = binary_df(df, L_mesg)
    # One group per distinct message name.
    keys = np.unique(df['name'])
    sel_data = {}
    sel_label = {}  # NOTE(review): populated but never returned — confirm intent
    label = {}
    file_out = ''
    for key in keys:
        if __Cannel_TASK_WAIT_HANDLE__:
            # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
            raise RuntimeError("用户已取消当前操作")
        mask = df['name'] == key  # computed once instead of per-lookup
        file_out += str(df[mask]['name_out'].drop_duplicates().values[0])
        sel_data[key] = data[mask]
        lab = df[mask]['field'].drop_duplicates()
        sel_label[key] = lab.tolist()[0]
        label[key] = df[mask]['field'].tolist()
    return data, file_out, sel_data, label

def CNN_train(X_train, Y_train, X_test, Y_test, Len, epochs, cnn_model_name):
    """Build, train, evaluate and save the CNN classifier.

    X_train/X_test: byte images shaped (-1, Len, Len, 1).
    Y_train/Y_test: integer class labels (0 = plaintext, 1 = encrypted).
    cnn_model_name: path the trained model is saved under.
    """
    model = build_CNN((Len, Len, 1), 2)
    batch_size = 64

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    # build_CNN ends with a softmax layer, so the loss must treat the model
    # outputs as probabilities; the original from_logits=True applied a
    # second (log-)softmax on top of the softmax activations.
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    with tf.device('/gpu:0'):
        print('Epoch    Loss        Accuracy')
        logging.info('Epoch     Loss        Accuracy')
        custom_callback = CustomCallback()
        # verbose=0: the custom callback handles all per-epoch output.
        model.fit(X_train, Y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(X_test, Y_test),
                  callbacks=[custom_callback],  # Keras expects a list of callbacks
                  verbose=0)

    # Final evaluation on the held-out test set.
    test_loss, test_acc = model.evaluate(X_test, Y_test, verbose=0)
    print(f'Test Accuracy: {test_acc:.3f}')
    logging.info(f'Test Accuracy: {test_acc:.3f}')

    # Persist the trained model.
    model.save(cnn_model_name)
    print('训练结束，模型已保存。')
    logging.info('训练结束，模型已保存。')

def rpdctestdata(path, n_acars_up_encr, n_acars_down_encr, n_tls, n_ssh, n_ais_en, n_acars_down_pain, n_acars_up_pain,
                 n_ais_pain, n_http, n_dns):
    """Load the internal test data via rpdc_test_data_process.

    Returns (data_pad01, file_out01, sel_data01, label01) exactly as
    produced by rpdc_test_data_process.data_under_test.
    """
    import rpdc_test_data_process
    loaded = rpdc_test_data_process.data_under_test(
        path, n_acars_up_encr, n_acars_down_encr, n_tls, n_ssh, n_ais_en,
        n_acars_down_pain, n_acars_up_pain, n_ais_pain, n_http, n_dns)
    data_pad01, file_out01, sel_data01, label01 = loaded
    print('内部数据加载成功')
    logging.info('内部数据加载成功')
    return data_pad01, file_out01, sel_data01, label01


def rpdcgn2(data_pad01, line_edit_text, file_out01, rpdc2_len, sel_data01, label01):
    """Reconstruct data and compute/plot per-byte encryption probability.

    Saves 'p_plot.png' and 'p_derivative_plot.png' to the working
    directory. Returns (p, p_derivative).

    sel_data01/label01 are accepted for interface parity; the original
    made unused copies of them (`se`/`la`), which have been removed.
    """
    # Data reconstruction and encryption-probability computation.
    import rpdc_data_reconstruct_and_prob_compute

    def _save_line_plot(series, title, ylabel, fname):
        # One standalone figure per metric, saved to disk for the UI.
        plt.figure(figsize=(10, 6))
        plt.plot(series)
        plt.title(title)
        plt.xlabel('字节偏移量')
        plt.ylabel(ylabel)
        plt.savefig(fname)
        plt.close()

    p = rpdc_data_reconstruct_and_prob_compute.data_reconstruct_and_prob_compute(
        data_pad01, line_edit_text, file_out01, rpdc2_len)
    p_derivative = rpdc_data_reconstruct_and_prob_compute.discrete_funtion_derivative3(p)

    _save_line_plot(p, '加密概率', '加密概率', 'p_plot.png')
    _save_line_plot(p_derivative, '加密概率导数', '加密概率导数', 'p_derivative_plot.png')

    print("---------开始数据重组和加密概率计算------------")
    logging.info("---------开始数据重组和加密概率计算------------")
    return p, p_derivative


def rpdcgn3(vari_max_input, p1):
    """Generate candidate encrypted-field combinations from probability jumps.

    vari_max_input: maximum-variation threshold used to detect jump points
    in the probability curve p1. Prints/logs each adjusted [start, end]
    pair. Returns the raw combination list, or None when no jump points
    or combinations are found.
    """
    import rpdc_sus_field_generate
    vari_max = float(vari_max_input)

    # Locate candidate jump points in the probability curve.
    en_section = rpdc_sus_field_generate.find_section(p1, vari_max)
    if not en_section:
        print("未提取出跳跃点。")
        logging.info("未提取出跳跃点。")
        return

    # Enumerate all candidate segment combinations.
    en_combination = rpdc_sus_field_generate.all_combination(en_section)
    if not en_combination:
        print("疑似加密字段为空。")
        logging.info("疑似加密字段为空。")
        return

    # Collapse each combination to its [first, last] endpoints; combinations
    # with fewer than two elements are dropped. (The original then filtered
    # out empty lists, but every kept item has length 2, so that filter was
    # a no-op and has been removed.)
    adjusted_combinations = [[comb[0], comb[-1]] for comb in en_combination if len(comb) >= 2]

    # NOTE(review): the adjusted pairs are only printed; the raw
    # en_combination is what gets returned — confirm this is intended.
    for comb in adjusted_combinations:
        print(comb)
        logging.info(comb)

    return en_combination

def my_moments(data, n):
    """Return the n-th standardized moment of `data` (intended for n > 2).

    The values are standardized first (mean subtracted, divided by the
    standard deviation) and the n-th powers are then averaged. A tiny
    epsilon guards against a zero standard deviation.
    """
    arr = np.asarray(data)
    mu = np.mean(arr)
    sigma = np.std(arr)
    if sigma == 0.0:
        sigma = sigma + 0.000001
    standardized = (arr - mu) / sigma
    return np.sum(standardized ** n) / len(data)

def classify_based_4_moments(all_combi, test):
    """Classify samples by matching candidate encrypted segments via moments.

    For each sample, every candidate segment combination in `all_combi` is
    scored by comparing four-moment feature vectors (mean, variance,
    skewness, kurtosis) — standardized per vector — against the moments of
    a uniform byte distribution over 0..255 (mean 127.5, variance 5461.25,
    skewness 0, kurtosis ≈ -1.2), since an encrypted segment should look
    uniform. The candidate maximizing (non-encrypted-part distance from
    uniform) − (encrypted-part distance from uniform) is chosen.

    Returns (class_result, s_k): class_result is an (n, 2) float array of
    chosen [start, end] per sample ([-1, -1] when "no encrypted segment"
    scores best); s_k holds the per-candidate moment features.

    Raises RuntimeError if the user cancels the operation.
    """
    class_result = []
    s_k = []
    for sample in test:
        if __Cannel_TASK_WAIT_HANDLE__:
            # `raise "<str>"` is a TypeError in Python 3; raise a real exception.
            raise RuntimeError("用户已取消当前操作")
        i = 0
        d_temp = []
        s_k_t = []  # per-candidate moment features for this sample
        f_av = np.array([127.5, 5461.25, 0, -1.2])  # uniform-distribution reference moments
        f_av = (f_av - np.mean(f_av)) / np.std(f_av)  # standardize the reference vector
        while i < len(all_combi):
            if len(all_combi[i]) == 0:  # empty pattern means "no encrypted segment"
                un_session_test = sample
                un_mean = np.mean(un_session_test)
                un_var = np.var(un_session_test)
                un_skew = pd.Series(un_session_test).skew()
                un_kurt = my_moments(un_session_test, 4)
                f_un = np.array([un_mean, un_var, un_skew, un_kurt])
                f_un = (f_un - np.mean(f_un)) / np.std(f_un)
                d1 = np.linalg.norm(f_un - f_av)  # whole-sample distance from uniform
                d = d1 - 0.025  # small offset on the "no segment" score — TODO confirm rationale
                s_k_t.append([un_mean, un_var, un_skew, un_kurt, -1, -1, -1, -1])
            else:
                if all_combi[i][-1] < len(sample):  # the pattern must fit inside the sample (0-based)
                    # Split the sample into encrypted / non-encrypted parts.
                    en_session_test = [sample[x] for x in all_combi[i]]
                    un_session_test = [sample[x] for x in range(len(sample)) if x not in all_combi[i]]
                    un_mean = np.mean(np.asfarray(un_session_test))
                    un_var = np.var(np.asfarray(un_session_test))
                    un_skew = pd.Series(un_session_test).skew()
                    un_kurt = my_moments(un_session_test, 4)
                    f_un = np.array([un_mean, un_var, un_skew, un_kurt])
                    f_un = (f_un - np.mean(f_un)) / np.std(f_un)
                    d1 = np.linalg.norm(f_un - f_av)  # non-encrypted part vs. uniform
                    en_mean = np.mean(np.asfarray(en_session_test))
                    en_var = np.var(np.asfarray(en_session_test))
                    en_skew = pd.Series(en_session_test).skew()
                    en_kurt = my_moments(en_session_test, 4)
                    f_en = np.array([en_mean, en_var, en_skew, en_kurt])
                    f_en = (f_en - np.mean(f_en)) / np.std(f_en)
                    d2 = np.linalg.norm(f_en - f_av)  # encrypted part vs. uniform
                    d = d1 - d2
                    s_k_t.append([un_mean, un_var, un_skew, un_kurt, en_mean, en_var, en_skew, en_kurt])
                else:
                    # Pattern exceeds the sample length: different format.
                    # Use a very negative score since the max is taken below.
                    d = -100
                    s_k_t.append(['Skip'])
            d_temp.append(d)
            i = i + 1
        s_k.append(s_k_t)  # keep the per-candidate features
        max_index = d_temp.index(max(d_temp))  # best-scoring candidate wins
        if len(all_combi[max_index]) != 0:
            class_result.append([all_combi[max_index][0], all_combi[max_index][-1]])
        else:
            class_result.append([-1, -1])
    class_result = np.asfarray(class_result).reshape(-1, 2)
    return class_result, s_k

def ming_mi_pan_bie(res):
    """Prefix each [start, end] prediction with a plain/cipher flag.

    Rows whose start index is -1 (no encrypted segment) get flag 0; all
    others get flag 1. Returns an (n, 3) float array of [flag, start, end].
    """
    rows = []
    for item in res:
        span = item[0:2].reshape(1, 2)
        if span[0][0] == -1:
            prefix = np.zeros([1, 1])
        else:
            prefix = np.ones([1, 1])
        rows.append(np.hstack((prefix, span)))
    if not rows:
        return np.zeros([0, 3])
    return np.vstack(rows)

def acc_compute1(label, pred, name):
    """Compute recall/precision statistics for encrypted-field predictions.

    A sample whose whole message is unencrypted carries the label [-1, -1].
    For encrypted fields two accuracies are reported: whole-message
    accuracy (the predicted field must match the label exactly, no offset
    allowed) and byte-level accuracy (fraction of bytes predicted right).

    Parameters: `label` and `pred` are numpy arrays of [start, end] pairs;
    `name` is a tag used in the printed/logged report.

    Returns (acc_un_sam, precision_un_sam, acc_msg, acc_byte,
    precision_byte, FP, TP, FN, TN); metrics that could not be computed
    stay -1, except the byte metrics which are converted to [] at the end.
    """
    print('-------开始加密字段匹配-----------')
    logging.info('-------开始加密字段匹配-----------')
    print('%s测试样本数： %s' % (name, len(label)))
    logging.info('%s测试样本数： %s' % (name, len(label)))
    FP = 0
    TP = 0
    FN = 0
    TN = 0

    i = 0
    all_un_sam = 0  # count of truly unencrypted samples
    corr_un_sam = 0  # unencrypted samples predicted correctly
    all_pred_un_sam = 0  # samples predicted as unencrypted

    corr_en_sam = 0  # samples whose encrypted field matched exactly
    corr_en_bytes = 0  # bytes predicted correctly
    all_en_sam = 0  # count of all encrypted samples
    all_en_bytes = 0  # total truly-encrypted bytes
    all_pred_en_bytes = 0  # total bytes predicted as encrypted
    all_pred_en_sam = 0  # samples predicted to contain an encrypted field
    while i < len(label):
        if label[i][0] != -1:  # the sample has an encrypted field
            all_en_sam += 1
            if label[i][0] == pred[i][0] and label[i][1] == pred[i][1]:
                corr_en_sam += 1
            temp1 = list(range(int(label[i][0]), int(label[i][1] + 1)))  # expand [start, end] into an index sequence
            if pred[i][0] != -1:
                temp2 = list(range(int(pred[i][0]), int(pred[i][1] + 1)))
                all_pred_en_sam += 1
            else:
                temp2 = []
                all_pred_un_sam += 1
            corr_en_bytes += len(set(temp1) & set(temp2))  # overlap of true and predicted index sets = correct bytes
            all_en_bytes += len(temp1)  # length of the true encrypted field
            all_pred_en_bytes += len(temp2)  # total length predicted as encrypted
            i += 1
        else:  # the sample is unencrypted
            all_un_sam += 1
            if pred[i][0] == -1:
                corr_un_sam += 1
                all_pred_un_sam += 1
            else:
                all_pred_en_sam += 1
            i += 1
    # -1 marks "metric not computable from this data".
    acc_un_sam = -1
    precision_un_sam = -1
    acc_byte = -1
    precision_byte = -1
    acc_msg = -1
    if all_pred_un_sam != 0:
        if all_un_sam != 0:  # some truly unencrypted samples exist
            acc_un_sam = corr_un_sam / all_un_sam
            precision_un_sam = corr_un_sam / all_pred_un_sam
            print("测试集非加密样本查全率rec：%s" % acc_un_sam)  # share of truly unencrypted samples found
            logging.info("测试集非加密样本查全率rec：%s" % acc_un_sam)
            print("测试集非加密查准率pre：%s" % precision_un_sam)  # share of predicted-unencrypted that truly are
            logging.info("测试集非加密查准率pre：%s" % precision_un_sam)
            FP = all_pred_en_sam
            TP = 0
            TN = all_un_sam - all_pred_en_sam
            FN = 0
        else:
            print("测试数据无非加密样本，错误预测为非加密样本的数量：%s" % all_pred_un_sam)
            logging.info("测试数据无非加密样本，错误预测为非加密样本的数量：%s" % all_pred_un_sam)
            FP = 0
            TP = all_en_sam - all_pred_un_sam
            TN = 0
            FN = all_pred_un_sam
    if all_pred_en_sam != 0:
        if all_en_bytes != 0:
            acc_byte = corr_en_bytes / all_en_bytes
            precision_byte = corr_en_bytes / all_pred_en_bytes
            if corr_en_bytes == 0:
                print("测试集未能正确划分！")
                logging.info('测试集未能正确划分！')
            else:
                acc_msg = corr_en_sam / all_en_sam
                f1 = 2 * acc_byte * precision_byte / (acc_byte + precision_byte)
                print("测试集字节查全率b_rec(Cov_b)：%s" % acc_byte)  # share of truly encrypted bytes found
                logging.info("测试集字节查全率b_rec(Cov_b)：%s" % acc_byte)
                print("测试集字节查准率b_pre(Cov_f)：%s" % precision_byte)  # share of predicted-encrypted bytes that truly are
                logging.info("测试集字节查准率b_pre(Cov_f)：%s" % precision_byte)
                print("测试集F1值：%s" % f1)
                logging.info("测试集F1值：%s" % f1)
                # NOTE(review): TP/FN here overwrite the values set in the
                # first branch when both branches run — confirm intent.
                FN = all_pred_un_sam
                TP = all_en_sam - all_pred_un_sam
                TN = 0
                FP = 0
        else:
            print("测试数据无加密样本，错误识别为含有加密段样本的数量：%s" % all_pred_en_sam)
            logging.info("测试数据无加密样本，错误识别为含有加密段样本的数量：%s" % all_pred_en_sam)
            FP = all_pred_en_sam
            TP = 0
            TN = all_un_sam - all_pred_en_sam
            FN = 0

    # Unavailable byte metrics are returned as empty lists.
    if acc_byte == -1:
        acc_byte = []
    if precision_byte == -1:
        precision_byte = []

    return acc_un_sam, precision_un_sam, acc_msg, acc_byte, precision_byte, FP, TP, FN, TN

def int_to_binary(n):
    """Return *n* as a zero-padded 8-bit binary string (e.g. 5 -> '00000101')."""
    return '{:08b}'.format(int(n))
# Convert one row of byte values into a flat array of its individual bits.
def row_to_binary_array(row):
    """Expand every value in *row* into 8 binary digits and return a uint8 array of bits."""
    digit_chunks = [int_to_binary(int(value)) for value in row]
    return np.array(list(''.join(digit_chunks)), dtype=np.uint8)

def encr_field_match(data, label, en_combination):
    """Match encrypted fields in *data* against the candidate set *en_combination*.

    Two operating modes:
      * *label* non-empty: ``data``/``label`` are dicts keyed by dataset name.
        For each dataset the classifier is run, per-row predictions are written
        to '<key>加密字段识别结果（内部数据）.txt', byte-level recall/precision are
        accumulated and averaged metrics are printed and logged.
      * *label* empty: ``data`` is a single array; predictions are written to
        '加密字段识别结果.txt' and the returned dicts are empty.

    Returns:
        (processed_data, result_ziduan, label_dict) — per-key bit arrays,
        predicted field positions (last-bit index) and label positions.

    Raises:
        RuntimeError: if the module-level cancel flag is set during processing.
    """
    if len(label) != 0:
        FP = 0
        TP = 0
        FN = 0
        TN = 0
        cov_b = []  # per-dataset byte recall ([] placeholder when undefined)
        cov_f = []  # per-dataset byte precision ([] placeholder when undefined)

        # Expand every row of every dataset into its individual bits.
        processed_data = {}
        for key, array in data.items():
            processed_data[key] = np.array([row_to_binary_array(row) for row in array])

        # Convert byte positions to the index of each byte's last bit.
        label_dict = {}
        for key, array in label.items():
            label_dict[key] = (np.array(array) + 1) * 8 - 1

        result_ziduan = {}
        for item in data.keys():
            if __Cannel_TASK_WAIT_HANDLE__:
                # BUGFIX: 'raise <str>' is a TypeError in Python 3
                # (exceptions must derive from BaseException).
                raise RuntimeError("用户已取消当前操作")
            result, s = classify_based_4_moments(en_combination, data[item])
            result_ziduan[item] = (result + 1) * 8 - 1
            en_acc_sam, en_pre_sam, acc_frame, byte_acc, byte_preci, fp, tp, fn, tn = acc_compute1(
                np.asarray(label[item]), result[:, 0:2], str(item))

            # Persist this dataset's predictions (1-based row index + verdict columns).
            temp = np.arange(1, len(result) + 1, 1)
            res = ming_mi_pan_bie(result)
            res_out = np.hstack((temp.reshape(len(temp), 1), res.reshape(len(res), 3)))
            f_name = str(item) + r'加密字段识别结果（内部数据）.txt'
            np.savetxt(f_name, res_out, fmt='%0.0f', delimiter=',')
            FP = FP + fp
            FN = FN + fn
            TP = TP + tp
            TN = TN + tn
            cov_b.append(byte_acc)
            cov_f.append(byte_preci)

        # Drop '[]' placeholder entries before averaging.
        while [] in cov_b:
            cov_b.remove([])
        while [] in cov_f:
            cov_f.remove([])
        avg_cov_b = np.mean(cov_b)
        avg_cov_f = np.mean(cov_f)
        avg_f1 = 2 * avg_cov_b * avg_cov_f / (avg_cov_b + avg_cov_f)
        f1_sam = 2 * TP / (2 * TP + FP + FN)
        acc_sam = (TP + TN) / (TP + FN + TN + FP)
        print("-----------------------")
        print("平均字节查全率b_rec(Cov_b)：%s" % avg_cov_b)
        print("平均字节查准率b_pre(Cov_f)：%s" % avg_cov_f)
        print("平均字节F1值：%s" % avg_f1)
        # print("加密与非加密分类识别准确率：%s" % acc_sam)
        # print("加密与非加密分类识别F1值：%s" % f1_sam)
        logging.info("-----------------------")
        logging.info("平均字节查全率b_rec(Cov_b)：%s" % avg_cov_b)
        logging.info("平均字节查准率b_pre(Cov_f)：%s" % avg_cov_f)
        logging.info("平均字节F1值：%s" % avg_f1)
        logging.info("加密与非加密分类识别准确率：%s" % acc_sam)
        logging.info("加密与非加密分类识别F1值：%s" % f1_sam)
    else:
        # Unlabeled data: only write predictions, return empty dicts.
        result_ziduan = {}
        processed_data = {}
        label_dict = {}
        result, s = classify_based_4_moments(en_combination, data)
        # Persist the predictions (1-based row index + verdict columns).
        temp = np.arange(1, len(result) + 1, 1)
        res = ming_mi_pan_bie(result)
        res_out = np.hstack((temp.reshape(len(temp), 1), res.reshape(len(res), 3)))
        np.savetxt(r'加密字段识别结果.txt', res_out, fmt='%0.0f', delimiter=',')
        print("匹配结束，结果已保存至文本文件！")
        logging.info("匹配结束，结果已保存至文本文件！")

    return processed_data, result_ziduan, label_dict

def binary_strings_to_integers(binary_strings):
    """Split a string of binary digits into bytes and return their integer values.

    If the length of *binary_strings* is not a multiple of 8, it is padded
    with '0' bits on the right so that the final chunk is a full byte.
    (The original appended exactly one '0', which only completed the last
    byte when ``len % 8 == 7``; its comment claimed front-padding, but that
    would shift every byte value, so right-padding is kept.)

    >>> binary_strings_to_integers('0000000100000010')
    [1, 2]
    """
    remainder = len(binary_strings) % 8
    if remainder:
        # BUGFIX: pad all the way to the next multiple of 8, not by one digit.
        binary_strings += '0' * (8 - remainder)
    # Interpret each consecutive 8-digit group as one byte value.
    return [int(binary_strings[i:i + 8], 2)
            for i in range(0, len(binary_strings), 8)]


def binary_df_to_mingmi(df, L):
    """Split a DataFrame of binary strings into plaintext and ciphertext arrays.

    Each row of *df* must provide a binary digit string in column ``data``
    and a ``label`` (0 = plaintext, anything else = ciphertext).  Every
    decoded byte sequence is truncated or zero-padded to exactly *L* bytes.

    Returns:
        (ming, mi): two 2-D numpy arrays of byte values
        (plaintext rows, ciphertext rows).

    Raises:
        RuntimeError: if the module-level cancel flag is set.
    """
    all_list_ming = []
    all_list_mi = []
    for _, row in df.iterrows():
        if __Cannel_TASK_WAIT_HANDLE__:
            # BUGFIX: 'raise <str>' is a TypeError in Python 3
            # (exceptions must derive from BaseException).
            raise RuntimeError("用户已取消当前操作")
        byte_str = binary_strings_to_integers(row['data'])
        # Force a fixed length L: truncate long rows, zero-pad short ones.
        if len(byte_str) > L:
            byte_str = byte_str[:L]
        elif len(byte_str) < L:
            byte_str += [0] * (L - len(byte_str))
        if row['label'] == 0:
            all_list_ming.append(byte_str)
        else:
            all_list_mi.append(byte_str)
    print('外部数据加载成功')
    logging.info('外部数据加载成功')
    return np.array(all_list_ming), np.array(all_list_mi)

def main(parameters):
    """Run one pass of the tool according to ``parameters['mode']``.

    mode 0 — load data (internal or external CSV) and produce distribution
             plots for the display page;
    mode 1 — LSTM training and transfer classification (load / train /
             classify steps gated by button flags);
    mode 2 — CNN training, encryption-probability computation,
             suspected-field generation and encrypted-field matching.

    Intermediate results are pickled/saved under '<path>/过程文件' so later
    steps can run without re-loading.  Returns a dict consumed by the UI.
    """

    path = parameters['path']
    dir_path = os.path.join(path, '过程文件')

    # Create the directory for intermediate files if it does not exist.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    mode = parameters['mode']
    # Defaults returned when the corresponding step did not run.
    p = []
    p_derivative = []
    p_der_mean = 0.0
    img_zhanshi_ming = ''
    img_zhanshi_mi = ''
    data1 = []
    data2 = []
    load_ok = True
    processed_data = {}
    result_ziduan = {}
    label_dict = {}

    if mode == 0:
        waibu_data_1 = parameters['waibu_data_1']
        jieduan = parameters['jieduan']
        jiazai_0 = parameters['jiazai_0']

        if jiazai_0 == 1:
            if waibu_data_1 == '' or waibu_data_1==None:
                # Internal datasets: per-protocol record counts from the UI.
                n_text_pain = parameters['n_text_pain']
                n_acars_pain = parameters['n_acars_pain']
                n_ais_pain = parameters['n_ais_pain']
                n_ais1_pain = parameters['n_ais1_pain']
                n_ais4_pain = parameters['n_ais4_pain']
                n_http = parameters['n_http']
                n_ssh = parameters['n_ssh']
                n_dns = parameters['n_dns']
                n_ftp = parameters['n_ftp']
                n_smtp = parameters['n_smtp']
                n_acars_encr = parameters['n_acars_encr']
                n_ais_encr = parameters['n_ais_encr']
                n_ais1_encr = parameters['n_ais1_encr']
                n_ais4_encr = parameters['n_ais4_encr']
                n_text_encr = parameters['n_text_encr']
                data1, data2 = load_data_payload(path, n_text_pain, n_text_encr, n_acars_pain, n_acars_encr, n_ais_pain,
                                                 n_ais_encr, n_ais1_pain, n_ais1_encr, n_ais4_pain, n_ais4_encr, n_http,
                                                 n_ssh, n_dns, n_ftp, n_smtp, jieduan)
            else:
                # External CSV — assumes columns 'data' (binary string) and
                # 'label' as consumed by binary_df_to_mingmi; TODO confirm.
                data = pd.read_csv(waibu_data_1)
                data1, data2 = binary_df_to_mingmi(data, jieduan)
            # Distribution plots for plaintext and ciphertext data.
            img_zhanshi_ming = preprocess_2_data_distribution_compute.data_distri_plot(data1, 'ming_distri')
            img_zhanshi_mi = preprocess_2_data_distribution_compute.data_distri_plot(data2, 'mi_distri')

    if mode == 1:
        waibu_data_1 = parameters['waibu_data_1']
        jieduan = parameters['jieduan']
        jiazai_1 = parameters['jiazai_1']
        jiazai_2 = parameters['jiazai_2']

        # Step 1: load and split the training data.
        if jiazai_1 == 1:
            rati = parameters['rati']
            if waibu_data_1 == '' or waibu_data_1==None:
                n_text_pain = parameters['n_text_pain']
                n_acars_pain = parameters['n_acars_pain']
                n_ais_pain = parameters['n_ais_pain']
                n_ais1_pain = parameters['n_ais1_pain']
                n_ais4_pain = parameters['n_ais4_pain']
                n_http = parameters['n_http']
                n_ssh = parameters['n_ssh']
                n_dns = parameters['n_dns']
                n_ftp = parameters['n_ftp']
                n_smtp = parameters['n_smtp']
                n_acars_encr = parameters['n_acars_encr']
                n_ais_encr = parameters['n_ais_encr']
                n_ais1_encr = parameters['n_ais1_encr']
                n_ais4_encr = parameters['n_ais4_encr']
                n_text_encr = parameters['n_text_encr']
                pain, encr = load_data_payload(path, n_text_pain, n_text_encr, n_acars_pain, n_acars_encr, n_ais_pain,
                                               n_ais_encr,
                                               n_ais1_pain, n_ais1_encr,
                                               n_ais4_pain, n_ais4_encr, n_http, n_ssh, n_dns, n_ftp, n_smtp, L=jieduan)
                print("内部数据加载成功")
                logging.info("内部数据加载成功")
            else:
                data = pd.read_csv(waibu_data_1)
                pain, encr = binary_df_to_mingmi(data, jieduan)

            # Y_train0 / Y_test10 are one-hot encoded.
            X_train0, Y_train0, X_test0, Y_test0, Y_test10 = lstm_process_source_data(pain, encr, ratio=rati,
                                                                                      L=jieduan)

            # Pickle the split so a later 'train' click can reuse it.
            xunlian_duqu_1 = {
                'X_train0': X_train0,
                'Y_train0': Y_train0,
                'X_test0': X_test0,
                'Y_test0': Y_test0,
                'Y_test10': Y_test10
            }
            with open(os.path.join(dir_path, 'xunlian_1.pickle'), 'wb') as f:
                pickle.dump(xunlian_duqu_1, f)

        xunlian = parameters['xunlian']

        # Step 2: train the LSTM (jiazai_1 == 0 means reuse the pickled split).
        if (jiazai_1 == 1 or jiazai_1 == 0) and xunlian == 1:

            if jiazai_1 == 0:
                # NOTE(review): pickle.load assumes the intermediate file is trusted.
                with open(os.path.join(dir_path, 'xunlian_1.pickle'), 'rb') as f:
                    xunlian_duqu_1 = pickle.load(f)
                X_train0 = xunlian_duqu_1['X_train0']
                Y_train0 = xunlian_duqu_1['Y_train0']
                X_test0 = xunlian_duqu_1['X_test0']
                Y_test0 = xunlian_duqu_1['Y_test0']
                Y_test10 = xunlian_duqu_1['Y_test10']

            model_name = parameters['model_name']
            # Hyper-parameter definitions.
            lr = parameters['learning_rate']
            batch_size = 512
            epoches = parameters['epoches']
            n_inputs = 1
            n_steps = jieduan
            n_hidden_units = 15
            n_classes = 2

            # Build the LSTM model.
            model = tf.keras.models.Sequential([
                tf.keras.layers.Dense(units=n_hidden_units, input_shape=(n_steps, n_inputs)),
                tf.keras.layers.Dense(units=n_hidden_units),
                tf.keras.layers.LSTM(n_hidden_units * 2, return_sequences=False),
                # tf.keras.layers.Dense(units=n_hidden_units),
                # tf.keras.layers.Flatten(),  # flatten the LSTM output if return_sequences=True
                tf.keras.layers.Dense(n_classes, activation='softmax')
            ])

            model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

            model.summary()

            lstm_len = jieduan
            accuracy, recall_sklearn, precision_sklearn, f1_sklearn = lstm(model=model, epochs=epoches, batch_size=batch_size, X_train=X_train0,
                 Y_train=Y_train0,
                 X_test=X_test0, Y_test=Y_test0, Y_test1=Y_test10, L=lstm_len)
            # MODIFY: default model name is derived from the truncation length when empty.
            if model_name == '':
                model_name = f'model_{jieduan}B.h5'

            model.save(model_name)

        if jiazai_1 == -1 and xunlian == 1:
            print('未加载数据！')
            logging.info('未加载数据！')
            load_ok = False

        # Step 3: load the classification (test) data.
        if jiazai_2 == 1:
            if waibu_data_1 == '' or waibu_data_1==None:
                n_text_pain = parameters['n_text_pain']
                n_acars_pain = parameters['n_acars_pain']
                n_ais_pain = parameters['n_ais_pain']
                n_ais1_pain = parameters['n_ais1_pain']
                n_ais4_pain = parameters['n_ais4_pain']
                n_http = parameters['n_http']
                n_ssh = parameters['n_ssh']
                n_dns = parameters['n_dns']
                n_ftp = parameters['n_ftp']
                n_smtp = parameters['n_smtp']
                n_acars_encr = parameters['n_acars_encr']
                n_ais_encr = parameters['n_ais_encr']
                n_ais1_encr = parameters['n_ais1_encr']
                n_ais4_encr = parameters['n_ais4_encr']
                n_text_encr = parameters['n_text_encr']
                pain_test, encr_test = load_data_payload(path, n_text_pain, n_text_encr, n_acars_pain, n_acars_encr, n_ais_pain,
                                                         n_ais_encr, n_ais1_pain, n_ais1_encr,
                                                         n_ais4_pain, n_ais4_encr, n_http, n_ssh, n_dns, n_ftp, n_smtp,
                                                         L=jieduan)
                print("内部数据加载成功")
                logging.info("内部数据加载成功")
            else:
                data = pd.read_csv(waibu_data_1)
                pain_test, encr_test = binary_df_to_mingmi(data, jieduan)

            data_test, label_test = lstm_process_target_data(pain_test, encr_test, jieduan)

            # Pickle the test data for a later 'classify' click.
            fenlei_duqu_1 = {
                'data_test': data_test,
                'label_test': label_test
            }
            with open(os.path.join(dir_path, 'fenlei_1.pickle'), 'wb') as f:
                pickle.dump(fenlei_duqu_1, f)

        fenlei = parameters['fenlei']
        # Step 4: run transfer-learning classification on the test data.
        if (jiazai_2 == 0 or jiazai_2 == 1) and fenlei == 1:
            if jiazai_2 == 0:
                with open(os.path.join(dir_path, 'fenlei_1.pickle'), 'rb') as f:
                    fenlei_duqu_1 = pickle.load(f)
                data_test = fenlei_duqu_1['data_test']
                label_test = fenlei_duqu_1['label_test']
            model_name = parameters['model_name']
            # TODO: may still change; the full file name will be supplied.
            if model_name == '':
                model_name = f'model_{jieduan}B.h5'

            recall_sklearn, precision_sklearn, f1_sklearn, accuracy = lstm_TL(data_test, label_test, model_name, L=jieduan)

        if jiazai_2 == -1 and fenlei == 1:
            print('未加载数据！')
            logging.info('未加载数据!')
            load_ok = False

    if mode == 2:
        jiazai_3 = parameters['jiazai_3']
        jiazai_4 = parameters['jiazai_4']
        huafen = parameters['huafen']
        gailv = parameters['gailv']
        shengcheng = parameters['shengcheng']
        pipei = parameters['pipei']
        waibu_data_1 = parameters['waibu_data_1']
        # Step 1: load CNN training data.
        if jiazai_3 == 1:
            Len = parameters['size_1']
            if waibu_data_1 == '' or waibu_data_1==None:
                n_text_pain = parameters['n_text_pain_2']
                n_text_encr = parameters['n_text_encr_2']
                n_acars_pain = parameters['n_acars_pain_2']
                n_tls = parameters['n_tsl_2']
                n_http = parameters['n_http_2']
                n_ssh = parameters['n_ssh_2']
                pain_cnn, encr_cnn = load_cnn_data(path, n_text_pain, n_text_encr, n_acars_pain, n_tls, n_http, n_ssh, Len)
                print('内部数据加载成功')
                logging.info('内部数据加载成功')
            else:
                # Augment external plaintext with constant-valued rows
                # (10 copies of each byte value 0..255).
                d_same = []
                for i in range(256):
                    temp = [i] * Len * Len
                    # NOTE(review): inner loop reuses variable i; harmless since
                    # the outer for reassigns i each iteration, but worth renaming.
                    for i in range(10):
                        d_same.append(temp)
                # NOTE(review): np.asfarray is removed in NumPy 2.0 —
                # consider np.asarray(d_same, dtype=float).
                d_same = np.asfarray(d_same)
                data = pd.read_csv(waibu_data_1)
                pain_cnn, encr_cnn = binary_df_to_mingmi(data, Len * Len)
                pain_cnn = np.vstack((pain_cnn, d_same))
            X_train2, Y_train2, X_test2, Y_test2 = cnn_data_process(path, pain_cnn, encr_cnn, Len)

            xunlian_duqu_2 = {
                'X_train2': X_train2,
                'Y_train2': Y_train2,
                'X_test2': X_test2,
                'Y_test2': Y_test2,
                'Len': Len
            }
            with open(os.path.join(dir_path, 'xunlian_2.pickle'), 'wb') as f:
                pickle.dump(xunlian_duqu_2, f)

        # Step 2: train the CNN field-division model.
        if (jiazai_3 == 0 or jiazai_3 == 1) and huafen == 1:
            if jiazai_3 == 0:
                with open(os.path.join(dir_path, 'xunlian_2.pickle'), 'rb') as f:
                    xunlian_duqu_2 = pickle.load(f)
                X_train2 = xunlian_duqu_2['X_train2']
                Y_train2 = xunlian_duqu_2['Y_train2']
                X_test2 = xunlian_duqu_2['X_test2']
                Y_test2 = xunlian_duqu_2['Y_test2']
                Len = xunlian_duqu_2['Len']
            model_name = parameters['cnn_model_name']
            # if Len == 0:
            #     Len = 8
            if model_name == '':
                model_name = f"cnn_{Len}-{Len}.h5"
            CNN_train(X_train2, Y_train2, X_test2, Y_test2, Len, 20, model_name)
        if jiazai_3 == -1 and huafen == 1:
            print('未加载数据！')
            logging.info('未加载数据！')
            load_ok = False

        # Step 3: load the data used for probability computation / matching.
        if jiazai_4 == 1:
            Len = parameters['size_2']
            waibu_data_2 = parameters['waibu_data_2']
            if Len == 0:
                Len = 8
            if waibu_data_2 == '' or waibu_data_2 == None:
                n_acars_up_encr = parameters['n_acars_up_encr']
                n_acars_down_encr = parameters['n_acars_down_encr']
                n_tls = parameters['n_tsl_3']
                n_ssh = parameters['n_ssh_3']
                n_ais_en = parameters['n_ais_en']
                n_acars_down_pain = 0   # parameters['n_acars_down_pain']
                n_acars_up_pain = 0   # parameters['n_acars_up_pain']
                n_ais_pain = 0   # parameters['n_ais_pa']
                n_http = 0   # parameters['n_http_3']
                n_dns = 0   # parameters['n_dns_3']
                data_pad01, file_out01, sel_data01, label01 = rpdctestdata(path, n_acars_up_encr,n_acars_down_encr,n_tls,n_ssh,n_ais_en,n_acars_down_pain,n_acars_up_pain,n_ais_pain,n_http,n_dns)
            else:
                data = pd.read_csv(waibu_data_2)
                # The 'field' column stores Python lists serialized as strings.
                data['field'] = data['field'].apply(convert_list_str)
                data_pad01, file_out01, sel_data01, label01 = process_ziduan_divide(data)
                print('外部数据加载成功')
                logging.info('外部数据加载成功')
            huafen_duqu = {
                'data_pad01': data_pad01,
                'file_out01': file_out01,
                'sel_data01': sel_data01,
                'label01': label01,
                'Len': Len
            }
            with open(os.path.join(dir_path, 'huafen.pickle'), 'wb') as f:
                pickle.dump(huafen_duqu, f)

        # Step 4: compute encryption probabilities with the trained CNN.
        if (jiazai_4 == 0 or jiazai_4 == 1) and gailv == 1:
            if jiazai_4 == 0:
                with open(os.path.join(dir_path, 'huafen.pickle'), 'rb') as f:
                    huafen_duqu = pickle.load(f)
                data_pad01 = huafen_duqu['data_pad01']
                file_out01 = huafen_duqu['file_out01']
                sel_data01 = huafen_duqu['sel_data01']
                label01 = huafen_duqu['label01']
                Len = huafen_duqu['Len']

            cnn_model_name = parameters['load_cnn_name']
            if cnn_model_name == None or cnn_model_name == '':
                cnn_model_name = f'cnn_{Len}-{Len}.h5'
            p, p_derivative = rpdcgn2(data_pad01, cnn_model_name, file_out01, Len, sel_data01, label01)
            # Mean of each derivative's first element — used as the default threshold.
            p_der_mean = np.mean([arr[0] for arr in p_derivative])
            np.save(os.path.join(dir_path, 'p.npy'), p)
            np.save(os.path.join(dir_path, 'p_derivative_mean.npy'), p_der_mean)
        if jiazai_4 == -1 and gailv == 1:
            print('未加载数据！')
            logging.info('未加载数据！')
            load_ok = False

        # Step 5: generate suspected encrypted-field combinations.
        if (gailv == 0 or gailv == 1) and shengcheng == 1:
            if gailv == 0:
                p = np.load(os.path.join(dir_path, 'p.npy'))
                p_der_mean = np.load(os.path.join(dir_path, 'p_derivative_mean.npy'))
            yuzhi_if = parameters['yuzhi_if']
            # Use the user's threshold when provided, otherwise the derivative mean.
            if yuzhi_if == 1:
                yuzhi = parameters['yuzhi']
            else:
                yuzhi = p_der_mean
            en = rpdcgn3(yuzhi, p)
            with open(os.path.join(dir_path, 'en.json'), 'w') as file:
                json.dump(en, file, indent=4)
        if gailv == -1 and shengcheng == 1:
            print('未计算概率！')
            logging.info('未计算概率！')
            load_ok = False

        # Step 6: match the generated fields against the data.
        if (gailv == 0 or gailv == 1) and pipei == 1:
            if gailv == 0:
                with open(os.path.join(dir_path, 'en.json'), 'r') as file:
                    en = json.load(file)
                with open(os.path.join(dir_path, 'huafen.pickle'), 'rb') as f:
                    huafen_duqu = pickle.load(f)
                sel_data01 = huafen_duqu['sel_data01']
                label01 = huafen_duqu['label01']
            processed_data, result_ziduan, label_dict = encr_field_match(sel_data01, label01, en)
        if gailv == -1 and pipei == 1:
            print('未生成加密字段！')
            logging.info('未生成加密字段！')
            load_ok = False


    result_dict = {
        'img_zhanshi_ming': img_zhanshi_ming,     # UI page 1, mode=0, plot: plaintext distribution
        'img_zhanshi_mi': img_zhanshi_mi,     # UI page 1, mode=0, plot: ciphertext distribution
        'form_1': data1,   # UI page 1, mode=0, plaintext table, scrollable like a spreadsheet
        'form_2': data2,   # UI page 1, mode=0, ciphertext table, scrollable like a spreadsheet
        # UI page 3, mode=2: y-axis data for the plots; legends are in
        # p_plot.png and p_derivative_plot.png; grid lines required.
        'p': p,
        'p_derivative': p_derivative,
        'p_der_mean': p_der_mean,
        'load_ok': load_ok,  # whether loading succeeded: True resets the clicked button from -1 to 0, False keeps -1
        # UI page 3, mode=2
        'data': processed_data,
        'label': label_dict,
        'predict': result_ziduan
    }
    return result_dict

def main2(parameters):
    """Sweep one hyper-parameter (segment length, learning rate or epochs)
    over the given range, training and evaluating an LSTM for each value.

    Exactly one of 'jieduan_range', 'lr_range', 'epoches_range' is expected
    to be non-None; it selects the swept parameter.  For a learning-rate
    sweep, loss/accuracy curves are plotted; otherwise the metrics are saved
    to an Excel file and plotted against the swept range.
    """

    rati = 0.9
    path = parameters['path']
    waibu_data_train = parameters['waibu_train_path']
    waibu_data_test = parameters['waibu_test_path']
    jieduan_range = parameters['jieduan_range']
    lr_range = parameters['lr_range']
    epoches_range = parameters['epoches_range']
    lr = 0.003
    epoches = 20
    jieduan = 20

    # Determine which parameter is being swept.
    # BUGFIX: 'para'/'parame' were previously undefined when all three
    # ranges were None, causing a NameError after the training loop.
    para = '长度'
    parame = None
    if jieduan_range is not None:
        para = '长度'
        parame = jieduan_range
    if lr_range is not None:
        para = '学习率'
        parame = lr_range
    if epoches_range is not None:
        para = '轮次'
        parame = epoches_range

    # Fill in defaults for the ranges that were not supplied.
    if jieduan_range is None:
        jieduan_range = [20]
    if lr_range is None:
        lr_range = [0.003]
    if epoches_range is None:
        epoches_range = [30]
    if parame is None:
        # No range supplied at all: fall back to the default length sweep.
        parame = jieduan_range
    f1 = []
    accuracy = []
    recall = []
    precision = []
    f1_TL = []
    accuracy_TL = []
    recall_TL = []
    precision_TL = []

    # Per-learning-rate training curves (only used for the LR sweep).
    lr_data = {lr: {'loss': [], 'accuracy': []} for lr in lr_range}

    data_1 = pd.read_csv(waibu_data_train)
    data_2 = pd.read_csv(waibu_data_test)
    for jieduan in jieduan_range:
        for lr in lr_range:
            for epoches in epoches_range:
                pain, encr = binary_df_to_mingmi(data_1, jieduan)
                # Y_train0 / Y_test10 are one-hot encoded.
                X_train0, Y_train0, X_test0, Y_test0, Y_test10 = lstm_process_source_data(pain, encr, ratio=rati,
                                                                                          L=jieduan)

                model_name = os.path.join(path, f'model_{jieduan}B.h5')
                # Hyper-parameter definitions.
                batch_size = 512
                n_inputs = 1
                n_steps = jieduan
                n_hidden_units = 15
                n_classes = 2

                # Build the LSTM model.
                model = tf.keras.models.Sequential([
                    tf.keras.layers.Dense(units=n_hidden_units, input_shape=(n_steps, n_inputs)),
                    tf.keras.layers.Dense(units=n_hidden_units),
                    tf.keras.layers.LSTM(n_hidden_units * 2, return_sequences=False),
                    # tf.keras.layers.Dense(units=n_hidden_units),
                    # tf.keras.layers.Flatten(),  # flatten the LSTM output if return_sequences=True
                    tf.keras.layers.Dense(n_classes, activation='softmax')
                ])

                model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])

                model.summary()

                lstm_len = jieduan

                if para == '学习率':
                    # LR sweep: keep the full training history for plotting.
                    history = lstm_2(model=model, epochs=20, batch_size=batch_size, X_train=X_train0, Y_train=Y_train0,
                     X_test=X_test0, Y_test=Y_test0, Y_test1=Y_test10, L=lstm_len)
                    lr_data[lr]['loss'].append(history.history['loss'])
                    lr_data[lr]['accuracy'].append(history.history['accuracy'])

                else:
                    accuracy_sklearn, recall_sklearn, precision_sklearn, f1_sklearn = lstm(model=model, epochs=epoches, batch_size=batch_size, X_train=X_train0,
                         Y_train=Y_train0,
                         X_test=X_test0, Y_test=Y_test0, Y_test1=Y_test10, L=lstm_len)

                    accuracy.append(accuracy_sklearn)
                    f1.append(f1_sklearn)
                    recall.append(recall_sklearn)
                    precision.append(precision_sklearn)

                model.save(model_name)

                # Transfer-learning evaluation on the held-out test CSV.
                pain_test, encr_test = binary_df_to_mingmi(data_2, jieduan)
                data_test, label_test = lstm_process_target_data(pain_test, encr_test, jieduan)
                recall_sklearn, precision_sklearn, f1_sklearn, accuracy_sklearn = lstm_TL(data_test, label_test, model_name, L=jieduan)

                accuracy_TL.append(accuracy_sklearn)
                f1_TL.append(f1_sklearn)
                recall_TL.append(recall_sklearn)
                precision_TL.append(precision_sklearn)
    if para == '学习率':
        # Plot loss and accuracy curves per learning rate.
        plt.figure(figsize=(12, 6))

        # Loss curves.
        plt.subplot(1, 2, 1)
        for lr, data in lr_data.items():
            losses = np.array(data['loss']).mean(axis=0)  # average loss per epoch across runs
            plt.plot(losses, label=f'LR={lr}')
        plt.title('Loss over Epochs')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend()

        # Accuracy curves.
        plt.subplot(1, 2, 2)
        for lr, data in lr_data.items():
            accuracies = np.array(data['accuracy']).mean(axis=0)  # average accuracy per epoch across runs
            plt.plot(accuracies, label=f'LR={lr}')
        plt.title('Accuracy over Epochs')
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend()
    else:
        dataframe = {
            'range': parame,
            'Accuracy': accuracy,
            'F1 Score': f1,
            'Recall': recall,
            'Precision': precision,
            'Accuracy TL': accuracy_TL,
            'F1 Score TL': f1_TL,
            'Recall TL': recall_TL,
            'Precision TL': precision_TL
        }

        df = pd.DataFrame(dataframe)

        # Save the DataFrame to an Excel file.
        output_file = os.path.join(path, f'output_{para}.xlsx')
        df.to_excel(output_file, index=False)
        # Plot the evaluation metrics against the swept parameter.
        plt.figure(figsize=(12, 8))
        plt.plot(parame, accuracy_TL, label='Accuracy', marker='o')
        plt.plot(parame, f1_TL, label='F1 Score', marker='x')
        plt.plot(parame, recall_TL, label='Recall', marker='s')
        plt.plot(parame, precision_TL, label='Precision', marker='^')

        # plt.title('Evaluation Metrics vs. Segment Length')
        plt.xlabel(para)
        plt.ylabel('评价指标')
        plt.legend()
        plt.grid(True)
    plt.show()

if __name__ == '__main__':
    # Parameter dict driving main(); values mirror the UI controls.
    parameters1 = {
        'path': r'D:\temp\xinda\mingmi', # 'D:/devProduction/xinda/mingmi_2024_10_12/'D:\temp\xinda\mingmi
        # Page selection
        'mode': 2,  # int, mode: 0 data display, 1 transfer classification, 2 encrypted fields

        # Pages 1 and 2 share most parameters (entered in different places), mode=0,1
        # Spin boxes: arrows add 1000 at a time or type directly; values above the limit clamp to the maximum
            # int, number of plaintext records
        'n_text_pain': 2000,    # 96194 rows, TEXT
        'n_acars_pain': 0,  # 9930 rows, ACARS
        'n_ais_pain': 0,    # 51678 rows, AIS
        'n_ais1_pain': 0,    # 150460 rows, AIS1
        'n_ais4_pain': 0,   # 6637 rows, AIS4
        'n_http': 0,    # 153831 rows, HTTP
        'n_ssh': 0,      # 11608 rows, SSH
        'n_dns': 0,     # 16137 rows, DNS
        'n_ftp': 0,  # 1569 rows, FTP
        'n_smtp': 0,    # 181 rows, SMTP
            # int, number of ciphertext records
        'n_text_encr': 100,      # 93160 rows, TEXT
        'n_acars_encr': 0,      # 9930 rows, ACARS
        'n_ais_encr': 0,        # 51678 rows, AIS
        'n_ais1_encr': 0,    # 150460 rows, AIS1
        'n_ais4_encr': 0,    # 6637 rows, AIS4
        'jieduan': 20,  # int, truncation length, (0:1:40], typed or adjusted with arrows

        'rati': 0.9,  # float, train/validation split ratio, (0.0:0.1:1.0), mode=1
        'learning_rate': 0.003,  # float, learning rate, [0.001:0.001:0.005], mode=1

        'waibu_data_1': None,    # FIXME external data path; if non-empty it is loaded instead of internal data; '' means none; shared by modes 0/1/2
        
        # Buttons: 1 means clicked
        'jiazai_0': 1,  # int, load data for display, mode=0; needs data (external or internal) and jieduan
        'jiazai_1': -1,    # int, load training data; the button is used once each for training and classification, mode=1; needs data plus rati, jieduan
        'jiazai_2': 1,    # int, load test data, mode=1; needs data (external or internal)
        'xunlian': -1,  # int, train the model, mode=1; needs model_name, learning_rate, epoches
        'fenlei': 1,  # int, run classification, mode=1; needs model_name
        # Text input
        'epoches': 100,  # int, training epochs, >0, mode=1; shown pre-filled with 100 but editable
        # File chooser plus text box
        # MODIFY: default is the empty string
        'model_name': '',  # str, model name to train/load; may be empty, typed, or chosen as an .h5 file

        # Page 3, step 1: train the CNN model, mode=2
        # Spin-box inputs
            # plaintext
        'n_text_pain_2': 100,  # 96194, TEXT
        'n_acars_pain_2': 100,  # 29472, ACARS
        'n_http_2': 100,  # 128249
            # ciphertext
        'n_tsl_2': 100,  # 126887, TLS
        'n_text_encr_2': 100,  # 46580, TEXT
        'n_ssh_2': 100,   # 3173, SSH
        # Drop-down
        'size_1': 8,  # int, CNN size, 8 or 10
        # Buttons
        'jiazai_3': 0,  # int, load: -1/0/1; needs data (internal or external) and size_1
        'huafen': 0,  # int, field division: 0/1; needs cnn_model_name
        # Text box plus file chooser
        'cnn_model_name': '',    # str, CNN model name, may be empty

        # Page 3, step 2: encryption probability, suspected-field generation, field matching
        #     # plaintext
        # 'n_acars_down_pain': 0,     # 45055, ACARS downlink
        # 'n_acars_up_pain': 100,   # 31378, ACARS downlink
        # 'n_ais_pa': 0,  # 150460, AIS
        # 'n_http_3': 0,  # 128249, HTTP
        # 'n_dns_3': 0,   # 7175, DNS
            # ciphertext
        'n_acars_up_encr': 100,    # 25741, ACARS uplink
        'n_acars_down_encr': 100,     # 23919, ACARS downlink
        'n_tsl_3': 100,    # 136389, TLS
        'n_ssh_3': 100,    # 9479, SSH
        'n_ais_en': 100,  # 150460, AIS
        'waibu_data_2': None,  # FIXME external data path; non-empty means external data is loaded; '' means none; mode=2 only
        # Drop-down
        'size_2': 8,  # int, CNN size, 8 or 10
        # Buttons
        'jiazai_4': 1,  # int, load
        # TODO probability: -1/0/1
        'gailv': 1,    # int, run probability computation
        # TODO generation: 0/1
        'shengcheng': 1,  # int, run encrypted-field generation
        # TODO matching: 0/1
        'pipei': 1,  # int, run matching
        # .h5 file chooser
        'load_cnn_name': r'D:\temp\xinda\mingmi\cnn_8-8.h5',  # str, CNN model name, may be '' or None  D:\temp\xinda\mingmi\cnn_8-8.h5
        # Checkbox
        'yuzhi_if': 0,  # int, whether the threshold is chosen manually
        # Input box: required if yuzhi_if is checked; otherwise shows 0.01 but passes 0
        'yuzhi': None,     # float, threshold, (0, 1)
        # TODO: confirm the valid range of yuzhi
    }

    # setParameters(parameters1)

    result_dict = main(parameters1)

    # Parameter dict for the hyper-parameter sweep entry point main2().
    parameters2 = {
        'path': r'D:\pythonProject\mingmi',
        'jieduan_range': None,  # np.arange(10, 40, 1)
        'lr_range': np.arange(0.001, 0.006, 0.001),     # None
        'epoches_range': None,  # np.arange(10, 61, 5)
        'waibu_train_path': r'D:\pythonProject\mingmi\dataset\外部数据_分类007.csv',
        'waibu_test_path': r'D:\pythonProject\mingmi\dataset\外部数据_分类008.csv'
    }

    # main2(parameters2)