import os
import pandas as pd
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
import tensorflow as tf
import time
from model import *
from draw_pic import *
from train_2 import train_2
from test_2 import test_2
from KDE import save_kernels, load_kernels
from tempo import *
import logging
import re
import math

# Logging configuration: DEBUG level, timestamped lines appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Module-level cancellation flag; intended to be set by Channel() and
# polled inside binary_df_to_dict() to abort long-running loops.
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Request cancellation of the current long-running operation.

    Sets the module-level flag polled by ``binary_df_to_dict``.  The
    original version assigned to a *local* variable of the same name,
    which had no effect; the ``global`` declaration makes the assignment
    visible to the rest of the module.
    """
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

def binary_strings_to_integers(binary_strings):
    """Convert a binary string into a list of byte values (ints in 0-255).

    The string is zero-padded *on the left* up to a multiple of 8 bits,
    then split into 8-character chunks, each parsed as a base-2 integer.
    (The original code appended a single '0' on the right, which padded
    the wrong side — contradicting its own comment — and could still
    leave the length a non-multiple of 8.)

    Returns an empty list for an empty input string.
    """
    # Left-pad with zeros so the length is an exact multiple of 8.
    pad = (-len(binary_strings)) % 8
    if pad:
        binary_strings = binary_strings.zfill(len(binary_strings) + pad)

    # Parse each 8-bit group as an integer.
    return [int(binary_strings[i:i + 8], 2)
            for i in range(0, len(binary_strings), 8)]

def binary_df_to_dict(df):
    """Group the binary-string payloads of ``df`` by their label.

    ``df`` must have a 'data' column (binary strings) and a 'label'
    column.  Returns ``(label_dict, n_train, kwn)`` where ``label_dict``
    maps each label to a list of byte-value lists, ``n_train`` is the
    number of rows, and ``kwn`` lists the labels in first-seen order.

    Raises RuntimeError when the module-level cancellation flag
    ``__Cannel_TASK_WAIT_HANDLE__`` is set (via ``Channel()``).
    """
    label_dict = {}
    kwn = []  # labels in order of first appearance
    for _, row in df.iterrows():
        # Abort promptly if the user cancelled the operation.
        if __Cannel_TASK_WAIT_HANDLE__:
            # The original raised a bare string, which is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise RuntimeError("用户已取消当前操作")

        data = row['data']
        label = row['label']

        # Convert the binary string into a list of byte values.
        byte_str = binary_strings_to_integers(data)

        # First occurrence of a label registers it as a known class.
        if label not in label_dict:
            label_dict[label] = []
            kwn.append(label)
        label_dict[label].append(byte_str)
    n_train = len(df)
    return label_dict, n_train, kwn

def main(parameters):
    """Run the full open-set recognition workflow.

    Three optional stages, controlled by switches in ``parameters``:
    loading AIS / NET / external-CSV data (``load_if``), training the
    autoencoder+classifier model (``train_if``) and open-set testing with
    KDE-based rejection (``test_if``).

    Returns a dict with ``return_if`` (success flag) plus plotting
    artifacts: confusion-matrix image, 2-D PCA points, cluster centers
    and per-point class labels.
    """

    mode = None             # dataset mode: 'AIS', 'NET' or 'WAIBU' (external CSV)
    kwn = None              # known (training) class names
    unkwn = None            # classes present only in the test set
    confusion_array=None
    path0 = parameters['path']
    # os.chdir(path0)

    from load_AIS_data import load_AIS_train, load_AIS_test, load_WAIBU_train, load_WAIBU_test
    from load_NET_data import load_NET_train, load_NET_test

    dir_path = os.path.join(path0, '过程文件')
    # Create the intermediate-files directory if it does not exist.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    model_path = os.path.join(path0, '模型')
    # Create the model directory if it does not exist.
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # parameters = getParameters()
    AIS_if = parameters['AIS_if']
    NET_if = parameters['NET_if']
    load_if = parameters['load_if']
    train_if = parameters['train_if']

    test_if = parameters['test_if']

    # Fixed hyper-parameters and feature switches.
    display_steps = 50
    batch_size = 100
    validation_batch_size = batch_size
    test_batch = 100
    beta = 1
    alpha = 1
    pca_if = 1
    openmax_if = 0
    shift_ratio = 1
    KDE_if = 1

    # Comparison-experiment switches (all disabled here).
    kmeans_if = 0
    only_kmeans = 0
    only_KDE = 0

    return_if = True
    packet_train = {}
    packet_test = {}
    df_sorted_both = []
    valid_indices = []
    valid_col = []
    X_pca = []
    centers = []
    predict_labels = []
    classes = []
    img = None

    start_time = time.time()  # start time for the final duration report

    if load_if == 1:
        waibu_train_path = parameters['waibu_train_path']
        waibu_test_path = parameters['waibu_test_path']
        # Empty external path -> fall back to the built-in AIS/NET loaders.
        if waibu_train_path == '' or waibu_train_path == None:
            if AIS_if == 1:
                mode = 'AIS'
                n_kwn_1 = parameters['n_kwn_1']
                n_kwn_3 = parameters['n_kwn_3']
                n_kwn_4 = parameters['n_kwn_4']
                n_kwn_5 = parameters['n_kwn_5']
                n_kwn_18 = parameters['n_kwn_18']
                n_kwn_19 = parameters['n_kwn_19']
                n_kwn_24A = parameters['n_kwn_24A']
                n_kwn_24B = parameters['n_kwn_24B']
                n_train = sum([n_kwn_1, n_kwn_3, n_kwn_4, n_kwn_5, n_kwn_18, n_kwn_19, n_kwn_24A, n_kwn_24B])
                kwn = []
                # Build the known-class name list from non-zero 'n_kwn_*' entries.
                for key, value in parameters.items():
                    if key.startswith('n_kwn_'):
                        try:
                            # Message-type suffix, e.g. '24A' from 'n_kwn_24A'.
                            number = str(key.split('_')[2])
                            # A count of 0 means this type is not used for training.
                            if value != 0:
                               # kwn.append(str(number))
                               kwn.append(f'AIS{str(number)}')
                        except ValueError:
                            # NOTE(review): str() cannot raise ValueError, so this
                            # handler looks unreachable — confirm original intent.
                            continue
                packet_train = load_AIS_train(n_kwn_1, n_kwn_3, n_kwn_4, n_kwn_5, n_kwn_18, n_kwn_19, n_kwn_24A, n_kwn_24B,
                                              kwn)
                print('训练数据加载完毕\n', '已知类：', kwn, '训练集大小：', n_train)
                logging.info(f'训练数据加载完毕')
                logging.info(f'训练集大小：{n_train}  已知类：{kwn}')

            elif NET_if == 1:
                mode = 'NET'
                kwn_n_FTP = parameters['kwn_n_FTP']
                kwn_n_NBNS = parameters['kwn_n_NBNS']
                kwn_n_NTP = parameters['kwn_n_NTP']
                kwn_n_SMTP = parameters['kwn_n_SMTP']
                kwn_n_ICMP = parameters['kwn_n_ICMP']
                kwn_n_DNS = parameters['kwn_n_DNS']
                kwn_n_ARP = parameters['kwn_n_ARP']
                kwn_n_Modbus = parameters['kwn_n_Modbus']
                kwn_n_DNP3 = parameters['kwn_n_DNP3']
                n_train = sum(
                    [kwn_n_FTP, kwn_n_NBNS, kwn_n_NTP, kwn_n_SMTP, kwn_n_ICMP, kwn_n_DNS, kwn_n_ARP, kwn_n_Modbus,
                     kwn_n_DNP3])

                kwn = []
                # Build the known-class list from non-zero 'kwn_n_*' entries.
                for key, value in parameters.items():
                    if key.startswith('kwn_n_'):
                        try:
                            # Protocol suffix, e.g. 'FTP' from 'kwn_n_FTP'.
                            number = str(key.split('_')[2])
                            if value != 0:
                                kwn.append(str(number))
                        except ValueError:
                            # NOTE(review): unreachable — str() does not raise ValueError.
                            continue
                packet_train = load_NET_train(kwn_n_FTP, kwn_n_NBNS, kwn_n_NTP, kwn_n_SMTP, kwn_n_ICMP, kwn_n_DNS, kwn_n_ARP, kwn_n_Modbus,
                     kwn_n_DNP3, kwn)
                print('训练数据加载完毕\n', '已知类：', kwn, '训练集大小：', n_train)
                logging.info(f'训练数据加载完毕')
                logging.info(f'训练集大小：{n_train}  已知类：{kwn}')
            else:
                print('未选择训练数据！')
                logging.info('未选择训练数据!')
                return_if = False

        else:
            # External training CSV supplied.
            mode = 'WAIBU'
            train_data = pd.read_csv(waibu_train_path)
            label_dict_train, n_train, kwn = binary_df_to_dict(train_data)
            packet_train = load_WAIBU_train(train_data, kwn)
            print('训练数据加载完毕\n', '已知类：', kwn, '训练集大小：', n_train)
            logging.info(f'训练数据加载完毕')
            logging.info(f'训练集大小：{n_train}  已知类：{kwn}')

        if waibu_test_path == '' or waibu_test_path == None:
            if AIS_if == 1:
                n_unkwn_1 = parameters['n_unkwn_1']
                n_unkwn_3 = parameters['n_unkwn_3']
                n_unkwn_4 = parameters['n_unkwn_4']
                n_unkwn_5 = parameters['n_unkwn_5']
                n_unkwn_18 = parameters['n_unkwn_18']
                n_unkwn_19 = parameters['n_unkwn_19']
                n_unkwn_24A = parameters['n_unkwn_24A']
                n_unkwn_24B = parameters['n_unkwn_24B']
                test = []
                # Collect every non-zero 'n_unkwn_*' message type used in testing.
                for key, value in parameters.items():
                    if key.startswith('n_unkwn_'):
                        try:
                            number = str(key.split('_')[2])
                            if value != 0:
                                test.append(f'AIS{str(number)}')
                               # test.append(str(number))
                        except ValueError:
                            # NOTE(review): unreachable — str() does not raise ValueError.
                            continue
                # Unknown classes = test types not seen during training.
                unkwn = [key for key in test if key not in kwn]
                n_test = sum(
                    [n_unkwn_1, n_unkwn_3, n_unkwn_4, n_unkwn_5, n_unkwn_18, n_unkwn_19, n_unkwn_24A, n_unkwn_24B])
                # Total number of test samples belonging to unknown classes.
                sum_values = 0
                for suffix in unkwn:
                    # Rebuild the parameter key by stripping the 'AIS' prefix.
                    key = f'n_unkwn_{suffix[3:]}'
                    if key in parameters:
                        sum_values += parameters[key]
                # NOTE(review): raises ZeroDivisionError if every test count is 0.
                openrisk = sum_values / n_test
                openness = 1 - math.sqrt(len(kwn) / (len(unkwn) + len(kwn)))

                # packet_train, packet_test = load_AIS_data(n_train, n_test, kwn, unkwn, openrisk)
                # packet_train, packet_test = load_AIS_data_1(n_kwn_1, n_kwn_3, n_kwn_4, n_kwn_5, n_kwn_18, n_kwn_19,
                #                                             n_kwn_24A, n_kwn_24B,
                #                                             n_unkwn_1, n_unkwn_3, n_unkwn_4, n_unkwn_5, n_unkwn_18,
                #                                             n_unkwn_19, n_unkwn_24A, n_unkwn_24B,
                #                                             kwn, unkwn)
                packet_test = load_AIS_test(n_unkwn_1, n_unkwn_3, n_unkwn_4, n_unkwn_5, n_unkwn_18, n_unkwn_19, n_unkwn_24A, n_unkwn_24B,
                                            kwn, unkwn)

                print('测试数据加载完毕\n', '未知类：', unkwn, '测试集大小：', n_test, '未知类测试集占比：', openrisk, '开放度：', openness)
                logging.info(f'测试数据加载完毕')
                logging.info(f'测试集大小：{n_test}   未知类：{unkwn}')
                logging.info(f'未知类测试集占比：{openrisk}     开放度：{openness}')
            elif NET_if == 1:
                unkwn_n_FTP = parameters['unkwn_n_FTP']
                unkwn_n_NBNS = parameters['unkwn_n_NBNS']
                unkwn_n_NTP = parameters['unkwn_n_NTP']
                unkwn_n_SMTP = parameters['unkwn_n_SMTP']
                unkwn_n_ICMP = parameters['unkwn_n_ICMP']
                unkwn_n_DNS = parameters['unkwn_n_DNS']
                unkwn_n_ARP = parameters['unkwn_n_ARP']
                unkwn_n_Modbus = parameters['unkwn_n_Modbus']
                unkwn_n_DNP3 = parameters['unkwn_n_DNP3']
                test = []
                # Collect every non-zero 'unkwn_n_*' protocol used in testing.
                for key, value in parameters.items():
                    if key.startswith('unkwn_n_'):
                        try:
                            number = str(key.split('_')[2])
                            if value != 0:
                                test.append(str(number))
                        except ValueError:
                            # NOTE(review): unreachable — str() does not raise ValueError.
                            continue
                # Unknown classes = test protocols not seen during training.
                unkwn = [key for key in test if key not in kwn]
                n_test = sum(
                    [unkwn_n_FTP, unkwn_n_NBNS, unkwn_n_NTP, unkwn_n_SMTP, unkwn_n_ICMP, unkwn_n_DNS, unkwn_n_ARP,
                     unkwn_n_Modbus, unkwn_n_DNP3])
                # Total number of test samples belonging to unknown classes.
                sum_values = 0
                for suffix in unkwn:
                    key = f'unkwn_n_{suffix}'
                    if key in parameters:
                        sum_values += parameters[key]

                # NOTE(review): raises ZeroDivisionError if every test count is 0.
                openrisk = sum_values / n_test
                openness = 1 - math.sqrt(len(kwn) / (len(unkwn) + len(kwn)))

                # packet_train, packet_test = load_AIS_data(n_train, n_test, kwn, unkwn, openrisk)
                # packet_train, packet_test = load_NET_data_1(kwn_n_FTP, kwn_n_NBNS, kwn_n_NTP, kwn_n_SMTP, kwn_n_ICMP,
                #                                             kwn_n_DNS, kwn_n_ARP, kwn_n_Modbus, kwn_n_DNP3,
                #                                             unkwn_n_FTP, unkwn_n_NBNS, unkwn_n_NTP, unkwn_n_SMTP,
                #                                             unkwn_n_ICMP,
                #                                             unkwn_n_DNS, unkwn_n_ARP, unkwn_n_Modbus, unkwn_n_DNP3,
                #                                             kwn, unkwn)
                packet_test = load_NET_test(unkwn_n_FTP, unkwn_n_NBNS, unkwn_n_NTP, unkwn_n_SMTP, unkwn_n_ICMP, unkwn_n_DNS, unkwn_n_ARP,
                     unkwn_n_Modbus, unkwn_n_DNP3, kwn, unkwn)

                print('测试数据加载完毕\n', '未知类：', unkwn, '测试集大小：', n_test, '未知类测试集占比：', openrisk, '开放度：', openness)
                logging.info(f'测试数据加载完毕')
                logging.info(f'测试集大小：{n_test}   未知类：{unkwn}')
                logging.info(f'未知类测试集占比：{openrisk}     开放度：{openness}')
            else:
                print('未选择测试数据！')
                logging.info('未选择测试数据!')
                return_if = False
        else:
            # External test CSV supplied.
            test_data = pd.read_csv(waibu_test_path)
            label_dict_test, n_test, test = binary_df_to_dict(test_data)
            unkwn = [key for key in test if key not in kwn]
            # Total number of test samples that carry an unknown label.
            sum_values = np.sum([len(label_dict_test[key]) for key in unkwn])
            openrisk = sum_values / n_test
            openness = 1 - math.sqrt(len(kwn) / (len(unkwn) + len(kwn)))
            packet_test = load_WAIBU_test(test_data, kwn, unkwn)
            print('测试数据加载完毕\n', '未知类：', unkwn, '测试集大小：', n_test, '未知类测试集占比：', openrisk,
                  '开放度：', openness)
            logging.info(f'测试数据加载完毕')
            logging.info(f'测试集大小：{n_test}   未知类：{unkwn}')
            logging.info(f'未知类测试集占比：{openrisk}     开放度：{openness}')



        # Cache the loaded dataset so later runs with load_if == 0 can reuse it.
        dataset = {
            'packet_train': packet_train,
            'packet_test': packet_test,
            'mode': mode,
            'kwn': kwn,
            'unkwn': unkwn
        }
        with open(os.path.join(dir_path, 'load_data.pkl'), 'wb') as f:
            pickle.dump(dataset, f)

    if (load_if == 0 or load_if == 1) and train_if == 1:
        epochs = parameters['epochs']
        initial_learning_rate = parameters['initial_learning_rate']
        validation_split = parameters['validation_split']
        model_name = parameters['model_name']

        # Fixed model/training hyper-parameters.
        feature_size = 16
        reconstruction_weight = 0.5
        classification_weight = 0.5
        size = 16
        decay_steps = 10000
        decay_rate = 0.96
        staircase = True
        first_iteration = True
        learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=initial_learning_rate,
            decay_steps=decay_steps,
            decay_rate=decay_rate,
            staircase=staircase
        )

        if load_if == 0:
            # Reuse the dataset cached by a previous loading run.
            with open(os.path.join(dir_path, 'load_data.pkl'), 'rb') as f:
                dataset = pickle.load(f)
            packet_train = dataset['packet_train']
            packet_test = dataset['packet_test']
            mode = dataset['mode']
            kwn = dataset['kwn']
            unkwn = dataset['unkwn']

        x_train = packet_train['img']
        y_train = packet_train['type']

        # Input tensors: size x size grayscale image plus an (unused) extra scalar.
        input_shape = (size, size, 1)
        input_tensor_1 = Input(shape=input_shape, name='input_image')
        extra_input = Input(shape=(1,), name='extra_feature')
        # prototypes = class_pic_train

        # Encoder; its output tensor is used directly as the feature vector.
        encoder_output = build_encoder(input_tensor_1, feature_size)
        features = encoder_output
        # distance_to_prototypes = PrototypeDistanceLayer(prototypes, name='prototype_distance')(input_tensor_1)

        # # Split encoder output and the extra input for the classifier (adjust as needed)
        # # Ensure the extra input is shape-compatible with the encoder output before concatenation
        # concat_for_classifier = Concatenate(axis=-1)([encoder_output, extra_input])
        classifier_output = build_classifier(encoder_output, len(np.unique(y_train)))

        # Decoder reconstructs the image from the encoder features.
        decoder_output = build_decoder(encoder_output)

        # Reconstructed image and classification output.
        features = encoder_output
        reconstructed_image = decoder_output
        classification_output = classifier_output

        # Final model: image input -> (reconstruction, classification).
        # model = Model(inputs=[input_tensor_1, extra_input], outputs=[reconstructed_image, classification_output])
        model = Model(inputs=input_tensor_1, outputs=[reconstructed_image, classification_output])
        feature_extraction_model = Model(inputs=input_tensor_1, outputs=features)

        # print(model.summary())

        # Named output layers (defined in model.py) used for the losses below.
        decoder_output = model.get_layer('decoder_output_layer').output

        classifier_output = model.get_layer('classifier_output_layer').output

        input_layer = model.input
        encoder_output_layer = model.get_layer('encoder_output_layer').output  # replace if the actual layer name differs
        intermediate_model = Model(inputs=input_layer, outputs=encoder_output_layer)

        # Multi-output model: MSE on the reconstruction, sparse CE on the classes.
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                      loss={'decoder_output_layer': 'mse',
                            'classifier_output_layer': 'sparse_categorical_crossentropy'},
                      loss_weights={'decoder_output_layer': reconstruction_weight,
                                    'classifier_output_layer': classification_weight},
                      metrics={'classifier_output_layer': ['accuracy']})

        if model_name == '':
            # NOTE(review): kwn is a list, so the file name embeds its repr —
            # confirm this matches the name expected at load time.
            model_name = f'{mode}_model_{kwn}.h5'

        if KDE_if == 1:
            reconstruct_threshold, class_cdfs, centers, pca, kernels, X_pca, classes = train_2(model, intermediate_model, pca_if,
                                                                               x_train, y_train, validation_split,
                                                                               batch_size, validation_batch_size,
                                                                               epochs, display_steps, kwn,
                                                                               extra_input_train=None)
            save_kernels(kernels, os.path.join(dir_path, f'{mode}_kernels_{kwn}.pkl'))

        if openmax_if == 1 or KDE_if == 1:
            # NOTE(review): pca / reconstruct_threshold are only bound when
            # KDE_if == 1; openmax_if == 1 alone would raise NameError — confirm.
            np.save(os.path.join(dir_path, f'{mode}_centers_{kwn}.npy'), centers)
            with open(os.path.join(dir_path, f'{mode}_pca_model_{kwn}.pkl'), 'wb') as f:
                pickle.dump(pca, f)
            model.save(os.path.join(model_path, model_name))
            np.save(os.path.join(dir_path, f'{mode}_threshold_{kwn}.npy'), reconstruct_threshold)

        if test_if != 1:
            # plt.show()
            plt.close('all')

    if load_if == -1 and train_if == 1:
        print('未加载数据！')
        logging.info('未加载数据！')
        return_if = False

    if (load_if == 0 or load_if == 1) and test_if == 1:
        load_model_name = parameters['model_name']
        if load_if == 0:
            with open(os.path.join(dir_path, 'load_data.pkl'), 'rb') as f:
                dataset = pickle.load(f)
            # packet_train = dataset['packet_train']
            packet_test = dataset['packet_test']
            mode = dataset['mode']
            kwn = dataset['kwn']
            unkwn = dataset['unkwn']

        if load_model_name == '':
            load_model_name = f'{mode}_model_{kwn}.h5'
        else:
            # Recover mode and kwn from an explicit '<mode>_model_<kwn>.h5' name.
            pattern = r'(?P<mode>\w+)_model_(?P<kwn>\w+)\.h5'
            match = re.match(pattern, load_model_name)

            if match:
                mode = match.group('mode')
                kwn = match.group('kwn')

        with open(os.path.join(dir_path, f'{mode}_pca_model_{kwn}.pkl'), 'rb') as f:
            loaded_pca = pickle.load(f)
        reconstruct_threshold = np.load(os.path.join(dir_path, f'{mode}_threshold_{kwn}.npy'))

        if KDE_if == 1 or only_KDE == 1:
            centers = np.load(os.path.join(dir_path, f'{mode}_centers_{kwn}.npy'))
            # Load the per-class KDE models saved during training.
            loaded_kernels = load_kernels(os.path.join(dir_path, f'{mode}_kernels_{kwn}.pkl'))

        x_test = packet_test['img']
        y_test = packet_test['type']

        model = tf.keras.models.load_model(os.path.join(model_path, load_model_name))
        # print(model.summary())

        # Rebuild the feature-extraction sub-model from the loaded model.
        input_layer = model.input
        encoder_output_layer = model.get_layer('encoder_output_layer').output  # replace if the actual layer name differs
        intermediate_model = Model(inputs=input_layer, outputs=encoder_output_layer)

        # if openmax_if == 1:
        #     predict_labels, raw_label, loss = test(model, intermediate_model, pca_if, x_test, y_test, kwn, unkwn,
        #                                            reconstruct_threshold, test_batch, loaded_pca, class_cdfs,
        #                                            weibull_qfs, centers, alpha, beta, dis_shifted, shift_ratio)
        if KDE_if == 1:
            predict_labels, raw_label, loss, accuracy, accuracy_kwn, accuracy_unkwn, f1_kwn, f1_unkwn, df_sorted_both, valid_col, valid_indices, X_pca, img = test_2(model,
                                                    intermediate_model, pca_if, x_test, y_test, kwn, unkwn,
                                                     reconstruct_threshold, test_batch, loaded_pca, loaded_kernels,
                                                     centers, alpha, beta)
            confusion_array = np.array(df_sorted_both)
            classes = predict_labels
        # NOTE(review): the accuracy/f1 variables are only bound when KDE_if == 1;
        # the prints below would raise NameError otherwise — confirm.
        print(f'整体准确度：{accuracy}')
        print(f'已知类准确度：{accuracy_kwn}   已知类F-score：{f1_kwn}')
        print(f'未知类准确度：{accuracy_unkwn}   未知类F-score：{f1_unkwn}')
        logging.info(f'整体准确度：{accuracy}')
        logging.info(f'已知类准确度：{accuracy_kwn}   已知类F-score：{f1_kwn}')
        logging.info(f'未知类准确度：{accuracy_unkwn}   未知类F-score：{f1_unkwn}')

        # plt.show()


    if load_if == -1 and test_if == 1:
        print('未加载数据！')
        logging.info('未加载数据！')
        return_if = False

    end_time = time.time()  # end time
    execution_time = end_time - start_time  # elapsed wall-clock time
    print(f"Execution time: {execution_time:.6f} seconds")
    plt.close('all')

    result_dict = {
                    'return_if': return_if,     # bool: True on success (UI returns 0), False on failure (UI returns -1)

                    # Figure 1: confusion-matrix image, drawn during testing
                    # 'confusion_matrix': confusion_array,     # ndarray, confusion matrix
                    # 'xlabel': valid_col,    # list[str], x-axis labels left to right
                    # 'ylabel': valid_indices,    # list[str], y-axis labels top to bottom
                    'confusion_img': img,

                    # Figure 2: scatter plot of the data distribution (train and test)
                    'X_pca': X_pca,  # ndarray (N, 2) float32; scatter colored per class with a legend
                    'centers': centers,  # list[ndarray(2,)]; cluster centers overlaid in another color
                    'classes': classes   # list[str]; per-point class labels, one color per class
                    }
    return result_dict

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    parameters1 = {

        # One workflow in three steps: load data, train the model, run the test.
        'path': r'D:\temp\xinda\TPOSR_port',    # directory containing the code
        # Dataset-type selection; the two switches are mutually exclusive.
        'NET_if': 0,  # int, 1 selects the NET dataset
        'AIS_if': 1,  # int, 1 selects the AIS dataset
        # AIS
        # Per-message-type sample counts (UI spinner, increments of 100).
            # training data (trailing numbers are the available totals)
        'n_kwn_1': 2000,    # 150460
        'n_kwn_3': 2000,    # 2983
        'n_kwn_4': 2000,   # 6637
        'n_kwn_5': 2000,    # 177447
        'n_kwn_18': 2000,  # 21476
        'n_kwn_19': 0,   # 9055
        'n_kwn_24A': 0,     # 6177
        'n_kwn_24B': 582,     # 582
            # test data, counts as above
        'n_unkwn_1': 500,
        'n_unkwn_3': 500,
        'n_unkwn_4': 500,
        'n_unkwn_5': 500,
        'n_unkwn_18': 500,
        'n_unkwn_19': 500,
        'n_unkwn_24A': 500,
        'n_unkwn_24B': 500,
        # NET
        # Per-protocol sample counts (UI spinner, increments of 100).
            # training data (trailing numbers are the available totals)
        'kwn_n_FTP': 0,  # 25067
        'kwn_n_NBNS': 2000,     # 21110
        'kwn_n_NTP': 0,     # 906
        'kwn_n_SMTP': 0,    # 462
        'kwn_n_ICMP': 2000,     # 62818
        'kwn_n_DNS': 2000,  # 16643
        'kwn_n_ARP': 2000,  # 5861
        'kwn_n_Modbus': 2000,   # 12852
        'kwn_n_DNP3': 0,    # 198
            # test data, counts as above
        'unkwn_n_FTP': 500,
        'unkwn_n_NBNS': 500,
        'unkwn_n_NTP': 500,
        'unkwn_n_SMTP': 200,
        'unkwn_n_ICMP': 300,
        'unkwn_n_DNS': 500,
        'unkwn_n_ARP': 0,
        'unkwn_n_Modbus': 100,
        'unkwn_n_DNP3': 100,

        # External data
        'waibu_train_path': r'',     # str, external training CSV; empty -> use the AIS/NET counts above; if AIS_if and NET_if are both 0 and this is empty, no data is loaded
        # D:\cyf\study\idea\TPOSR_port\dataset\外部数据_训练001.csv
        'waibu_test_path': r'',  # e.g. D:\cyf\study\idea\TPOSR_port\dataset\外部数据_测试001.csv

        # Button
        'load_if': 1,   # int, whether to load data (uses all parameters above); no return value, writes the log

        'epochs': 30,  # int, training epochs in (0, 100], adjustable in steps of 5
        'validation_split': 0.2,  # float, validation fraction in (0, 1)
        'model_name': '',  # str, may be empty for training; if supplied it must end with .h5
        'initial_learning_rate': 0.002,  # float, learning rate, 0.001 upward
        # Buttons
        'train_if': 1,  # int, whether to train (uses the four parameters above); writes the log, returns return_if/X_pca/classes, draws one figure
        'test_if': 1   # int, whether to test (shares model_name with training, may be empty or a chosen file); writes the log, returns everything, draws two figures

    }
    # setParameters(parameters1)

    result_dict = main(parameters1)