import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
from tensorflow.keras.utils import to_categorical
from sklearn.utils import shuffle
import os

# Resolve the dataset directory relative to this file so loading works no
# matter what the caller's working directory is.
current_path = os.path.dirname(os.path.abspath(__file__))
original_path = os.getcwd()  # kept for backward compatibility with earlier chdir-based code
data_path = os.path.join(current_path, 'dataset', 'NET')


def _read_payload_csv(rel_name):
    """Read one payload CSV from the dataset directory with every column as str."""
    return pd.read_csv(os.path.join(data_path, rel_name), dtype=str)


# Load with explicit absolute paths instead of os.chdir(): the previous
# chdir / chdir-back pattern left the process cwd changed if any read failed.
# Row counts noted from the original source comments.
data_FTP = _read_payload_csv('FTP_merged_data.csv')  # 25067
data_NBNS = _read_payload_csv('NBNS_merged_data.csv')  # 16170
data_NTP = _read_payload_csv('NTP_merged_data.csv')  # 906
data_SMTP = _read_payload_csv('SMTP_merged_data.csv')  # 464
data_ICMP = _read_payload_csv(os.path.join('icmp', 'icmp_8_payloads.csv'))  # 62818
data_DNS = _read_payload_csv(os.path.join('dns', 'dns_query_payloads.csv'))  # 10148
data_ARP = _read_payload_csv('ARP_merged_data.csv')  # 5861
data_Modbus = _read_payload_csv('Modbus_payloads.csv')
data_DNP3 = _read_payload_csv('DNP3_payloads.csv')

def balance_type(kwn, unkwn, n_train, n_test, openrisk=0):
    """Build balanced train/test splits over known and unknown protocol classes.

    Parameters
    ----------
    kwn : list[str]
        Known class names; each must match a module-level ``data_<name>`` DataFrame.
    unkwn : list[str]
        Unknown class names (these appear only in the test set).
    n_train, n_test : int
        Upper bounds on the train / test sample counts.
    openrisk : float, optional
        Desired fraction of unknown-class samples in the test set.  0 means
        "use equal per-class sampling and report whatever ratio results".

    Returns
    -------
    tuple
        (data_train, data_test, bin_train, bin_test) — the sampled DataFrames
        plus the values of their 'Payload' columns.
    """
    # NOTE(review): the minimum class size is computed over a fixed list of
    # DataFrames rather than over the classes in kwn/unkwn — confirm this is
    # intentional before changing it.
    min_samples_per_class = min(df.shape[0] for df in [data_NBNS, data_ICMP, data_DNS, data_Modbus, data_ARP])

    # n_train / n_test must be attainable with equal per-class sampling.
    assert n_train <= len(kwn) * min_samples_per_class and n_test <= (len(kwn) + len(unkwn)) * min_samples_per_class, \
        "n_train or n_test is too large for equal sampling."

    def class_df(class_label):
        # Look up the module-level DataFrame by name (replaces eval()).
        return globals()[f"data_{class_label}"]

    # Draw the same number of samples from every known class for the train set.
    train_samples = []
    for class_label in kwn:
        class_data = shuffle(class_df(class_label), random_state=1)
        train_samples.append(class_data.iloc[:min_samples_per_class])
    data_train = pd.concat(train_samples, axis=0, ignore_index=True)

    # Cap the train set at n_train samples.
    if data_train.shape[0] > n_train:
        data_train = data_train.sample(n=n_train, random_state=1, ignore_index=True)

    bin_train = data_train['Payload'].values
    print('训练集：\n', data_train['Label'].value_counts())

    if openrisk != 0:
        # Split n_test into known/unknown parts matching the requested open risk.
        n_kwn = int(np.floor((1 - openrisk) * n_test))
        n_unkwn = n_test - n_kwn
        test_samples_kwn = []
        for class_label in kwn:
            # Different seed from the train split so train/test draws differ.
            class_data = shuffle(class_df(class_label), random_state=24)
            test_samples_kwn.append(class_data.iloc[:min_samples_per_class])
        data_test_kwn = pd.concat(test_samples_kwn, axis=0, ignore_index=True)
        test_samples_unkwn = []
        for class_label in unkwn:
            class_data = shuffle(class_df(class_label), random_state=42)
            test_samples_unkwn.append(class_data.iloc[:min_samples_per_class])
        data_test_unkwn = pd.concat(test_samples_unkwn, axis=0, ignore_index=True)
        data_test = pd.concat([data_test_kwn, data_test_unkwn], axis=0, ignore_index=True)

        # Downsample each part so the known:unknown ratio matches openrisk.
        if data_test.shape[0] > n_test:
            data_test_kwn = data_test_kwn.sample(n=n_kwn, random_state=16)
            data_test_unkwn = data_test_unkwn.sample(n=n_unkwn, random_state=18)
            data_test = pd.concat([data_test_kwn, data_test_unkwn], axis=0, ignore_index=True)

        print('openrisk:', openrisk)

    else:
        # Equal per-class sampling over known + unknown classes.
        test_samples = []
        for class_label in kwn + unkwn:
            class_data = shuffle(class_df(class_label), random_state=42)
            test_samples.append(class_data.iloc[:min_samples_per_class])
        data_test = pd.concat(test_samples, axis=0, ignore_index=True)

        # Cap the test set at n_test samples.
        if data_test.shape[0] > n_test:
            data_test = data_test.sample(n=n_test, random_state=42)
        # Report the realized open risk.  Fixed: the column is 'Label',
        # not 'label' (the original raised KeyError here).
        n_unkwn = np.sum([1 for label in data_test['Label'] if label in unkwn])
        openrisk = n_unkwn / n_test

        print('openrisk:', openrisk)

    bin_test = data_test['Payload'].values

    print('测试集：\n', data_test['Label'].value_counts())
    return data_train, data_test, bin_train, bin_test

def to_binary_string(x):
    """Return the binary-digit string of int(x); NaN inputs pass through as NaN."""
    return np.nan if np.isnan(x) else format(int(x), 'b')

def combine_columns_optimized(df, n):
    """Concatenate every run of ``n`` adjacent columns into one string column.

    Trailing columns that do not fill a complete group of ``n`` are dropped.
    Groups that were entirely missing in the input come back as ``pd.NA``;
    partially-missing groups keep a '_' placeholder per missing cell, matching
    the original behavior.

    Parameters
    ----------
    df : pd.DataFrame
        String-valued (or NaN) cells.  Not modified — the original version
        called ``fillna(inplace=True)`` on the argument, mutating the caller's
        DataFrame; this version works on a filled copy instead.
    n : int
        Number of adjacent columns to merge per output column.

    Returns
    -------
    pd.DataFrame
        Columns named 'Column_0', 'Column_1', ..., same index as ``df``.
    """
    # Remember whether any value was missing so we can restore NA markers.
    original_has_na = df.isna().values.any()

    # Fill on a copy; '_' is a sentinel that survives string concatenation.
    filled = df.fillna('_')

    # Only the largest prefix of columns divisible by n is combined.
    num_combined_columns = df.shape[1] // n * n

    combined_columns = np.empty((df.shape[0], num_combined_columns // n), dtype=object)

    for i in range(0, num_combined_columns, n):
        # Row-wise string concatenation of the n-column slice.
        block = filled.iloc[:, i:i + n].values
        combined_columns[:, i // n] = np.apply_along_axis(lambda x: ''.join(x.astype(str)), 1, block)

    new_column_names = [f'Column_{j}' for j in range(combined_columns.shape[1])]
    new_df = pd.DataFrame(combined_columns, columns=new_column_names, index=df.index)

    # A group that is all-sentinel was fully missing — restore it to NA.
    if original_has_na:
        new_df = new_df.replace('_' * n, pd.NA)

    return new_df

def convert_to_decimal(x):
    """Parse a binary string as a base-2 integer; NaN inputs pass through as NaN."""
    return np.nan if pd.isna(x) else int(x, base=2)

def hex_string_to_binary(hex_string):
    """Expand up to the first 64 hex characters into a bit string (4 bits each).

    Characters outside 0-9/a-f/A-F raise KeyError, exactly like the original
    lookup-table loop.
    """
    hex_to_dec = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5,
        '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11,
        'c': 12, 'd': 13, 'e': 14, 'f': 15, 'A': 10, 'B': 11,
        'C': 12, 'D': 13, 'E': 14, 'F': 15
    }
    # format(..., '04b') zero-pads every nibble to exactly 4 bits.
    return ''.join(format(hex_to_dec[c], '04b') for c in hex_string[:64])

def load_NET_data(n_train, n_test, kwn, unkwn, openrisk=0, num=0):
    """Build balanced train/test packet dicts with payload-derived 16x16 images.

    Parameters
    ----------
    n_train, n_test : int
        Size bounds forwarded to ``balance_type``.
    kwn, unkwn : list[str]
        Known / unknown protocol class names (fix the label-encoding order).
    openrisk : float, optional
        Forwarded to ``balance_type``.
    num : int, optional
        Payload bits are merged in groups of ``2 ** num`` before imaging.

    Returns
    -------
    tuple
        (packet_train, packet_test) dicts with keys 'bin' (int32 matrix),
        'length' (pre-padding length per packet), 'label' (raw labels),
        'type' (integer class codes) and 'img' (16x16x1 uint8 arrays).
    """
    data_train, data_test, bin_train, bin_test = balance_type(kwn, unkwn, n_train, n_test, openrisk)

    # Hex payloads -> bit strings (spaces removed, truncated to 64 hex chars).
    binary_train = [hex_string_to_binary(s.replace(" ", "")) for s in bin_train]
    data_train = pd.concat([data_train, pd.DataFrame(binary_train, columns=['data'])], axis=1)
    binary_test = [hex_string_to_binary(s.replace(" ", "")) for s in bin_test]
    data_test = pd.concat([data_test, pd.DataFrame(binary_test, columns=['data'])], axis=1)

    # One column per bit; merge in groups of 2**num bits, then back to decimal.
    data_train['list'] = data_train['data'].apply(lambda x: [int(i) for i in x])
    bin_train = pd.DataFrame(data_train['list'].tolist()).applymap(to_binary_string)
    data_test['list'] = data_test['data'].apply(lambda x: [int(i) for i in x])
    bin_test = pd.DataFrame(data_test['list'].tolist()).applymap(to_binary_string)

    bin_train = combine_columns_optimized(bin_train, 2 ** num).applymap(convert_to_decimal)
    bin_test = combine_columns_optimized(bin_test, 2 ** num).applymap(convert_to_decimal)

    packet_train = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }
    packet_test = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }
    # True (pre-padding) lengths must be recorded before NaNs are filled.
    packet_train['length'] = bin_train.apply(lambda row: len(row.dropna()), axis=1)
    packet_train['label'] = data_train['Label']
    packet_test['length'] = bin_test.apply(lambda row: len(row.dropna()), axis=1)
    packet_test['label'] = data_test['Label']
    # Fill padding NaNs with 0, then cast once.  (The original also cast to
    # int32 *before* fillna, which raises ValueError on NaN-padded rows.)
    bin_train = bin_train.fillna(0)
    bin_test = bin_test.fillna(0)
    packet_train['bin'] = np.array(bin_train, dtype=np.int32)
    packet_test['bin'] = np.array(bin_test, dtype=np.int32)

    # First 256 values of each packet -> one 16x16x1 grayscale image.
    size = 16
    packet_train['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_train['bin']
    ]
    packet_test['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_test['bin']
    ]

    # Encode class labels by their position in kwn + unkwn.
    custom_mapping = {str(label): k for k, label in enumerate(kwn + unkwn)}
    combined_label = pd.DataFrame(
        np.concatenate((packet_train['label'].values, packet_test['label'].values), axis=0)).astype(str)
    combined_labels_mapped = combined_label.replace(custom_mapping)
    combined_labels = pd.DataFrame(to_categorical(combined_labels_mapped))

    # Train one-hot keeps only the known-class columns; test keeps all classes.
    n_train_rows = len(packet_train['length'])
    y_train_onehot = np.array(combined_labels.iloc[:n_train_rows, :len(kwn)])
    y_test_onehot = np.array(combined_labels.iloc[n_train_rows:, :])
    packet_train['type'] = y_train_onehot.argmax(axis=-1)
    packet_test['type'] = y_test_onehot.argmax(axis=-1)

    return packet_train, packet_test

def load_NET_data_1(kwn_n_FTP, kwn_n_NBNS, kwn_n_NTP, kwn_n_SMTP, kwn_n_ICMP, kwn_n_DNS, kwn_n_ARP, kwn_n_Modbus,
                    kwn_n_DNP3, unkwn_n_FTP, unkwn_n_NBNS, unkwn_n_NTP, unkwn_n_SMTP, unkwn_n_ICMP, unkwn_n_DNS,
                    unkwn_n_ARP, unkwn_n_Modbus, unkwn_n_DNP3,
                    kwn, unkwn, num=0):
    """Build train/test packet dicts from explicit per-protocol sample counts.

    ``kwn_n_*`` give the number of rows drawn from each protocol DataFrame for
    the train set, ``unkwn_n_*`` the counts for the test set (sampling is
    unseeded, so results vary between calls).  ``kwn``/``unkwn`` fix the
    label-encoding order; ``num`` sets the bit-merge granularity (groups of
    ``2 ** num`` payload bits).

    Returns (packet_train, packet_test) dicts with keys 'bin' (int32 matrix),
    'length' (pre-padding length), 'label', 'type' (integer codes) and
    'img' (16x16x1 uint8 arrays).
    """
    data_train = pd.concat([data_FTP.sample(kwn_n_FTP),
                            data_NBNS.sample(kwn_n_NBNS),
                            data_NTP.sample(kwn_n_NTP),
                            data_SMTP.sample(kwn_n_SMTP),
                            data_ICMP.sample(kwn_n_ICMP),
                            data_DNS.sample(kwn_n_DNS),
                            data_ARP.sample(kwn_n_ARP),
                            data_Modbus.sample(kwn_n_Modbus),
                            data_DNP3.sample(kwn_n_DNP3)
                            ], axis=0, ignore_index=True)

    data_test = pd.concat([data_FTP.sample(unkwn_n_FTP),
                           data_NBNS.sample(unkwn_n_NBNS),
                           data_NTP.sample(unkwn_n_NTP),
                           data_SMTP.sample(unkwn_n_SMTP),
                           data_ICMP.sample(unkwn_n_ICMP),
                           data_DNS.sample(unkwn_n_DNS),
                           data_ARP.sample(unkwn_n_ARP),
                           data_Modbus.sample(unkwn_n_Modbus),
                           data_DNP3.sample(unkwn_n_DNP3)
                           ], axis=0, ignore_index=True)

    # Hex payloads -> bit strings (spaces removed, truncated to 64 hex chars).
    binary_train = [hex_string_to_binary(s.replace(" ", "")) for s in data_train['Payload'].values]
    data_train = pd.concat([data_train, pd.DataFrame(binary_train, columns=['data'])], axis=1)
    binary_test = [hex_string_to_binary(s.replace(" ", "")) for s in data_test['Payload'].values]
    data_test = pd.concat([data_test, pd.DataFrame(binary_test, columns=['data'])], axis=1)

    # One column per bit; merge in groups of 2**num bits, then back to decimal.
    data_train['list'] = data_train['data'].apply(lambda x: [int(i) for i in x])
    bin_train = pd.DataFrame(data_train['list'].tolist()).applymap(to_binary_string)
    data_test['list'] = data_test['data'].apply(lambda x: [int(i) for i in x])
    bin_test = pd.DataFrame(data_test['list'].tolist()).applymap(to_binary_string)

    bin_train = combine_columns_optimized(bin_train, 2 ** num).applymap(convert_to_decimal)
    bin_test = combine_columns_optimized(bin_test, 2 ** num).applymap(convert_to_decimal)

    packet_train = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }
    packet_test = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }
    # True (pre-padding) lengths must be recorded before NaNs are filled.
    packet_train['length'] = bin_train.apply(lambda row: len(row.dropna()), axis=1)
    packet_train['label'] = data_train['Label']
    packet_test['length'] = bin_test.apply(lambda row: len(row.dropna()), axis=1)
    packet_test['label'] = data_test['Label']
    # Fill padding NaNs with 0, then cast once.  (The original also cast to
    # int32 *before* fillna, which raises ValueError on NaN-padded rows.)
    bin_train = bin_train.fillna(0)
    bin_test = bin_test.fillna(0)
    packet_train['bin'] = np.array(bin_train, dtype=np.int32)
    packet_test['bin'] = np.array(bin_test, dtype=np.int32)

    # First 256 values of each packet -> one 16x16x1 grayscale image.
    size = 16
    packet_train['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_train['bin']
    ]
    packet_test['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_test['bin']
    ]

    # Encode class labels by their position in kwn + unkwn.
    custom_mapping = {str(label): k for k, label in enumerate(kwn + unkwn)}
    combined_label = pd.DataFrame(
        np.concatenate((packet_train['label'].values, packet_test['label'].values), axis=0)).astype(str)
    combined_labels_mapped = combined_label.replace(custom_mapping)
    combined_labels = pd.DataFrame(to_categorical(combined_labels_mapped))

    # Train one-hot keeps only the known-class columns; test keeps all classes.
    n_train_rows = len(packet_train['length'])
    y_train_onehot = np.array(combined_labels.iloc[:n_train_rows, :len(kwn)])
    y_test_onehot = np.array(combined_labels.iloc[n_train_rows:, :])
    packet_train['type'] = y_train_onehot.argmax(axis=-1)
    packet_test['type'] = y_test_onehot.argmax(axis=-1)

    return packet_train, packet_test

def load_NET_train(kwn_n_FTP, kwn_n_NBNS, kwn_n_NTP, kwn_n_SMTP, kwn_n_ICMP, kwn_n_DNS, kwn_n_ARP, kwn_n_Modbus,
                    kwn_n_DNP3,
                    kwn, num=0):
    """Build only the training packet dict from per-protocol sample counts.

    ``kwn_n_*`` give the number of rows drawn from each protocol DataFrame
    (unseeded sampling); ``kwn`` fixes the label-encoding order; ``num`` sets
    the bit-merge granularity (groups of ``2 ** num`` payload bits).

    Returns a packet_train dict with keys 'bin' (int32 matrix), 'length'
    (pre-padding length), 'label', 'type' (integer codes) and 'img'
    (16x16x1 uint8 arrays).
    """
    data_train = pd.concat([data_FTP.sample(kwn_n_FTP),
                            data_NBNS.sample(kwn_n_NBNS),
                            data_NTP.sample(kwn_n_NTP),
                            data_SMTP.sample(kwn_n_SMTP),
                            data_ICMP.sample(kwn_n_ICMP),
                            data_DNS.sample(kwn_n_DNS),
                            data_ARP.sample(kwn_n_ARP),
                            data_Modbus.sample(kwn_n_Modbus),
                            data_DNP3.sample(kwn_n_DNP3)
                            ], axis=0, ignore_index=True)

    # Hex payloads -> bit strings (spaces removed, truncated to 64 hex chars).
    binary_train = [hex_string_to_binary(s.replace(" ", "")) for s in data_train['Payload'].values]
    data_train = pd.concat([data_train, pd.DataFrame(binary_train, columns=['data'])], axis=1)

    # One column per bit; merge in groups of 2**num bits, then back to decimal.
    data_train['list'] = data_train['data'].apply(lambda x: [int(i) for i in x])
    bin_train = pd.DataFrame(data_train['list'].tolist()).applymap(to_binary_string)
    bin_train = combine_columns_optimized(bin_train, 2 ** num).applymap(convert_to_decimal)

    packet_train = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }
    # True (pre-padding) lengths must be recorded before NaNs are filled.
    packet_train['length'] = bin_train.apply(lambda row: len(row.dropna()), axis=1)
    packet_train['label'] = data_train['Label']

    bin_train = bin_train.fillna(0)
    packet_train['bin'] = np.array(bin_train, dtype=np.int32)

    # First 256 values of each packet -> one 16x16x1 grayscale image.
    size = 16
    packet_train['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_train['bin']
    ]

    # Encode class labels by their position in kwn.
    custom_mapping = {str(label): k for k, label in enumerate(kwn)}
    train_label = pd.DataFrame(packet_train['label'].values).astype(str)
    train_labels_mapped = train_label.replace(custom_mapping)
    train_labels = pd.DataFrame(to_categorical(train_labels_mapped))

    y_train_onehot = np.array(train_labels)
    packet_train['type'] = y_train_onehot.argmax(axis=-1)

    return packet_train

def load_NET_test(unkwn_n_FTP, unkwn_n_NBNS, unkwn_n_NTP, unkwn_n_SMTP, unkwn_n_ICMP, unkwn_n_DNS,
                    unkwn_n_ARP, unkwn_n_Modbus, unkwn_n_DNP3,
                    kwn, unkwn, num=0):
    """Build only the test packet dict from per-protocol sample counts.

    ``unkwn_n_*`` give the number of rows drawn from each protocol DataFrame
    (unseeded sampling); ``kwn + unkwn`` fixes the label-encoding order;
    ``num`` sets the bit-merge granularity (groups of ``2 ** num`` bits).
    Returns a dict with keys 'bin', 'length', 'label', 'type' and 'img'.
    """
    sampled = [
        data_FTP.sample(unkwn_n_FTP),
        data_NBNS.sample(unkwn_n_NBNS),
        data_NTP.sample(unkwn_n_NTP),
        data_SMTP.sample(unkwn_n_SMTP),
        data_ICMP.sample(unkwn_n_ICMP),
        data_DNS.sample(unkwn_n_DNS),
        data_ARP.sample(unkwn_n_ARP),
        data_Modbus.sample(unkwn_n_Modbus),
        data_DNP3.sample(unkwn_n_DNP3),
    ]
    data_test = pd.concat(sampled, axis=0, ignore_index=True)

    # Hex payloads (spaces removed) -> bit strings, stored in a 'data' column.
    bit_strings = [hex_string_to_binary(p.replace(" ", "")) for p in data_test['Payload'].values]
    data_test = pd.concat([data_test, pd.DataFrame(bit_strings, columns=['data'])], axis=1)

    # One column per bit; merge in groups of 2**num bits, then back to decimal.
    data_test['list'] = data_test['data'].apply(lambda x: [int(i) for i in x])
    bit_frame = pd.DataFrame(data_test['list'].tolist()).applymap(to_binary_string)
    bin_test = combine_columns_optimized(bit_frame, 2 ** num).applymap(convert_to_decimal)

    packet_test = {
        'bin': [],
        'extractedfeature': [],
        'length': [],
        'label': [],
        'type': [],
        'img': [],
        'img_3': []
    }

    # Pre-padding length is taken before the NaN fill below.
    packet_test['length'] = bin_test.apply(lambda row: len(row.dropna()), axis=1)
    packet_test['label'] = data_test['Label']
    bin_test = bin_test.fillna(int(0))
    packet_test['bin'] = np.array(bin_test, dtype=np.int32)

    # First 256 values of each row become a 16x16x1 uint8 image.
    size = 16
    packet_test['img'] = [
        cv2.resize(row[:size * size].astype('uint8'), (1, size * size)).reshape(size, size, 1)
        for row in packet_test['bin']
    ]

    # Integer-encode labels by their position in kwn + unkwn.
    custom_mapping = {str(lbl): idx for idx, lbl in enumerate(kwn + unkwn)}
    label_frame = pd.DataFrame(packet_test['label'].values).astype(str)
    onehot_frame = pd.DataFrame(to_categorical(label_frame.replace(custom_mapping)))
    packet_test['type'] = np.array(onehot_frame).argmax(axis=-1)

    return packet_test

