#20240822 将之前训练和测试合并在一个代码中，使用maccdc2012数据
#参数这样时性能良好

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from sklearn.cluster import KMeans,DBSCAN
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
import collections
import os
from collections import Counter
from sklearn.metrics.cluster import v_measure_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import calinski_harabasz_score
from sklearn.preprocessing import MinMaxScaler
import mplcursors
from sklearn.metrics import adjusted_rand_score
from collections import defaultdict
import math
import random
import io  
import base64

import logging
import time
from datetime import datetime  

# Configure logging: DEBUG level, timestamped entries appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')
'''
# 使用不同的日志级别  
logging.debug('这是一个debug级别的日志信息')  
logging.info('这是一个info级别的日志信息')  
logging.warning('这是一个warning级别的日志信息')  
logging.error('这是一个error级别的日志信息')  
logging.critical('这是一个critical级别的日志信息')
'''


# Autoencoder used to embed payload byte vectors into a low-dimensional code.
class SimpleAutoencoder(nn.Module):
    """Two-layer fully connected autoencoder.

    Args:
        input_dim: dimensionality of the input vectors.
        z_dim: width of the intermediate hidden layer.
        encoding_dim: width of the bottleneck (latent) layer.
        dropout_prob: accepted for interface compatibility; currently unused.
    """

    def __init__(self, input_dim, z_dim, encoding_dim, dropout_prob=0.5):
        super(SimpleAutoencoder, self).__init__()
        # Encoder: input -> hidden -> latent, ReLU activations throughout.
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, z_dim),
            nn.ReLU(True),
            nn.Linear(z_dim, encoding_dim),
            nn.ReLU(True),
        )
        # Decoder mirrors the encoder; Sigmoid keeps reconstructions in [0, 1],
        # matching the MinMax-scaled inputs.
        self.decoder = nn.Sequential(
            nn.Linear(encoding_dim, z_dim),
            nn.ReLU(True),
            nn.Linear(z_dim, input_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return (reconstruction, latent_code) for the input batch ``x``."""
        code = self.encoder(x)
        recon = self.decoder(code)
        return recon, code


# Thin Dataset wrapper serving rows of a tensor as float32 sequences.
class OneDimSequenceDataset(Dataset):
    """Wraps a tensor so a DataLoader can iterate over its first dimension.

    Args:
        data_tensor: tensor of samples; converted to float32 once up front.
        transform: accepted for interface compatibility; currently unused.
    """

    def __init__(self, data_tensor, transform=None):
        # Convert once here so __getitem__ stays a plain index lookup.
        self.data = data_tensor.type(torch.float32)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        return self.data[idx]


# Custom loss: MSE where each feature column carries a fixed weight.
class SimplifiedWeightedMSELoss(nn.Module):
    """Feature-weighted mean squared error.

    Args:
        weights: 1-D tensor of per-feature weights, broadcast over the batch.
    """

    def __init__(self, weights):
        super(SimplifiedWeightedMSELoss, self).__init__()
        # Shape (1, features) so broadcasting applies the weights column-wise.
        self.weights = weights.unsqueeze(0)

    def forward(self, y_pred, y_true):
        weighted_sq = self.weights * (y_true - y_pred) ** 2
        per_sample = torch.mean(weighted_sq, dim=1)
        return torch.mean(per_sample)

# Custom loss: MSE with a per-element weight mask so the model can ignore
# non-frequent positions and focus on frequent items.
class MSELoss(nn.Module):
    """Element-wise weighted MSE; a weight of 0 drops a position from the loss."""

    def __init__(self):
        super(MSELoss, self).__init__()

    def forward(self, y_pred, y_true, weight):
        masked_sq = weight * (y_true - y_pred) ** 2
        per_sample = torch.mean(masked_sq, dim=1)
        return torch.mean(per_sample)


def calculate_xie_beni_index(data, labels):
    """
    Compute the Xie-Beni cluster-validity index (lower is better).

    :param data: dataset of shape (n_samples, n_features)
    :param labels: cluster labels of shape (n_samples,)
    :return: Xie-Beni index value
    """
    unique_labels = np.unique(labels)
    n_clusters = len(unique_labels)
    centroids = np.array([data[labels == label].mean(axis=0) for label in unique_labels])

    # Sum of squared distances from each point to its own cluster centroid.
    # BUG FIX: the original indexed `centroids[label]`, which is only correct
    # when labels are exactly 0..k-1. DBSCAN labels can include -1 or be
    # non-contiguous, which silently picked the wrong centroid (or raised
    # IndexError). Pair each label with its centroid by position instead.
    cluster_inertia = 0
    for centroid, label in zip(centroids, unique_labels):
        cluster_data = data[labels == label]
        cluster_inertia += np.sum(np.linalg.norm(cluster_data - centroid, axis=1) ** 2)

    # Smallest distance between any two distinct centroids.
    inter_cluster_distances = np.linalg.norm(centroids[:, np.newaxis] - centroids[np.newaxis, :], axis=-1)
    np.fill_diagonal(inter_cluster_distances, np.inf)  # ignore self-distances
    min_inter_cluster_distance = np.min(inter_cluster_distances)

    # Xie-Beni: within-cluster scatter normalized by cluster separation.
    return cluster_inertia / (n_clusters * min_inter_cluster_distance ** 2)

# Select DBSCAN parameters by grid search and cluster the data with the best pair.
def dbscan_cluster(data):
    """Standardize `data`, grid-search (eps, min_samples) for DBSCAN using the
    Xie-Beni index, and return the labels from the best parameterization.

    :param data: array of shape (n_samples, n_features)
    :return: cluster labels (may contain -1 for noise points)
    """
    # Standardize features so a single eps is comparable across dimensions.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(data)

    # Candidate eps values, and min_samples proportional to the dataset size.
    eps_range = np.arange(0.02, 0.2, 0.02)
    start = np.ceil(len(data) * 0.005).astype(int)
    end = np.ceil(len(data) * 0.02).astype(int)
    min_samples_range = np.arange(start, end, 50)

    # Fallback defaults used if no parameter pair yields more than one cluster.
    # BUG FIX: the fallback min_samples was a float (len(data) * 0.01), which
    # sklearn's parameter validation rejects; use a positive int.
    best_eps = 0.1
    best_min_samples = max(int(len(data) * 0.01), 1)
    # BUG FIX: Xie-Beni is a "lower is better" index, but the original kept the
    # parameters with the LARGEST score. Minimize it instead.
    best_metrics = np.inf

    for eps in eps_range:
        for min_samples in min_samples_range:
            db = DBSCAN(eps=eps, min_samples=int(min_samples))
            labels = db.fit_predict(X_scaled)

            # The index is only defined when more than one cluster exists.
            if len(set(labels)) > 1:
                score = calculate_xie_beni_index(X_scaled, labels)
                if score < best_metrics:
                    best_eps = eps
                    best_min_samples = int(min_samples)
                    best_metrics = score

    # Re-cluster with the best parameters found.
    db_optimized = DBSCAN(eps=best_eps, min_samples=best_min_samples)
    return db_optimized.fit_predict(X_scaled)


# Inspect the composition of true labels inside each predicted cluster.
def seek_true_label(pre_label, true_label):
    """For every predicted cluster, compute the frequency of each true label.

    The statistics were previously printed (see commented lines); the
    computation is kept side-effect free and nothing is returned.
    """
    for cluster_id in set(pre_label):
        members = np.where(pre_label == cluster_id)[0]
        cluster_truth = true_label[members]
        # Unique true labels in this cluster and how often each occurs.
        unique_categories, counts = np.unique(cluster_truth, return_counts=True)
        # Proportion of the cluster taken by each true label.
        category_frequencies = counts / len(cluster_truth)
        #print("第{}簇中各种数据的总数为{}，其的比例为".format(i,len((x))))
        #print("Categories:", unique_categories)
        #print("Frequencies:", category_frequencies)


# Clustering purity: fraction of samples matching their cluster's majority label.
def calculate_purity(pre_label, true_label):
    """Return the purity of the clustering, in [0, 1]."""
    total = len(pre_label)
    purity = 0
    for cluster_id in set(pre_label):
        members = np.where(pre_label == cluster_id)[0]
        # Count of the most frequent true label inside this cluster.
        _, majority_count = collections.Counter(true_label[members]).most_common(1)[0]
        purity = purity + majority_count / total
    return purity

# Weighted macro F1 of a clustering against ground-truth labels.
def calculate_macro_f1(pre_label, true_label):
    """For each true class, take the best F1 over all clusters intersecting it,
    weight that F1 by the class size, and average over all samples.

    :param pre_label: array of predicted cluster ids
    :param true_label: array of ground-truth labels
    :return: weighted macro F1 in [0, 1]
    """
    cluster_sizes = collections.Counter(pre_label)
    total = 0.0
    for label in set(true_label):
        class_indices = np.where(true_label == label)[0]
        clusters_of_class = pre_label[class_indices]
        nt = len(clusters_of_class)  # size of this true class
        # F1 of this class against every cluster it intersects.
        # (renamed from `list`, which shadowed the builtin)
        f_scores = []
        for cluster_id in set(clusters_of_class):
            nc = cluster_sizes[cluster_id]                  # cluster size
            ntc = np.sum(clusters_of_class == cluster_id)   # overlap size
            recall = ntc / nt
            precision = ntc / nc
            f_scores.append(2 * recall * precision / (recall + precision))
        # Weight the best-matching cluster's F1 by the class size.
        total += max(f_scores) * nt
    return total / len(pre_label)


def euclidean_distance(p1, p2):
    """Return the Euclidean distance between two points of any dimension.

    BUG FIX: the original used only the first two coordinates, silently
    truncating the latent vectors (dimension > 2) compared in main_1 when
    assigning DBSCAN noise points to their nearest cluster center.
    """
    return math.sqrt(sum((b - a) ** 2 for a, b in zip(p1, p2)))


def map_clusters_to_labels(true_labels, cluster_labels):
    """Map each cluster id to the majority true label among its members and
    return the cluster labels rewritten through that mapping.

    :param true_labels: array of ground-truth labels
    :param cluster_labels: array of predicted cluster ids
    :return: array of true-label values, one per sample
    """
    # Majority true label for every distinct cluster id.
    majority_label = {
        cluster_id: Counter(true_labels[cluster_labels == cluster_id]).most_common(1)[0][0]
        for cluster_id in np.unique(cluster_labels)
    }
    # Rewrite every cluster id as its cluster's majority true label.
    return np.array([majority_label[cid] for cid in cluster_labels])

def calculate_class_f1(true_labels, predicted_labels):
    """Compute a per-class F1 score and return it as {label: f1}.

    :param true_labels: array of ground-truth labels
    :param predicted_labels: array of predicted labels (same length)
    :return: dict mapping each true class label to its F1 score
    """
    class_f1_scores = {}
    for label in np.unique(true_labels):
        true_mask = true_labels == label
        pred_mask = predicted_labels == label
        n_true = np.sum(true_mask)
        n_pred = np.sum(pred_mask)
        # Recall: fraction of this class's samples predicted as this class.
        recall = np.sum(predicted_labels[true_mask] == label) / n_true if n_true > 0 else 0
        # Precision: fraction of predictions for this class that are correct.
        precision = np.sum(true_labels[pred_mask] == label) / n_pred if n_pred > 0 else 0
        denom = precision + recall
        class_f1_scores[label] = 2 * precision * recall / denom if denom > 0 else 0
    return class_f1_scores

def calculate_class_accuracy(true_labels, predicted_labels):
    """Return per-class accuracy (recall within each class) as {label: acc}.

    :param true_labels: array of ground-truth labels
    :param predicted_labels: array of predicted labels (same length)
    :return: dict mapping each true class label to its accuracy
    """
    class_accuracies = {}
    for label in np.unique(true_labels):
        # Predictions for the samples whose true label is `label`.
        preds = predicted_labels[true_labels == label]
        # Fraction of this class's samples that were predicted correctly.
        class_accuracies[label] = np.sum(preds == label) / len(preds) if len(preds) > 0 else 0
    return class_accuracies



# Per-class purity: share of a predicted class held by its dominant true label.
def calculate_per_class_purity(true_labels, predicted_labels):
    """Return {predicted_label: purity}, where purity is the fraction of the
    samples predicted as that label whose true label is the most common one.

    NOTE: despite the parameter names, the grouping is done over
    `predicted_labels` (preserved from the original implementation).
    """
    purity_dict = {}
    for pred_label in np.unique(predicted_labels):
        # True labels of all samples predicted as `pred_label`.
        group_truth = true_labels[predicted_labels == pred_label]
        # Share taken by the dominant true label inside this group.
        _, dominant_count = Counter(group_truth).most_common(1)[0]
        purity_dict[pred_label] = dominant_count / len(group_truth)
    return purity_dict

# Draw a grouped bar chart sharing one y axis: purity (left) and accuracy (right).
def plot_dual_bars(f1_scores, accuracies, title='Purity Scores and Accuracy by Class'):
    """Render per-class purity and accuracy as grouped bars.

    :param f1_scores: dict mapping class label -> purity score
    :param accuracies: dict mapping class label -> accuracy; assumed to
        contain every key of f1_scores — TODO confirm with callers
    :param title: chart title
    :return: base64-encoded PNG of the rendered figure
    """
    # Flatten both dicts into parallel lists, in f1_scores key order.
    labels = list(f1_scores.keys())
    f1_values = [f1_scores[label] for label in labels]
    accuracy_values = [accuracies[label] for label in labels]

    # Bar positions and width.
    x = np.arange(len(labels))  # index position of each class
    width = 0.35  # bar width

    # Create the figure and axis.
    fig, ax = plt.subplots()

    # Purity bars (left bar of each pair).
    rects1 = ax.bar(x - width/2, f1_values, width, label='Purity', color='black')
    # Accuracy bars (right bar of each pair).
    rects2 = ax.bar(x + width/2, accuracy_values, width, label='Accuracy', color='white', edgecolor = 'black')

    # Axis labels, title, ticks and legend.
    ax.set_ylabel('Score')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    # Rotate x labels to avoid overlap.
    plt.xticks(rotation=45)

    # Fix the y range; 1.2 leaves headroom for the value annotations.
    ax.set_ylim(0, 1.2)

    # Annotate each bar with its height, rounded to two decimals.
    def autolabel(rects, ax):
        """Attach a text label above each bar in *rects*, displaying its height with two decimal places."""
        for rect in rects:
            height = rect.get_height()
            ax.annotate(f'{height:.2f}',  # two decimal places
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom', fontsize=8)

    autolabel(rects1, ax)
    autolabel(rects2, ax)

    # Finalize layout (interactive display intentionally disabled).
    plt.tight_layout()
    #plt.ion()  # enable interactive mode
    #plt.show()
    #plt.ioff()  # disable interactive mode so later code is unaffected

    # Save the figure into an in-memory byte stream.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the byte stream as Base64 so it can be returned in a dict.
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    plt.close()
    return image_base64

# Average the values of several dicts, key by key.
def average_dicts(dict_list):
    """Return {key: mean value across dict_list}.

    Keys absent from the first dict are averaged only over the dicts that
    contain them.

    :param dict_list: non-empty list of dicts with (mostly) shared keys
    :return: dict mapping each key to its average value
    """
    # Running totals and occurrence counts, seeded from the first dict's keys.
    sums = dict.fromkeys(dict_list[0], 0)
    counts = dict.fromkeys(dict_list[0], 0)

    for current in dict_list:
        for key, value in current.items():
            if key not in sums:
                sums[key] = 0
                counts[key] = 0
            sums[key] += value
            counts[key] += 1

    return {key: sums[key] / counts[key] for key in sums}


def update_zeros_with_min_value(dictionary):
    """
    Replace every zero value in `dictionary` with the smallest non-zero value.

    :param dictionary: input dict; mutated in place
    :return: the same dict, after mutation
    :raises ValueError: if every value is zero
    """
    non_zero_values = [value for value in dictionary.values() if value != 0]

    # If everything is zero there is no non-zero minimum to substitute.
    if not non_zero_values:
        raise ValueError("字典中所有值都为零，无法找到非零的最小值。")

    floor = min(non_zero_values)

    # Overwrite the zero entries with that minimum.
    for key in dictionary:
        if dictionary[key] == 0:
            dictionary[key] = floor

    return dictionary

# Compute a weighted aggregate of the dict's values and store it under "average".
def add_average_to_dict(data_dict, weight):
    """Sum data_dict[k] * weight[k] over the keys shared with `weight`,
    store the result in data_dict under "average", and return both.

    :param data_dict: dict of values; mutated in place
    :param weight: dict of weights keyed like data_dict
    :return: (data_dict, weighted_sum)
    """
    result = sum(value * weight[key] for key, value in data_dict.items() if key in weight)
    data_dict["average"] = result
    return data_dict, result

# Weighted mean of overall F1 / V-measure values across data proportions.
def calculate_average(data, weight):
    """Return the element-wise dot product of `data` and `weight`."""
    total = 0
    for value, w in zip(data, weight):
        total += value * w
    return total

    # Create a pie chart.
def draw_pie_chart(data_dict, title):
    """
    Render `data_dict` as a pie chart and return it as a base64-encoded PNG.

    :param data_dict: mapping of slice label -> numeric size
    :param title: chart title
    :return: base64-encoded PNG image of the chart
    """
    # Extract labels and sizes from the dict.
    labels = list(data_dict.keys())
    sizes = list(data_dict.values())

    plt.figure(figsize=(8, 6))  # figure size
    # Draw the pie with percentage annotations.
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=140)
    # Set the title.
    plt.title(title)

    #plt.ion()  # enable interactive mode
    # Show the chart.
    #plt.show()
    #plt.ioff()  # disable interactive mode so later code is unaffected

    # Save the figure into an in-memory byte stream.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the byte stream as Base64 so it can be returned in a dict.
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    plt.close()
    return image_base64


def calculate_ari(true_labels_str, cluster_labels):
    """
    Compute the Adjusted Rand Index (ARI) between string-valued true labels
    and integer cluster labels.

    :param true_labels_str: list of ground-truth labels (strings)
    :param cluster_labels: list of predicted cluster ids (ints)
    :return: ARI value (float)
    """
    # Encode each distinct string label as an integer, in first-seen order.
    label_map = defaultdict(int)
    true_labels_int = []
    for label in true_labels_str:
        if label not in label_map:
            label_map[label] = len(label_map)
        true_labels_int.append(label_map[label])

    # ARI is invariant to the particular integer encoding chosen above.
    return adjusted_rand_score(true_labels_int, cluster_labels)

def binary_strings_to_integers(binary_strings):
    """Convert a bit string to a list of byte values (one int per 8 bits).

    :param binary_strings: string of '0'/'1' characters
    :return: list of ints in [0, 255], one per 8-bit group

    BUG FIX: when the length was not a multiple of 8 the original appended a
    single '0', which still left a short final chunk (interpreted as fewer
    bits). Now the string is zero-padded on the right up to the next multiple
    of 8, as the original comment intended. (Callers in this file pass
    256-bit strings, for which behavior is unchanged.)
    """
    # Right-pad with zeros so the length becomes a multiple of 8.
    remainder = len(binary_strings) % 8
    if remainder != 0:
        binary_strings = binary_strings + '0' * (8 - remainder)
    # Split into 8-bit groups and convert each group to an integer.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]


# Normalize a string to a fixed length: truncate or right-pad with '0'.
def pad_or_truncate(s, length=256):
    """Return `s` cut to `length` characters, or right-padded with '0'."""
    if len(s) >= length:
        return s[:length]
    return s.ljust(length, '0')


''''功能一，加载数据并完成聚类'''
def main_1(paramters):
    """Feature 1: load payload data, train an autoencoder on frequent-item
    weighted byte features, cluster the latent codes with DBSCAN, and evaluate
    the clustering against the ground-truth labels.

    :param paramters: parameter dict (see __main__ for the expected keys);
        the spelling `paramters` is kept as-is for interface compatibility.
    :return: dict with base64-encoded chart images, averaged accuracy/purity,
        mean per-loop runtime, the mapped-label set, the cluster count, and
        the annotated dataset as a 2-D ndarray (header row first, object dtype).
    """
    # Extract the input parameters.
    data_path = paramters["data_path"]
    icmp_num = paramters["icmp_num"]
    nbns_num = paramters["nbns_num"]
    arp_num = paramters["arp_num"]
    smb_num = paramters["smb_num"]
    http_404_num = paramters["http_404_num"]
    http_GET_num = paramters["http_GET_num"]
    ais_4_num = paramters["ais_4_num"]
    ais_5_num = paramters["ais_5_num"]
    ais_18_num = paramters["ais_18_num"]
    ais_24A_num = paramters["ais_24A_num"]
    out_path = paramters["out_path"]
    loop_num = paramters["loop_num"]

    # Model / training hyperparameters.
    num_epochs = 6
    inputs_dimension = 32
    middle_dimension = int(inputs_dimension/2)
    outputs_dimension = int(inputs_dimension/4)
    byte_num = 32
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    random_state = random.randint(0, 99)


    class_accuracies_list =[]
    class_purity_list = []



    if out_path == None or out_path == '' :
        # No preprocessed file given: sample each protocol CSV and keep the
        # label column plus the first byte_num payload columns.

        df_1 = pd.read_csv(os.path.join(data_path, r'icmp3_payloads.csv'), dtype=str).sample(
            icmp_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]  # dataset name.
        df_2 = pd.read_csv(os.path.join(data_path, r'nbns_payloads.csv'), dtype=str).sample(
            nbns_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_3 = pd.read_csv(os.path.join(data_path, r'arp_replyt_payloads.csv'), dtype=str).sample(
            arp_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_4 = pd.read_csv(os.path.join(data_path, r'smb_success_payloads.csv'), dtype=str).sample(
            smb_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_5 = pd.read_csv(os.path.join(data_path, r'http_404_payloads.csv'), dtype=str).sample(
            http_404_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_6 = pd.read_csv(os.path.join(data_path, r'http_GET_payloads.csv'), dtype=str).sample(
            http_GET_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_7 = pd.read_csv(os.path.join(data_path, r'ais_24A.csv'), dtype=str).sample(
            ais_24A_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_8 = pd.read_csv(os.path.join(data_path, r'ais_4.csv'), dtype=str).sample(
            ais_4_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        df_9 = pd.read_csv(os.path.join(data_path, r'ais_5.csv'), dtype=str).sample(
            ais_5_num, random_state=random_state).reset_index(drop=True).iloc[:, :byte_num + 1]
        # NOTE(review): unlike the others, df_10 is not truncated to
        # byte_num + 1 columns — confirm whether this is intentional.
        df_10 = pd.read_csv(os.path.join(data_path, r'ais_18.csv'), dtype=str).sample(
            ais_18_num, random_state=random_state).reset_index(drop=True)

        df_0 = pd.concat([df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10], axis=0, ignore_index=True)

    else:
        # Preprocessed file given: each row is a bit string plus a label.
        df_bit = pd.read_csv(out_path)
        label = df_bit['label'].values.tolist()
        data = df_bit['data'].values.tolist()
        byte_list = []
        for bit_str in data:
            # Normalize to 256 bits, then pack into byte values.
            duiqi_bit_str = pad_or_truncate(bit_str)
            byte_str = binary_strings_to_integers(duiqi_bit_str)
            byte_list.append(byte_str)
        df_0 = pd.DataFrame(byte_list)
        df_0.insert(0, 'label', label)


    # Split the frame into the true labels and the integer byte matrix.
    data_len = df_0.shape[0]
    sec_true_label = df_0['label'].values
    numpy_data = np.array(df_0.drop('label', axis=1)).astype(int)
    data_num = numpy_data.shape[0]

    element_counts = Counter(sec_true_label)
    # Per-class proportion of the dataset (used later as metric weights).
    new_element_counts = {key: value / data_len for key, value in element_counts.items()}

    # NOTE(review): min_support is computed but never used below.
    min_support = min(new_element_counts.values())


    # Gaussian noise parameters.
    mean = 0  # noise mean
    std_dev = 1 # noise standard deviation
    # Generate noise with the same shape as the data.
    noise = np.random.normal(mean, std_dev, numpy_data.shape)
    # Add the noise to the raw data (acts as denoising-style augmentation).
    noisy_numpy_data = numpy_data + noise

    # Scale the noisy data into [0, 1].
    scaler = MinMaxScaler()
    scaled_data = scaler.fit_transform(noisy_numpy_data)

    # Find frequent byte values per column and derive a weight per column.
    threshold_max = data_len * 0.4
    threshold_min = data_len * 0.07
    #threshold_min = data_len*min_support
    weigth_list = []
    weigth_num_list = []
    weight_elem_list = [[] for _ in range(byte_num)]
    for i in range(numpy_data.shape[1]):
        totel_num = 0
        weight_num = 0
        unique_elements, counts = np.unique(numpy_data[:, i], return_counts=True)
        # Drop the zero value (padding) from the frequent-item candidates.
        if unique_elements[0] == 0:
            unique_elements = np.delete(unique_elements, 0)
            counts = np.delete(counts, 0)
        # Keep only elements whose count lies within [threshold_min, threshold_max].
        mask = (counts >= threshold_min) & (counts <= threshold_max)
        filtered_elements = unique_elements[mask]
        filtered_counts = counts[mask]
        for elem, count in zip(filtered_elements, filtered_counts):
            totel_num = totel_num + count
            weight_elem_list[i].append(elem)
            weight_num = weight_num+1
        weigth_list.append(totel_num)
        weigth_num_list.append(weight_num)
    # 0/1 mask marking cells that hold a frequent value for their column.
    zero_array = np.zeros((numpy_data.shape[0], numpy_data.shape[1]))
    for i in range(numpy_data.shape[1]):
        for j in range(numpy_data.shape[0]):
            if numpy_data[j][i] in weight_elem_list[i]:
                zero_array[j][i] = 1
    weight_numpy = np.array(weigth_list)
    weight_numpy = weight_numpy/data_len



    # Keep the inputs_dimension columns with the largest frequent-item weight.
    second_sorted_indices = sorted(range(len(weight_numpy)), key=lambda i: weight_numpy[i], reverse = True)
    second_indices_ae = second_sorted_indices[:inputs_dimension]
    # NOTE(review): second_numpy_data is computed but never used below.
    second_numpy_data = numpy_data[:, second_indices_ae]


    rows, cols = numpy_data.shape
    # Gather the selected columns of the scaled data and their 0/1 masks.
    array_ae_sec = np.empty((rows, len(second_indices_ae)), dtype=scaled_data.dtype)
    for i, idx in enumerate(second_indices_ae):
        array_ae_sec[:, i] = scaled_data[:, idx]
    weight_ae_sec = np.empty((rows, len(second_indices_ae)), dtype=numpy_data.dtype)
    for i, idx in enumerate(second_indices_ae):
        weight_ae_sec[:, i] = zero_array[:, idx]
    # Zero out the non-frequent cells, then stack [data | mask] side by side
    # so the DataLoader can carry both through training.
    array_ae_sec = array_ae_sec * weight_ae_sec
    data_weight_ae_sec = np.column_stack((array_ae_sec, weight_ae_sec))

    batch_size = 128

    # Time the whole train/cluster/evaluate loop.
    start_time = time.time()  # start timer

    for i in range(loop_num):

        # Build the data loader over the stacked [data | mask] rows.
        second_train_data = torch.from_numpy(data_weight_ae_sec).unsqueeze(1).to(torch.float64).to(device)
        second_train_dataset = OneDimSequenceDataset(second_train_data)
        second_train_loader = DataLoader(second_train_dataset, batch_size=batch_size, shuffle=True)
        
        # Fresh autoencoder per loop iteration.
        second_ae_model = SimpleAutoencoder(inputs_dimension, middle_dimension,outputs_dimension)
        second_ae_model.to(device)
        second_ae_model.train()

        # Masked (weighted) loss function.
        custom_loss_fn = MSELoss()
        # Optimizer.
        optimizer = optim.Adam(second_ae_model.parameters(), lr=0.002)
        
        # Train the model.
        for epoch in range(num_epochs):            
            for inputs in second_train_loader:               
                inputs = inputs.to(device)               
                # First half of each row is the data, second half the 0/1 mask.
                intputs_data = inputs[: ,:, :inputs_dimension]
                inputs_weight = inputs[: ,:, inputs_dimension:]
                optimizer.zero_grad()                

                outputs_data,encoder_outputs = second_ae_model(intputs_data)                
                loss = custom_loss_fn(outputs_data, intputs_data, inputs_weight)              
                loss.backward()
                optimizer.step()              


        # Encode the full dataset and flatten the latent codes.
        second_test_data = second_train_data[:,:,:inputs_dimension].float()
        second_data_outputs_all, second_encoder_outputs_all = second_ae_model(second_test_data)
        second_encoder_outputs_all = second_encoder_outputs_all.cpu().detach().numpy().reshape(data_num,-1)


        # Rescale the latent codes into [0, 1].
        scaler = MinMaxScaler()
        second_scaled_updated_array = scaler.fit_transform(second_encoder_outputs_all)

        # Cluster via DBSCAN with parameter search.
        second_cluster_labels = dbscan_cluster(second_scaled_updated_array)


        # Compute the centroid of each cluster (in latent space).
        df_second_encoder_outputs_all = pd.DataFrame(second_encoder_outputs_all)
        df_second_cluster_centers = df_second_encoder_outputs_all.groupby(second_cluster_labels).mean()
        if -1 in df_second_cluster_centers.index:
            # Drop the noise "cluster" (-1) from the centroid table.
            df_second_cluster_centers = df_second_cluster_centers.drop(-1)
        second_cluster_centers = np.array(df_second_cluster_centers)

        # Indices of samples DBSCAN labeled as noise (-1).
        second_indices_unkonw = [i for i, x in enumerate(second_cluster_labels) if x == -1]
        
        # NOTE(review): this reuses loop variable `i` from the outer loop —
        # works, but fragile; the outer value is restored on the next iteration.
        for i in second_indices_unkonw:
            # Assign each noise point to its nearest cluster centroid.
            distances = {j: euclidean_distance(second_encoder_outputs_all[i], point) for j, point in enumerate(second_cluster_centers)}
            nearest_index = min(distances, key=distances.get)
            second_cluster_labels[i] = nearest_index

        # Map each cluster to the majority true label of its members.
        sec_mapped_labels = map_clusters_to_labels(sec_true_label, second_cluster_labels)
        # Per-class accuracy and purity for this loop iteration.
        class_accuracies = calculate_class_accuracy(sec_true_label, sec_mapped_labels)
        class_purity_scores = calculate_per_class_purity(sec_true_label, sec_mapped_labels)

        # Accumulate per-iteration metrics so they can be averaged afterwards.
        class_accuracies_list.append(class_accuracies)
        class_purity_list.append(class_purity_scores)

        #print("本次循环聚类的纯度为：{}".format(calculate_purity(second_cluster_labels, sec_true_label)))
        seek_true_label(second_cluster_labels, sec_true_label)

    end_time = time.time()  # stop timer
    elapsed_time = (end_time - start_time)/loop_num # mean seconds per loop

    # Annotate the original frame with the labels from the LAST iteration.
    df_0.insert(0, 'mapped label', sec_mapped_labels)
    df_0.insert(0, 'cluster label', second_cluster_labels)

    # Get the column names.
    column_names = df_0.columns.tolist()
    # Convert the column names into a numpy array.
    column_names_array = np.array([column_names])
    # Convert the DataFrame into a numpy array.
    data_array = df_0.values
    # Stack the header row on top of the data rows.
    result_array = np.vstack((column_names_array, data_array))

    # Average the per-iteration metrics and append the weighted overall value.
    average_class_accuracies = average_dicts(class_accuracies_list)
    average_class_accuracies,average_accuracies = add_average_to_dict(average_class_accuracies,new_element_counts)
    average_class_purity = average_dicts(class_purity_list)
    average_class_purity,average_purity = add_average_to_dict(average_class_purity,new_element_counts)

    # Output section.
    # Pie chart of the dataset composition.
    image_base64_1 = draw_pie_chart(new_element_counts, 'Total amount of data: {}'.format(data_num))
    # Grouped bar chart of per-class purity and accuracy.
    image_base64_2 = plot_dual_bars(average_class_purity, average_class_accuracies)
    
    # Overall metrics are returned rather than printed.
    # print("整体聚类准确率均值为：{}，各类纯度均值为：{}".format(average_accuracies,average_purity,))

    unique_cluster_labels = np.array(list(set(second_cluster_labels)))
    unique_mapple_labels = list(set(sec_mapped_labels))
    cluster_num = len(unique_cluster_labels)

    result_dict = {
    'pie_chart_image': image_base64_1,
    'dual_bars_image': image_base64_2,  
    'average_accuracies': average_accuracies,  
    'average_purity': average_purity,
    'time': elapsed_time,
    'unique_mapple_labels':unique_mapple_labels,
    'cluster_num' : cluster_num,
    'result_array': result_array             # 2-D numpy array; elements are object dtype
    }      

    # Return the images and metrics.
    return result_dict


'''功能2：查看簇内数据'''
def main_2(paramters):
    """Feature 2: extract the rows belonging to one cluster.

    :param paramters: dict with keys 'show_cluster_label' (cluster id to
        select) and 'result_array' (2-D array, header row first, as produced
        by main_1).
    :return: {'show_numpy': 2-D numpy array of object dtype} — the header row
        followed by the rows whose 'cluster label' equals the requested id.
    """
    show_label = paramters['show_cluster_label']
    result_array = paramters['result_array']
    # First row holds the column names; the rest is data.
    frame = pd.DataFrame(result_array[1:], columns=result_array[0])
    selected = frame[frame['cluster label'] == show_label]
    # Re-attach the header row on top of the selected rows.
    show_numpy = np.vstack((selected.columns.to_numpy(), selected.to_numpy()))
    return {
        "show_numpy": show_numpy,  # 2-D numpy array of object dtype
    }


'''功能3：查看协议特征'''
def main_3(paramters):
    """Feature 3: extract near-constant byte positions ("protocol features")
    for every mapped label.

    For each label, over its rows (data columns start at index 3), a column
    contributes its dominant value when that value covers at least 95% of the
    rows; otherwise the feature is -1.

    :param paramters: dict with keys 'unique_mapple_labels' and 'result_array'
        (2-D array, header row first, as produced by main_1).
    :return: {'features': 2-D numpy array; first column is the label, the rest
        are the per-position feature values}
    """
    unique_mapple_labels = paramters['unique_mapple_labels']
    result_array = paramters['result_array']
    # First row holds the column names; the rest is data.
    frame = pd.DataFrame(result_array[1:], columns=result_array[0])

    list_features = []
    for mapped_label in unique_mapple_labels:
        rows = frame[frame['label'] == mapped_label].to_numpy()
        # Skip the three leading label columns; the rest are byte values.
        pure_data = rows[:, 3:]
        feature_row = np.full(pure_data.shape[1], -1)
        threshold = pure_data.shape[0] * 0.95
        for col in range(pure_data.shape[1]):
            # Dominant value of this column and how often it occurs.
            dominant, count = Counter(pure_data[:, col].tolist()).most_common(1)[0]
            if count >= threshold:
                feature_row[col] = dominant
        list_features.append(feature_row)

    # Prepend the label as the first column of the feature matrix.
    features = np.column_stack((np.array(unique_mapple_labels), np.array(list_features)))
    return {
        "features": features  # 2-D numpy array, one row per mapped label
    }


if __name__ == "__main__":
    # Key names, value types/ranges, and their meaning.
    # Data-input section.
    paramters_1 = {
        # Per-class sample counts for the constructed dataset; each should be
        # a multiple of 100 and must not exceed 5000.
        "data_path" : r'D:\Program Files\PycharmProjects\pythonProject_wang\dataset',
        "icmp_num" : 2000,          # at most 5000
        "nbns_num": 300,            # at most 5000
        "arp_num": 2000,            # at most 5000
        "smb_num": 1000,            # at most 5000
        "http_404_num": 1400,       # at most 5000
        "http_GET_num": 700,        # at most 5000
        "ais_4_num": 300,           # at most 5000
        "ais_5_num": 700,           # at most 5000
        "ais_18_num": 600,          # at most 5000
        "ais_24A_num": 1000,        # at most 5000

        "loop_num": 1,
        "out_path" : None,
    }

    # Feature 1: load data, train, cluster and evaluate.
    result_1 = main_1(paramters_1)

    paramters_2 = {
        # Cluster id to inspect, plus the annotated array from main_1.
        "show_cluster_label" : 4,
        "result_array" : result_1['result_array'],
    }
    result_2 = main_2(paramters_2)

    paramters_3 = {
    # Mapped labels and the annotated array from main_1.
        "result_array" : result_1['result_array'],
        "unique_mapple_labels": result_1['unique_mapple_labels']
}
    result_3 = main_3(paramters_3)

    print(result_3)