import pandas as pd
import numpy as np
from collections import Counter
import os
import math
import sys
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
plt.rcParams['font.sans-serif'] = ['SimHei']  # 使用 SimHei 字体
plt.rcParams['axes.unicode_minus'] = False    # 正常显示负号
import io
import base64
import time
import ast

def adjust_string_lengths(strings, target_length):
    """Force every string to exactly `target_length` characters.

    Strings shorter than the target are right-padded with the character
    '1'; longer strings are truncated to their first `target_length`
    characters.

    Parameters:
        strings: iterable of strings (bit strings in this project).
        target_length: desired length of every output string.

    Returns:
        list of strings, each exactly `target_length` characters long.
    """
    result = []
    for s in strings:
        # Pad first (no-op when already long enough), then slice.
        padded = s + '1' * max(0, target_length - len(s))
        result.append(padded[:target_length])
    return result


def adjust_lists_length(list_of_lists, length, fill_value=0):
    """Normalize every sublist to exactly `length` elements.

    Parameters:
        list_of_lists: input list containing the sublists to adjust.
        length: target length for every sublist.
        fill_value: value appended when a sublist is too short.

    Returns:
        New list of new sublists, each truncated or right-padded to
        `length` elements. Input sublists are not modified.
    """
    return [
        sub[:length] if len(sub) > length
        else sub + [fill_value] * (length - len(sub))
        for sub in list_of_lists
    ]


def calculate_entropy(probability_dict, all_num):
    """Shannon entropy (base 2) of a count dictionary.

    Parameters:
        probability_dict: mapping from symbol to its occurrence count.
        all_num: total number of observations used to normalize counts.

    Returns:
        Entropy in bits as a float (0.0 for a single-symbol distribution).
    """
    total = 0.0
    for count in probability_dict.values():
        p = count / all_num
        # Skip zero-probability symbols: lim p->0 of p*log2(p) is 0.
        if p > 0:
            total -= p * math.log2(p)
    return total


def filter_consecutive_numbers(nums):
    """Collapse each maximal run of consecutive integers to its last element.

    e.g. [1, 2, 3, 5, 6, 10] -> [3, 6, 10]. Non-consecutive elements are
    kept unchanged; an empty input yields an empty list.

    Parameters:
        nums: list of integers (scanned left to right).

    Returns:
        List containing the final element of every consecutive run.
    """
    if not nums:
        return []

    kept = []
    last_index = len(nums) - 1
    for idx, value in enumerate(nums):
        # A run ends at the last position, or where the successor does
        # not continue the +1 sequence.
        if idx == last_index or nums[idx + 1] != value + 1:
            kept.append(value)
    return kept


def calculate_coverage(true_boundary_points, candidate_boundary_points):
    """Fraction of true boundary points matched by some candidate.

    A true point counts as covered when any candidate lies within
    +/-4 positions of it (inclusive).

    Parameters:
        true_boundary_points: ground-truth boundary offsets (non-empty).
        candidate_boundary_points: detected boundary offsets.

    Returns:
        Covered fraction in [0, 1].
    """
    hits = 0
    for point in true_boundary_points:
        # Scan the tolerance window point-4 .. point+4 for a candidate.
        if any(near in candidate_boundary_points for near in range(point - 4, point + 5)):
            hits += 1
    return hits / len(true_boundary_points)


def calculate_correctness(true_boundary_points, untruncated_candidate_boundary_points):
    """Average per-field overlap between true and candidate segmentations.

    Positions 0..255 are split into segments by each boundary list. Every
    true segment is scored by its best-matching candidate segment
    (shared positions / true-segment length) and the scores are averaged.

    Parameters:
        true_boundary_points: ground-truth cut offsets (non-empty).
        untruncated_candidate_boundary_points: detected cut offsets; any
            point past the last true boundary is discarded.

    Returns:
        Mean best-match ratio over all true segments, in [0, 1].
    """
    positions = list(range(256))
    last_true = true_boundary_points[-1]

    # Drop candidates beyond the final true boundary, then add that
    # boundary so the last candidate segment is closed.
    candidates = [p for p in untruncated_candidate_boundary_points if p <= last_true]
    candidates.append(last_true)

    def _segments(cut_points):
        # Split `positions` at each cut offset, left to right.
        parts = []
        start = 0
        for cut in cut_points:
            parts.append(positions[start:cut])
            start = cut
        return parts

    true_segments = _segments(true_boundary_points)
    candidate_segments = _segments(candidates)

    scores = []
    for true_seg in true_segments:
        # Best overlap (in shared positions) over all candidate segments.
        best = max(
            sum(pos in true_seg for pos in cand_seg)
            for cand_seg in candidate_segments
        )
        scores.append(best / len(true_seg))

    return sum(scores) / len(scores)


def binary_strings_to_integers(binary_strings):
    """Convert a bit string into a list of byte values (0-255).

    The string is consumed left to right in 8-bit chunks. If its length
    is not a multiple of 8, the final chunk is right-padded with '0'
    bits so the last byte keeps its left-aligned value.

    Bug fix: the original appended exactly one '0' whenever the length
    was not a multiple of 8, which still left a short (misvalued) final
    chunk for most input lengths.

    Parameters:
        binary_strings: string of '0'/'1' characters.

    Returns:
        List of integers, one per 8-bit chunk (empty for empty input).
    """
    remainder = len(binary_strings) % 8
    if remainder:
        # Pad the tail up to the next full byte.
        binary_strings += '0' * (8 - remainder)

    # Slice into aligned 8-bit chunks and parse each as base-2.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]


def pad_and_truncate_lists_to_length(list_of_lists, target_length=32):
    """Return copies of each sublist adjusted to exactly `target_length`.

    Shorter sublists are right-padded with zeros; longer ones are
    truncated to their first `target_length` elements.

    Fix: the original used `sublist += [...]`, which extended the
    caller's sublists in place; this version never mutates its input.

    Parameters:
        list_of_lists: list of lists to normalize.
        target_length: desired length of every output sublist.

    Returns:
        New list of new sublists, each exactly `target_length` long.
    """
    return [
        # Slicing caps at target_length; the pad term is empty when the
        # sublist is already long enough (negative multiplier -> []).
        sublist[:target_length] + [0] * (target_length - len(sublist))
        for sublist in list_of_lists
    ]


def draw_bar(coverager_list, correctness_list, all_labels = ['test data']):
    """Draw a grouped bar chart of coverage vs. correctness per protocol.

    Parameters:
        coverager_list: coverage score per protocol.
        correctness_list: correctness score per protocol (same length).
        all_labels: x-axis tick label per protocol.
            NOTE(review): mutable default list — harmless here since it
            is never mutated, but a tuple would be safer.

    Returns:
        The rendered figure as a base64-encoded PNG string.
    """
    # Width of each bar
    bar_width = 0.35
    # X positions of the bar groups
    index = np.arange(len(coverager_list))

    # Side-by-side bars: coverage on the left, correctness on the right
    bars1 = plt.bar(index, coverager_list, bar_width, label='Coverage')
    bars2 = plt.bar(index + bar_width, correctness_list, bar_width, label='Correctness')

    # Annotate each bar with its exact value
    def add_labels(bars):
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width() / 2, height, str(height), ha='center', va='bottom')

    add_labels(bars1)
    add_labels(bars2)

    # Title and axis labels (Chinese UI text is intentional:
    # "field-splitting performance" / "protocol class" / "performance")
    plt.title('字段划分性能')
    plt.xlabel('协议类别')
    plt.ylabel('性能')

    plt.xticks(index + bar_width / 2, all_labels)  # x-axis tick labels
    # Legend
    plt.legend(loc='best')

    # Interactive mode so show() returns immediately
    plt.ion()
    # Display the chart without blocking
    plt.show(block=False)

    # Save the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the PNG bytes as Base64 text
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    # Close the current figure to reset pyplot state
    plt.close()
    # Turn interactive mode back off
    plt.ioff()

    return image_base64


def sec_draw_split_points(split_points):
    """Plot split points on a 0..256 number line (blocking variant).

    Unlike draw_split_points(), this uses a blocking plt.show() and does
    not close the figure after encoding it.

    Parameters:
        split_points: boundary offsets to mark on the line.

    Returns:
        The figure as a base64-encoded PNG string.
    """
    # Reference axis endpoints: 0 and 256
    sequence = list(range(0, 512, 256))
    # Base line of the sequence
    fig, ax = plt.subplots(figsize=(10, 2))
    ax.plot(sequence, [0.5]*len(sequence), marker='o', linestyle='-')

    # Split points drawn as a red scatter on the line
    ax.scatter(split_points, [0.5] * len(split_points), color='r', marker='o', s=20)  # s controls dot size

    # Label the reference endpoints below the line in blue
    for point in sequence:
        ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, -15), ha='center', color='b',
                    fontsize=12)

    # Annotate split points, alternating above/below to reduce overlap
    for i, point in enumerate(split_points):
        # Position depends on index parity
        if i % 2 == 0:
            # Even index: annotation above the line
            ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, 5), ha='center', color='r',
                        fontsize=10)
        else:
            # Odd index: annotation below the line
            ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, -12.5), ha='center', color='r',
                        fontsize=10)

    # Chart cosmetics: hide ticks, add title and vertical gridlines
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_title('Sequence with Split Points', fontsize=12)
    ax.grid(True, axis='x', linestyle='--', alpha=0.7)

    # Display the chart (blocks until the window is closed)
    plt.show()

    # Save the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the PNG bytes as Base64 text
    # NOTE(review): the figure is never closed here — repeated calls
    # accumulate open figures.
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    return image_base64

def  draw_split_points(split_points,  title):
    """Plot split points on a 0..256 number line (non-blocking variant).

    Same drawing as sec_draw_split_points(), but with a caller-supplied
    title and a non-blocking interactive show().

    Parameters:
        split_points: boundary offsets to mark on the line.
        title: chart title.

    Returns:
        The figure as a base64-encoded PNG string.
    """
    # Reference axis endpoints: 0 and 256
    sequence = list(range(0, 512, 256))
    # Base line of the sequence
    fig, ax = plt.subplots(figsize=(10, 2))
    ax.plot(sequence, [0.5]*len(sequence), marker='o', linestyle='-')

    # Split points drawn as a red scatter on the line
    ax.scatter(split_points, [0.5] * len(split_points), color='r', marker='o', s=20)  # s controls dot size

    # Label the reference endpoints below the line in blue
    for point in sequence:
        ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, -15), ha='center', color='b',
                    fontsize=12)

    # Annotate split points, alternating above/below to reduce overlap
    for i, point in enumerate(split_points):
        # Position depends on index parity
        if i % 2 == 0:
            # Even index: annotation above the line
            ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, 5), ha='center', color='r',
                        fontsize=10)
        else:
            # Odd index: annotation below the line
            ax.annotate(f'{point}', (point, 0.5), textcoords="offset points", xytext=(0, -12.5), ha='center', color='r',
                        fontsize=10)

    # Chart cosmetics: hide ticks, add title and vertical gridlines
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_title(title, fontsize=12)
    ax.grid(True, axis='x', linestyle='--', alpha=0.7)

    # Interactive mode so show() returns immediately
    plt.ion()
    # Display the chart without blocking
    plt.show(block=False)

    # Save the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the PNG bytes as Base64 text
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    # Turn interactive mode back off
    # NOTE(review): the figure is never closed here — repeated calls
    # accumulate open figures.
    plt.ioff()
    return image_base64


def draw_pie_chart(data_dict, title):
    """Draw a labeled pie chart from a {label: count} mapping.

    Parameters:
        data_dict: mapping from slice label to its size/count.
        title: chart title.

    Returns:
        The figure as a base64-encoded PNG string.
    """
    # Extract labels and slice sizes from the dictionary
    labels = list(data_dict.keys())
    sizes = list(data_dict.values())

    plt.figure(figsize=(8, 6))  # Figure size
    # Draw the pie with percentage annotations
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=140)
    # Set the title
    plt.title(title)

    # Display the chart (blocks until the window is closed)
    plt.show()

    # Save the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the PNG bytes as Base64 text
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    # Close the current figure to reset pyplot state
    plt.close()
    return image_base64


def main_1(paramters):
    """Entropy-based protocol field segmentation with performance scoring.

    Mode is selected by paramters["out_path"]:
      * None/'' -> benchmark mode: sample the five bundled protocol CSVs
        (ais1, ais3, icmp0, icmp3, dhcp), detect field boundaries per
        protocol, score them against hard-coded ground-truth boundary
        lists, and render a coverage/correctness bar chart.
      * a CSV path -> single-dataset mode: detect boundaries in that file
        (expects 'data', 'key', 'label' columns — TODO confirm schema
        against the data files) and render a pie chart of its labels.

    Returns:
        dict with a base64 PNG ('first_image'), coverage/correctness
        scores, elapsed time, labels, ground-truth keys, and the
        detected candidate boundary points.
    """

    data_path = paramters['data_path']
    interference_percentage = paramters['interference_percentage']
    ais1_num = paramters["ais1_num"]
    ais3_num = paramters["ais3_num"]
    icmp0_num = paramters["icmp0_num"]
    icmp3_num = paramters["icmp3_num"]
    dhcp_num = paramters["dhcp_num"]

    out_path = paramters["out_path"]
    bit_protocol = paramters["bit_protocol"]

    # Messages are normalized to 32 bytes (256 bits) before analysis.
    target_byte = 32

    if out_path == None or out_path =='':

        # Ground-truth field boundary positions (bit offsets) per protocol.
        ais1_keys = [5, 7, 37, 41, 49, 59, 60, 88, 115, 127, 136, 142, 144, 147, 148, 167]
        ais3_keys = [5, 7, 37, 41, 49, 59, 60, 88, 115, 127, 136, 142, 144, 147, 148, 167]
        icmp0_keys = [7, 15, 31, 47, 55, 63, 127]
        icmp3_keys = [7, 15, 31, 63, 71, 95, 111, 119, 127, 135, 143, 159]
        dhcp_keys = [7, 15, 23, 31, 63, 79, 95, 127, 159, 223]

        # NOTE(review): the trailing ais1_keys entry is dead — zip() below
        # stops after the five labels.
        all_keys = [ais1_keys,  ais3_keys, icmp0_keys, icmp3_keys, dhcp_keys, ais1_keys]
        all_labels =  ['ais1', 'ais3', 'icmp0', 'icmp3', 'dhcp' ]
        # Analysis granularity per protocol: 'bit' -> 1-bit n-grams, else 8-bit.
        bit_protocol_list = ['bit', 'bit', 'byte', 'byte', 'byte']

        # Sample each protocol CSV without replacement; each file is
        # assumed to have 'data' (bit string) and 'label' columns.
        df_1 = pd.read_csv(os.path.join(data_path, r'ais1.csv')).sample(ais1_num, replace=False).reset_index(drop=True)
        df_2 = pd.read_csv(os.path.join(data_path, r'ais3.csv')).sample(ais3_num, replace=False).reset_index(drop=True)
        df_3 = pd.read_csv(os.path.join(data_path, r'icmp0.csv')).sample(icmp0_num, replace=False).reset_index(drop=True)
        df_4 = pd.read_csv(os.path.join(data_path, r'icmp3.csv')).sample(icmp3_num, replace=False).reset_index(drop=True)
        df_5 = pd.read_csv(os.path.join(data_path, r'dhcp.csv')).sample(dhcp_num, replace=False).reset_index(drop=True)

        df = pd.concat([df_1, df_2, df_3, df_4, df_5], axis=0, ignore_index=True)

        # Start timing the whole five-protocol run
        start_time = time.time()

        candidate_boundary_points_list = []
        coverager_list = []
        correctness_list = []
        for main_label,in_bit_protocol,true_boundary_points in zip(all_labels,bit_protocol_list,all_keys):
            df_main_label = df[df['label'] == main_label]
            # Mix in `interference_percentage` noise rows from the other protocols
            df_noises_label = df[df['label'] != main_label].sample(int(df_main_label.shape[0] * interference_percentage), replace=False).reset_index(drop=True)
            # Concatenate the rows
            df_0 = pd.concat([df_main_label, df_noises_label], axis=0)
            now_num = len(df_0)

            binary_strings = df_0['data'].values
            # Pad/truncate every message to exactly 256 bit characters
            adjusted_strings = adjust_string_lengths(binary_strings, target_byte * 8)

            # data_list = []
            # for bit_str in adjusted_strings:
            #     byte_str = binary_strings_to_integers(bit_str)
            #     data_list.append(byte_str)
            #
            # data_numpy = np.array(data_list)
            # # 创建 KMeans 模型，指定 k=2
            # kmeans = KMeans(n_clusters=2)
            # # 拟合模型
            # kmeans.fit(data_numpy)
            # # 获取聚类标签
            # labels = kmeans.labels_
            # labels_counter = Counter(labels)
            # most_common_element = labels_counter.most_common(1)[0][0]
            # indices = [index for index, value in enumerate(labels) if value == most_common_element]
            # adjusted_strings = [adjusted_strings[index] for index in indices]

            numpy_data = np.array(adjusted_strings)

            # n-gram width: 1 bit for bit-level protocols, 8 bits otherwise
            if in_bit_protocol == 'bit':
                n = 1
            else:
                n = 8

            # For each aligned n-gram position, count the distinct values
            # observed across all messages.
            element_counts_list = []
            no_candidate_boundary_points = []
            for i in range(0,target_byte * 8 - n + 1,n):
                gram_list = []
                for binary_string in numpy_data:
                    gram_list.append(binary_string[i:i + n])

                # Count n-gram occurrences with Counter
                element_counts = Counter(gram_list)
                element_counts_list.append(element_counts)

            # Per-position Shannon entropy; zero entropy is clamped to
            # epsilon so the relative-change ratio below cannot divide by 0.
            dict_entropy = []
            for dict in element_counts_list:  # NOTE(review): shadows the builtin `dict`
                entropy = calculate_entropy(dict, now_num)
                if entropy == 0:
                    entropy = sys.float_info.epsilon
                dict_entropy.append(entropy)

            # Flag a candidate boundary wherever the entropy of adjacent
            # positions changes by >= 50% relative to the smaller value.
            for i in range(0, len(dict_entropy) - 1):
                if abs(dict_entropy[i] - dict_entropy[i + 1]) / min(dict_entropy[i + 1], dict_entropy[i]) >= 0.5:
                    no_candidate_boundary_points.append((i+1) * n - 1)

            # Collapse runs of consecutive candidates, then truncate to
            # each protocol's maximum meaningful bit offset.
            if (n == 1):
                old_candidate_boundary_points = filter_consecutive_numbers(no_candidate_boundary_points)
                candidate_boundary_points = [x for x in old_candidate_boundary_points if x <= 171]
            elif (main_label == 'icmp0'  ):
                old_candidate_boundary_points = filter_consecutive_numbers(no_candidate_boundary_points)
                candidate_boundary_points = [x for x in old_candidate_boundary_points if x <= 127]
            elif (main_label == 'icmp3'):
                old_candidate_boundary_points = filter_consecutive_numbers(no_candidate_boundary_points)
                candidate_boundary_points = [x for x in old_candidate_boundary_points if x <= 181]
            else: candidate_boundary_points = filter_consecutive_numbers(no_candidate_boundary_points)

            # Performance evaluation against the ground-truth boundaries
            coverager = round(calculate_coverage(true_boundary_points, candidate_boundary_points), 2)
            correctness = round(calculate_correctness(true_boundary_points, candidate_boundary_points), 2)
            print('coverager:{},correctness:{}'.format(coverager, correctness))

            coverager_list.append(coverager)
            correctness_list.append(correctness)
            candidate_boundary_points_list.append(candidate_boundary_points)

        end_time = time.time()  # stop timing
        elapsed_time = (end_time - start_time)/5 # average seconds per protocol

        all_average_coverager = round(sum(coverager_list)/len(coverager_list), 2)
        all_average_correctness = round(sum(correctness_list)/len(correctness_list), 2)

        image_base64_1 = draw_bar(coverager_list, correctness_list, all_labels)

        print('字段划分总体性能：coverager:{},correctness:{}'.format(coverager_list, correctness_list))
        print('平均coverager:{},平均correctness:{}'.format(all_average_coverager,all_average_correctness))
    else:
        # Single-dataset mode: the CSV must provide 'data', 'key', 'label'.
        df_0 = pd.read_csv(out_path)
        all_num = len(df_0)

        binary_strings = df_0['data'].values
        keys = df_0['key'].values.tolist()
        labels = df_0['label'].values.tolist()

        # Count occurrences of each label with Counter
        counter = Counter(labels)
        # The most frequent label is reported as the dataset's label
        all_labels, count = counter.most_common(1)[0]
        all_labels = [all_labels]

        # Count occurrences of each key string with Counter
        counter = Counter(keys)
        # The most frequent key string is taken as the ground truth
        str_all_keys, count = counter.most_common(1)[0]

        # Parse the key string (e.g. "[7, 15, 31]") into a Python list
        all_keys = ast.literal_eval(str_all_keys)
        true_boundary_points = all_keys
        all_keys = [all_keys]

        adjusted_strings = adjust_string_lengths(binary_strings, target_byte * 8)

        numpy_data = np.array(adjusted_strings)

        # Start timing the detection
        start_time = time.time()  # begin timing

        no_candidate_boundary_points = []
        if bit_protocol == 'bit':
            n = 1
        else:
            n = 8

        # Per-position n-gram counts (same pipeline as benchmark mode)
        element_counts_list = []
        for i in range(0, target_byte * 8 - n + 1, n):
            gram_list = []
            for binary_string in numpy_data:
                gram_list.append(binary_string[i:i + n])

            # Count n-gram occurrences with Counter
            element_counts = Counter(gram_list)
            element_counts_list.append(element_counts)

        # Per-position entropy with epsilon clamp (avoids division by 0 below)
        dict_entropy = []
        for dict in element_counts_list:  # NOTE(review): shadows the builtin `dict`
            entropy = calculate_entropy(dict, all_num)
            if entropy == 0:
                entropy = sys.float_info.epsilon
            dict_entropy.append(entropy)

        # A >= 50% relative entropy jump between neighbors marks a boundary
        for i in range(0, len(dict_entropy) - 1):
            if abs(dict_entropy[i] - dict_entropy[i + 1]) / min(dict_entropy[i + 1], dict_entropy[i]) >= 0.5:
                no_candidate_boundary_points.append((i + 1) * n - 1)

        candidate_boundary_points = filter_consecutive_numbers(no_candidate_boundary_points)

        end_time = time.time()  # stop timing
        elapsed_time = (end_time - start_time) # elapsed seconds

        all_average_coverager = [round(calculate_coverage(true_boundary_points, candidate_boundary_points), 2)]
        all_average_correctness = [round(calculate_correctness(true_boundary_points, candidate_boundary_points), 2)]

        count_labels = Counter(labels)
        candidate_boundary_points_list = [candidate_boundary_points]
        image_base64_1 = draw_pie_chart(count_labels, 'Total amount of data: {}'.format(len(labels)))
        print(all_average_coverager)
        print(all_average_correctness)
        print(candidate_boundary_points)


    result_dict = {
    'first_image': image_base64_1,
    'coverager': all_average_coverager,
    'correctness': all_average_correctness,
    'time': elapsed_time,
    'all_labels':all_labels,
    'all_keys':all_keys,
    'candidate_boundary_points_list':candidate_boundary_points_list
    }

    # Return the chart image and all computed metrics/parameters
    return result_dict


def main_2(paramters):
    """Render split-point figures for one selected protocol label.

    Parameters:
        paramters: dict with 'show_label', 'all_labels', 'all_keys', and
            'candidate_boundary_points_list' (as produced by main_1).

    Returns:
        dict with two base64 PNG images: the predicted boundary points
        ('image_base64_2') and the true ones ('image_base64_3').
    """
    selected = paramters['show_label']
    label_pos = paramters['all_labels'].index(selected)

    # Look up the predicted and ground-truth boundaries for that label.
    predicted_points = paramters['candidate_boundary_points_list'][label_pos]
    truth_points = paramters['all_keys'][label_pos]

    return {
        'image_base64_2': draw_split_points(predicted_points, 'predict split point'),
        'image_base64_3': draw_split_points(truth_points, 'true split point'),
    }

if __name__ == "__main__":
    paramters_1 = {
        "data_path" : r'D:\Program Files\PycharmProjects\Field Splitting\data',
        "interference_percentage": 0.1,
        "ais1_num": 1000,
        "ais3_num": 1000,
        "icmp0_num": 1000,
        "icmp3_num": 1000,
        "dhcp_num": 1000,

        "out_path": '',#r'D:\Program Files\PycharmProjects\Field Splitting\data\dataset1.csv',
        "bit_protocol": 'byte'
    }
    result_1 = main_1(paramters_1)

    paramters_2 = {
        "show_label": 'icmp0',
        "all_labels":result_1['all_labels'],
        "all_keys":result_1['all_keys'],
        "candidate_boundary_points_list":result_1['candidate_boundary_points_list']
    }
    result_2 = main_2(paramters_2)

    print(result_2)