import pandas as pd
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Use the KaiTi font so matplotlib can render the Chinese titles used below,
# and keep the minus sign displayable when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
import seaborn as sns
import logging
import io
import base64
from collections import Counter

def load_and_split_data(file_path, label, n_train, n_test, L, small_class_flag):
    """Load one class's samples from a .npy file and split into train/test.

    Takes the first ``n_train + n_test`` rows and first ``L`` columns of the
    array, attaches the constant class ``label``, and shuffles the rows into
    train/test partitions. When ``small_class_flag`` is non-zero, the training
    portion is truncated to ``small_class_flag`` percent of ``n_train`` to
    simulate a minority class.

    Returns:
        (x_train, x_test, y_train, y_test)
    """
    samples = np.load(file_path)[: n_train + n_test, :L]
    targets = np.array([label] * len(samples))

    split = train_test_split(samples, targets,
                             test_size=n_test / (n_train + n_test),
                             random_state=None, shuffle=True)
    x_train, x_test, y_train, y_test = split

    # Optionally shrink the training share to emulate class imbalance.
    if small_class_flag != 0:
        keep = int(n_train * small_class_flag * 0.01)
        x_train, y_train = x_train[:keep], y_train[:keep]

    return x_train, x_test, y_train, y_test

# 绘制混淆矩阵热力图
# Plot a confusion matrix as a heatmap and return it as a Base64 PNG.
def plot_confusion_matrix(cm, title, labels, fontsize=20, annot_fontsize=14):
    """Render ``cm`` as a seaborn heatmap and return the figure as Base64 PNG.

    Args:
        cm: confusion matrix of integer counts (array-like).
        title: figure title.
        labels: class names used for both axes' tick labels.
        fontsize: size for axis labels and title.
        annot_fontsize: size for cell annotations and tick labels.

    Returns:
        str: Base64-encoded PNG bytes of the rendered figure.
    """
    fig = plt.figure(figsize=(10, 7))
    sns.heatmap(cm, annot=True, fmt='d', cmap='coolwarm', cbar=False, annot_kws={"size": annot_fontsize},
                xticklabels=labels, yticklabels=labels)
    plt.xlabel('Predicted', fontsize=fontsize)
    plt.ylabel('True', fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    plt.xticks(fontsize=annot_fontsize)
    plt.yticks(fontsize=annot_fontsize)
    # plt.show()
    # Serialize the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the byte stream as Base64
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    # Bug fix: close the figure so repeated calls do not accumulate open
    # figures (matplotlib keeps every pyplot figure alive and warns/leaks
    # memory once ~20 figures are open).
    plt.close(fig)
    return image_base64

def binary_strings_to_integers(binary_strings):
    """Convert a '0'/'1' string into a list of byte values (0-255).

    The string is split into 8-bit chunks and each chunk is parsed as an
    unsigned integer. If the length is not a multiple of 8, the string is
    right-padded with '0' bits so the final partial chunk still forms a
    full byte and keeps its leading-bit significance.

    Bug fix: the original code appended exactly one '0' character, which
    neither completed the last byte (unless len % 8 == 7) nor matched the
    stated zero-padding intent; partial tail chunks were then parsed as
    much smaller integers.

    Args:
        binary_strings: a string containing only '0' and '1' characters.

    Returns:
        list[int]: one integer per 8-bit chunk, in order.
    """
    remainder = len(binary_strings) % 8
    if remainder != 0:
        # Pad the tail out to a complete byte.
        binary_strings = binary_strings + '0' * (8 - remainder)
    # Split into 8-character groups and parse each as base-2.
    chunks = [binary_strings[i:i+8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]


def binary_df_to_classes(df, L):
    """Group a dataframe of binary-string samples by label.

    Each row's 'data' column (a '0'/'1' string) is converted to a list of
    byte values, then clipped or zero-padded to exactly ``L`` entries.

    Args:
        df: dataframe with 'data' (binary string) and 'label' columns.
        L: fixed feature-vector length.

    Returns:
        (dict, np.ndarray): mapping label -> 2-D array of that label's
        samples, and an object-dtype array of every sample in row order.
    """
    per_label = {}
    everything = []

    for _, row in df.iterrows():
        byte_values = binary_strings_to_integers(row['data'])

        # Force the feature vector to length L: clip long rows, zero-pad short ones.
        if len(byte_values) >= L:
            fixed = byte_values[:L]
        else:
            fixed = byte_values + [0] * (L - len(byte_values))

        per_label.setdefault(row['label'], []).append(fixed)
        everything.append(fixed)

    # Convert each per-class list (and the combined list) to numpy arrays.
    per_label = {lbl: np.array(rows) for lbl, rows in per_label.items()}
    return per_label, np.array(everything, dtype=object)


# 重新排列以确保小类和大类交替出现
# Reorder classes so small and large slices alternate in the pie chart.
def mix_small_large(sizes, labels):
    """Interleave above-average and below-average classes.

    Classes whose sample count exceeds the mean are alternated with the
    remaining classes, preserving each group's original order.

    Returns:
        (list, list): the reordered sizes and labels.
    """
    threshold = np.mean(sizes)
    big = [idx for idx, size in enumerate(sizes) if size > threshold]
    little = [idx for idx, size in enumerate(sizes) if size <= threshold]

    out_sizes = []
    out_labels = []
    # Walk both index lists in lockstep, taking one "big" then one "little".
    for step in range(max(len(big), len(little))):
        if step < len(big):
            out_sizes.append(sizes[big[step]])
            out_labels.append(labels[big[step]])
        if step < len(little):
            out_sizes.append(sizes[little[step]])
            out_labels.append(labels[little[step]])

    return out_sizes, out_labels


def main(parameters):
    """Run the imbalanced-traffic classification pipeline end to end.

    Depending on ``parameters``, either six built-in .npy datasets or
    external train/test CSV files (with binary-string 'data' and 'label'
    columns) are loaded. The function plots the class distribution before
    and after SMOTE oversampling, trains a RandomForest with and without
    SMOTE, prints/logs both classification reports, and returns the four
    figures as Base64-encoded PNGs under keys 'pic_1'..'pic_4'.

    Parameters
    ----------
    parameters : dict
        Expected keys: 'path', 'waibu_train_path', 'waibu_test_path',
        'L_neibu', 'L_waibu', 'n_train', 'n_test', 'small_classes'.

    Returns
    -------
    dict
        {'pic_1': ..., 'pic_2': ..., 'pic_3': ..., 'pic_4': ...} with
        Base64 PNG strings.
    """
    # Configure the log level and basic logging settings (fresh file per run)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        filename='app.log',
                        filemode='w')

    path = parameters['path']
    waibu_train_path = parameters['waibu_train_path']
    waibu_test_path = parameters['waibu_test_path']

    # Fall back to the built-in datasets when either external path is missing.
    if waibu_train_path == None or waibu_test_path == None:
        # Input feature length (bytes per sample)
        L = parameters['L_neibu']

        small_classes = parameters['small_classes']
        n_train = parameters['n_train']
        n_test = parameters['n_test']
        if n_train == 0 or n_test == 0:
            print('数据加载有误!')
            logging.info('数据加载有误!')
        data_dict = {}

        # Load the data
        # BitTorrent samples (class 0)
        x_bittorrent_tr, x_bittorrent_te, y_bittorrent_tr, y_bittorrent_te = load_and_split_data(
            path + 'bittorrent_sslv3_80B_7502.npy', 0, n_train, n_test, L, small_classes[0])
        data_dict[0] = {'x_train': x_bittorrent_tr, 'x_test': x_bittorrent_te, 'y_train': y_bittorrent_tr,
                        'y_test': y_bittorrent_te}

        # SMB samples (class 1)
        x_smb_tr, x_smb_te, y_smb_tr, y_smb_te = load_and_split_data(path + 'smb_sum_80B_6030.npy', 1, n_train,
                                                                     n_test, L, small_classes[1])
        data_dict[1] = {'x_train': x_smb_tr, 'x_test': x_smb_te, 'y_train': y_smb_tr, 'y_test': y_smb_te}

        # Outlook samples (class 2)
        x_outlook_tr, x_outlook_te, y_outlook_tr, y_outlook_te = load_and_split_data(
            path + 'outlook_ssl_80B_7475.npy', 2, n_train, n_test, L, small_classes[2])
        data_dict[2] = {'x_train': x_outlook_tr, 'x_test': x_outlook_te, 'y_train': y_outlook_tr, 'y_test': y_outlook_te}

        # Skype samples (class 3)
        x_skp_tr, x_skp_te, y_skp_tr, y_skp_te = load_and_split_data(path + 'skype_ssl_80B_6089.npy', 3, n_train,
                                                                     n_test, L, small_classes[3])
        data_dict[3] = {'x_train': x_skp_tr, 'x_test': x_skp_te, 'y_train': y_skp_tr, 'y_test': y_skp_te}

        # WoW samples (class 4)
        x_wow_tr, x_wow_te, y_wow_tr, y_wow_te = load_and_split_data(path + 'wow_80B_23345.npy', 4, n_train, n_test,
                                                                     L, small_classes[4])
        data_dict[4] = {'x_train': x_wow_tr, 'x_test': x_wow_te, 'y_train': y_wow_tr, 'y_test': y_wow_te}

        # FaceTime samples (class 5)
        x_facetime_tr, x_facetime_te, y_facetime_tr, y_facetime_te = load_and_split_data(
            path + 'Facetime_80B_6000.npy', 5, n_train, n_test, L, small_classes[5])
        data_dict[5] = {'x_train': x_facetime_tr, 'x_test': x_facetime_te, 'y_train': y_facetime_tr,
                        'y_test': y_facetime_te}

        # Concatenate every class's training samples and labels
        x_train = np.concatenate((x_bittorrent_tr, x_smb_tr, x_outlook_tr, x_skp_tr, x_wow_tr, x_facetime_tr))
        y_train1 = np.concatenate((y_bittorrent_tr, y_smb_tr, y_outlook_tr, y_skp_tr, y_wow_tr, y_facetime_tr))
        y_train = np.reshape(y_train1, (y_train1.shape[0], 1))

        # Concatenate every class's test samples and labels
        x_test = np.concatenate((x_bittorrent_te, x_smb_te, x_outlook_te, x_skp_te, x_wow_te, x_facetime_te))
        y_test1 = np.concatenate((y_bittorrent_te, y_smb_te, y_outlook_te, y_skp_te, y_wow_te, y_facetime_te))
        y_test = np.reshape(y_test1, (y_test1.shape[0], 1))

        # Shuffle the training set (labels ride along as the last column)
        training_data1 = np.hstack([x_train, y_train])
        np.random.shuffle(training_data1)
        X_tr = training_data1[:, 0:L]

        y_tr1 = training_data1[:, -1]
        y_tr = np.reshape(y_tr1, (y_tr1.shape[0], 1))

        # Shuffle the test set the same way
        test_data1 = np.hstack([x_test, y_test])
        np.random.shuffle(test_data1)
        X_te = test_data1[:, 0:L]  # X_te = test_data1[:,:-1]
        y_te1 = test_data1[:, -1]
        y_te = np.reshape(y_te1, (y_te1.shape[0], 1))

        # One-hot encode the labels
        # NOTE(review): fit_transform is called on both train and test labels;
        # if the test set is ever missing a class the encodings diverge —
        # encoder.transform(y_te) would be safer. Y_train/Y_test are not used
        # further below — confirm whether they are still needed.
        encoder = LabelEncoder()
        Y_encoded_tr = encoder.fit_transform(y_tr)
        Y_encoded_te = encoder.fit_transform(y_te)
        Y_train = to_categorical(Y_encoded_tr)
        Y_test = to_categorical(Y_encoded_te)

        # X_tr = np.expand_dims(X_tr[:, 0:L].astype(float), axis=2)
        # X_te = np.expand_dims(X_te[:, 0:L].astype(float), axis=2)

        # X_tr=X_tr.reshape(X_tr.shape[0],L,1)
        # X_te=X_te.reshape(X_te.shape[0],L,1)

        # Scale raw byte values into [0, 1]
        X_train = X_tr / 255
        X_test = X_te / 255
        # Class names and per-class training sizes for the distribution chart
        labels = ['bittorrent', 'smb', 'outlook', 'skp', 'wow', 'facetime']
        sizes = [len(data_dict[0]['x_train']), len(data_dict[1]['x_train']), len(data_dict[2]['x_train']),
                 len(data_dict[3]['x_train']), len(data_dict[4]['x_train']), len(data_dict[5]['x_train'])]

    else:
        # Input feature length for the external CSV data
        L = parameters['L_waibu']
        if L <= 0:
            print('数据加载有误!')
            logging.info('数据加载有误!')
        train_data = pd.read_csv(waibu_train_path)
        test_data = pd.read_csv(waibu_test_path)
        train_dict, all_train_data = binary_df_to_classes(train_data, L)
        test_dict, all_test_data = binary_df_to_classes(test_data, L)

        # NOTE(review): same re-fit concern as above — fit_transform on the
        # test labels assumes both CSVs contain the same label set.
        encoder = LabelEncoder()
        Y_encoded_tr = encoder.fit_transform(train_data['label'])
        Y_encoded_te = encoder.fit_transform(test_data['label'])
        labels = encoder.classes_.tolist()
        unique_elements, train_counts = np.unique(Y_encoded_tr, return_counts=True)
        sizes = train_counts
        X_train = all_train_data / 255
        X_test = all_test_data / 255
        # Mark classes holding <= 1% of the largest class as "small"
        # (100 means "not small"); used for the before/after report below.
        small_classes = []
        for i in range(len(sizes)):
            if (sizes[i] / max(sizes)) <= 0.01:
                small_classes.append(sizes[i] / max(sizes) * 100)
            else:
                small_classes.append(100)

    # # Create a plain pie chart
    # plt.figure(figsize=(8, 8))  # set the figure size
    # plt.pie(sizes, labels=labels, autopct='%1.2f%%', startangle=140)

    # Pick a color scheme
    colors = sns.color_palette('pastel')[0:6]  # seaborn's pastel palette
    sorted_indices = np.argsort(sizes)[::-1]  # sort descending by size
    sizes_sorted = np.array(sizes)[sorted_indices]
    labels_sorted = np.array(labels)[sorted_indices]
    sizes_mixed, labels_mixed = mix_small_large(sizes_sorted, labels_sorted)
    # Draw a donut chart of the original class distribution
    plt.figure(figsize=(8, 10))  # larger canvas to fit the bigger text
    wedges, texts, autotexts = plt.pie(sizes_mixed, labels=labels_mixed, autopct='%1.4f%%', startangle=140,
                                       wedgeprops=dict(width=0.4),
                                       textprops=dict(color="black", fontsize=16),  # black label text
                                       labeldistance=1.1,  # push labels outward
                                       pctdistance=0.85,  # position of the percentage text
                                       colors=colors)  # custom color scheme

    # Adjust text styling
    # Percentage-text styling
    for autotext in autotexts:
        autotext.set_fontsize(16)  # enlarge the percentage text
        autotext.set_color('black')
    # Auto-adjust the layout
    plt.tight_layout()
    # Add a title
    plt.title('原始数据分布', fontsize=20)
    # Serialize the figure into an in-memory byte stream
    # NOTE(review): figures created here are never closed; consider
    # plt.close() after encoding to avoid accumulating open figures.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the byte stream as Base64
    image_base64_1 = base64.b64encode(buf.getvalue()).decode('utf-8')


    # Standardize the features (fit on train, apply to test)
    scaler = StandardScaler()
    X_train_scaler = scaler.fit_transform(X_train)
    X_test_scaler = scaler.transform(X_test)

    # Baseline: no SMOTE
    # Train the model
    model_no_smote = RandomForestClassifier(n_estimators=10, random_state=16)
    model_no_smote.fit(X_train_scaler, Y_encoded_tr.ravel())

    # Predict
    y_pred_no_smote = model_no_smote.predict(X_test_scaler)

    # Evaluate the model
    # print("Without SMOTE:")
    # print("Confusion Matrix:")
    cm_no_smote = confusion_matrix(Y_encoded_te.ravel(), y_pred_no_smote)
    # print(cm_no_smote)
    print("不均衡直接分类结果：")
    logging.info("不均衡直接分类结果：")
    jieguo_no = classification_report(Y_encoded_te.ravel(), y_pred_no_smote, output_dict=True)
    print(classification_report(Y_encoded_te.ravel(), y_pred_no_smote))
    logging.info(classification_report(Y_encoded_te.ravel(), y_pred_no_smote))

    # Oversample the minority classes with SMOTE
    smote = SMOTE(random_state=42, k_neighbors=5)
    X_train_resampled, y_train_resampled = smote.fit_resample(X_train_scaler, Y_encoded_tr.ravel())

    # Pick a color scheme
    colors = sns.color_palette('pastel')[0:6]  # seaborn's pastel palette

    # Draw a donut chart of the resampled class distribution
    plt.figure(figsize=(8, 10))  # larger canvas to fit the bigger text
    unique_elements, counts = np.unique(y_train_resampled, return_counts=True)
    wedges, texts, autotexts = plt.pie(counts, labels=labels, autopct='%1.4f%%', startangle=140,
                                       wedgeprops=dict(width=0.4),
                                       textprops=dict(color="black", fontsize=16),  # black label text
                                       labeldistance=1.1,  # push labels outward
                                       pctdistance=0.85,  # position of the percentage text
                                       colors=colors)  # custom color scheme

    # Adjust text styling
    # Percentage-text styling
    for autotext in autotexts:
        autotext.set_fontsize(16)  # enlarge the percentage text
        autotext.set_color('black')
    # Auto-adjust the layout
    plt.tight_layout()
    # Add a title
    plt.title('增强后数据分布', fontsize=20)
    # Serialize the figure into an in-memory byte stream
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Encode the byte stream as Base64
    image_base64_2 = base64.b64encode(buf.getvalue()).decode('utf-8')

    # Train the model on the SMOTE-balanced data
    model = RandomForestClassifier(n_estimators=10, random_state=16)
    model.fit(X_train_resampled, y_train_resampled.ravel())

    # Predict
    y_pred = model.predict(X_test_scaler)

    # Evaluate the model
    # print("With SMOTE:")
    # print("Confusion Matrix:")
    cm_with_smote = confusion_matrix(Y_encoded_te.ravel(), y_pred)
    # print(cm_with_smote)
    print("\n增强后分类结果：")
    logging.info("增强后分类结果：")
    jieguo = classification_report(Y_encoded_te.ravel(), y_pred, output_dict=True)
    print(classification_report(Y_encoded_te.ravel(), y_pred))
    logging.info(classification_report(Y_encoded_te.ravel(), y_pred))
    # Per-class before/after comparison for the "small" classes.
    # NOTE(review): in the internal-data branch small_classes holds 0 or
    # small percentages (never 100), so this `!= 100` test logs every
    # class there — confirm whether that is intended.
    for i in range(len(labels)):
        if small_classes[i] != 100:
            logging.info(f'{labels[i]}: ')
            logging.info(f'precision: {jieguo_no[str(i)]["precision"]} --> {jieguo[str(i)]["precision"]}')
            logging.info(f'recall: {jieguo_no[str(i)]["recall"]} --> {jieguo[str(i)]["recall"]}')
            logging.info(f'f1-score: {jieguo_no[str(i)]["f1-score"]} --> {jieguo[str(i)]["f1-score"]}')
            print(f'{labels[i]}: ')
            print(f'precision: {jieguo_no[str(i)]["precision"]} --> {jieguo[str(i)]["precision"]}')
            print(f'recall: {jieguo_no[str(i)]["recall"]} --> {jieguo[str(i)]["recall"]}')
            print(f'f1-score: {jieguo_no[str(i)]["f1-score"]} --> {jieguo[str(i)]["f1-score"]}')
    logging.info(f'accuracy: {jieguo_no["accuracy"]} --> {jieguo["accuracy"]}')
    print(f'accuracy: {jieguo_no["accuracy"]} --> {jieguo["accuracy"]}')
    # Confusion-matrix heatmap without SMOTE
    image_base64_3 = plot_confusion_matrix(cm_no_smote, "未增强分类结果混淆矩阵", labels)

    # Confusion-matrix heatmap with SMOTE
    image_base64_4 = plot_confusion_matrix(cm_with_smote, "增强后分类结果混淆矩阵", labels)

    #plt.show()

    result_dict = {
        'pic_1': image_base64_1,    # original class distribution, cf. Figure_1.png
        'pic_2': image_base64_2,    # resampled class distribution, cf. Figure_1.png
        'pic_3': image_base64_3,     # confusion matrix before augmentation, cf. Figure_2.png
        'pic_4': image_base64_4     # confusion matrix after augmentation, cf. Figure_3.png
    }
    return result_dict


if __name__ == '__main__':
    # Example parameter set; the notes give each value's expected range.
    parameters1 = {
        'path': 'D:/pythonProject/LSTM-GAN/dataset/',   # str, directory of the internal datasets
        # Internal-data settings
        'L_neibu': 18,  # int, [15, 40], chosen from a dropdown or typed in
        'n_train': 2000,  # int, [1000, 5000], adjusted in steps of 1000
        'small_classes': [0.4, 0, 0.7, 0, 0, 0],  # list[float], each value is either 0 or > 6/n_train (with n_train=1000, at least 0.6)
        'n_test': 2000,  # int, [1000, 6000-n_train]; n_train + n_test must be <= 6000, each at least 1000

        # External-data settings
        'L_waibu': 6,   # int, free input, must be > 0
        'waibu_train_path': r'D:\pythonProject\LSTM-GAN\dataset\外部数据_分类001.csv',    # str, training data
        'waibu_test_path': r'D:\pythonProject\LSTM-GAN\dataset\外部数据_测试002.csv',     # str, test data; if either path is None, the internal datasets are used

    }
    result_dict = main(parameters1)