import numpy as np
import pandas as pd
import os
import logging
import tensorflow as tf
import re
import ast
from tensorflow.keras.callbacks import Callback
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.layers import Input, Dense,Activation,BatchNormalization,InputLayer,LayerNormalization,LeakyReLU
from tensorflow.keras.layers import Input, Dense,LSTM, Masking, Conv2D, MaxPooling2D, Flatten
import matplotlib.pyplot as plt
# Matplotlib: use KaiTi so Chinese titles/labels render; keep minus signs readable.
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False

# Configure logging level and basic settings (appends to app.log).
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Cancellation flag polled by the long-running loops below; Channel() sets it.
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Request cancellation of the currently running task.

    Raises the module-level flag that the processing loops poll; once set,
    they abort by raising ValueError("用户已取消当前操作").
    """
    # Bug fix: without the ``global`` declaration the assignment only bound a
    # function-local name, so cancellation never took effect.
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

class CustomCallback(Callback):
    """Keras callback reporting per-epoch metrics and honoring cancellation.

    At the end of every epoch the loss and accuracy are appended to the log
    file and echoed to stdout. If the module-level cancellation flag has been
    raised, training is aborted by raising ValueError.
    """

    def on_epoch_end(self, epoch, logs=None):
        # Abort as soon as the user has requested cancellation.
        if __Cannel_TASK_WAIT_HANDLE__ != False:
            raise ValueError("用户已取消当前操作")
        # NOTE(review): the log line reports training accuracy while the
        # console line reports validation accuracy — confirm this asymmetry
        # is intentional.
        logging.info(f'{epoch}   {logs["loss"]:.8f}   {logs["accuracy"]:.8f}')
        print(f'{epoch}   {logs["loss"]:.8f}   {logs["val_accuracy"]:.8f} ')

def binary_strings_to_integers(binary_strings):
    """Convert a string of '0'/'1' characters into a list of byte values.

    The string is split into 8-bit groups and each group is parsed as a
    base-2 integer. If the length is not a multiple of 8, the string is
    left-padded with zeros first (as the original comment intended).

    Args:
        binary_strings: a string of binary digits.

    Returns:
        list[int]: one value in 0..255 per 8-bit group; [] for "".
    """
    # Bug fix: the old code appended a single '0', which neither made the
    # length a multiple of 8 nor padded at the front as documented.
    remainder = len(binary_strings) % 8
    if remainder != 0:
        binary_strings = '0' * (8 - remainder) + binary_strings
    # Split into 8-character chunks and parse each as base 2.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]

def binary_df_to_mingmi(df, L):
    """Split a dataframe of binary strings into plaintext/ciphertext arrays.

    Each row's 'data' column is decoded into byte values and normalized to
    exactly L bytes (truncated or zero-padded). Rows whose 'label' is 0 are
    collected as plaintext; all other labels as ciphertext.

    Raises:
        ValueError: when the user has cancelled the task.
    """
    plain_rows = []
    cipher_rows = []
    for _, row in df.iterrows():
        # Abort promptly on user cancellation.
        if __Cannel_TASK_WAIT_HANDLE__ != False:
            raise ValueError("用户已取消当前操作")
        byte_values = binary_strings_to_integers(row['data'])
        # Normalize every sample to exactly L bytes (truncate or pad with 0).
        byte_values = (byte_values + [0] * L)[:L]
        if row['label'] == 0:
            plain_rows.append(byte_values)
        else:
            cipher_rows.append(byte_values)
    print('数据加载成功')
    logging.info('数据加载成功')
    return np.array(plain_rows), np.array(cipher_rows)

def binary_df_to_mingmi_2(df, L):
    """Decode every row of an external dataframe into fixed-length byte lists.

    Like binary_df_to_mingmi but without label-based separation: each row's
    'data' field is decoded and normalized to exactly L bytes.

    Raises:
        ValueError: when the user has cancelled the task.
    """
    samples = []
    for _, row in df.iterrows():
        # Abort promptly on user cancellation.
        if __Cannel_TASK_WAIT_HANDLE__ != False:
            raise ValueError("用户已取消当前操作")
        byte_values = binary_strings_to_integers(row['data'])
        # Truncate long samples, zero-pad short ones.
        samples.append((byte_values + [0] * L)[:L])
    print('外部数据加载成功')
    logging.info('外部数据加载成功')
    return np.array(samples)

def cnn_data_process(pain, encr, Len):
    """Build CNN train/test tensors from plaintext and ciphertext samples.

    Args:
        pain: array of plaintext samples, each of length Len*Len.
        encr: array of ciphertext samples, each of length Len*Len.
        Len: side length of the square image fed to the CNN.

    Returns:
        (X_train, Y_train, X_test, Y_test): X reshaped to (-1, Len, Len, 1),
        Y as column vectors with 0 = plaintext, 1 = ciphertext.
    """
    # (Removed dead code: `ltemp = int(str(Len ** 2))` was computed and never used.)
    # Stack the samples and build the matching 0/1 label column.
    X = np.vstack((pain, encr))
    Y = np.vstack((np.zeros([len(pain), 1], float), np.ones([len(encr), 1], float)))

    # Stratified 80/20 split keeps the class balance in both subsets.
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=42, stratify=Y)

    # Reshape flat vectors into Len x Len single-channel images.
    X_train = np.reshape(X_train, [-1, Len, Len, 1])
    X_test = np.reshape(X_test, [-1, Len, Len, 1])
    Y_train = np.reshape(Y_train, [-1, 1])
    Y_test = np.reshape(Y_test, [-1, 1])

    return X_train, Y_train, X_test, Y_test

def build_CNN(input_shape, output_shape):
    """Assemble the small conv-net for plaintext/ciphertext classification.

    Two conv/pool stages (the second with batch normalization) followed by a
    dense head and a softmax output over `output_shape` classes.
    """
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(5, 5), activation='relu',
                     input_shape=input_shape, padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(24, kernel_size=(5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Flatten())
    model.add(Dense(24, activation='relu'))
    model.add(Dense(output_shape, activation='softmax'))
    return model

def CNN_train(X_train, Y_train, X_test, Y_test, Len, epochs, cnn_model_name):
    """Compile, train, evaluate, and save the CNN classifier.

    Args:
        X_train, Y_train: training images (N, Len, Len, 1) and 0/1 labels.
        X_test, Y_test: held-out images/labels (also used as validation data).
        Len: side length of the square input.
        epochs: number of training epochs.
        cnn_model_name: path the trained model is saved to (.h5).

    Returns:
        float: accuracy on the test set.
    """
    # Build the two-class model.
    model = build_CNN((Len, Len, 1), 2)

    batch_size = 64

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    # Bug fix: build_CNN ends with a softmax layer, so the loss must receive
    # probabilities (from_logits=False). from_logits=True applied an implicit
    # second softmax, flattening gradients and degrading training.
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    with tf.device('/gpu:0'):
        print('Epoch    Loss        Accuracy')
        logging.info('Epoch     Loss        Accuracy')
        # Per-epoch reporting plus user-cancellation support.
        custom_callback = CustomCallback()
        history = model.fit(X_train, Y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_data=(X_test, Y_test),
                            callbacks=custom_callback,
                            verbose=0)

    # Final evaluation on the held-out set.
    test_loss, test_acc = model.evaluate(X_test, Y_test, verbose=0)

    print(f'Test Accuracy: {test_acc:.3f}')
    logging.info(f'Test Accuracy: {test_acc:.3f}')

    # Persist the trained model.
    model.save(cnn_model_name)
    print('训练结束，模型已保存。')
    logging.info('训练结束，模型已保存。')

    return test_acc

def main1(parameters):
    """Mode-1 entry point: train a CNN on labelled plaintext/ciphertext data.

    Args:
        parameters: dict with 'waibu_data_1' (labelled CSV path) and
            'size_1' (CNN input side length, e.g. 8 or 10).

    Returns:
        dict with 'val_acc': accuracy on the held-out split.
    """
    waibu_data_1 = parameters['waibu_data_1']
    Len = parameters['size_1']
    path0 = os.path.dirname(waibu_data_1)
    model_path = os.path.join(path0, '模型')
    # Create the model directory on first run.
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    model_name = f'cnn_{Len}-{Len}.h5'

    # Augment the plaintext class with constant-byte samples: each byte value
    # repeated Len*Len times, 10 copies per value.
    d_same = []
    for value in range(256):
        temp = [value] * Len * Len
        for _ in range(10):  # the original inner loop shadowed the outer index `i`
            d_same.append(temp)
    # np.asfarray was removed in NumPy 2.0; asarray(dtype=float) is equivalent.
    d_same = np.asarray(d_same, dtype=float)
    data = pd.read_csv(waibu_data_1)
    pain_cnn, encr_cnn = binary_df_to_mingmi(data, Len * Len)
    pain_cnn = np.vstack((pain_cnn, d_same))
    X_train2, Y_train2, X_test2, Y_test2 = cnn_data_process(pain_cnn, encr_cnn, Len)

    test_acc = CNN_train(X_train2, Y_train2, X_test2, Y_test2, Len, 20,
                         os.path.join(model_path, model_name))

    result_dict = {
        'val_acc': test_acc,    # validation/test accuracy
    }

    return result_dict

def convert_list_str(lst_str):
    """Safely parse a Python list literal (e.g. "[1, 2]") into a list."""
    parsed = ast.literal_eval(lst_str)
    return parsed

def binary_df(df, L):
    """Decode each row's 'data' binary string into exactly L byte values.

    Returns an (N, L) array: long samples are truncated, short ones are
    zero-padded on the right.
    """
    rows = []
    for _, row in df.iterrows():
        byte_values = binary_strings_to_integers(row['data'])
        # Truncate or zero-pad to exactly L values.
        rows.append((byte_values + [0] * L)[:L])
    return np.array(rows)

def process_ziduan_divide(df, L_mesg=40):
    """Decode the dataframe and split the decoded rows by field name.

    Args:
        df: dataframe with a 'data' column of binary strings and, optionally,
            'name' / 'name_out' columns identifying the field of each row.
        L_mesg: fixed byte length every sample is padded/truncated to.

    Returns:
        (data, file_out, sel_data): the full decoded (N, L_mesg) array, a
        concatenation of one 'name_out' value per field (or the default key
        when 'name' is absent), and a dict mapping field name -> its rows.

    Raises:
        ValueError: when the user has cancelled the task.
    """
    data = binary_df(df, L_mesg)
    default_key = 'default_class'

    if 'name' not in df.columns:
        # No field information: treat all rows as one class under the default key.
        file_out = default_key
        sel_data = {default_key: data}
    else:
        # Group rows by the distinct values of the 'name' column.
        keys = np.unique(df['name'])
        sel_data = {}
        file_out = ''
        for key in keys:
            if __Cannel_TASK_WAIT_HANDLE__ == False:
                # NOTE(review): assumes each 'name' maps to exactly one
                # 'name_out' value (only the first deduplicated value is kept)
                # — verify against the input CSV schema.
                file_out += str(df[df['name'] == key]['name_out'].drop_duplicates().values[0])
                # Boolean mask aligns positionally with `data`'s rows.
                value = data[df['name'] == key]
                sel_data[key] = value
            else:
                raise ValueError("用户已取消当前操作")
    return data, file_out, sel_data

def _save_line_plot(series, title, ylabel, filename):
    """Render a single line plot of `series` to `filename` (helper for rpdcgn2)."""
    plt.figure(figsize=(10, 6))
    plt.plot(series)
    plt.title(title)
    plt.xlabel('字节偏移量')
    plt.ylabel(ylabel)
    plt.savefig(filename)
    plt.close()

def rpdcgn2(data_pad01, line_edit_text, file_out01, rpdc2_len, sel_data01):
    """Run data reconstruction and per-offset encryption-probability analysis.

    Delegates to the project module for the reconstruction and probability
    computation, saves plots of the probability curve and its derivative,
    and returns both sequences.

    Args:
        data_pad01: decoded byte matrix from process_ziduan_divide.
        line_edit_text: model path passed through to the project module.
        file_out01: per-field output identifier string.
        rpdc2_len: CNN input side length.
        sel_data01: per-field data dict (accepted for interface parity; the
            unused local copy the original made has been removed).

    Returns:
        (p, p_derivative): encryption probability per byte offset and its
        discrete derivative.
    """
    # Local import: project-specific module, only needed in this mode.
    import rpdc_data_reconstruct_and_prob_compute
    p = rpdc_data_reconstruct_and_prob_compute.data_reconstruct_and_prob_compute(
        data_pad01, line_edit_text, file_out01, rpdc2_len)
    p_derivative = rpdc_data_reconstruct_and_prob_compute.discrete_funtion_derivative3(p)

    # Save the probability curve and its derivative (duplicated plotting
    # code factored into _save_line_plot).
    _save_line_plot(p, '加密概率', '加密概率', 'p_plot.png')
    _save_line_plot(p_derivative, '加密概率导数', '加密概率导数', 'p_derivative_plot.png')

    print("---------开始数据重组和加密概率计算------------")
    logging.info("---------开始数据重组和加密概率计算------------")
    return p, p_derivative

def rpdcgn3(vari_max_input, p1):
    """Locate candidate encrypted fields from the probability curve.

    Args:
        vari_max_input: threshold for what counts as a probability jump.
        p1: per-offset encryption-probability sequence.

    Returns:
        (en_combination, adjusted_combinations): all raw offset combinations
        and their [start, end] summaries. Returns ([], []) when no jump
        points or no combinations are found.

    Bug fix: the original returned None on the early-exit paths, which made
    callers that unpack two values (``en, en_field = rpdcgn3(...)``) crash
    with a TypeError; empty lists keep the unpacking contract.
    """
    # Local import: project-specific module.
    import rpdc_sus_field_generate
    vari_max = float(vari_max_input)

    # Find offsets where the probability jumps by more than vari_max.
    en_section = rpdc_sus_field_generate.find_section(p1, vari_max)
    if not en_section:
        print("未提取出跳跃点。")
        logging.info("未提取出跳跃点。")
        return [], []

    # Enumerate all candidate field combinations.
    en_combination = rpdc_sus_field_generate.all_combination(en_section)
    if not en_combination:
        print("疑似加密字段为空。")
        logging.info("疑似加密字段为空。")
        return [], []

    # Summarize each combination with >= 2 offsets as [first, last].
    # (Every produced item has two elements, so no empty-list filter is needed.)
    adjusted_combinations = [[combination[0], combination[-1]]
                             for combination in en_combination
                             if len(combination) >= 2]

    # Report the summarized candidate fields.
    for comb in adjusted_combinations:
        print(comb)
        logging.info(comb)

    return en_combination, adjusted_combinations

def int_to_binary(n):
    """Return the zero-padded 8-bit binary representation of `n`."""
    value = int(n)
    return f'{value:08b}'
# 定义一个函数将一行数据转换为二进制数组

def row_to_binary_array(row):
    """Expand each byte of `row` into 8 binary digits, as a flat uint8 array."""
    bits = ''.join(int_to_binary(int(value)) for value in row)
    return np.array(list(bits), dtype=np.uint8)

def my_moments(data, n):
    """Return the n-th standardized moment of `data`.

    The sample is centred on its mean and scaled by its (population)
    standard deviation before the n-th power is averaged. Intended for
    higher-order moments (n > 2, e.g. n = 4 for kurtosis).
    """
    values = np.asarray(data)
    mean = np.mean(values)
    std = np.std(values)
    # Guard against division by zero for constant samples.
    if std == 0.0:
        std = std + 0.000001
    standardized = (values - mean) / std
    return np.sum(standardized ** n) / len(data)

def classify_based_4_moments(all_combi,test):
    """Match each sample against candidate encrypted-field patterns via moments.

    For every sample and every candidate combination of encrypted byte
    offsets, the four moments (mean, variance, skewness, kurtosis) of the
    encrypted and non-encrypted segments are standardized into feature
    vectors and compared (Euclidean distance) against the moments of the
    discrete uniform distribution over 0..255. The candidate whose encrypted
    segment looks most uniform relative to its remainder is selected.

    Args:
        all_combi: list of candidate offset lists; an empty list means
            "no encrypted segment".
        test: iterable of samples (sequences of byte values).

    Returns:
        (class_result, s_k): an (N, 2) array of [start, end] offsets per
        sample ([-1, -1] when classified as unencrypted) and the raw moment
        features collected per sample and candidate.

    Raises:
        ValueError: when the user has cancelled the task.

    Moments are compared after row-wise standardization. Uniform over
    0..255: mean (a+b)/2 = 127.5; variance ((b-a+1)**2-1)/12 = 5461.25
    (std ~73.9); skewness 0; kurtosis -6*(n**2+1)/(5*(n**2-1)) ~ -1.2 for
    n = 256.
    NOTE(review): np.asfarray was removed in NumPy 2.0 — replace with
    np.asarray(..., dtype=float) when upgrading.
    """
    class_result = []
    s_k = []
    for sample in test:
        if __Cannel_TASK_WAIT_HANDLE__ == False:
            i = 0
            d_temp = []
            s_k_t = [] # per-candidate moment features for this sample
            f_av = np.array([127.5, 5461.25, 0, -1.2]) # reference feature vector: the uniform distribution's four moments
            f_av = (f_av-np.mean(f_av))/np.std(f_av) # standardize the feature vector
            while i < len(all_combi):
                if len(all_combi[i]) == 0:    # empty candidate: sample assumed to contain no encrypted segment
                    un_session_test = sample
                    un_mean = np.mean(un_session_test)
                    un_var = np.var(un_session_test)
                    un_skew = pd.Series(un_session_test).skew()
                    # un_kurt = pd.Series(un_session_test).kurt()
                    un_kurt = my_moments(un_session_test, 4)
                    f_un = np.array([un_mean,un_var,un_skew,un_kurt]) # feature vector of the four moments
                    f_un = (f_un-np.mean(f_un))/np.std(f_un) # standardize the feature vector
                    d1 = np.linalg.norm(f_un-f_av) # Euclidean distance of the (whole, unencrypted) sample from uniform
                    d = d1-0.025 # small penalty so the "no segment" option only wins clearly
                    s_k_t.append([un_mean,un_var,un_skew,un_kurt,-1,-1,-1,-1]) # save features; -1 marks no encrypted segment
                else:
                    if all_combi[i][-1] < len(sample):   # the pattern must fit inside the sample (0-based offsets)
                        en_session_test = [sample[x] for x in all_combi[i]]   # values at the candidate encrypted offsets
                        un_session_test = [sample[x] for x in range(len(sample)) if x not in all_combi[i]] # values at the remaining (non-encrypted) offsets
                        un_mean = np.mean(np.asfarray(un_session_test))
                        un_var = np.var(np.asfarray(un_session_test))
                        un_skew = pd.Series(un_session_test).skew()
                        # un_kurt = pd.Series(un_session_test).kurt()
                        un_kurt = my_moments(un_session_test,4)
                        f_un = np.array([un_mean,un_var,un_skew,un_kurt]) # feature vector of the four moments
                        f_un = (f_un-np.mean(f_un))/np.std(f_un) # standardize the feature vector
                        d1 = np.linalg.norm(f_un-f_av) # distance of the non-encrypted segment from uniform
                        en_mean = np.mean(np.asfarray(en_session_test))
                        en_var = np.var(np.asfarray(en_session_test))
                        en_skew = pd.Series(en_session_test).skew()
                        # en_kurt = pd.Series(en_session_test).kurt()
                        en_kurt = my_moments(en_session_test,4)
                        f_en = np.array([en_mean,en_var,en_skew,en_kurt]) # feature vector of the four moments
                        f_en = (f_en-np.mean(f_en))/np.std(f_en) # standardize the feature vector
                        d2 = np.linalg.norm(f_en-f_av) # distance of the encrypted segment from uniform
                        d = d1-d2 # margin: large when the encrypted part looks uniform and the rest does not
                        s_k_t.append([un_mean,un_var,un_skew,un_kurt,en_mean,en_var,en_skew,en_kurt]) # save features
                    else:
                        d = -100  # pattern exceeds the sample length: wrong format; very low score so max() never picks it
                        s_k_t.append(['Skip'])
                d_temp.append(d)
                i = i + 1
                pass
            s_k.append(s_k_t) # save this sample's feature rows
            max_index = d_temp.index(max(d_temp))  # candidate with the largest margin wins (original comment said "min", but max is taken)
            if len(all_combi[max_index]) != 0:
                class_result.append([all_combi[max_index][0],all_combi[max_index][-1]])
            else:
                class_result.append([-1,-1]) # classified as containing no encrypted segment
            pass
        else:
            raise ValueError("用户已取消当前操作")
    class_result = np.asfarray(class_result).reshape(-1,2)
    # results = np.hstack((class_result,test))
    return class_result, s_k

def encr_field_match(data, en_combination):
    """Classify each field's samples and convert results to bit offsets.

    Args:
        data: dict mapping field name -> array of byte-value rows.
        en_combination: candidate encrypted-offset combinations.

    Returns:
        (processed_data, result_ziduan): the per-field rows expanded into
        binary-digit arrays, and per-field [start, end] byte offsets mapped
        to bit positions via (idx + 1) * 8 - 1 (the -1 "no segment" marker
        maps back to -1).

    Raises:
        ValueError: when the user has cancelled the task.
    """
    # Expand every byte row into its bit representation.
    processed_data = {key: np.array([row_to_binary_array(sample) for sample in rows])
                      for key, rows in data.items()}

    result_ziduan = {}
    for item in data.keys():
        # Abort promptly on user cancellation.
        if __Cannel_TASK_WAIT_HANDLE__ != False:
            raise ValueError("用户已取消当前操作")
        result, s = classify_based_4_moments(en_combination, data[item])
        result_ziduan[item] = (result + 1) * 8 - 1

    return processed_data, result_ziduan

def main2(parameters):
    """Mode-2 entry point: detect encrypted fields in external data.

    Args:
        parameters: dict with 'waibu_data_2' (CSV path) and 'model_path'
            (full path to a trained model named like 'cnn_8-8.h5').

    Returns:
        dict with the probability curve, its derivative, the mean derivative,
        the suspected encrypted fields, and per-field match results.

    Raises:
        ValueError: when the model filename does not encode the CNN size,
            or when the user cancels during processing.
    """
    waibu_data_2 = parameters['waibu_data_2']

    model_path = parameters['model_path']
    model_name = os.path.basename(model_path)
    # Recover the CNN input size from the filename, e.g. 'cnn_8-8.h5' -> 8.
    pattern = r'cnn_\d+-(\d+)\.h5'
    match = re.search(pattern, model_name)
    if match:
        Len = int(match.group(1))
    else:
        # Bug fix: the original raised "用户已取消当前操作" ("user cancelled"),
        # misreporting a filename-format problem as a cancellation.
        raise ValueError(f"无法从模型文件名解析CNN尺寸: {model_name}")

    data = pd.read_csv(waibu_data_2)
    data_pad01, file_out01, sel_data01 = process_ziduan_divide(data)
    print('数据加载成功')
    logging.info('数据加载成功')

    p, p_derivative = rpdcgn2(data_pad01, model_path, file_out01, Len, sel_data01)
    # Mean of the derivative's first components; used as the jump threshold.
    p_der_mean = np.mean([arr[0] for arr in p_derivative])

    yuzhi = p_der_mean
    en, en_field = rpdcgn3(yuzhi, p)
    print('-------开始加密字段匹配-----------')
    logging.info('-------开始加密字段匹配-----------')
    processed_data, result_ziduan = encr_field_match(sel_data01, en)

    print('-------加密字段匹配结束-----------')
    logging.info('-------加密字段匹配结束-----------')

    result_dict = {
        # Curves for UI page 3, mode 2 (figures saved as p_plot.png and
        # p_derivative_plot.png); grid lines expected in the UI rendering.
        'p': [item[0] for item in p],
        'p_derivative': [arr[0] for arr in p_derivative],
        'p_der_mean': p_der_mean,

        'en_field': en_field,     # suspected encrypted fields (in bytes)

        # UI page 3, mode 2
        'data': processed_data,
        'predict': result_ziduan
    }

    return result_dict

if __name__ == '__main__':
    # Mode 1: train the CNN on the first (labelled) dataset.
    parameters1 = {
        'waibu_data_1': r'D:\temp\xinda\mingmi\dataset\外部数据_字段划分001.csv',
        # Drop-down selection in the UI.
        'size_1': 8,  # int: CNN input side length; 8 or 10 selectable
    }
    result_dict_1 = main1(parameters1)

    # Mode 2: use the trained model to locate encrypted fields in new data.
    parameters2 = {
        'waibu_data_2': r'D:\temp\xinda\mingmi\dataset\外部数据_字段划分002.csv',
        'model_path': r'D:\temp\xinda\mingmi\dataset\模型\cnn_8-8.h5'  # full model path
    }
    result_dict_2 = main2(parameters2)