import os
import pandas as pd
import tensorflow as tf
import logging
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, accuracy_score
from tensorflow.keras.callbacks import Callback

# Configure module-wide logging: DEBUG and above, appended to 'app.log'.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Module-level cancellation flag. Long-running loops check it and raise
# ValueError when it becomes True (set via Channel()).
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Request cancellation of the current task.

    Sets the module-level flag ``__Cannel_TASK_WAIT_HANDLE__`` to True so
    that the training callback and the data-loading loop raise ValueError
    at their next check.
    """
    # BUG FIX: the original assignment created a *local* variable of the
    # same name, so the module-level flag was never updated and
    # cancellation had no effect. `global` makes the assignment stick.
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

class CustomCallback(Callback):
    """Keras callback that reports per-epoch metrics and aborts training
    when the module-level cancel flag is set."""

    def on_epoch_end(self, epoch, logs=None):
        """Log/print the epoch's metrics, or raise ValueError if cancelled.

        Raising inside the callback is how training is interrupted here;
        the caller is expected to catch the ValueError.
        """
        if __Cannel_TASK_WAIT_HANDLE__:
            raise ValueError("用户已取消当前操作")
        # FIX: Keras may call callbacks with logs=None; guard before
        # indexing (this was the intent of the commented-out check in the
        # original).
        logs = logs or {}
        # NOTE(review): the log line reports training 'accuracy' while the
        # printed line reports 'val_accuracy' — kept as-is to preserve
        # behavior, but this looks inconsistent; confirm which is intended.
        logging.info(f'{epoch}   {logs["loss"]:.8f}   {logs["accuracy"]:.8f}')
        print(f'{epoch}   {logs["loss"]:.8f}   {logs["val_accuracy"]:.8f} ')
def binary_strings_to_integers(binary_strings):
    """Convert a binary string into a list of byte values (ints in 0..255).

    The string is left-padded with '0' up to a multiple of 8 characters
    (matching the original comment's stated intent), then split into 8-bit
    chunks, each parsed as a base-2 integer.

    :param binary_strings: string of '0'/'1' characters
    :return: list of ints, one per 8-bit chunk (empty list for '')
    """
    # BUG FIX: the original appended a single '0' at the END of the string,
    # which neither guaranteed a multiple-of-8 length nor matched the
    # comment "pad with zeros at the front".
    pad = (-len(binary_strings)) % 8
    if pad:
        binary_strings = '0' * pad + binary_strings
    # Split into 8-character groups and parse each as one byte.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]

def binary_df_to_mingmi(df, L):
    """Split a dataframe of binary strings into plaintext/ciphertext arrays.

    Each row's 'data' column is converted to a list of byte values, then
    truncated or zero-padded to exactly L entries. Rows with label == 0 go
    to the plaintext array; all other labels go to the ciphertext array.

    :param df: dataframe with 'data' (binary string) and 'label' columns
    :param L: fixed sample length (truncate/pad target)
    :return: (plaintext ndarray, ciphertext ndarray)
    :raises ValueError: if the module-level cancel flag is set mid-loop
    """
    plain_rows = []
    cipher_rows = []
    for _, row in df.iterrows():
        # Honour a user-requested cancellation before processing each row.
        if __Cannel_TASK_WAIT_HANDLE__:
            raise ValueError("用户已取消当前操作")
        byte_values = binary_strings_to_integers(row['data'])
        # Force exactly L byte values: truncate long rows, zero-pad short ones.
        if len(byte_values) >= L:
            byte_values = byte_values[:L]
        else:
            byte_values = byte_values + [0] * (L - len(byte_values))
        if row['label'] == 0:
            plain_rows.append(byte_values)
        else:
            cipher_rows.append(byte_values)
    print('数据加载成功')
    logging.info('数据加载成功')
    return np.array(plain_rows), np.array(cipher_rows)

def lstm_process_source_data(data_pain, encr_data, ratio=0.9, L=10):
    '''
    Build train/test tensors for the LSTM from plaintext and ciphertext arrays.

    L: sample length after truncation/padding (default 10)
    ratio: fraction of each class used for training (default 0.9)
    Returns: X_train, Y_train (one-hot), X_test, Y_test (column vector of
    0/1 labels), Y_test1 (one-hot test labels).
    '''
    holdout = 1 - ratio

    # Split each class separately so train/test proportions hold per class.
    # Plaintext samples get label 0, encrypted samples get label 1.
    X_train_pain, X_test_pain, y_train_pain, y_test_pain = train_test_split(
        data_pain, np.zeros(len(data_pain)), test_size=holdout, random_state=42)
    X_train_encr, X_test_encr, y_train_encr, y_test_encr = train_test_split(
        encr_data, np.ones(len(encr_data)), test_size=holdout, random_state=16)

    # Stack the two classes and scale byte values into [0, 1].
    X_train = np.vstack((X_train_pain, X_train_encr)) / 255
    X_test = np.vstack((X_test_pain, X_test_encr)) / 255

    # Labels as (n, 1) column vectors.
    Y_train = np.concatenate((y_train_pain, y_train_encr)).reshape(-1, 1)
    Y_test = np.concatenate((y_test_pain, y_test_encr)).reshape(-1, 1)

    # Sequence shape expected downstream: (samples, timesteps=L, features=1).
    X_train = np.reshape(X_train, [-1, L, 1])
    X_test = np.reshape(X_test, [-1, L, 1])

    # One-hot encode the 0/1 labels (dense matrices for model.fit).
    Y_train = OneHotEncoder(categories='auto').fit_transform(Y_train).todense()
    Y_test1 = OneHotEncoder(categories='auto').fit_transform(Y_test).todense()
    return X_train, Y_train, X_test, Y_test, Y_test1

def lstm(model, epochs, batch_size, X_train, Y_train, X_test, Y_test, Y_test1, L):
    """Train `model`, evaluate it on the test set, and report metrics.

    Parameters
    ----------
    model : compiled Keras model (2-class softmax output).
    epochs, batch_size : training hyper-parameters.
    X_train, Y_train : training inputs and one-hot labels.
    X_test : test inputs.
    Y_test : unused here (kept for interface compatibility).
    Y_test1 : one-hot test labels, used for validation and evaluation.
    L : sample length (kept for interface compatibility; not used here).

    Returns
    -------
    (test_acc, recall, precision, f1, tp, fp, fn, tn)
    """
    with tf.device('/gpu:0'):
        # Per-epoch reporting is done by the custom callback below.
        print('Epoch    Loss        Accuracy')
        logging.info('Epoch     Loss        Accuracy')
        custom_callback = CustomCallback()

        # verbose=0: silence Keras' default progress output; the callback
        # prints/logs instead. (The original bound the unused `history`.)
        model.fit(X_train, Y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(X_test, Y_test1),
                  callbacks=[custom_callback],
                  verbose=0)

    # Final accuracy on the held-out test set.
    test_loss, test_acc = model.evaluate(X_test, Y_test1, verbose=0)
    print(f'Test Accuracy: {test_acc:.3f}')
    logging.info(f'Test Accuracy: {test_acc:.3f}')

    # Predicted vs. true class indices (argmax over the softmax outputs).
    predictions = np.asarray(np.argmax(model.predict(X_test), axis=-1))
    true_labels = np.asarray(np.argmax(Y_test1, axis=1))

    # FIX: labels=[0, 1] keeps the matrix 2x2 even when one class is absent
    # from the predictions, so .ravel() always yields exactly four values.
    tn, fp, fn, tp = confusion_matrix(true_labels, predictions, labels=[0, 1]).ravel()

    # FIX: zero_division=0 avoids division-by-zero for degenerate predictions
    # (this realizes the intent of the original's unused `epsilon` constant).
    precision_sklearn = precision_score(true_labels, predictions, zero_division=0)
    recall_sklearn = recall_score(true_labels, predictions, zero_division=0)
    f1_sklearn = f1_score(true_labels, predictions, zero_division=0)

    # Report results to stdout and to the log file.
    print('训练样本数：%s' % len(X_train))
    print('测试样本数：%s' % len(true_labels))
    print('TP:%s' % tp)
    print('FP:%s' % fp)
    print('FN:%s' % fn)
    print('TN:%s' % tn)
    print('Recall:%s' % recall_sklearn)
    print('Precision:%s' % precision_sklearn)
    print("测试集F1值：%s" % f1_sklearn)
    print('训练结束！')

    logging.info('训练样本数：%s' % len(X_train))
    logging.info('测试样本数：%s' % len(true_labels))
    logging.info('TP:%s' % tp)
    logging.info('FP:%s' % fp)
    logging.info('FN:%s' % fn)
    logging.info('TN:%s' % tn)
    logging.info('Recall:%s' % recall_sklearn)
    logging.info('Precision:%s' % precision_sklearn)
    logging.info("测试集F1值：%s" % f1_sklearn)
    logging.info('训练结束！')

    return test_acc, recall_sklearn, precision_sklearn, f1_sklearn, tp, fp, fn, tn

def main(parameters):
    """End-to-end pipeline: load CSV data, build and train the LSTM
    classifier, save the model beside the input file, return metrics.

    Expected keys in `parameters`:
      waibu_data_1  : path to the input CSV (columns 'data', 'label')
      jieduan       : truncation/padding length L
      rati          : training-set ratio
      learning_rate : Adam learning rate
      epoches       : number of training epochs

    Returns a dict with accuracy / recall / precision / f1 / tp / fp / fn / tn.
    """
    waibu_data_1 = parameters['waibu_data_1']
    jieduan = parameters['jieduan']

    # The trained model is saved into a '模型' ("model") folder next to the
    # input file. FIX: exist_ok=True replaces the check-then-create race.
    model_path = os.path.join(os.path.dirname(waibu_data_1), '模型')
    os.makedirs(model_path, exist_ok=True)

    rati = parameters['rati']

    data = pd.read_csv(waibu_data_1)
    pain, encr = binary_df_to_mingmi(data, jieduan)

    # Y_train0 / Y_test10 are one-hot encoded; Y_test0 is a 0/1 column.
    X_train0, Y_train0, X_test0, Y_test0, Y_test10 = lstm_process_source_data(
        pain, encr, ratio=rati, L=jieduan)

    # Hyper-parameters (batch size is fixed; the rest come from the UI).
    lr = parameters['learning_rate']
    batch_size = 512
    epoches = parameters['epoches']
    n_inputs = 1          # one feature per timestep (a single byte value)
    n_steps = jieduan     # sequence length
    n_hidden_units = 15
    n_classes = 2         # plaintext vs. ciphertext

    # Dense -> Dense -> LSTM -> softmax classifier.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(units=n_hidden_units, input_shape=(n_steps, n_inputs)),
        tf.keras.layers.Dense(units=n_hidden_units),
        tf.keras.layers.LSTM(n_hidden_units * 2, return_sequences=False),
        tf.keras.layers.Dense(n_classes, activation='softmax')
    ])

    model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    accuracy, recall_sklearn, precision_sklearn, f1_sklearn, tp, fp, fn, tn = lstm(
        model=model, epochs=epoches, batch_size=batch_size,
        X_train=X_train0, Y_train=Y_train0,
        X_test=X_test0, Y_test=Y_test0, Y_test1=Y_test10,
        L=jieduan)

    model.save(os.path.join(model_path, f'model_{jieduan}B.h5'))

    # FIX: renamed the misspelled local 'resuly_dict'.
    result_dict = {
        'accuracy': accuracy,
        'recall': recall_sklearn,
        'precision': precision_sklearn,
        'f1': f1_sklearn,
        'tp': tp,
        'fp': fp,
        'fn': fn,
        'tn': tn
    }
    return result_dict

if __name__ == '__main__':
    # Example parameters for a manual run (normally supplied by the UI).
    parameters1 = {
        'waibu_data_1': r'D:\temp\xinda\mingmi\dataset\外部数据_分类002.csv',
        'jieduan': 20,  # int, truncation length; UI spinner range (0:1:40]

        'rati': 0.9,  # float, train/validation split ratio, (0.0:0.1:1.0), mode=1
        'learning_rate': 0.003,  # float, learning rate, [0.001:0.001:0.005], mode=1
        'epoches': 80  # int, training epochs, > 0, mode=1 (UI default 100, editable)
    }
    result_dict = main(parameters1)



