import os
import pandas as pd
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
import tensorflow as tf
import time
from model import *
from draw_pic import *
from train_2 import train_2
from KDE import save_kernels, load_kernels
from tempo import *
import logging
import re
import math
from load_WAIBU_data import load_WAIBU_train, load_WAIBU_test
from KDE import process_scores_renew

# Configure logging: DEBUG level, timestamped records, appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Module-level cancellation flag, polled inside the long-running per-batch /
# per-row loops below; when True those loops abort by raising.
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Request cancellation of the currently running task.

    Sets the module-level ``__Cannel_TASK_WAIT_HANDLE__`` flag that the
    batch/row loops poll.

    Bug fix: the original assigned to a *local* variable of the same name,
    so the module-level flag never changed and cancellation could never
    take effect.
    """
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

def test_2(model, intermediate_model, pca_if, x_test, kwn, reconstruct_threshold, test_batch, load_pca, kernels, centers, alpha=1, beta=1, dis_shifted=0, extra_test_input=None):
    """Run open-set inference on ``x_test`` and project features for plotting.

    For each batch: predicts reconstruction + class scores, computes the
    per-sample reconstruction MSE, and labels a sample as unknown (-1) when
    that MSE reaches ``beta * reconstruct_threshold``, otherwise with the
    known-class name for the argmax class.  Features are then projected via
    ``draw_load_pca`` and label scores adjusted with ``process_scores_renew``.

    Returns
    -------
    (predict_labels, reconstruct_mse_new, X_pca)

    Raises
    ------
    RuntimeError
        If the module-level cancellation flag is set mid-run.
        (Bug fix: the original ``raise "..."`` raised a plain string, which
        is itself a TypeError in Python 3.)
    ValueError
        If ``extra_test_input`` is given — unsupported; the original code
        skipped all work and crashed with an unbound-variable NameError at
        the return statement.
    """
    if extra_test_input is not None:
        raise ValueError('extra_test_input is not supported')

    # Map predicted class indices (as strings) back to known-class names.
    label_mapping = {str(k): label for k, label in enumerate(kwn)}

    # Scale the trained reconstruction threshold by beta.
    reconstruction_threshold = beta * reconstruct_threshold

    test_dataset = tf.data.Dataset.from_tensor_slices(x_test).batch(test_batch)

    reconstruct_mse_new = []
    predict_labels = []
    features = []
    reconstruct_img = []
    predicts = []
    for batch_data in test_dataset:
        if __Cannel_TASK_WAIT_HANDLE__:
            raise RuntimeError("用户已取消当前操作")
        x_test_np = batch_data.numpy()

        x_reconstructions, y_predicts = model.predict(batch_data, verbose=0)
        feature = intermediate_model.predict(batch_data, verbose=0)

        predicts.extend(y_predicts)
        y_predict_classes = tf.argmax(y_predicts, axis=1)

        # Per-sample reconstruction MSE over the H, W, C axes.
        x_losses = np.mean((x_test_np - x_reconstructions) ** 2, axis=(1, 2, 3))
        reconstruct_mse_new.extend(x_losses)

        features.extend(feature)
        reconstruct_img.extend(x_reconstructions)

        # High reconstruction error => unknown (-1); otherwise the
        # classifier's known-class label.
        y_final_preds = np.where(
            x_losses >= reconstruction_threshold,
            -1,
            [label_mapping[str(int(class_id))] for class_id in y_predict_classes])
        predict_labels.extend(y_final_preds)

    features_test = np.vstack(features)

    # First projection (also renders); uses pre-adjustment labels.
    X_pca = draw_load_pca(features_test, predict_labels, load_pca, centers)
    if pca_if == 1:
        adjust_scores, predict_labels_new = process_scores_renew(
            predict_labels, X_pca, predicts, kwn, kernels, centers, alpha, dis_shifted)
    else:
        adjust_scores, predict_labels_new = process_scores_renew(
            predict_labels, features_test, predicts, kwn, kernels, centers, alpha, dis_shifted)
    # Re-project with the KDE-adjusted labels.
    X_pca = draw_load_pca(features_test, predict_labels_new, load_pca, centers)

    # NOTE(review): the *pre*-adjustment predict_labels are returned, not
    # predict_labels_new — preserved for backward compatibility; confirm intent.
    return predict_labels, reconstruct_mse_new, X_pca


def binary_strings_to_integers(binary_strings):
    """Convert a string of binary digits into a list of byte values.

    The string is split into 8-character chunks from the left and each chunk
    is parsed as an unsigned integer (base 2).

    Bug fix: when the length was not a multiple of 8, the original appended
    exactly ONE '0', which still left a partial final chunk for most lengths
    and produced inconsistent values.  Now the string is right-padded with
    zeros until the final chunk is a full byte (padding at the end matches
    the original code's direction, even though its comment said "front").

    Parameters
    ----------
    binary_strings : str
        A string consisting of '0'/'1' characters.

    Returns
    -------
    list[int]
        One integer in [0, 255] per 8-bit chunk; empty list for ''.
    """
    remainder = len(binary_strings) % 8
    if remainder:
        binary_strings = binary_strings + '0' * (8 - remainder)

    # Split into full 8-bit chunks and parse each as base-2.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]

def binary_df_to_dict(df):
    """Group the binary payloads of a labelled DataFrame by label.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'data' column (binary-digit strings) and a 'label'
        column.

    Returns
    -------
    (label_dict, n_train, kwn)
        label_dict : dict mapping label -> list of byte-value lists,
        n_train    : number of rows processed,
        kwn        : labels in first-seen order (the known-class list).

    Raises
    ------
    RuntimeError
        If the module-level cancellation flag is set.  (Bug fix: the
        original ``raise "..."`` raised a plain string, which is itself a
        TypeError in Python 3.)
    """
    label_dict = {}
    kwn = []
    for _, row in df.iterrows():
        if __Cannel_TASK_WAIT_HANDLE__:
            raise RuntimeError("用户已取消当前操作")
        data = row['data']
        label = row['label']

        # Convert the binary string into a list of byte values.
        byte_str = binary_strings_to_integers(data)

        # First time this label is seen: create its bucket and record it.
        if label not in label_dict:
            label_dict[label] = []
            kwn.append(label)
        label_dict[label].append(byte_str)
    return label_dict, len(df), kwn

def binary_df_to_dict_2(df):
    """Convert every row's binary payload into a list of byte values.

    Unlabelled counterpart of :func:`binary_df_to_dict` (used for test data).

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'data' column of binary-digit strings.

    Returns
    -------
    (payloads, n_test)
        payloads : list of byte-value lists, one per row, in row order,
        n_test   : number of rows processed.

    Raises
    ------
    RuntimeError
        If the module-level cancellation flag is set.  (Bug fix: the
        original ``raise "..."`` raised a plain string, which is itself a
        TypeError in Python 3.)
    """
    payloads = []
    for _, row in df.iterrows():
        if __Cannel_TASK_WAIT_HANDLE__:
            raise RuntimeError("用户已取消当前操作")
        payloads.append(binary_strings_to_integers(row['data']))
    return payloads, len(df)

def main1(parameters):
    """Train the open-set recognition model from the external training CSV.

    Reads parameters['waibu_train_path'], builds an autoencoder+classifier
    Keras model, trains it via ``train_2``, fits KDE kernels and a PCA
    projection on the learned features, and saves all artifacts (model,
    kernels, centers, PCA, threshold) into directories next to the CSV.

    parameters keys used: 'waibu_train_path' (str), 'epochs' (int),
    'initial_learning_rate' (float), 'validation_split' (float).

    Returns a dict with 'X_pca' (ndarray (N, 2) float32), 'centers'
    (list[ndarray(2,)]) and 'classes' (list[str]) for the scatter plot.
    """

    waibu_train_path = parameters['waibu_train_path']

    path0 = os.path.dirname(waibu_train_path)
    dir_path = os.path.join(path0, '过程文件')
    # Create the intermediate-files directory if it does not exist
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    model_path = os.path.join(path0, '模型')
    # Create the model directory if it does not exist
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    display_steps = 50
    batch_size = 100
    validation_batch_size = batch_size
    # Feature switches: PCA projection and KDE scoring on, OpenMax off.
    pca_if = 1
    openmax_if = 0
    KDE_if = 1

    return_if = True
    X_pca = []
    centers = []
    classes = []

    train_data = pd.read_csv(waibu_train_path)
    label_dict_train, n_train, kwn = binary_df_to_dict(train_data)
    packet_train = load_WAIBU_train(train_data, kwn)
    print('训练数据加载完毕\n', '已知类：', kwn, '训练集大小：', n_train)
    logging.info(f'训练数据加载完毕')
    logging.info(f'训练集大小：{n_train}  已知类：{kwn}')

    epochs = parameters['epochs']
    initial_learning_rate = parameters['initial_learning_rate']
    validation_split = parameters['validation_split']

    feature_size = 16
    reconstruction_weight = 0.5
    classification_weight = 0.5
    size = 16
    # Exponential learning-rate decay schedule settings.
    decay_steps = 10000
    decay_rate = 0.96
    staircase = True
    first_iteration = True
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=initial_learning_rate,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
        staircase=staircase
    )

    x_train = packet_train['img']
    y_train = packet_train['type']

    # Define the input tensors
    input_shape = (size, size, 1)  # grayscale image; fixed at 16x16 here (original comment claimed "arbitrary size")
    input_tensor_1 = Input(shape=input_shape, name='input_image')
    extra_input = Input(shape=(1,), name='extra_feature')
    # prototypes = class_pic_train

    # Build the encoder and use its returned tensor directly as the feature.
    encoder_output = build_encoder(input_tensor_1, feature_size)
    features = encoder_output  # alias kept for readability; same tensor
    # distance_to_prototypes = PrototypeDistanceLayer(prototypes, name='prototype_distance')(input_tensor_1)

    # # (disabled) concatenate encoder output with the extra input before the
    # # classifier; extra_input is currently unused by the compiled model.
    # concat_for_classifier = Concatenate(axis=-1)([encoder_output, extra_input])
    classifier_output = build_classifier(encoder_output, len(np.unique(y_train)))

    # Build the decoder directly from the encoder output.
    decoder_output = build_decoder(encoder_output)

    # Reconstructed image and classification outputs.
    features = encoder_output
    reconstructed_image = decoder_output
    classification_output = classifier_output

    # Final model: image input -> (reconstruction, classification).
    # model = Model(inputs=[input_tensor_1, extra_input], outputs=[reconstructed_image, classification_output])
    model = Model(inputs=input_tensor_1, outputs=[reconstructed_image, classification_output])
    feature_extraction_model = Model(inputs=input_tensor_1, outputs=features)

    # print(model.summary())

    # Grab the decoder output layer by name; adjust if the model structure changes.
    decoder_output = model.get_layer('decoder_output_layer').output

    # Likewise the classifier output.
    classifier_output = model.get_layer('classifier_output_layer').output

    input_layer = model.input
    encoder_output_layer = model.get_layer('encoder_output_layer').output  # replace with the actual layer name if different
    intermediate_model = Model(inputs=input_layer, outputs=encoder_output_layer)

    # Compile the two-output model: MSE for the reconstruction head, sparse
    # categorical cross-entropy for the classifier head, equally weighted.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss={'decoder_output_layer': 'mse',
                        'classifier_output_layer': 'sparse_categorical_crossentropy'},
                  loss_weights={'decoder_output_layer': reconstruction_weight,
                                'classifier_output_layer': classification_weight},
                  metrics={'classifier_output_layer': ['accuracy']})
    model_name_part = '_'.join(kwn)  # join known-class names with underscores
    model_name = 'model_{}.h5'.format(model_name_part)

    if KDE_if == 1:
        reconstruct_threshold, class_cdfs, centers, pca, kernels, X_pca, classes = train_2(model, intermediate_model, pca_if,
                                                                           x_train, y_train, validation_split,
                                                                           batch_size, validation_batch_size,
                                                                           epochs, display_steps, kwn,
                                                                           extra_input_train=None)
        save_kernels(kernels, os.path.join(dir_path, f'kernels_{kwn}.pkl'))

    # NOTE(review): if KDE_if were 0, reconstruct_threshold/pca below would be
    # unbound; both flags are hard-coded to 1 above, so this path is safe today.
    # NOTE(review): pickle/np appear to come from a wildcard import (model/tempo) — confirm.
    if openmax_if == 1 or KDE_if == 1:
        np.save(os.path.join(dir_path, f'centers_{kwn}.npy'), centers)
        with open(os.path.join(dir_path, f'pca_model_{kwn}.pkl'), 'wb') as f:
            pickle.dump(pca, f)
        model.save(os.path.join(model_path, model_name))
        np.save(os.path.join(dir_path, f'threshold_{kwn}.npy'), reconstruct_threshold)


    result_dict = {
                    # Figure 2: scatter plot of the data distribution
                    'X_pca': X_pca,  # ndarray (N, 2) float32; colour points by classes, with legend
                    'centers': centers,  # list[ndarray(2,)]; plotted on X_pca in a distinct colour
                    'classes': classes   # list[str]; one colour per class
                    }

    return result_dict

def main2(parameters):
    """Run open-set inference on the external test CSV with a saved model.

    Loads the artifacts persisted by ``main1`` (PCA, threshold, centers,
    KDE kernels), rebuilds the feature-extraction sub-model, and classifies
    the test data via ``test_2``.

    parameters keys used:
        'waibu_test_path' : str, path to the test CSV,
        'model_name'      : str, full path to a saved 'model_<c1>_<c2>.h5'.

    Returns a dict with 'X_pca', 'centers', 'classes' (scatter-plot data)
    and 'output' (DataFrame pairing raw data with predicted labels).

    Raises
    ------
    ValueError
        If the model file name does not match 'model_<classes>.h5'.
        (Bug fix: the original raised a plain string, which is itself a
        TypeError in Python 3.)
    """

    waibu_test_path = parameters['waibu_test_path']
    path0 = os.path.dirname(waibu_test_path)
    dir_path = os.path.join(path0, '过程文件')
    # Create the intermediate-files directory if it does not exist.
    os.makedirs(dir_path, exist_ok=True)
    test_data = pd.read_csv(waibu_test_path)
    label_list_test, n_test = binary_df_to_dict_2(test_data)
    packet_test = load_WAIBU_test(test_data)
    print('数据加载完毕\n', '数据集大小：', n_test)
    # Bug fix: the original logged '训练数据加载完毕' (training data loaded),
    # a copy-paste from main1 — this loads *test* data.
    logging.info('数据加载完毕')
    logging.info(f'数据集大小：{n_test}')

    load_model_path = parameters['model_name']
    load_model_name = os.path.basename(load_model_path)
    model_path = os.path.dirname(load_model_path)
    # The known-class names are encoded in the file name: model_<c1>_<c2>.h5
    pattern = r'model_(.+)\.h5'
    match = re.search(pattern, load_model_name)
    if match:
        model_name_part = match.group(1)
        kwn = model_name_part.split('_')
    else:
        raise ValueError('模型选择错误')

    # Load the artifacts written by main1.
    with open(os.path.join(dir_path, f'pca_model_{kwn}.pkl'), 'rb') as f:
        loaded_pca = pickle.load(f)
    reconstruct_threshold = np.load(os.path.join(dir_path, f'threshold_{kwn}.npy'))

    centers = np.load(os.path.join(dir_path, f'centers_{kwn}.npy'))
    # Load the KDE models.
    loaded_kernels = load_kernels(os.path.join(dir_path, f'kernels_{kwn}.pkl'))

    x_test = packet_test['img']
    model = tf.keras.models.load_model(os.path.join(model_path, load_model_name))
    # print(model.summary())
    pca_if = 1
    test_batch = 100
    # Expose the encoder output as a standalone feature-extraction model.
    input_layer = model.input
    encoder_output_layer = model.get_layer('encoder_output_layer').output  # replace with the actual layer name if different
    intermediate_model = Model(inputs=input_layer, outputs=encoder_output_layer)

    predict_labels, loss, X_pca = test_2(model, intermediate_model, pca_if, x_test, kwn,
                                             reconstruct_threshold, test_batch, loaded_pca, loaded_kernels, centers)
    # Pair each raw payload with its predicted label, column-wise.
    output = pd.concat(
        [pd.DataFrame(test_data['data'], columns=['data']),
         pd.DataFrame(predict_labels, columns=['predict label'])], axis=1)
    classes = predict_labels
    plt.close('all')


    result_dict = {
                    # Figure 2: scatter plot of the data distribution
                    'X_pca': X_pca,  # ndarray (N, 2) float32; colour points by classes, with legend
                    'centers': centers,  # list[ndarray(2,)]; plotted on X_pca in a distinct colour
                    'classes': classes,   # list[str]; one colour per class

                    'output': output
                    }
    return result_dict

# Press the green "run" button in the gutter to run the script.
if __name__ == '__main__':
    # Phase 1: train on the external training set and save model artifacts.
    parameters1 = {

        # External data
        'waibu_train_path': r'D:\temp\xinda\TPOSR_port\dataset\外部数据_训练001.csv',     # str: external training CSV to use

        'epochs': 30,  # int: training epochs, in (0, 100]; can be increased in steps of 5
        'validation_split': 0.2,  # float: validation-set fraction, in (0, 1)
        'initial_learning_rate': 0.002,  # float: learning rate; start from 0.001 and raise as needed

    }
    result_dict_1 = main1(parameters1)

    # Phase 2: run inference on the external test set with the saved model.
    parameters2 = {
        # D:\temp\xinda\TPOSR_port\dataset\外部数据_训练001.csv
        'waibu_test_path': r'D:\temp\xinda\TPOSR_port\dataset\外部数据_测试001.csv',  # str: external test CSV
        'model_name': r'D:\temp\xinda\TPOSR_port\dataset\模型\model_AIS5_NBNS_ICMP8.h5',  # str: full path; the model lives in this folder
    }
    result_dict_2 = main2(parameters2)
