import tensorflow as tf
import numpy as np
from tensorflow.keras.losses import SparseCategoricalCrossentropy
import matplotlib.pyplot as plt
from draw_pic import draw_pca
from KDE import *
import logging

# Logging configuration: DEBUG level, timestamped records, appended to app.log.
logging.basicConfig(
    filename='app.log',
    filemode='a',
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

# Cancellation flag polled by the training loops; True aborts the current task.
__Cannel_TASK_WAIT_HANDLE__ = False

def Channel():
    """Request cancellation of the currently running training task.

    FIX: the original body assigned to a function-local variable, so the
    module-level ``__Cannel_TASK_WAIT_HANDLE__`` flag was never actually set
    and cancellation could never trigger; it must be declared ``global``
    before assignment.
    """
    global __Cannel_TASK_WAIT_HANDLE__
    __Cannel_TASK_WAIT_HANDLE__ = True

def train_2(model, intermediate_model, pca_if, x_train, y_train, validation_split, batch_size, validation_batch_size, epochs, display_steps,
        kwn, extra_input_train=None):
    """Train the autoencoder+classifier with early stopping and log progress;
    on the final epoch run a PCA/KDE analysis of the intermediate features and
    plot sample reconstructions.

    Parameters
    ----------
    model : Keras model fed ``{'input_image': x}`` with outputs named
        ``decoder_output_layer`` (reconstruction) and
        ``classifier_output_layer`` (class logits).
    intermediate_model : Keras model producing latent features for PCA/KDE.
    pca_if : int; ``1`` computes class centers/distances in PCA space,
        anything else uses the raw feature space.
    x_train, y_train : full training arrays (assumed aligned, equal length);
        shuffled jointly here before the validation split.
    validation_split : float fraction of samples held out for validation.
    batch_size, validation_batch_size : batch sizes for the two pipelines.
    epochs : number of epochs; validation runs only on the last three.
    display_steps : print/log training metrics every ``display_steps`` batches.
    kwn : mapping from class index to a human-readable class name.
    extra_input_train : unused; kept for interface parity with ``train``.

    Returns
    -------
    tuple
        ``(max_val_loss_re, class_cdfs, centers, pca, parameters, X_pca, classes)``
        computed on the final epoch.

    Raises
    ------
    RuntimeError
        If the module-level cancellation flag is set mid-training.

    NOTE(review): if early stopping (patience) fires before the final epoch,
    the returned names are never bound and the return raises NameError --
    confirm callers always reach the last epoch, or guard the return.
    """
    best_val_loss = float('inf')  # best validation reconstruction loss so far
    no_improvement_count = 0  # consecutive epochs without improvement
    patience = 3

    total_train_examples = len(x_train)  # x_train and y_train are assumed aligned
    validation_samples = int(total_train_examples * validation_split)
    train_samples = total_train_examples - validation_samples

    # Shuffle inputs and labels jointly so the validation split is random.
    train_data = list(zip(x_train, y_train))
    np.random.shuffle(train_data)
    x_train_shuffled, y_train_shuffled = zip(*train_data)
    x_train_shuffled = np.array(x_train_shuffled)
    y_train_shuffled = np.array(y_train_shuffled)
    # First `validation_samples` entries become the validation set.
    x_val, x_train_new = x_train_shuffled[:validation_samples], x_train_shuffled[validation_samples:]
    y_val, y_train_new = y_train_shuffled[:validation_samples], y_train_shuffled[validation_samples:]

    with tf.device('/gpu:0'):
        # tf.data pipelines: training is reshuffled per epoch, validation is not.
        train_dataset = tf.data.Dataset.from_tensor_slices((x_train_new, y_train_new))
        train_dataset = train_dataset.shuffle(buffer_size=train_samples).batch(batch_size)

        validation_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
        validation_dataset = validation_dataset.batch(validation_batch_size)

        for epoch in range(epochs):

            if no_improvement_count < patience:

                reconstruct_mse = []
                for step, (batch_x, batch_y) in enumerate(train_dataset):

                    if not __Cannel_TASK_WAIT_HANDLE__:

                        loss_dict = model.train_on_batch({'input_image': batch_x}, {
                            'decoder_output_layer': batch_x,  # output layer names must match the model
                            'classifier_output_layer': batch_y
                        })
                        reconstruct_mse.append(loss_dict[1])
                        # Log/show training progress every `display_steps` batches.
                        if step % display_steps == 0:
                            print(f"Epoch: {epoch + 1}, Step: {step}, Loss_reconstructor: {loss_dict[1]:.6f}, Loss_classifier: {loss_dict[2]:.6f}, Accuracy: {loss_dict[3]:.4f}")
                            logging.info(f"训练轮次: {epoch + 1}, 训练步数: {step}, 重构损失: {loss_dict[1]:.6f}, 分类损失: {loss_dict[2]:.6f}, 分类准确率: {loss_dict[3]:.4f}")
                    else:
                        # FIX: `raise "<str>"` is itself a TypeError in Python 3;
                        # raise a real exception carrying the cancellation message.
                        raise RuntimeError("用户已取消当前操作")

                # Validate only during the last three epochs to save time.
                if epochs - epoch <= 3:

                    # model.predict returns [decoder_output, classifier_logits].
                    predictions = model.predict(validation_dataset, verbose=0)

                    # All validation labels and inputs, in dataset order.
                    all_val_y = np.concatenate([y for _, y in validation_dataset], axis=0)
                    all_val_x = np.concatenate([x for x, _ in validation_dataset], axis=0)

                    batch_x_reconstructions = predictions[0]
                    batch_y_predicts = predictions[1]

                    # Per-sample reconstruction MSE over the image axes.
                    val_losses_re = tf.reduce_mean((all_val_x - batch_x_reconstructions) ** 2,
                                                   axis=[1, 2, 3]).numpy().tolist()

                    # Mean classification loss. FIX: Keras'
                    # SparseCategoricalCrossentropy already averages over the
                    # batch; the previous extra division by len(all_val_y)
                    # double-normalized the reported value.
                    sparse_ce = SparseCategoricalCrossentropy(from_logits=True)
                    total_cla_loss = sparse_ce(all_val_y, batch_y_predicts).numpy()

                    # Predicted class index per validation sample.
                    val_predict_classes = tf.argmax(batch_y_predicts, axis=1).numpy()

                    # Report average losses.
                    avg_val_loss_re = np.mean(val_losses_re)
                    avg_val_loss_cla = total_cla_loss
                    print(f"Average Reconstruction Loss: {avg_val_loss_re}")
                    logging.info(f"验证集平均重建损失: {avg_val_loss_re}")

                    print(f"Average Classification Loss: {avg_val_loss_cla}")
                    logging.info(f"验证集平均分类损失: {avg_val_loss_cla}")

                    # Early-stopping bookkeeping on the reconstruction loss.
                    best_val_loss = min(best_val_loss, avg_val_loss_re)
                    no_improvement_count = no_improvement_count + 1 if best_val_loss != avg_val_loss_re else 0

                    # Worst per-sample reconstruction loss (part of the return).
                    max_val_loss_re = np.max(val_losses_re)

                    # Per-class validation accuracy.
                    num_classes = len(np.unique(all_val_y))
                    class_accuracies = []

                    for class_idx in range(num_classes):
                        if not __Cannel_TASK_WAIT_HANDLE__:
                            class_mask = (all_val_y == class_idx)
                            class_correct = np.sum(val_predict_classes[class_mask] == class_idx)
                            class_total = np.sum(class_mask)
                            class_accuracy = class_correct / class_total if class_total > 0 else 0
                            class_accuracies.append(class_accuracy)
                            print(f"Class {kwn[int(class_idx)]} Accuracy: {class_accuracy:.4f}")
                            logging.info(f"类别: {kwn[int(class_idx)]} 验证准确率: {class_accuracy:.4f}")
                        else:
                            # FIX: raise a real exception instead of a bare string.
                            raise RuntimeError("用户已取消当前操作")

                    if epoch == epochs - 1:

                        # Final-epoch analysis: PCA of the intermediate features
                        # and per-class distance CDFs (KDE helpers).
                        feature = intermediate_model.predict(validation_dataset, verbose=0)
                        classes = [kwn[index] for index in val_predict_classes]
                        X_pca, pca = draw_pca(feature, classes)
                        if pca_if == 1:
                            centers, distances = compute_class_centers_and_distances(X_pca, val_predict_classes)
                        else:
                            centers, distances = compute_class_centers_and_distances(feature, val_predict_classes)
                        class_cdfs, parameters = compute_distance_cdfs(distances)

                        # One original/reconstructed pair per class.
                        plt.figure(figsize=(10, 5 * num_classes))

                        for class_idx in range(num_classes):
                            if not __Cannel_TASK_WAIT_HANDLE__:
                                class_mask = (all_val_y == class_idx)

                                # Pick one random sample of this class, if any.
                                sample_indices = np.where(class_mask)[0]
                                if len(sample_indices) > 0:
                                    sample_index = np.random.choice(sample_indices)

                                    # Original image.
                                    plt.subplot(num_classes, 2, class_idx * 2 + 1)
                                    plt.imshow(all_val_x[sample_index], cmap='gray')
                                    plt.title(f"Original Class {class_idx}")
                                    plt.axis('off')

                                    # Reconstructed image.
                                    plt.subplot(num_classes, 2, class_idx * 2 + 2)
                                    plt.imshow(batch_x_reconstructions[sample_index], cmap='gray')
                                    plt.title(f"Reconstructed Class {kwn[int(class_idx)]}")
                                    plt.axis('off')

                                    # Nine random samples of this class (all of
                                    # them when fewer than nine exist).
                                    sample_indices = np.where(class_mask)[0]
                                    if len(sample_indices) >= 9:
                                        selected_indices = np.random.choice(sample_indices, size=9, replace=False)
                                    else:
                                        print("Not enough samples in this class.")
                                        selected_indices = sample_indices

                                    # 3x3 grid of originals.
                                    plt.figure(figsize=(12, 12))
                                    for i, idx in enumerate(selected_indices):
                                        plt.subplot(3, 3, i + 1)
                                        plt.imshow(all_val_x[idx], cmap='gray')
                                        plt.axis('off')

                                    # 3x3 grid of reconstructions.
                                    plt.figure(figsize=(12, 12))
                                    for i, idx in enumerate(selected_indices):
                                        plt.subplot(3, 3, i + 1)
                                        plt.imshow(batch_x_reconstructions[idx], cmap='gray')
                                        plt.axis('off')
                            else:
                                # FIX: raise a real exception instead of a bare string.
                                raise RuntimeError("用户已取消当前操作")

                        plt.tight_layout()

    return max_val_loss_re, class_cdfs, centers, pca, parameters, X_pca, classes

def train(model, intermediate_model, pca_if, x_train, y_train, validation_split, batch_size, validation_batch_size, epochs, display_steps,
        extra_input_train=None):
    """Train the autoencoder+classifier, validate on a held-out split, and on
    the final epoch build per-class distance CDFs from intermediate features.

    Parameters
    ----------
    model : Keras model fed ``{'input_image': x}`` with outputs named
        ``decoder_output_layer`` (reconstruction) and
        ``classifier_output_layer`` (class logits).
    intermediate_model : Keras model producing latent features for PCA/KDE.
    pca_if : int; ``1`` computes class centers/distances in PCA space,
        anything else uses the raw feature space.
    x_train, y_train : full training arrays; the first ``validation_split``
        fraction becomes the validation set (no shuffling is done here).
    validation_split : float fraction of samples held out for validation.
    batch_size, validation_batch_size : batch sizes for the two pipelines.
    epochs : number of epochs; validation runs only on the last three.
    display_steps : print training metrics every ``display_steps`` batches.
    extra_input_train : optional second model input, split alongside x_train.

    Returns
    -------
    tuple
        ``(max_val_loss_re, class_cdfs, centers, pca, kernels)`` computed on
        the final epoch.

    NOTE(review): in the ``extra_input_train is not None`` branch the
    final-epoch analysis (class_cdfs / centers / pca / kernels) is never
    computed, so the return statement would raise NameError there — confirm
    that branch is unused or complete it.
    NOTE(review): if early stopping (patience) triggers before the last epoch,
    the return variables are also left unbound.
    """
    best_val_loss = float('inf')  # best validation reconstruction loss so far
    no_improvement_count = 0  # consecutive epochs without improvement
    patience = 3

    total_train_examples = len(x_train)  # x_train/y_train assumed aligned with the extra input
    validation_samples = int(total_train_examples * validation_split)
    train_samples = total_train_examples - validation_samples

    if extra_input_train is not None:
        # Split training vs validation data (first slice is validation).
        x_val, x_train_new = x_train[:validation_samples], x_train[validation_samples:]
        y_val, y_train_new = y_train[:validation_samples], y_train[validation_samples:]
        extra_input_val, extra_input_train_new = extra_input_train[:validation_samples], extra_input_train[
                                                                                         validation_samples:]

        # Build tf.data pipelines for training and validation.
        train_dataset = tf.data.Dataset.from_tensor_slices(((x_train_new, extra_input_train_new), y_train_new))
        train_dataset = train_dataset.shuffle(buffer_size=train_samples).batch(batch_size)

        validation_dataset = tf.data.Dataset.from_tensor_slices(((x_val, extra_input_val), y_val))
        validation_dataset = validation_dataset.batch(validation_batch_size)  # validation batch size

        for epoch in range(epochs):

            if no_improvement_count < patience:

                reconstruct_mse = []
                for step, (batch_x, batch_y) in enumerate(train_dataset):

                    # NOTE(review): here batch_x is an (x, extra_input) tuple;
                    # feeding it whole as 'input_image' looks suspect — verify
                    # against the model's declared inputs.
                    loss_dict = model.train_on_batch({'input_image': batch_x}, {
                        'decoder_output_layer': batch_x,  # output layer names must match the model
                        'classifier_output_layer': batch_y  # output layer names must match the model
                    })
                    reconstruct_mse.append(loss_dict[1])
                    # Show training progress every `display_steps` batches.
                    if step % display_steps == 0:
                        print("Epoch:", epoch + 1, "Step:", step, "Loss_reconstructor:", loss_dict[1],
                              "Loss_classifier:", loss_dict[2], "Accuracy:", loss_dict[3])

                # Validate only during the last three epochs.
                if epochs - epoch <= 3:

                    # model.predict is assumed to return [decoder_output, classifier_logits].
                    predictions = model.predict(validation_dataset, verbose=0)

                    # All validation labels, in dataset order.
                    all_val_y = np.concatenate([y for _, y in validation_dataset], axis=0)

                    # All validation inputs, in dataset order.
                    all_val_x = np.concatenate([x for x, _ in validation_dataset], axis=0)

                    # Split predictions: reconstructions and class logits.
                    batch_x_reconstructions = predictions[0]
                    batch_y_predicts = predictions[1]

                    # Per-sample reconstruction MSE over the image axes.
                    val_losses_re = tf.reduce_mean((all_val_x - batch_x_reconstructions) ** 2,
                                                   axis=[1, 2, 3]).numpy().tolist()

                    # Total classification loss.
                    # NOTE(review): SparseCategoricalCrossentropy already returns
                    # the batch mean; dividing by len(all_val_y) again looks like
                    # double normalization — confirm intended.
                    sparse_ce = SparseCategoricalCrossentropy(from_logits=True)
                    total_cla_loss = sparse_ce(all_val_y, batch_y_predicts).numpy()/len(all_val_y)

                    # Predicted class index per sample.
                    val_predict_classes = tf.argmax(batch_y_predicts, axis=1).numpy()

                    # Average losses.
                    avg_val_loss_re = np.mean(val_losses_re)
                    avg_val_loss_cla = total_cla_loss
                    print(f"Average Reconstruction Loss: {avg_val_loss_re}")

                    # Report the mean classification loss.
                    print(f"Average Classification Loss: {avg_val_loss_cla}")

                    # Early-stopping bookkeeping on the reconstruction loss.
                    best_val_loss = min(best_val_loss, avg_val_loss_re)
                    no_improvement_count = no_improvement_count + 1 if best_val_loss != avg_val_loss_re else 0

                    # Worst per-sample reconstruction loss (part of the return).
                    max_val_loss_re = np.max(val_losses_re)

                    # Per-class classification accuracy.
                    num_classes = len(np.unique(all_val_y))
                    class_accuracies = []

                    for class_idx in range(num_classes):
                        class_mask = (all_val_y == class_idx)
                        class_correct = np.sum(val_predict_classes[class_mask] == class_idx)
                        class_total = np.sum(class_mask)
                        class_accuracy = class_correct / class_total if class_total > 0 else 0
                        class_accuracies.append(class_accuracy)
                        print(f"Class {class_idx} Accuracy: {class_accuracy:.4f}")

                    # One original/reconstructed pair per class.
                    plt.figure(figsize=(6, 5 * num_classes))

                    for class_idx in range(num_classes):
                        class_mask = (all_val_y == class_idx)

                        # Pick one random index from this class, if any.
                        sample_indices = np.where(class_mask)[0]
                        if len(sample_indices) > 0:
                            sample_index = np.random.choice(sample_indices)

                            # Original image.
                            plt.subplot(num_classes, 2, class_idx * 2 + 1)
                            plt.imshow(all_val_x[sample_index], cmap='gray')
                            plt.title(f"Original Class {class_idx}")
                            plt.axis('off')

                            # Reconstructed image.
                            plt.subplot(num_classes, 2, class_idx * 2 + 2)
                            plt.imshow(batch_x_reconstructions[sample_index], cmap='gray')
                            plt.title(f"Reconstructed Class {class_idx}")
                            plt.axis('off')

                    plt.tight_layout()

                    # Nine random indices from the last class iterated above
                    # (class_mask still holds the final loop's mask).
                    sample_indices = np.where(class_mask)[0]
                    if len(sample_indices) >= 9:
                        selected_indices = np.random.choice(sample_indices, size=9, replace=False)
                    else:
                        print("Not enough samples in this class.")
                        selected_indices = sample_indices

                    # New figure: 3x3 grid of originals.
                    plt.figure(figsize=(12, 12))

                    # Plot the original images.
                    for i, idx in enumerate(selected_indices):
                        plt.subplot(3, 3, i + 1)
                        plt.imshow(all_val_x[idx], cmap='gray')
                        plt.title(f"Original Class {class_idx} Sample {i + 1}")
                        plt.axis('off')

                    # Another figure: 3x3 grid of reconstructions.
                    plt.figure(figsize=(12, 12))

                    # Plot the reconstructed images.
                    for i, idx in enumerate(selected_indices):
                        plt.subplot(3, 3, i + 1)
                        plt.imshow(batch_x_reconstructions[idx], cmap='gray')
                        plt.title(f"Reconstructed Class {class_idx} Sample {i + 1}")
                        plt.axis('off')
                    # plt.show()
    else:
        # Split training vs validation data (first slice is validation).
        x_val, x_train_new = x_train[:validation_samples], x_train[validation_samples:]
        y_val, y_train_new = y_train[:validation_samples], y_train[validation_samples:]

        # Build tf.data pipelines for training and validation.
        train_dataset = tf.data.Dataset.from_tensor_slices((x_train_new, y_train_new))
        train_dataset = train_dataset.shuffle(buffer_size=train_samples).batch(batch_size)

        validation_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
        validation_dataset = validation_dataset.batch(validation_batch_size)  # validation batch size

        for epoch in range(epochs):

            if no_improvement_count < patience:

                reconstruct_mse = []
                for step, (batch_x, batch_y) in enumerate(train_dataset):

                    loss_dict = model.train_on_batch({'input_image': batch_x}, {
                        'decoder_output_layer': batch_x,  # output layer names must match the model
                        'classifier_output_layer': batch_y  # output layer names must match the model
                    })
                    reconstruct_mse.append(loss_dict[1])
                    # Show training progress every `display_steps` batches.
                    if step % display_steps == 0:
                        print("Epoch:", epoch + 1, "Step:", step, "Loss_reconstructor:", loss_dict[1],
                              "Loss_classifier:", loss_dict[2], "Accuracy:", loss_dict[3])

                # Validate only during the last three epochs.
                if epochs - epoch <= 3:

                    # model.predict is assumed to return [decoder_output, classifier_logits].
                    predictions = model.predict(validation_dataset, verbose=0)

                    # All validation labels, in dataset order.
                    all_val_y = np.concatenate([y for _, y in validation_dataset], axis=0)

                    # All validation inputs, in dataset order.
                    all_val_x = np.concatenate([x for x, _ in validation_dataset], axis=0)

                    # Split predictions: reconstructions and class logits.
                    batch_x_reconstructions = predictions[0]
                    batch_y_predicts = predictions[1]

                    # Per-sample reconstruction MSE over the image axes.
                    val_losses_re = tf.reduce_mean((all_val_x - batch_x_reconstructions) ** 2,
                                                   axis=[1, 2, 3]).numpy().tolist()

                    # Total classification loss.
                    # NOTE(review): SparseCategoricalCrossentropy already returns
                    # the batch mean; dividing by len(all_val_y) again looks like
                    # double normalization — confirm intended.
                    sparse_ce = SparseCategoricalCrossentropy(from_logits=True)
                    total_cla_loss = sparse_ce(all_val_y, batch_y_predicts).numpy()/len(all_val_y)

                    # Predicted class index per sample.
                    val_predict_classes = tf.argmax(batch_y_predicts, axis=1).numpy()

                    # Average losses.
                    avg_val_loss_re = np.mean(val_losses_re)
                    avg_val_loss_cla = total_cla_loss
                    print(f"Average Reconstruction Loss: {avg_val_loss_re}")

                    # Report the mean classification loss.
                    print(f"Average Classification Loss: {avg_val_loss_cla}")

                    # Early-stopping bookkeeping on the reconstruction loss.
                    best_val_loss = min(best_val_loss, avg_val_loss_re)
                    no_improvement_count = no_improvement_count + 1 if best_val_loss != avg_val_loss_re else 0

                    # Worst per-sample reconstruction loss (part of the return).
                    max_val_loss_re = np.max(val_losses_re)

                    # Per-class classification accuracy.
                    num_classes = len(np.unique(all_val_y))
                    class_accuracies = []

                    for class_idx in range(num_classes):
                        class_mask = (all_val_y == class_idx)
                        class_correct = np.sum(val_predict_classes[class_mask] == class_idx)
                        class_total = np.sum(class_mask)
                        class_accuracy = class_correct / class_total if class_total > 0 else 0
                        class_accuracies.append(class_accuracy)
                        print(f"Class {class_idx} Accuracy: {class_accuracy:.4f}")

                    if epoch == epochs - 1:

                        # Final-epoch analysis: PCA of intermediate features and
                        # per-class distance CDFs (KDE helpers).
                        feature = intermediate_model.predict(validation_dataset, verbose=0)
                        X_pca, pca = draw_pca(feature, val_predict_classes)
                        if pca_if == 1:
                            centers, distances = compute_class_centers_and_distances(X_pca,val_predict_classes)
                        else:
                            centers, distances = compute_class_centers_and_distances(feature, val_predict_classes)
                        class_cdfs, kernels = compute_distance_cdfs(distances)
                        # x-value range covering all observed distances.
                        x_values = np.linspace(0, max(max(dists) for dists in distances), 1000)
                        # One figure for all class CCDF curves.
                        plt.figure()
                        for i, cdf in enumerate(class_cdfs):
                            ccdf = lambda x: 1 - cdf(x)
                            y_values = np.array([ccdf(point) for point in x_values])

                            # Plot every class's CCDF on the same figure.
                            plt.plot(x_values, y_values, label=f'Class {i}')

                        # Legend.
                        plt.legend()
                        # Title and axis labels.
                        plt.title("CDF for All Classes")
                        plt.xlabel('Distance')
                        plt.ylabel('Complementary Cumulative Probability')
                        # Misc styling.
                        plt.grid(True)
                        plt.tick_params(axis='both', which='major', labelsize=12)

                        # One original/reconstructed pair per class.
                        plt.figure(figsize=(10, 5 * num_classes))

                        for class_idx in range(num_classes):
                            class_mask = (all_val_y == class_idx)

                            # Pick one random index from this class, if any.
                            sample_indices = np.where(class_mask)[0]
                            if len(sample_indices) > 0:
                                sample_index = np.random.choice(sample_indices)

                                # Original image.
                                plt.subplot(num_classes, 2, class_idx * 2 + 1)
                                plt.imshow(all_val_x[sample_index], cmap='gray')
                                plt.title(f"Original Class {class_idx}")
                                plt.axis('off')

                                # Reconstructed image.
                                plt.subplot(num_classes, 2, class_idx * 2 + 2)
                                plt.imshow(batch_x_reconstructions[sample_index], cmap='gray')
                                plt.title(f"Reconstructed Class {class_idx}")
                                plt.axis('off')

                        plt.tight_layout()

                        # Nine random indices from the last class iterated above
                        # (class_mask still holds the final loop's mask).
                        sample_indices = np.where(class_mask)[0]
                        if len(sample_indices) >= 9:
                            selected_indices = np.random.choice(sample_indices, size=9, replace=False)
                        else:
                            print("Not enough samples in this class.")
                            selected_indices = sample_indices

                        # New figure: 3x3 grid of originals.
                        plt.figure(figsize=(12, 12))

                        # Plot the original images.
                        for i, idx in enumerate(selected_indices):
                            plt.subplot(3, 3, i + 1)
                            plt.imshow(all_val_x[idx], cmap='gray')
                            plt.title(f"Original Class {class_idx} Sample {i + 1}")
                            plt.axis('off')

                        # Another figure: 3x3 grid of reconstructions.
                        plt.figure(figsize=(12, 12))

                        # Plot the reconstructed images.
                        for i, idx in enumerate(selected_indices):
                            plt.subplot(3, 3, i + 1)
                            plt.imshow(batch_x_reconstructions[idx], cmap='gray')
                            plt.title(f"Reconstructed Class {class_idx} Sample {i + 1}")
                            plt.axis('off')

    return max_val_loss_re, class_cdfs, centers, pca, kernels