import logging
import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Dense, Reshape, Dropout, BatchNormalization
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_2x2, plot_images_2x2


# Base output directory for results, plots, and saved models.
base_path = "../files/autoencoder/transformer/test"


# Configure logging: prints to the console by default; can also be directed to a file.
setup_logging()
# setup_logging(log_file=base_path + "/trace.txt")
# Named logger used throughout this module.
logger = logging.getLogger("[beam-trace-autocoder]")



# ============================================= 自编码器相关 =======================================
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, Conv2D, UpSampling2D, LayerNormalization, \
    GlobalAveragePooling1D, Add
from tensorflow.keras.models import Model
from official.nlp.modeling.layers import Attention  # 导入 Performer 的实现


# 定义一个 Performer 块
def performer_block(inputs, num_heads, ff_dim, dropout_rate=0.1):
    """
    Performer block: Performer self-attention followed by a position-wise
    feed-forward network, each wrapped with a residual connection and
    layer normalization.

    Args:
        inputs: Input tensor; per the caller this is (batch, num_patches, dim).
        num_heads: Number of attention heads.
        ff_dim: Hidden width of the feed-forward network.
        dropout_rate: Dropout rate applied after attention and after the
            feed-forward hidden layer (active only during training).

    Returns:
        Output tensor with the same shape as ``inputs``.
    """
    # Performer self-attention (linear-complexity attention approximation).
    attention_output = Attention(num_heads=num_heads, key_dim=inputs.shape[-1], use_performer=True)(inputs, inputs)
    # BUG FIX: dropout_rate was accepted but never used; apply dropout to the
    # attention output and to the FFN hidden activations, as in the standard
    # Transformer block.
    attention_output = Dropout(dropout_rate)(attention_output)
    attention_output = Add()([inputs, attention_output])  # residual connection
    attention_output = LayerNormalization(epsilon=1e-6)(attention_output)  # layer norm

    # Position-wise feed-forward network.
    ff_output = Dense(ff_dim, activation="relu")(attention_output)
    ff_output = Dropout(dropout_rate)(ff_output)
    ff_output = Dense(inputs.shape[-1])(ff_output)
    ff_output = Add()([attention_output, ff_output])  # residual connection
    ff_output = LayerNormalization(epsilon=1e-6)(ff_output)  # layer norm

    return ff_output


# 构建基于 Performer 的自编码器
def build_transformer_autoencoder(input_shape=(32, 32, 1), patch_size=4, num_heads=2, ff_dim=128,
                                  num_transformer_blocks=2, encoding_dim=128):
    """
    Build a Performer-based autoencoder.

    Args:
        input_shape: Input image shape, default (32, 32, 1). Spatial dims must
            be divisible by 4 (the decoder upsamples 2x twice).
        patch_size: Side length of each square patch, default 4.
        num_heads: Number of attention heads, default 2.
        ff_dim: Feed-forward network width, default 128.
        num_transformer_blocks: Number of Performer blocks, default 2.
        encoding_dim: Latent-code dimensionality, default 128.

    Returns:
        autoencoder: End-to-end autoencoder model.
        encoder: Image -> latent-vector model.
        decoder: Latent-vector -> image model (standalone, usable on its own).

    Raises:
        ValueError: If the spatial dimensions are not divisible by 4.
    """
    # Input layer.
    input_layer = Input(shape=input_shape)

    # Split the input image into non-overlapping patches.
    patches = tf.image.extract_patches(
        images=input_layer,
        sizes=[1, patch_size, patch_size, 1],  # patch size
        strides=[1, patch_size, patch_size, 1],  # patch stride
        rates=[1, 1, 1, 1],  # sampling rate
        padding='VALID'  # no padding
    )
    patches = Reshape((-1, patch_size * patch_size * input_shape[-1]))(patches)  # flatten patches

    # Linear projection to encoding_dim.
    patches = Dense(encoding_dim)(patches)

    # Performer encoder stack.
    for _ in range(num_transformer_blocks):
        patches = performer_block(patches, num_heads, ff_dim)

    # Global average pooling over the patch sequence -> latent vector.
    encoded = GlobalAveragePooling1D()(patches)

    # --- Decoder, built as a standalone sub-model -------------------------
    # BUG FIX 1: the original decoder started from (patch_size, patch_size);
    # after two 2x upsamplings its output was (4*patch_size, 4*patch_size),
    # which generally does NOT equal input_shape (e.g. 16x16 vs 32x32 with the
    # defaults). Start from (H/4, W/4) so the reconstruction matches the input.
    # BUG FIX 2: the original created Model(encoded, decoded) from an
    # intermediate tensor, which the TF2 functional API rejects; the decoder
    # now has its own Input so it can run from a latent vector alone.
    h, w, c = input_shape
    if h % 4 != 0 or w % 4 != 0:
        raise ValueError("input_shape spatial dimensions must be divisible by 4")

    latent_input = Input(shape=(encoding_dim,))
    x = Dense((h // 4) * (w // 4) * c)(latent_input)  # map latent vector to seed feature map
    x = Reshape((h // 4, w // 4, c))(x)  # seed spatial shape
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)  # conv layer
    x = UpSampling2D((2, 2))(x)  # upsample to (H/2, W/2)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)  # conv layer
    x = UpSampling2D((2, 2))(x)  # upsample to (H, W)
    decoder_output = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)  # output layer
    decoder = Model(latent_input, decoder_output)

    # Full autoencoder: encoder path feeding the decoder sub-model.
    decoded = decoder(encoded)
    autoencoder = Model(input_layer, decoded)

    # Encoder model.
    encoder = Model(input_layer, encoded)

    return autoencoder, encoder, decoder


# ============================================= 主流程相关 =======================================
def get_traces_phaseBits(bit_num):
    """Generate beam-pointing traces and their phase matrices (radians and bits).

    Args:
        bit_num: Number of quantization bits for the phase matrices.

    Returns:
        Tuple (traces, phaseRads, phaseBits), one entry per generated pointing.
    """
    phi_start, phi_end = 0, 1
    theta_start, theta_end = 0, 90
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")

    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end, 1):
        # Scan along theta at this phi: (theta_start, phi) --> (theta_end, phi).
        scan = create_scan_traces_theta(theta_start, theta_end, phi)
        # Phase matrices in radians for each pointing.
        rads = traces_2_phaseRads(scan)
        # Quantize the phases to bit_num bits.
        bits = phases_rad_2_bit(rads, bit_num)
        # Accumulate results.
        traces.extend(scan)
        phaseRads.extend(rads)
        phaseBits.extend(bits)

    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def evaluate_performance(originals, predictions, original_shape):
    """Compute per-sample element-match accuracy statistics.

    Args:
        originals: Iterable of ground-truth arrays.
        predictions: Iterable of predicted arrays, paired with ``originals``.
        original_shape: Shape used to re-interpret a prediction whose
            flattened length differs from its original.

    Returns:
        Dict with 'max', 'min', 'mean', and 'std' of the per-sample accuracies.
    """
    per_sample = []
    for truth, guess in zip(originals, predictions):
        truth_vec = truth.flatten()
        # Flatten multi-dimensional predictions; 1-D ones are used as-is.
        guess_vec = guess if guess.ndim <= 1 else guess.flatten()

        # On a length mismatch, reinterpret the prediction in the original shape.
        if len(truth_vec) != len(guess_vec):
            guess_vec = guess.reshape(original_shape).flatten()

        per_sample.append(np.mean(truth_vec == guess_vec))

    return {
        'max': np.max(per_sample),
        'min': np.min(per_sample),
        'mean': np.mean(per_sample),
        'std': np.std(per_sample),
    }


def save_results(base_dir, original_arrays, encoded_phaseBits, decoded_arrays, shape, bit_num):
    """Compare original vs. decoded phase-bit matrices; log accuracies and plot samples.

    Args:
        base_dir: Base directory for this dataset's outputs.
        original_arrays: Ground-truth phase-bit matrices.
        encoded_phaseBits: Latent codes (currently unused beyond iteration pairing).
        decoded_arrays: Decoded phase-bit matrices, paired with the originals.
        shape: Matrix shape (kept for interface compatibility).
        bit_num: Quantization bit count, forwarded to pattern computation.

    Returns:
        Dict with 'max', 'min', 'mean', 'std' of the per-sample accuracies.
    """
    dir_compare = base_dir + "/compare/"  # reserved for optional image dumps

    accuracies = []
    total_correct, total_elements = 0, 0

    for idx, (orig, enc, dec) in enumerate(zip(original_arrays, encoded_phaseBits, decoded_arrays)):
        # Per-sample element-match accuracy.
        matches = np.sum(orig == dec)
        total_correct += matches
        total_elements += orig.size
        accuracy = matches / orig.size
        accuracies.append(accuracy)

        # Detailed dump for the first three samples only.
        if idx < 3:
            logger.info(f"Sample {idx}: Accuracy = {accuracy:.2%}")
            logger.info(f"Original phaseBit {idx}:\n{orig}")
            logger.info(f"Decoded phaseBit {idx}:\n{dec}")
            logger.info("\n")

        # Every 10th sample: compute radiation pattern, pointing, and PSLL, then plot.
        if idx % 10 == 0:
            pattern_orig, point_orig, psll_orig = phaseBit_2_pattern_point(orig, bit_num)
            pattern_dec, point_dec, psll_dec = phaseBit_2_pattern_point(dec, bit_num)

            plot_images_2x2(orig, dec, pattern_orig, pattern_dec,
                            f"Accuracy = {accuracy:.2%}, "
                            f"phaseBit(origin):[{point_orig[1]}]-[{psll_orig[0]:.2f},{psll_orig[1]}], "
                            f"phaseBit(decoder):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]")

    # Aggregate statistics for this dataset.
    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies),
    }
    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write each statistic to ``file_path`` as a 'Name: value' line."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


def save_models(encoder, decoder, model_dir):
    """Persist the encoder and decoder as HDF5 files under ``model_dir``."""
    os.makedirs(model_dir, exist_ok=True)
    for filename, model in (('encoder.h5', encoder), ('decoder.h5', decoder)):
        model.save(os.path.join(model_dir, filename))


def load_models(model_dir='/files'):
    """Load the previously saved encoder/decoder HDF5 models from ``model_dir``."""
    encoder, decoder = (
        load_model(os.path.join(model_dir, filename))
        for filename in ('encoder.h5', 'decoder.h5')
    )
    return encoder, decoder


def main(path_dir, bit_num):
    """Train the Performer autoencoder on generated phase-bit matrices and evaluate it.

    Args:
        path_dir: Base output directory for per-dataset results.
        bit_num: Phase quantization bit count.
    """
    # Generate the dataset of traces and quantized phase matrices.
    traces, phaseRads, phaseBits = get_traces_phaseBits(bit_num)

    # Reshape each phase-bit matrix to a (64, 64, 1) image tensor; keep 0/1 values.
    X = np.array([arr.reshape(64, 64, 1) for arr in phaseBits])
    X = X.astype('float32')

    # Split into training and test sets (80% / 20%).
    X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
    logger.info(f"训练集大小: {X_train.shape[0]}, 测试集大小: {X_test.shape[0]}")

    # Build the transformer autoencoder.
    input_shape = (64, 64, 1)
    patch_size = 8
    encoding_dim = 256  # latent dimensionality; tune as needed
    # BUG FIX: the original call passed encoding_dim (256) positionally into
    # the num_heads slot (third positional parameter of
    # build_transformer_autoencoder); use keyword arguments instead.
    autoencoder, encoder, decoder = build_transformer_autoencoder(
        input_shape=input_shape, patch_size=patch_size, encoding_dim=encoding_dim)
    # Print the model structures.
    autoencoder.summary()
    encoder.summary()
    decoder.summary()

    logger.info(f"encoding_dim={encoding_dim}")

    # Compile the autoencoder (MSE loss over the 0/1 reconstruction).
    autoencoder.compile(optimizer='adam', loss='mse')
    # autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss=BinaryCrossentropy())

    # Train on the training set only, validating on the held-out test set.
    autoencoder.fit(X_train, X_train,
                    epochs=50,
                    batch_size=32,
                    shuffle=True,
                    validation_data=(X_test, X_test))

    # Run predictions on both the training and test sets.
    datasets = {'train': X_train, 'test': X_test}
    all_accuracies = {}

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        # Encode then decode.
        encoded_phaseBits = encoder.predict(dataset)
        decoded_probs = decoder.predict(encoded_phaseBits)

        # Threshold probabilities back to 0/1 bits.
        decoded_phaseBits = (decoded_probs > 0.5).astype(np.int32)

        # Restore the original 2-D matrix shape.
        decoded_arrays = [arr.reshape(64, 64) for arr in decoded_phaseBits]
        original_arrays = [arr.reshape(64, 64) for arr in dataset]

        # Save results and collect accuracy statistics.
        stats = save_results(path_dir + f'/dataset_{name}',
                             original_arrays, encoded_phaseBits, decoded_arrays, (64, 64), bit_num)
        all_accuracies[name] = stats

    # # Persist statistics.
    # save_statistics({
    #     'train_max': all_accuracies['train']['max'],
    #     'train_min': all_accuracies['train']['min'],
    #     'train_mean': all_accuracies['train']['mean'],
    #     'train_std': all_accuracies['train']['std'],
    #     'test_max': all_accuracies['test']['max'],
    #     'test_min': all_accuracies['test']['min'],
    #     'test_mean': all_accuracies['test']['mean'],
    #     'test_std': all_accuracies['test']['std']
    # }, path_dir + "/result.txt")
    #
    # # Persist the models.
    # save_models(encoder, decoder, path_dir)


# Script entry point: train and evaluate with 1-bit phase quantization.
if __name__=="__main__":
    main(base_path, bit_num = 1)