import argparse
import numpy as np
import os
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Dense, Reshape, Dropout, BatchNormalization
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split

from util.util_log import setup_logging

from multi_beam_trace.util_create_trace import create_scan_traces_theta, \
    create_scan_traces_phi, create_scan_traces_theta_phi

from beam_trace.util_phase_pattern import traces_2_phaseRads, phases_rad_2_bit, phaseBit_2_pattern_point
from beam_trace.util_plot import save_images_2x2


# ============================================= 注意力相关 =======================================
from tensorflow.keras.layers import GlobalAveragePooling2D, Reshape, Dense, multiply

def channel_attention(input_feature, ratio=8):
    """Squeeze-and-Excitation style channel attention.

    Globally average-pools the feature map, passes the pooled vector through
    a two-layer bottleneck MLP (reduction factor `ratio`, sigmoid output),
    and rescales the input channels by the resulting weights.

    Args:
        input_feature: 4-D feature map (batch, H, W, channels).
        ratio: reduction factor of the bottleneck Dense layer.

    Returns:
        The input feature map reweighted per channel.
    """
    n_channels = input_feature.shape[-1]

    # Squeeze: collapse spatial dims, keep a (1, 1, C) map for broadcasting.
    weights = GlobalAveragePooling2D()(input_feature)
    weights = Reshape((1, 1, n_channels))(weights)

    # Excitation: bottleneck MLP producing per-channel gates in (0, 1).
    weights = Dense(n_channels // ratio, activation='relu',
                    kernel_initializer='he_normal', use_bias=True)(weights)
    weights = Dense(n_channels, activation='sigmoid',
                    kernel_initializer='he_normal', use_bias=True)(weights)

    # Scale: apply the gates to every spatial position.
    return multiply([input_feature, weights])

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, concatenate, Activation

def spatial_attention(input_feature):
    """CBAM-style spatial attention.

    Builds a 2-channel map from channel-wise mean and max pooling, convolves
    it with a 7x7 kernel into a single sigmoid mask, and multiplies the mask
    back onto the input.

    Args:
        input_feature: 4-D feature map (batch, H, W, channels).

    Returns:
        The input feature map reweighted per spatial position.
    """
    pooled = concatenate(
        [tf.reduce_mean(input_feature, axis=-1, keepdims=True),
         tf.reduce_max(input_feature, axis=-1, keepdims=True)],
        axis=-1)
    # 7x7 receptive field is the kernel size recommended by the CBAM paper.
    mask = Conv2D(1, (7, 7), padding='same', activation='sigmoid')(pooled)
    return multiply([input_feature, mask])


# ============================================= 自编码器相关 =======================================
def build_conv2d_se_autoencoder(input_shape=(64, 64, 1), encoding_dim=256):
    """Build a convolutional autoencoder with channel (SE) attention.

    Three conv + attention + 2x2-pooling stages encode the input down to a
    dense bottleneck; the decoder mirrors them with upsampling. The output
    is a single-channel sigmoid map matching the input height/width.

    Args:
        input_shape: (height, width, channels); height and width must be
            divisible by 8 (three 2x2 poolings).
        encoding_dim: size of the latent bottleneck vector.

    Returns:
        (autoencoder, encoder, decoder) Keras models sharing the same layers.

    Raises:
        ValueError: if height or width is not divisible by 8.
    """
    height, width = input_shape[0], input_shape[1]
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(
            f"input height/width must be divisible by 8, got {input_shape}")

    input_layer = Input(shape=input_shape)

    # --- Encoder: conv -> channel attention -> downsample, three times ---
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_layer)
    x = channel_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu')(x)

    # --- Decoder: project back to the pooled grid, then upsample ---
    # Grid size is derived from input_shape instead of being hard-coded to
    # 8x8, so shapes other than 64x64 also work (was a latent bug before).
    grid_h, grid_w = height // 8, width // 8
    x = Dense(grid_h * grid_w * 128, activation='relu')(encoded)
    x = Reshape((grid_h, grid_w, 128))(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = UpSampling2D((2, 2))(x)

    # Single-channel sigmoid output (the phase bits are 0/1 valued).
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    # NOTE(review): building the decoder from the intermediate `encoded`
    # tensor relies on Keras support for models constructed from
    # intermediate KerasTensors (TF >= 2.4); on older versions rebuild the
    # decoder from a fresh Input instead.
    decoder = Model(encoded, decoded)

    return autoencoder, encoder, decoder


def build_conv2d_cbam_autoencoder(input_shape=(64, 64, 1), encoding_dim=256):
    """Build a convolutional autoencoder with spatial attention.

    Same topology as `build_conv2d_se_autoencoder` but with spatial
    attention after every conv stage instead of channel attention.

    Args:
        input_shape: (height, width, channels); height and width must be
            divisible by 8 (three 2x2 poolings).
        encoding_dim: size of the latent bottleneck vector.

    Returns:
        (autoencoder, encoder, decoder) Keras models sharing the same layers.

    Raises:
        ValueError: if height or width is not divisible by 8.
    """
    height, width = input_shape[0], input_shape[1]
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(
            f"input height/width must be divisible by 8, got {input_shape}")

    input_layer = Input(shape=input_shape)

    # --- Encoder: conv -> spatial attention -> downsample, three times ---
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_layer)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu')(x)

    # --- Decoder: project back to the pooled grid, then upsample ---
    # Grid size derived from input_shape rather than hard-coded 8x8, so the
    # model also works for inputs other than 64x64.
    grid_h, grid_w = height // 8, width // 8
    x = Dense(grid_h * grid_w * 128, activation='relu')(encoded)
    x = Reshape((grid_h, grid_w, 128))(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    # Single-channel sigmoid output (the phase bits are 0/1 valued).
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    # NOTE(review): Model(encoded, decoded) relies on Keras support for
    # models built from intermediate KerasTensors (TF >= 2.4).
    decoder = Model(encoded, decoded)

    return autoencoder, encoder, decoder


def build_conv2d_se_cbam_autoencoder(input_shape=(64, 64, 1), encoding_dim=256):
    """Build a convolutional autoencoder with CBAM attention (channel then
    spatial) after every conv stage.

    Args:
        input_shape: (height, width, channels); height and width must be
            divisible by 8 (three 2x2 poolings).
        encoding_dim: size of the latent bottleneck vector.

    Returns:
        (autoencoder, encoder, decoder) Keras models sharing the same layers.

    Raises:
        ValueError: if height or width is not divisible by 8.
    """
    height, width = input_shape[0], input_shape[1]
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(
            f"input height/width must be divisible by 8, got {input_shape}")

    input_layer = Input(shape=input_shape)

    # --- Encoder: conv -> channel attn -> spatial attn -> downsample ---
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_layer)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu')(x)

    # --- Decoder: project back to the pooled grid, then upsample ---
    # Grid size derived from input_shape rather than hard-coded 8x8, so the
    # model also works for inputs other than 64x64.
    grid_h, grid_w = height // 8, width // 8
    x = Dense(grid_h * grid_w * 128, activation='relu')(encoded)
    x = Reshape((grid_h, grid_w, 128))(x)

    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = channel_attention(x)
    x = spatial_attention(x)
    x = UpSampling2D((2, 2))(x)

    # Single-channel sigmoid output (the phase bits are 0/1 valued).
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_layer, decoded)
    encoder = Model(input_layer, encoded)
    # NOTE(review): Model(encoded, decoded) relies on Keras support for
    # models built from intermediate KerasTensors (TF >= 2.4).
    decoder = Model(encoded, decoded)

    return autoencoder, encoder, decoder


# ============================================= 主流程相关 =======================================
def get_traces_phaseBits(bit_num, theta_start, theta_end, phi_start, phi_end):
    """Generate beam-pointing traces and their quantized phase matrices.

    For every integer phi in [phi_start, phi_end), a theta scan trace is
    generated, converted to a phase matrix (radians), and quantized to
    bit_num bits.

    Returns:
        (traces, phaseRads, phaseBits) -- three parallel lists accumulated
        over all phi values.
    """
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")
    traces, phaseRads, phaseBits = [], [], []
    for phi in range(phi_start, phi_end):
        # Pointing trace along theta for this phi, e.g. (30, phi) -> (35, phi).
        batch_traces = create_scan_traces_theta(theta_start, theta_end, phi)
        # Phase matrices in radians, then quantized to bit_num bits.
        batch_rads = traces_2_phaseRads(batch_traces)
        batch_bits = phases_rad_2_bit(batch_rads, bit_num)
        traces.extend(batch_traces)
        phaseRads.extend(batch_rads)
        phaseBits.extend(batch_bits)
    logger.info(f"len of traces: {len(traces)}")
    return traces, phaseRads, phaseBits


def evaluate_performance(originals, predictions, original_shape):
    """Compute per-sample element-wise accuracy between originals and
    predictions and summarize it.

    Args:
        originals: sequence of ground-truth arrays.
        predictions: sequence of predicted arrays; each must contain the
            same number of elements as its original (any shape).
        original_shape: nominal sample shape; kept for interface
            compatibility and used in the mismatch error message.

    Returns:
        dict with 'max', 'min', 'mean', 'std' of the per-sample accuracies.

    Raises:
        ValueError: if no samples are given, or a prediction's element count
            does not match its original's.
    """
    if len(originals) == 0:
        # np.max/np.min on an empty list raised a cryptic error before.
        raise ValueError("evaluate_performance: received no samples")

    accuracies = []
    for orig, pred in zip(originals, predictions):
        orig_flat = np.asarray(orig).ravel()
        pred_flat = np.asarray(pred).ravel()
        if orig_flat.size != pred_flat.size:
            # The old fallback pred.reshape(original_shape) could never fix
            # a size mismatch (reshape cannot change the element count), so
            # fail loudly with a clear message instead.
            raise ValueError(
                f"prediction size {pred_flat.size} does not match original "
                f"size {orig_flat.size} (expected shape {original_shape})")
        accuracies.append(np.mean(orig_flat == pred_flat))

    return {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }


def save_results(base_dir, original_arrays, encoded_phaseBits, decoded_arrays, shape, bit_num):
    """Compare original vs. decoded phase-bit matrices, log per-sample
    accuracy, save comparison figures for every 10th sample, and return
    accuracy statistics.

    Args:
        base_dir: output directory; figures are written to <base_dir>/compare/.
        original_arrays: ground-truth phase-bit matrices.
        encoded_phaseBits: latent vectors (not used here; kept for interface
            compatibility with callers).
        decoded_arrays: reconstructed phase-bit matrices, same shape as the
            originals.
        shape: nominal sample shape (unused; kept for interface compatibility).
        bit_num: phase quantization bit count, forwarded to pattern synthesis.

    Returns:
        dict with 'max', 'min', 'mean', 'std' of per-sample accuracies; all
        zeros when no samples were given.
    """
    dir_compare = base_dir + "/compare/"

    total_correct = 0
    total_elements = 0
    accuracies = []

    for idx, (orig, _enc, dec) in enumerate(zip(original_arrays, encoded_phaseBits, decoded_arrays)):
        # Element-wise reconstruction accuracy for this sample.
        correct = np.sum(orig == dec)
        total_correct += correct
        total_elements += orig.size

        accuracy = correct / orig.size
        accuracies.append(accuracy)

        if idx < 3:  # detailed comparison logged for the first 3 samples only
            logger.info(f"Sample {idx}: Accuracy = {accuracy:.2%}")
            logger.info(f"Original phaseBit {idx}:\n{orig}")
            logger.info(f"Decoded phaseBit {idx}:\n{dec}")
            logger.info("\n")

        if idx % 10 == 0:
            # Radiation pattern, beam pointing and PSLL for both versions.
            pattern_orig, point_orig, psll_orig = phaseBit_2_pattern_point(orig, bit_num)
            pattern_dec, point_dec, psll_dec = phaseBit_2_pattern_point(dec, bit_num)

            # Save a 2x2 comparison figure (code matrices + patterns).
            save_images_2x2(dir_compare + str(idx) + ".jpg",
                            orig, f"phaseBit(origin):[{point_orig[1]}]-[{psll_orig[0]:.2f},{psll_orig[1]}]",
                            dec, f"phaseBit(decoder):[{point_dec[1]}]-[{psll_dec[0]:.2f},{psll_dec[1]}]",
                            pattern_orig, "pattern(origin)", pattern_dec, "pattern(decoder)",
                            f"Accuracy = {accuracy:.2%}")

    # Guard against empty input: the original code divided by zero and then
    # called np.max on an empty list here.
    if total_elements == 0:
        logger.info("save_results: no samples to evaluate")
        return {'max': 0.0, 'min': 0.0, 'mean': 0.0, 'std': 0.0}

    overall_accuracy = total_correct / total_elements
    logger.info(f"\n总体准确率: {overall_accuracy:.2%}")
    logger.info(f"准确率统计:")

    stats = {
        'max': np.max(accuracies),
        'min': np.min(accuracies),
        'mean': np.mean(accuracies),
        'std': np.std(accuracies)
    }

    for metric, value in stats.items():
        logger.info(f"{metric.capitalize()}: {value:.4f}")

    return stats


def save_statistics(statistics, file_path):
    """Write each statistic to file_path as one 'Key: value' line."""
    lines = [f'{key.capitalize()}: {value}\n' for key, value in statistics.items()]
    with open(file_path, 'w') as f:
        f.writelines(lines)


def save_models(encoder, decoder, model_dir):
    """Persist the encoder and decoder as HDF5 files under model_dir,
    creating the directory if needed."""
    os.makedirs(model_dir, exist_ok=True)
    for model, filename in ((encoder, 'encoder.h5'), (decoder, 'decoder.h5')):
        model.save(os.path.join(model_dir, filename))


def load_models(model_dir='/files'):
    """Load the encoder/decoder pair previously written by save_models."""
    encoder, decoder = (
        load_model(os.path.join(model_dir, name))
        for name in ('encoder.h5', 'decoder.h5')
    )
    return encoder, decoder


def main(path_dir, bit_num, theta_start, theta_end, phi_start, phi_end):
    """Run the full experiment: build the phase-bit dataset, train the
    SE+CBAM convolutional autoencoder, evaluate reconstruction accuracy on
    the train/test splits, and save figures, statistics and the trained
    encoder/decoder under `path_dir`.
    """
    # Generate the dataset: scan traces and their quantized phase matrices.
    traces, phaseRads, phaseBits = get_traces_phaseBits(bit_num, theta_start, theta_end, phi_start, phi_end)

    # Convert phaseBits to conv-autoencoder input format.
    # NOTE(review): assumes each phase-bit matrix has exactly 64*64 elements.
    X = np.array([arr.reshape(64, 64, 1) for arr in phaseBits])  # keep 2-D structure, add channel axis
    X = X.astype('float32')  # values stay 0/1

    # Split into training and test sets (80% train, 20% test).
    X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
    logger.info(f"训练集大小: {X_train.shape[0]}, 测试集大小: {X_test.shape[0]}")

    # Build the convolutional autoencoder.
    encoding_dim = 256  # latent dimension; adjust as needed
    # autoencoder, encoder, decoder = build_conv2d_se_autoencoder(encoding_dim=encoding_dim)
    # autoencoder, encoder, decoder = build_conv2d_cbam_autoencoder(encoding_dim=encoding_dim)
    autoencoder, encoder, decoder = build_conv2d_se_cbam_autoencoder(encoding_dim=encoding_dim)
    # Print the model summaries.
    autoencoder.summary()
    encoder.summary()
    decoder.summary()

    logger.info(f"encoding_dim={encoding_dim}")

    # Compile with binary cross-entropy: the targets are 0/1 phase bits.
    autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss=BinaryCrossentropy())

    # Train on the training set only; the test set is used for validation.
    autoencoder.fit(X_train, X_train,
                    epochs=50,  # more epochs for better convergence
                    batch_size=32,
                    shuffle=True,
                    validation_data=(X_test, X_test))  # monitor on the held-out set

    # Predict on both the training and the test split.
    datasets = {'train': X_train, 'test': X_test}
    all_accuracies = {}

    for name, dataset in datasets.items():
        logger.info(f"\n===== 在{name}上进行预测 =====")
        # Encode to the latent space, then decode back to probabilities.
        encoded_phaseBits = encoder.predict(dataset)
        decoded_probs = decoder.predict(encoded_phaseBits)  # decode straight from the latent vectors

        # Threshold the sigmoid probabilities back to 0/1 bits.
        decoded_phaseBits = (decoded_probs > 0.5).astype(np.int32)

        # Restore the original 64x64 sample shape.
        decoded_arrays = [arr.reshape(64, 64) for arr in decoded_phaseBits]
        original_arrays = [arr.reshape(64, 64) for arr in dataset]

        # Save comparison figures and collect accuracy statistics.
        stats = save_results(path_dir + f'/dataset_{name}',
                             original_arrays, encoded_phaseBits, decoded_arrays, (64, 64), bit_num)
        all_accuracies[name] = stats

    # Persist the summary statistics for both splits.
    save_statistics({
        'train_max': all_accuracies['train']['max'],
        'train_min': all_accuracies['train']['min'],
        'train_mean': all_accuracies['train']['mean'],
        'train_std': all_accuracies['train']['std'],
        'test_max': all_accuracies['test']['max'],
        'test_min': all_accuracies['test']['min'],
        'test_mean': all_accuracies['test']['mean'],
        'test_std': all_accuracies['test']['std']
    }, path_dir + "/result.txt")

    # Persist the trained encoder/decoder models.
    save_models(encoder, decoder, path_dir)


if __name__ == "__main__":
    # Command-line interface for the training run.
    parser = argparse.ArgumentParser(description="Process some parameters.")
    parser.add_argument("--base_path", type=str,
                        default="../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90)",
                        help="Base directory path (default: ../files/dissertation/chapter_3/tf-ae1d-bit-cnn2d-(1,90))")
    parser.add_argument("--bit_num", type=int, default=1, help="Number of bits (default: 1)")
    parser.add_argument("--theta_start", type=int, default=1, help="theta_start (default: 1)")
    parser.add_argument("--theta_end", type=int, default=60, help="theta_end (default: 60)")
    parser.add_argument("--phi_start", type=int, default=0, help="phi_start (default: 0)")
    parser.add_argument("--phi_end", type=int, default=360, help="phi_end (default: 360)")
    args = parser.parse_args()

    base_path = args.base_path
    bit_num = args.bit_num
    theta_start = args.theta_start
    theta_end = args.theta_end
    phi_start = args.phi_start
    phi_end = args.phi_end

    # `logger` is a module-level global consumed by the functions above;
    # all log output goes to <base_path>/trace.txt.
    logger = setup_logging(base_path + "/trace.txt")
    logger.info(f"Starting execution with base_path: {base_path}")
    logger.info(f"Using bit_num: {bit_num}")
    logger.info(f"theta_start={theta_start}, theta_end={theta_end}, phi_start={phi_start}, phi_end={phi_end}")

    main(path_dir=base_path, bit_num=bit_num,
         theta_start=theta_start, theta_end=theta_end,
         phi_start=phi_start, phi_end=phi_end)