import tensorflow as tf
from tensorflow.keras import layers, Model

class CFM(Model):
    """Cross-feature Fusion Module (CFM).

    Fuses a time-domain feature map and a frequency-domain feature map with
    bidirectional scaled dot-product cross-attention, then adds a residual
    connection from the raw (pre-normalization) inputs and concatenates the
    two modalities along the feature axis.

    NOTE(review): the residual addition and the final concat require both
    inputs to have the same sequence length and a last dimension equal to
    ``attention_dim`` — confirm against callers.
    """

    def __init__(self, attention_dim, dropout_rate=0.3):
        """
        Args:
            attention_dim: Output size of the Q/K/V dense projections; also
                the scaling denominator sqrt(attention_dim) of the attention.
            dropout_rate: Rate applied both to the attention weights and to
                the fused output.
        """
        super().__init__()
        self.attention_dim = attention_dim

        # Normalization layer.
        # NOTE(review): a single LayerNormalization instance is shared by both
        # modalities, tying its gamma/beta across time and frequency features.
        # If the modalities should be normalized independently, use two
        # separate instances — kept shared here to preserve the original design.
        self.layerNormalization = layers.LayerNormalization(axis=-1, epsilon=1e-6)

        # Query/Key/Value projections for the time-domain branch.
        self.query_dense_t = layers.Dense(attention_dim)
        self.key_dense_t = layers.Dense(attention_dim)
        self.value_dense_t = layers.Dense(attention_dim)

        # Query/Key/Value projections for the frequency-domain branch.
        self.query_dense_f = layers.Dense(attention_dim)
        self.key_dense_f = layers.Dense(attention_dim)
        self.value_dense_f = layers.Dense(attention_dim)

        # Softmax normalizes attention scores into weights over key positions.
        self.softmax = layers.Softmax(axis=-1)

        # Shared Dropout layer (used on attention weights and on the output).
        self.dropout = layers.Dropout(dropout_rate)

    def call(self, T_feature, F_feature, training=None):
        """Fuse time- and frequency-domain features via cross-attention.

        Args:
            T_feature: Time-domain features, shape (batch, L, attention_dim).
            F_feature: Frequency-domain features, same shape as ``T_feature``.
            training: Optional bool forwarded to the Dropout layers so that
                dropout is active only during training. Defaults to ``None``
                (Keras resolves it from the call context), which keeps the
                signature backward-compatible.

        Returns:
            Fused tensor of shape (batch, L, 2 * attention_dim).
        """
        # Pre-normalize each modality (shared LayerNorm, see __init__ note).
        time_feature = self.layerNormalization(T_feature)
        frequency_feature = self.layerNormalization(F_feature)

        # Project the time-domain features to Q/K/V.
        query_t = self.query_dense_t(time_feature)
        key_t = self.key_dense_t(time_feature)
        value_t = self.value_dense_t(time_feature)

        # Project the frequency-domain features to Q/K/V.
        query_f = self.query_dense_f(frequency_feature)
        key_f = self.key_dense_f(frequency_feature)
        value_f = self.value_dense_f(frequency_feature)

        # Shared scaling factor sqrt(d) for both attention directions.
        scale = tf.sqrt(tf.cast(self.attention_dim, tf.float32))

        # Time branch: time queries attend over frequency keys.
        attention_scores_t = tf.matmul(query_t, key_f, transpose_b=True) / scale
        attention_weights_t = self.softmax(attention_scores_t)

        # Frequency branch: frequency queries attend over time keys.
        attention_scores_f = tf.matmul(query_f, key_t, transpose_b=True) / scale
        attention_weights_f = self.softmax(attention_scores_f)

        # Dropout on the attention weights (active only when training).
        attention_weights_t = self.dropout(attention_weights_t, training=training)
        attention_weights_f = self.dropout(attention_weights_f, training=training)

        # Weighted sum of the values from the *key side* of each direction.
        # BUG FIX: the columns of attention_weights_t index frequency
        # positions (they come from key_f), so they must aggregate value_f —
        # the original code aggregated value_t, weighting time-position values
        # with frequency-position weights (only shape-valid because both
        # sequences share a length, and positionally misaligned even then).
        # Symmetrically for the frequency branch.
        attention_output_time = tf.matmul(attention_weights_t, value_f)
        attention_output_freq = tf.matmul(attention_weights_f, value_t)

        # Residual fusion: concatenate the raw inputs along the feature axis
        # and add the modality-aligned cross-attention outputs.
        attention_output = (
            tf.concat([T_feature, F_feature], axis=-1)
            + tf.concat([attention_output_time, attention_output_freq], axis=-1)
        )

        # Dropout on the fused output (active only when training).
        return self.dropout(attention_output, training=training)