from sre_parse import IN_UNI_IGNORE  # NOTE(review): unused auto-import of a private stdlib module; safe to remove

import tensorflow as tf
from tensorflow.keras.layers import (  # network layers
    Concatenate,
    Dense,
    Dropout,
    Embedding,
    Flatten,
    Input,
    Lambda,
)
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2  # regularizers

class DeepFM(Model):  # subclasses keras Model
    """DeepFM model: an FM component (first- and second-order feature
    interactions) plus a DNN component, sharing one set of embeddings.

    Expected input is a single 2-D tensor of shape
    ``(batch, n_dense + n_sparse)`` with the dense columns first, followed
    by the integer-coded sparse columns.

    :param feature_columns: tuple ``(dense_feats, sparse_feats)``; each sparse
        feat is a dict with keys ``'name'``, ``'vocab_size'``, ``'embed_dim'``
        (all sparse feats must share the same ``embed_dim`` — required by the
        ``tf.stack`` in the FM second-order term)
    :param hidden_units: DNN hidden-layer sizes
    :param dropout_rate: dropout rate applied after each DNN hidden layer
    :param embed_reg: L2 regularization coefficient for the embedding tables
    :param w_reg: L2 regularization coefficient for the linear (first-order) weights
    """
    def __init__(self, feature_columns, hidden_units=(200, 200, 200), dropout_rate=0.5, embed_reg=1e-4, w_reg=1e-4):
        super(DeepFM, self).__init__()
        self.dense_feat_columns, self.sparse_feat_columns = feature_columns

        # First-order (linear) part — applied to the dense features only.
        self.dense_layer = Dense(1, kernel_regularizer=l2(w_reg), activation=None)

        # One embedding table per sparse feature (shared by the FM
        # second-order term and the DNN input).
        self.embed_layers = {
            feat['name']: Embedding(
                input_dim=feat['vocab_size'],
                output_dim=feat['embed_dim'],
                embeddings_regularizer=l2(embed_reg),
            )
            for feat in self.sparse_feat_columns
        }

        # DNN part.
        self.dnn_network = [Dense(units, activation='relu') for units in hidden_units]
        self.dropout = Dropout(dropout_rate)
        self.dense_final = Dense(1, activation=None)
        # FIX: build Flatten once here — the original created a new Flatten()
        # inside call(), allocating a fresh layer object on every forward pass.
        self.flatten = Flatten()

    def call(self, inputs, training=None):
        """Forward pass; returns sigmoid(FM1 + FM2 + DNN), shape (batch, 1)."""
        # Split the input: dense columns first, then sparse columns.
        num_dense = len(self.dense_feat_columns)
        dense_inputs = inputs[:, :num_dense]
        sparse_inputs = inputs[:, num_dense:]

        # ======================= FM part ==============================
        # First-order term: linear regression over the dense features.
        fm_first_order = self.dense_layer(dense_inputs)

        # Second-order term: pairwise feature interactions via embeddings.
        # Keras Embedding casts non-integer inputs to int32 internally.
        embeddings = [
            self.embed_layers[feat['name']](sparse_inputs[:, i])
            for i, feat in enumerate(self.sparse_feat_columns)
        ]

        # (sum of embeddings)^2 minus sum of (embeddings^2).
        # FIX: stack once and reuse — the original called tf.stack twice.
        stacked = tf.stack(embeddings, axis=1)  # (batch, n_fields, embed_dim)
        sum_square = tf.square(tf.reduce_sum(stacked, axis=1))
        square_sum = tf.reduce_sum(tf.square(stacked), axis=1)
        fm_second_order = 0.5 * tf.reduce_sum(sum_square - square_sum, axis=1, keepdims=True)

        # ====================== DNN part ================================
        # Concatenate the flattened embeddings with the dense features.
        dnn_input = tf.concat([self.flatten(tf.concat(embeddings, axis=1)), dense_inputs], axis=-1)

        # Run through the DNN stack (dropout after every hidden layer).
        dnn_output = dnn_input
        for layer in self.dnn_network:
            dnn_output = layer(dnn_output)
            dnn_output = self.dropout(dnn_output, training=training)
        dnn_output = self.dense_final(dnn_output)

        # ==================== output part ================================
        outputs = tf.nn.sigmoid(fm_first_order + fm_second_order + dnn_output)
        return outputs

    def build_graph(self, input_shape):
        """Build a functional Model mirroring call(), for summary()/plotting.

        Raw TF ops are wrapped in Keras Lambda layers so the functional
        API can trace them.
        """
        input_ = Input(shape=input_shape)

        num_dense = len(self.dense_feat_columns)
        dense_inputs = input_[:, :num_dense]
        sparse_inputs = input_[:, num_dense:]

        # Embedding lookup per sparse feature.
        embeddings = [
            self.embed_layers[feat['name']](sparse_inputs[:, i])
            for i, feat in enumerate(self.sparse_feat_columns)
        ]

        def fm_second_order_fn(x):
            # Same "square of sum minus sum of squares" identity as call().
            stacked = tf.stack(x, axis=1)
            sum_square = tf.square(tf.reduce_sum(stacked, axis=1))
            square_sum = tf.reduce_sum(tf.square(stacked), axis=1)
            return 0.5 * tf.reduce_sum(sum_square - square_sum, axis=1, keepdims=True)

        fm_first_order = self.dense_layer(dense_inputs)
        fm_second_order = Lambda(fm_second_order_fn)(embeddings)

        # DNN input: flattened embeddings + dense features.
        concat_embed = Lambda(lambda x: tf.concat(x, axis=1))(embeddings)
        flatten_embed = Flatten()(concat_embed)
        dnn_input = Concatenate(axis=-1)([flatten_embed, dense_inputs])

        # DNN forward.
        dnn_output = dnn_input
        for layer in self.dnn_network:
            dnn_output = layer(dnn_output)
            dnn_output = self.dropout(dnn_output)
        dnn_output = self.dense_final(dnn_output)

        # Output: sigmoid over the sum of the three components.
        outputs = Lambda(lambda x: tf.nn.sigmoid(x[0] + x[1] + x[2]))([fm_first_order, fm_second_order, dnn_output])

        return Model(inputs=[input_], outputs=outputs)

# Smoke test / usage demo.
# FIX: guarded with __main__ so importing this module no longer builds a
# model and prints a summary as a side effect.
if __name__ == "__main__":
    # Example feature columns.
    dense_features = [{'name': 'age'}, {'name': 'salary'}]  # dense features
    sparse_features = [
        {'name': 'gender', 'vocab_size': 3, 'embed_dim': 8},
        {'name': 'occupation', 'vocab_size': 10, 'embed_dim': 8},
        {'name': 'city', 'vocab_size': 20, 'embed_dim': 8},
    ]

    # Build the model.
    model = DeepFM(feature_columns=(dense_features, sparse_features),
                   hidden_units=(128, 64),
                   dropout_rate=0.2)

    # Build the functional graph (input dim: 2 dense + 3 sparse features).
    model.build_graph(input_shape=(5,)).summary()








        
