# -*- coding: utf-8 -*-
from itertools import chain
import tensorflow as tf
from deepctr.inputs import SparseFeat, VarLenSparseFeat, combined_dnn_input, \
    create_embedding_matrix, embedding_lookup, \
    get_dense_input, varlen_embedding_lookup, get_varlen_pooling_list, mergeDict
from deepctr.layers.core import DNN
from deepctr.layers.utils import concat_func, add_func
from tensorflow import keras
from tensorflow.keras.layers import Concatenate, Flatten, Dense

DEFAULT_GROUP_NAME = "default_group"


def input_from_feature_columns(embedding_matrix_dict, features, feature_columns, support_dense=True,
                               support_group=False):
    """Look up embeddings for sparse / variable-length columns and gather dense values.

    Args:
        embedding_matrix_dict: mapping of feature name -> embedding layer.
        features: mapping of feature name -> input tensor.
        feature_columns: list of SparseFeat / VarLenSparseFeat / dense columns.
        support_dense: if False, raise when any dense feature is present.
        support_group: if True, return embeddings grouped by group name;
            otherwise flatten them into a single list.

    Returns:
        (group_embedding_dict_or_list, dense_value_list)
    """
    if feature_columns:
        sparse_cols = [fc for fc in feature_columns if isinstance(fc, SparseFeat)]
        varlen_cols = [fc for fc in feature_columns if isinstance(fc, VarLenSparseFeat)]
    else:
        sparse_cols, varlen_cols = [], []

    sparse_emb_dict = embedding_lookup(embedding_matrix_dict, features, sparse_cols)

    dense_values = get_dense_input(features, feature_columns)
    if dense_values and not support_dense:
        raise ValueError("DenseFeat is not supported in dnn_feature_columns")

    # Pool each variable-length sequence embedding into a fixed-size vector.
    seq_emb_dict = varlen_embedding_lookup(embedding_matrix_dict, features, varlen_cols)
    varlen_emb_dict = get_varlen_pooling_list(seq_emb_dict, features, varlen_cols)

    grouped = mergeDict(sparse_emb_dict, varlen_emb_dict)
    if not support_group:
        grouped = list(chain.from_iterable(grouped.values()))
    return grouped, dense_values


def get_linear_output(embedding_matrix_dict, features, feature_columns):
    """Build the first-order (linear) term input from 1-dimensional embeddings.

    Concatenates the per-field embeddings and removes the singleton field
    axis, yielding a 2D tensor.

    NOTE(review): dense features are currently ignored here — only the
    sparse embedding part contributes to the linear term.
    """
    linear_emb_list, _ = input_from_feature_columns(embedding_matrix_dict, features, feature_columns)
    concatenated = concat_func(linear_emb_list)
    return tf.squeeze(concatenated, axis=-2)
    # return concat_func([sparse_input, dense_input])


class FM(keras.layers.Layer):
    """Factorization Machine pairwise (order-2) feature interactions,
    without the linear term and bias.

    Computes ``0.5 * ((sum_f v_f)^2 - sum_f v_f^2)`` over the field axis,
    keeping the embedding dimension. Note that unlike the classic FM this
    does NOT reduce over the embedding dimension to a scalar.

      Input shape
        - 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.

      Output shape
        - 2D tensor with shape: ``(batch_size, embedding_size)``.

      References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self, **kwargs):
        super(FM, self).__init__(**kwargs)

    def __call__(self, inputs):
        # (sum over fields) squared -> (batch, K)
        square_of_sum = tf.math.square(tf.math.reduce_sum(inputs, axis=1))

        # sum over fields of the squares -> (batch, K)
        sum_of_square = tf.math.reduce_sum(tf.math.square(inputs), axis=1)

        # 0.5 * (square-of-sum - sum-of-square) gives the pairwise
        # interaction term per embedding dimension.
        second_order = 0.5 * tf.math.subtract(square_of_sum, sum_of_square)

        return second_order


class DeepFM(keras.layers.Layer):
    """DeepFM as a reusable Keras layer: first-order linear term + FM
    second-order interactions + DNN, projected to ``output_dim``.

    Output shape: ``(batch_size, 1, output_dim)``.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, fm_group=None,
                 dnn_hidden_units=(128, 128), output_dim=32,
                 l2_reg=0, init_std=0.0001, seed=1024, dnn_dropout=0, seq_mask_zero=True,
                 dnn_activation='relu', dnn_use_bn=False, use_fm=True, use_deep=True, **kwargs):
        assert (use_fm or use_deep)
        super(DeepFM, self).__init__(**kwargs)
        # Copy so the in-place _replace loop below does not mutate the
        # caller's list (the original code rebound elements of the list
        # object passed in by the caller).
        self.linear_feature_columns = list(linear_feature_columns)
        self.dnn_feature_columns = dnn_feature_columns
        # Avoid a mutable default argument; None means the default group.
        self.fm_group = [DEFAULT_GROUP_NAME] if fm_group is None else fm_group
        self.dnn_hidden_units = dnn_hidden_units
        self.output_dim = output_dim
        self.l2_reg = l2_reg
        self.init_std = init_std
        self.seed = seed
        self.dnn_dropout = dnn_dropout
        self.seq_mask_zero = seq_mask_zero
        self.dnn_activation = dnn_activation
        self.dnn_use_bn = dnn_use_bn
        self.use_fm = use_fm
        self.use_deep = use_deep

        # Embedding matrices for the second-order / deep part.
        self.embedding_matrix_dict = create_embedding_matrix(self.dnn_feature_columns, self.l2_reg, self.init_std,
                                                             self.seed,
                                                             prefix="two_order_", seq_mask_zero=self.seq_mask_zero)
        # The linear (first-order) part uses 1-dimensional embeddings.
        for i in range(len(self.linear_feature_columns)):
            if isinstance(self.linear_feature_columns[i], SparseFeat):
                self.linear_feature_columns[i] = self.linear_feature_columns[i]._replace(embedding_dim=1)
            elif isinstance(self.linear_feature_columns[i], VarLenSparseFeat):
                self.linear_feature_columns[i] = self.linear_feature_columns[i]._replace(
                    sparsefeat=self.linear_feature_columns[i].sparsefeat._replace(embedding_dim=1))
        self.one_order_embedding_matrix_dict = create_embedding_matrix(self.linear_feature_columns, self.l2_reg,
                                                                       self.init_std, self.seed, prefix="one_order_",
                                                                       seq_mask_zero=self.seq_mask_zero)
        self.fm = FM()
        self.dnn_1 = DNN(self.dnn_hidden_units, self.dnn_activation, self.l2_reg, self.dnn_dropout,
                         self.dnn_use_bn, self.seed)
        self.dnn_2 = Dense(self.output_dim)

    def __call__(self, inputs):
        group_embedding_dict, dense_value_list = input_from_feature_columns(self.embedding_matrix_dict, inputs,
                                                                            self.dnn_feature_columns,
                                                                            support_group=True)

        # Only build the branches that are actually enabled (the original
        # code always computed all three, wasting work).
        parts = []
        if self.use_fm:
            first_order = get_linear_output(self.one_order_embedding_matrix_dict, inputs,
                                            self.linear_feature_columns)
            second_order = add_func([self.fm(concat_func(v, axis=1))
                                     for k, v in group_embedding_dict.items() if k in self.fm_group])
            parts.extend([first_order, second_order])
        if self.use_deep:
            dnn_input = combined_dnn_input(list(chain.from_iterable(
                group_embedding_dict.values())), dense_value_list)
            parts.append(self.dnn_1(dnn_input))

        # Concatenate requires at least two inputs; a single enabled branch
        # passes through unchanged (matches the original behavior).
        output = parts[0] if len(parts) == 1 else Concatenate()(parts)
        output = self.dnn_2(output)
        return tf.expand_dims(output, axis=-2)

    def get_config(self):
        config = super(DeepFM, self).get_config()
        config.update({
            "linear_feature_columns": self.linear_feature_columns,
            "dnn_feature_columns": self.dnn_feature_columns,
            "fm_group": self.fm_group,
            "dnn_hidden_units": self.dnn_hidden_units,
            "output_dim": self.output_dim,
            "l2_reg": self.l2_reg,
            "init_std": self.init_std,
            "seed": self.seed,
            "dnn_dropout": self.dnn_dropout,
            "seq_mask_zero": self.seq_mask_zero,
            "dnn_activation": self.dnn_activation,
            "dnn_use_bn": self.dnn_use_bn,
            "use_fm": self.use_fm,
            "use_deep": self.use_deep
        })
        return config


class DeepFMVariant(keras.layers.Layer):
    """DeepFM without an explicit first-order term, operating directly on a
    dense 3D embedding input of shape ``(batch, field, embedding)``.

    Output shape: ``(batch_size, 1, concat_dim)``.
    """

    def __init__(self, dnn_hidden_units=(128, 128), l2_reg=0,
                 seed=1024, dnn_dropout=0, dnn_activation='relu',
                 dnn_use_bn=False, use_fm=True, use_deep=True, **kwargs):
        assert (use_fm or use_deep)
        super(DeepFMVariant, self).__init__(**kwargs)
        self.dnn_hidden_units = dnn_hidden_units
        self.l2_reg = l2_reg
        self.seed = seed
        self.dnn_dropout = dnn_dropout
        self.dnn_activation = dnn_activation
        self.dnn_use_bn = dnn_use_bn
        self.use_fm = use_fm
        self.use_deep = use_deep

        self.fm = FM()
        self.dnn = DNN(self.dnn_hidden_units, self.dnn_activation, self.l2_reg,
                       self.dnn_dropout, self.dnn_use_bn, self.seed)

    def __call__(self, inputs):
        parts = []
        if self.use_fm:
            parts.append(self.fm(inputs))
        if self.use_deep:
            parts.append(self.dnn(Flatten()(inputs)))

        # Fix: tf.keras Concatenate raises on a single-element input list,
        # so a lone enabled branch must pass through directly.
        output = parts[0] if len(parts) == 1 else Concatenate()(parts)
        return tf.expand_dims(output, axis=-2)

    def get_config(self):
        # Fix: super() must reference this class (was DeepFM), and init_std
        # is not an attribute of DeepFMVariant (was an AttributeError).
        config = super(DeepFMVariant, self).get_config()
        config.update({
            "dnn_hidden_units": self.dnn_hidden_units,
            "l2_reg": self.l2_reg,
            "seed": self.seed,
            "dnn_dropout": self.dnn_dropout,
            "dnn_activation": self.dnn_activation,
            "dnn_use_bn": self.dnn_use_bn,
            "use_fm": self.use_fm,
            "use_deep": self.use_deep
        })
        return config

