import os
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_recommenders_addons import dynamic_embedding as de


class LRModel(tf.keras.Model):
    """Logistic-regression-style ranking model backed by TFRA dynamic embeddings.

    Every (hashed) categorical feature is looked up in one shared
    dynamic-embedding table; the latent vectors are summed into a scalar
    logit (plus bias) and squashed through a sigmoid.
    """

    def __init__(self, embedding_size=1, embedding_initializer=None, is_training=True):
        """Build the model.

        Args:
            embedding_size: width of each embedding vector.
            embedding_initializer: keras initializer for new embedding
                entries; defaults to zeros.
            is_training: when False, switch TFRA into inference mode —
                this must happen before any dynamic-embedding layer is built.
        """
        if not is_training:
            de.enable_inference_mode()

        super(LRModel, self).__init__()
        self.embedding_size = embedding_size

        if embedding_initializer is None:
            embedding_initializer = tf.keras.initializers.Zeros()

        # Single shared table for all feature fields; keys are hash buckets.
        self.embedding = de.keras.layers.SquashedEmbedding(
            embedding_size,
            initializer=embedding_initializer,
            name='embedding')

        # BUG FIX: the original `tf.Variable(0, tf.int64)` passed tf.int64 as
        # the `trainable` positional argument and created an *integer*
        # variable that cannot receive gradients. Use a float scalar bias.
        self.bias = tf.Variable(0.0, dtype=tf.float32, name='bias')

    def call(self, features):
        """Compute P(positive) for a batch of hashed feature ids.

        Args:
            features: dict of int64 hash-bucket tensors keyed by feature
                name; each is reshaped to (batch, 1) before lookup.

        Returns:
            Sigmoid scores in [0, 1].
        """
        user_id = tf.reshape(features['user_id'], (-1, 1))
        user_gender = tf.reshape(features['user_gender'], (-1, 1))
        user_age = tf.reshape(features['user_age'], (-1, 1))

        movie_id = tf.reshape(features['movie_id'], (-1, 1))
        movie_title = tf.reshape(features['movie_title'], (-1, 1))
        movie_genres = tf.reshape(features['movie_genres'], (-1, 1))

        user_id_latent = self.embedding(user_id)
        user_gender_latent = self.embedding(user_gender)
        user_age_latent = self.embedding(user_age)

        movie_id_latent = self.embedding(movie_id)
        movie_title_latent = self.embedding(movie_title)
        movie_genres_latent = self.embedding(movie_genres)

        latent = tf.concat([
            user_id_latent, user_gender_latent, user_age_latent,
            movie_id_latent, movie_title_latent, movie_genres_latent
        ], axis=1)

        # BUG FIX: the bias was created but never used; a linear model's
        # logit is the feature sum *plus* the bias term (bias starts at 0,
        # so initial outputs are unchanged).
        x = tf.reduce_sum(latent, axis=1) + self.bias
        y = tf.keras.activations.sigmoid(x)

        return y


def get_dataset(batch_size=1, dataset_type="train"):
    """Build a (features, label) tf.data pipeline from MovieLens 1M ratings.

    Args:
        batch_size: mini-batch size; batching is skipped when it is 1.
        dataset_type: TFDS split name to load.

    Returns:
        A shuffled tf.data.Dataset of (feature-dict, binary-label) pairs,
        where each feature is an int64 hash bucket in [0, 100000).
    """
    dataset = tfds.load('movielens/1m-ratings', split=dataset_type)
    # Prefix each raw value with its field name before hashing so identical
    # raw values from different fields land in different buckets.
    # BUG FIX: tf.cast cannot convert numeric/bool tensors to tf.string;
    # use tf.strings.as_string instead (the bool gender field goes through
    # an int cast first, since as_string does not accept bool).
    features = dataset.map(
        lambda x: {
            "user_id": tf.strings.to_hash_bucket(
                "user_id" + x["user_id"], 100000),
            "user_gender": tf.strings.to_hash_bucket(
                "user_gender" + tf.strings.as_string(
                    tf.cast(x["user_gender"], tf.int32)), 100000),
            "user_age": tf.strings.to_hash_bucket(
                "user_age" + tf.strings.as_string(x["bucketized_user_age"]), 100000),
            "movie_id": tf.strings.to_hash_bucket(
                "movie_id" + x["movie_id"], 100000),
            "movie_title": tf.strings.to_hash_bucket(
                "movie_title" + x["movie_title"], 100000),
            "movie_genres": tf.strings.to_hash_bucket(
                "movie_genres" + tf.strings.as_string(x["movie_genres"]), 100000)
        })
    # BUG FIX: a Python `if` on a symbolic tensor cannot be traced inside
    # Dataset.map; express the binarized label (rating > 3) as a tensor op.
    ratings = dataset.map(lambda x: tf.cast(x['user_rating'] > 3, tf.int32))
    dataset = tf.data.Dataset.zip((features, ratings))
    dataset = dataset.shuffle(4096, reshuffle_each_iteration=False)
    if batch_size > 1:
        dataset = dataset.batch(batch_size)

    return dataset


def train(embedding_size, train_batch_size, test_batch_size, model_dir, epochs, steps_per_epoch, test_step):
    """Train the LR model on MovieLens ratings, save it, then evaluate AUC.

    Args:
        embedding_size: embedding vector width.
        train_batch_size: batch size for the training pipeline.
        test_batch_size: batch size for the evaluation pipeline.
        model_dir: SavedModel directory (loaded as warm start if present).
        epochs: number of training epochs.
        steps_per_epoch: batches per epoch.
        test_step: number of evaluation batches to run.
    """
    dataset = get_dataset(train_batch_size)
    # BUG FIX: the original call passed `embedding_size` twice, so the
    # initializer parameter received an int and the RandomNormal initializer
    # silently landed in the `is_training` flag.
    model = LRModel(embedding_size, tf.keras.initializers.RandomNormal(0.0, 0.5))
    optimizer = tf.keras.optimizers.Adam(1E-3)
    # Wrap the optimizer so it can update dynamic-embedding parameters.
    optimizer = de.DynamicEmbeddingOptimizer(optimizer)

    auc = tf.keras.metrics.AUC()
    model.compile(optimizer=optimizer, loss=tf.keras.losses.binary_crossentropy, metrics=[auc])

    # Warm start from a previous run when the model directory already exists.
    if os.path.exists(model_dir):
        model.load_weights(model_dir)

    model.fit(dataset, epochs=epochs, steps_per_epoch=steps_per_epoch)

    # TFRA ops live in the 'TFRA' namespace and must be whitelisted to save.
    save_options = tf.saved_model.SaveOptions(namespace_whitelist=['TFRA'])
    model.save(model_dir, options=save_options)

    # Switch dynamic embeddings to inference mode before evaluation.
    de.enable_inference_mode()

    # NOTE(review): movielens/1m-ratings publishes only a 'train' split in
    # TFDS — confirm a 'test' split exists or slice the train split instead.
    dataset = get_dataset(batch_size=test_batch_size, dataset_type="test")
    metric = tf.keras.metrics.AUC()

    it = iter(dataset)
    for step in range(test_step):
        features, ratings = it.get_next()
        predictions = model(features)
        metric.update_state(ratings, predictions)
        if step % 10 == 0:
            # BUG FIX: the counter printed here is the step index, not a
            # sample count — label it correctly.
            print('steps: %d, auc: %5.4f' % (step, metric.result().numpy()))


if __name__ == '__main__':
    # Entry point: train a tiny 1-d embedding LR model for one epoch,
    # then run a short AUC evaluation pass.
    train(
        embedding_size=1,
        train_batch_size=32,
        test_batch_size=32,
        model_dir='lr_model_dir',
        epochs=1,
        steps_per_epoch=2000,
        test_step=1000,
    )
