"""
Good for sparse data. eg. user and item ID as features
教程可参考 https://blog.csdn.net/shenxiaolu1984/article/details/78740481
"""

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import pandas as pd


class MyOneHotEncoder(OneHotEncoder):
    """One-hot encode every column of a DataFrame and hold a train/test split.

    Each column is first label-encoded to contiguous integer ids, then
    one-hot encoded into a scipy sparse matrix.  Rows are split into
    train/test by ``train_ratio``; ``next_batch`` yields shuffled
    mini-batches as ``tf.SparseTensorValue`` for a sparse placeholder.

    NOTE(review): relies on the pre-0.22 scikit-learn OneHotEncoder API
    (``n_values``, ``categorical_features``, ``n_values_``), which was
    removed in later sklearn releases.
    """

    def __init__(self, data, labels, train_ratio=0.9, sparse=True, n_values="auto",
                 categorical_features="all", dtype=np.float64, handle_unknown='error'):
        super().__init__(n_values=n_values, categorical_features=categorical_features,
                         dtype=dtype, sparse=sparse, handle_unknown=handle_unknown)

        # One LabelEncoder per column.  Fit on the ``data`` parameter — the
        # original code accidentally read the module-level global ``x``.
        self.les = [LabelEncoder() for _ in data.columns]
        for le, col in zip(self.les, data.columns):
            le.fit(data[col])
        self.fit(self.to_id(data))
        self.x_dim = self.n_values_.sum()   # total width of the one-hot vector

        data = self.transform(self.to_id(data))     # sparse [n_samples, x_dim]
        labels = labels.values[:, None]             # column vector [n_samples, 1]
        sp = int(data.shape[0] * train_ratio)
        self.train_sparse_x = data[:sp]
        self.train_y = labels[:sp]
        self.test_sparse_x = data[sp:]
        self.test_y = labels[sp:]

    def to_id(self, data):
        """Map every column of ``data`` to integer ids; returns [n_samples, n_cols]."""
        return np.concatenate([le.transform(data[col])[:, None]
                               for le, col in zip(self.les, data.columns)], axis=1)

    def sparse_tensor_transform(self, sparse_matrix):
        """Convert a scipy sparse matrix to a ``tf.SparseTensorValue``."""
        sparse_m = sparse_matrix.tocoo()
        indices = np.array([sparse_m.row, sparse_m.col]).T
        return tf.SparseTensorValue(indices, sparse_m.data, sparse_m.shape)

    def next_batch(self, batch_size):
        """Endless generator of shuffled training mini-batches.

        Yields ``(tf.SparseTensorValue, labels)`` pairs and reshuffles the
        training data whenever a full epoch completes.
        """
        x, y = self.train_sparse_x, self.train_y
        p = 0
        while True:
            p_ = p + batch_size
            if p_ > len(y):
                # Epoch finished: reshuffle and restart.  Use len(y) — the
                # original len(x) raises TypeError on a scipy sparse matrix —
                # and rebind the locals instead of in-place slice assignment,
                # which CSR matrices do not support efficiently (or at all).
                new_idx = np.random.permutation(len(y))
                x, y = x[new_idx], y[new_idx]
                p = 0
                continue
            yield self.sparse_tensor_transform(x[p: p_]), y[p: p_]
            p = p_


class FM:
    """Factorization Machine for binary classification (TensorFlow 1.x graph mode).

    Model: logit = b + x·w + 1/2 * sum_k [ (xV)_k^2 - (x^2 V^2)_k ],
    the standard O(kn) rewrite of the FM pairwise-interaction term.
    The input ``x`` is fed as a ``tf.SparseTensor``, so all products use
    ``tf.sparse_tensor_dense_matmul``.
    """

    def __init__(self, x_dim, y_dim=1, k=6, lr=0.001,):
        # x_dim: width of the (one-hot) input vector; k: latent factor size.
        self.tfy = tf.placeholder(tf.float32, shape=[None, y_dim])
        self.tfx = tf.sparse_placeholder(tf.float32)
        # Linear part: global bias b plus first-order weights w.
        b = tf.Variable(np.full((1, y_dim), 0.1), dtype=tf.float32)
        w = tf.Variable(np.random.normal(loc=0, scale=0.1, size=(x_dim, y_dim)), dtype=tf.float32)
        xw = tf.sparse_tensor_dense_matmul(sp_a=self.tfx, b=w)  # [batch, y_dim]

        # hidden vector: latent factor matrix V, one k-dim embedding per feature
        h_vec = tf.Variable(np.random.normal(loc=0, scale=0.1, size=(x_dim, k)), dtype=tf.float32)

        # hidden relationship: 1/2 * ((xV)^2 - x^2 V^2) summed over the k factors
        h = 1 / 2 * tf.reduce_sum(
            tf.square(tf.sparse_tensor_dense_matmul(sp_a=self.tfx, b=h_vec))  # xv = [batch, k]
            - tf.sparse_tensor_dense_matmul(sp_a=tf.square(self.tfx), b=tf.square(h_vec)),  # x2v2 = [batch, k]
            axis=1, keep_dims=True
        )  # [batch, 1]

        # FM formula
        logits = xw + b + h

        self.prob = tf.nn.sigmoid(logits)
        # NOTE(review): tf.metrics.auc is a *streaming* metric — its internal
        # counters accumulate over every sess.run, so values reported after
        # training mix in earlier batches.
        self._auc = tf.metrics.auc(labels=self.tfy, predictions=self.prob,)[1]

        self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.tfy, logits=logits))
        self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)

        self.sess = tf.Session()
        # local_variables_initializer is required for tf.metrics.auc's counters.
        self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

    def predict(self, x):
        """Return hard 0/1 predictions for sparse input ``x`` (threshold 0.5)."""
        prob = self.sess.run(self.prob, feed_dict={self.tfx: x})
        return np.where(prob < 0.5, 0, 1)

    def auc(self, x, y):
        """Return the (streaming) AUC after updating it with this batch."""
        return self.sess.run(self._auc, {self.tfx: x, self.tfy: y})

    def train(self, x, y):
        """Run one optimizer step; returns ``(auc, loss)`` — note the order."""
        loss, auc, _ = self.sess.run([self.loss, self._auc, self.train_op], feed_dict={self.tfx: x, self.tfy: y})
        return auc, loss


# ---- demo script: CTR prediction with an FM on the Avazu dataset ----
path_data = "./datasets/avazu-ctr.csv"          # ads data from: https://www.kaggle.com/c/avazu-ctr-prediction/data
data = pd.read_csv(path_data, nrows=200000)

x = data[["id", "C1", "banner_pos", "site_domain", "site_category", "app_domain", "app_category",
          "device_id", "device_type", "C16", "C17", "C18", "C19", "C20"]]
y = data["click"]

ohe = MyOneHotEncoder(x, y)
fm = FM(ohe.x_dim)
print(x.head(3))
print("x_dim =", ohe.x_dim)

# training
# FM.train returns (auc, loss) in that order; the original unpacked them
# swapped, so loss and AUC were printed under each other's label.
for t, (bx, by) in zip(range(2000), ohe.next_batch(64)):
    auc, loss = fm.train(bx, by)
    if t % 50 == 0:
        print(t, "| loss:", loss, "| AUC:", auc)

# testing
# NOTE(review): tf.metrics.auc is streaming, so this test AUC also includes
# statistics accumulated during training batches.
test_sparse_x = ohe.sparse_tensor_transform(ohe.test_sparse_x)
pred = fm.predict(test_sparse_x)
pred_real = np.concatenate((pred, ohe.test_y), axis=1)
print(pred_real[:30])
print("Test AUC =", fm.auc(test_sparse_x, ohe.test_y))