import tensorflow as tf
from scipy.sparse import csr
import numpy as np
from numba import jit
from utils.read_data import load_txt


class TFFM(object):
    """Factorization Machine (Rendle, 2010) trained with TensorFlow 2 gradient tapes.

    Loads MovieLens-style rating files (ua.base / ua.test), one-hot encodes
    (user, item) pairs into a sparse design matrix, and fits the FM linear
    weights and pairwise interaction factors with plain gradient descent.
    """

    def __init__(self):
        train_file = './data/FM/ua.base'
        test_file = './data/FM/ua.test'

        train_data = load_txt(file=train_file)
        test_data = load_txt(file=test_file)

        # Column 0: user ids, column 1: item ids.  load_txt apparently returns
        # an np.matrix, hence the .T.A[0] dance to get flat 1-D arrays —
        # TODO(review): confirm against utils.read_data.load_txt.
        x_train_dict = {'users': train_data[:, 0].T.A[0], 'items': train_data[:, 1].T.A[0]}
        x_test_dict = {'users': test_data[:, 0].T.A[0], 'items': test_data[:, 1].T.A[0]}

        x_train = self.vectorize_dict(x_train_dict, n=len(train_data), g=2)
        # Re-use the training feature dimension so train/test matrices align.
        x_test = self.vectorize_dict(x_test_dict, p=x_train.shape[1], n=len(test_data), g=2)

        # Ratings, cast to float32 so they match the model tensors' dtype
        # (the original kept float64, making tf.subtract raise on dtypes).
        self.y_train = train_data[:, 2].T.A[0].astype(np.float32)
        self.y_test = test_data[:, 2].T.A[0].astype(np.float32)

        n, p = x_train.shape
        k = 10  # latent factor dimension of the interaction term

        # Dense copy of the training design matrix kept on the TF side.
        self.tensor_x = tf.Variable(x_train.todense(), dtype=tf.float32)

        # Maximum number of gradient-descent iterations.
        self.max_itor = 200

        # Linear weights (p, 1) and pairwise interaction factors (k, p).
        self.W = tf.Variable(tf.random.normal(shape=[p, 1]), dtype=tf.float32)
        self.v = tf.Variable(tf.random.normal(shape=[k, p], mean=0, stddev=0.01))
        self.learning_rate = 1e-9

    @staticmethod
    def vectorize_dict(dic, ix=None, p=None, n=0, g=0):
        """Build an (n, p) scipy CSR design matrix from grouped feature lists.

        Uses csr_matrix((data, (row_ind, col_ind)), shape=(M, N)), where
        a[row_ind[k], col_ind[k]] = data[k]; duplicate (row, col) entries sum.

        :param dic: mapping feature-group name -> iterable of n ids,
                    e.g. {'users': [...], 'items': [...]}
        :param ix: optional pre-built occurrence-count index (default: new dict)
        :param p: number of columns; defaults to the number of distinct
                  (id, group) keys seen — pass the training value for test data
        :param n: number of samples (rows)
        :param g: number of feature groups, e.g. users/items -> g=2
        :return: scipy.sparse CSR matrix of shape (n, p)

        NOTE(review): the column index is the running occurrence COUNT of each
        (id, group) key, not a stable one-hot column id.  Kept as-is to
        preserve the original encoding — confirm this is really intended.
        """
        ix = ix if ix else dict()

        col_ix = np.empty(n * g, dtype=int)
        i = 0
        for k, feature_list in dic.items():
            for num, info in enumerate(feature_list):
                # Suffix the group name so identical ids from different
                # groups do not collide on the same key.
                key = str(info) + str(k)
                ix[key] = ix.get(key, 0) + 1
                col_ix[i + num * g] = ix[key]
            i += 1

        # Each of the n samples contributes one entry per feature group.
        row_ix = np.repeat(np.arange(n), g)
        data = np.ones(n * g)
        p = p if p else len(ix)

        # Drop entries whose column index falls outside the feature space.
        ixx = np.where(col_ix < p)
        return csr.csr_matrix((data[ixx], (row_ix[ixx], col_ix[ixx])), shape=(n, p))

    @staticmethod
    def fun_fm(x, a, v):
        """FM prediction y(x) = x.a + pairwise interaction term.

        Uses the O(k*p) reformulation from Rendle (2010):
        0.5 * sum_f ((x.v_f)^2 - (x^2).(v_f^2)).

        Removed the numba @jit decorator: numba cannot compile TensorFlow ops,
        so it either errored or fell back to plain Python with extra overhead.

        :param x: (n, p) float32 design matrix
        :param a: (p, 1) linear weights
        :param v: (k, p) interaction factors
        :return: (n,) tensor of per-sample predictions
        """
        # Per-sample linear term; the original reduce_sum'ed over ALL samples,
        # collapsing the linear part to a single scalar added to every row.
        linear_terms = tf.squeeze(tf.matmul(x, a), axis=1)
        pair_interactions = 0.5 * tf.reduce_sum(
            tf.subtract(tf.pow(tf.matmul(x, tf.transpose(v)), 2),
                        tf.matmul(tf.pow(x, 2), tf.transpose(tf.pow(v, 2)))),
            axis=1)
        return tf.add(linear_terms, pair_interactions)

    def tf_loss(self, para):
        """Training-set MSE plus L2 regularisation of both weight sets.

        :param para: tuple (a, v) of linear weights and interaction factors
        :return: scalar loss tensor
        """
        a, v = para
        pre_y = self.fun_fm(self.tensor_x, a, v)
        error = tf.reduce_mean(tf.square(tf.subtract(self.y_train, pre_y)))

        lambda_w = tf.constant(0.001, name='lambda_w')
        lambda_v = tf.constant(0.001, name='lambda_v')

        # Squared Frobenius norms computed with TF ops — the original used
        # np.linalg.norm, which detached the regulariser from the gradient
        # tape so no gradient flowed through it.
        l2_norm = tf.add(tf.multiply(lambda_w, tf.reduce_sum(tf.square(a))),
                         tf.multiply(lambda_v, tf.reduce_sum(tf.square(v))))

        # Single scalar return; the original returned the loss TWICE as a
        # tuple, which made tape.gradient sum both targets and double every
        # gradient.
        return tf.add(error, l2_norm)

    def tf_grad(self, a, v):
        """Gradients of the regularised loss w.r.t. (a, v) via autodiff."""
        with tf.GradientTape() as tape:
            loss_ = self.tf_loss(para=(a, v))
        return tape.gradient(loss_, (a, v))

    def train(self):
        """Plain gradient descent on (W, v) for up to max_itor steps."""
        for step in range(self.max_itor):
            delta_w, delta_v = self.tf_grad(a=self.W, v=self.v)
            self.W.assign_sub(delta_w * self.learning_rate)
            self.v.assign_sub(delta_v * self.learning_rate)
            loss_value = self.tf_loss(para=(self.W, self.v))
            # Early stop once the regularised MSE is small enough
            # (3.8 is the original, undocumented threshold).
            if loss_value <= 3.8:
                break
            if step % 10 == 0:  # step 0 satisfies this too
                print('step:{}, loss: {}'.format(step, loss_value))

    def model_evalution(self):
        """Report RMSE of the fitted model on the training set."""
        pre_train_y = self.fun_fm(self.tensor_x, self.W, self.v)
        # The original took sqrt of the summed squared error (i.e. sqrt(SSE),
        # not RMSE); the mean was missing.
        rmse = np.sqrt(np.mean(np.square(self.y_train - pre_train_y.numpy())))
        print('rmse: {}'.format(rmse))


def ru():
    """Build the TF factorization machine, train it, and report its fit."""
    model = TFFM()
    model.train()
    model.model_evalution()


# Script entry point: train the FM model and print its training RMSE.
if __name__ == '__main__':
    ru()
