import os
import torch
import pandas as pd
import scipy.sparse as sp
import numpy as np

from conf import ROOT


class BPR(torch.nn.Module):
    """Bayesian Personalized Ranking: scores user-item pairs by the dot
    product of learned user and item embeddings."""

    def __init__(self, user_num, item_num, embed_dim):
        """
        :param user_num: number of users (rows of the user embedding table)
        :param item_num: number of items (rows of the item embedding table)
        :param embed_dim: dimensionality of both embedding spaces
        """
        super().__init__()
        self.embed_user = torch.nn.Embedding(user_num, embed_dim)
        self.embed_item = torch.nn.Embedding(item_num, embed_dim)

        # xavier_normal (no trailing underscore) is deprecated in PyTorch;
        # the in-place xavier_normal_ is the supported initializer.
        torch.nn.init.xavier_normal_(self.embed_user.weight)
        torch.nn.init.xavier_normal_(self.embed_item.weight)

    def forward(self, user, item_i, item_j):
        """
        Score a positive item (item_i) and a negative item (item_j) for
        each user in the batch.

        :param user: LongTensor of user ids, shape (batch,)
        :param item_i: LongTensor of positive item ids, shape (batch,)
        :param item_j: LongTensor of negative item ids, shape (batch,)
        :return: (prediction_i, prediction_j), each of shape (batch,)
        """
        user_embed = self.embed_user(user)
        item_i_embed = self.embed_item(item_i)
        item_j_embed = self.embed_item(item_j)

        # Sum only over the embedding dimension: a bare .sum() would
        # collapse the entire batch into one scalar, breaking the
        # per-example BPR loss.
        prediction_i = (user_embed * item_i_embed).sum(dim=-1)
        prediction_j = (user_embed * item_j_embed).sum(dim=-1)
        return prediction_i, prediction_j


def read_interaction_mat(project_root, flag):
    """
    Read a user/comment interaction matrix from one reddit_small CSV split.

    :param project_root: project root directory containing data/reddit_small/
    :param flag: which split to read: "train", "test", or "validation"
    :return: scipy.sparse.dok_matrix of shape
             (max author id + 1, max lv0_id + 1) with a 1 at every
             observed (author, lv0_id) interaction
    :raises ValueError: if flag is not one of the recognised splits
    """
    valid_flags = ["train", "test", "validation"]
    if flag not in valid_flags:
        # ValueError is more precise than a bare Exception and is still
        # caught by any existing `except Exception` handlers.
        raise ValueError("flag must be one of {}".format(valid_flags))

    interaction_file = os.path.join(project_root, "data/reddit_small/reddit_{}.csv".format(flag))
    data = pd.read_csv(interaction_file, sep=',', header=0, names=["author", "link_id", "lv0_id", "counts", "label"])

    # IDs are used directly as matrix indices, so the matrix must have room
    # for the maximum id itself — hence the +1 (the original sized the
    # matrix at max id exactly, which raised IndexError on the max row).
    user_num = data["author"].max() + 1
    comment_num = data["lv0_id"].max() + 1

    mat = sp.dok_matrix((user_num, comment_num), dtype=np.int64)
    # Iterate over every interaction row. The original looped
    # range(user_num), which processed the wrong number of rows whenever
    # the interaction count differed from the max user id.
    for user_id, comment_id in zip(data["author"].to_numpy(), data["lv0_id"].to_numpy()):
        mat[user_id, comment_id] = 1

    return mat


if __name__ == '__main__':
    train_mat = read_interaction_mat(ROOT, "train")
    # Walk every stored (user, comment) -> value entry of the sparse matrix;
    # dok_matrix.items() yields ((row, col), value) pairs.
    for (x, y), value in train_mat.items():
        pass

