import numpy as np
import scipy.sparse as sp
import sklearn.metrics.pairwise as smp
from sklearn import datasets
from sklearn import preprocessing as prep

# Train/validation/test splits of the CiteULike user-item interactions;
# one line per user ("0.8" presumably denotes the train fraction — TODO confirm).
train_file = 'data/citeu/train_citeu_0.8.dat'
test_file = 'data/citeu/test_citeu_0.8.dat'
val_file = 'data/citeu/val_citeu_0.8.dat'
# Item content features in 0-based svmlight/libsvm format.
item_content_file = 'data/citeu/item_features_0based.txt'

# Dataset dimensions; presumably consistent with the files above
# (see num_user_check / num_item_check helpers) — TODO confirm.
user_number = 5551
item_number = 16980


class CiteuData:
    """CiteULike dataset loader.

    Reads the train/val/test user-item interaction splits plus item content
    features, and derives binarized user-user (uu) and item-item (ii)
    similarity graphs thresholded at ``sim_percentile``.

    :param sim_percentile: percentile in (0, 100) of similarity values used
        as the edge-keeping threshold for the uu/ii graphs
    :param construct_graph_with_feature: if True, build the ii graph from the
        item content features instead of interaction co-occurrence
    :param graph_in_dense: if True, interaction matrices are dense ndarrays;
        otherwise scipy sparse matrices
    """

    def __init__(self, sim_percentile=99.95, construct_graph_with_feature=False, graph_in_dense=False):
        self.num_user = user_number
        self.num_item = item_number
        self.graph_in_dense = graph_in_dense
        self.sim_percentile = sim_percentile
        self.construct_graph_with_feature = construct_graph_with_feature
        # user x item 0/1 interaction matrices, one per split
        self.train_graph_ui = self.read_matrix_file(train_file)
        self.train_graph_uu = None  # filled in by construct_graph() below
        self.train_graph_ii = None  # filled in by construct_graph() below
        self.test_graph_ui = self.read_matrix_file(test_file)
        self.val_graph_ui = self.read_matrix_file(val_file)
        self.item_features = self.import_item_feature()
        self.construct_graph()

    @staticmethod
    def num_user_check(file):
        """Count non-blank lines in *file* (one user per line).

        Sanity-check helper for the ``user_number`` constant. Whitespace-only
        lines are ignored (``readlines`` lines always keep their ``'\\n'``, so
        a plain length test would count them).
        """
        with open(file) as f:
            return len([line for line in f if line.strip()])

    @staticmethod
    def num_item_check(file):
        """Return 1 + the largest item id referenced in *file*.

        Lines look like ``<first-token> <item> <item> ...``; the first token
        is not an item id. Blank lines and lines listing no items are skipped
        (they previously raised ``ValueError`` via ``max([])``).
        """
        max_id = -1
        with open(file) as f:
            for line in f:
                if not line.strip():
                    continue
                ids = [int(tok) for tok in line.split(' ')[1:]]
                if ids:
                    max_id = max(max_id, max(ids))
        return max_id + 1

    def read_matrix_file(self, file):
        """Read an interaction file into a (user_number, item_number) 0/1
        matrix — dense or sparse per ``self.graph_in_dense``."""
        if self.graph_in_dense:
            return self.read_matrix_file_in_dense(file)
        return self.read_matrix_file_in_sparse(file)

    @staticmethod
    def read_matrix_file_in_dense(file):
        """Parse *file* into a dense (user_number, item_number) int8 0/1 matrix.

        Line i lists the items of user i; the first token of each line is
        skipped (it is not an item id). NOTE: every line advances the user
        index (a line with no items yields an all-zero row), so row order
        must match the file — do not filter lines here.
        """
        matrix = np.zeros((user_number, item_number), dtype=np.int8)
        with open(file) as f:
            lines = filter(lambda line: len(line) > 0, f.readlines())
            items = map(lambda line: [int(item) for item in line.split(' ')[1:]], lines)
            for i, item_list in enumerate(items):
                matrix[i, item_list] = 1
        return matrix

    @staticmethod
    def read_matrix_file_in_sparse(file):
        """Parse *file* into a sparse COO (user_number, item_number) int8
        matrix of ones; same line format and row-indexing caveat as
        :meth:`read_matrix_file_in_dense`."""
        row = []
        col = []
        with open(file) as f:
            lines = filter(lambda line: len(line) > 0, f.readlines())
            items = map(lambda line: [int(item) for item in line.split(' ')[1:]], lines)
            for i, item_list in enumerate(items):
                row += [i] * len(item_list)
                col += item_list
        data = [1] * len(col)
        return sp.coo_matrix((data, (row, col)), shape=(user_number, item_number), dtype=np.int8)

    @staticmethod
    def import_item_feature():
        """Load item content features and reduce them to a dense embedding.

        Pipeline: svmlight file -> tf-idf -> 300-component randomized SVD
        (``u * s``) -> column-wise standardization capped at 5 std.

        Row 0 is dropped at the end — presumably a dummy/padding item so real
        ids start at 1 in the raw file; TODO confirm against the data.
        """
        item_content, _ = datasets.load_svmlight_file(item_content_file, zero_based=True, dtype=np.float32)

        item_content = CiteuData.tfidf(item_content)

        from sklearn.utils.extmath import randomized_svd
        u, s, _ = randomized_svd(item_content, n_components=300, n_iter=5)
        item_content = u * s  # scale left singular vectors by singular values

        _, item_feature = CiteuData.prep_standardize(item_content)

        return item_feature[1:]

    def construct_graph(self):
        """Build the thresholded uu and ii similarity graphs from training data."""
        self.train_graph_uu = self.similarity_matrix(self.train_graph_ui, self.sim_percentile, self.graph_in_dense)
        if self.construct_graph_with_feature:
            # item_features is a dense embedding, so the result is dense
            # regardless of self.graph_in_dense
            self.train_graph_ii = self.similarity_matrix(self.item_features, self.sim_percentile)
        else:
            self.train_graph_ii = self.similarity_matrix(self.train_graph_iu, self.sim_percentile, self.graph_in_dense)

    @staticmethod
    def similarity_matrix(data, percentile, dense_output=True):
        """Binarized cosine-similarity graph over the rows of *data*.

        Keeps only entries strictly greater than the global ``percentile``
        threshold (self-similarities are not excluded).

        :param data: 2-D dense or sparse matrix, one node per row
        :param percentile: threshold percentile, exclusive bounds (0, 100)
        :param dense_output: return a dense ndarray; forced True when *data*
            is dense, since sparse output needs sparse input
        :return: 0/1 int32 ndarray, or a COO matrix whose data are all ones
        """
        assert 0.0 < percentile < 100.0
        if sp.isspmatrix(data):
            data = data.tocsr()
        elif dense_output is False:
            dense_output = True  # sparse output unsupported for dense input
        sim_matrix = smp.cosine_similarity(data, dense_output=dense_output)
        if dense_output:
            threshold = np.percentile(sim_matrix, percentile)
            return np.where(sim_matrix > threshold, 1, 0).astype(np.int32)

        sim_matrix = sim_matrix.tocoo()
        # The requested percentile is over the full matrix, but only the
        # explicitly stored values are available here: rescale the percentile
        # so the implicit zeros are accounted for.
        matrix_size = sim_matrix.shape[0] * sim_matrix.shape[1]
        nonzero_size = sim_matrix.data.size
        q = np.true_divide(percentile, 100.0)
        q = 1.0 - (matrix_size * (1 - q)) / nonzero_size
        new_percentile = 100.0 * q
        if new_percentile < 0:
            # threshold falls inside the implicit zeros: keep every stored entry
            sim_matrix.data = np.ones_like(sim_matrix.data, dtype=np.int32)
            return sim_matrix

        threshold = np.percentile(sim_matrix.data, new_percentile)

        keep = np.where(sim_matrix.data > threshold)
        sim_matrix.row = sim_matrix.row[keep[0]]
        sim_matrix.col = sim_matrix.col[keep[0]]
        sim_matrix.data = np.ones_like(sim_matrix.row, dtype=np.int32)
        return sim_matrix

    @property
    def train_graph_iu(self):
        """Item-user view: transpose of the training user-item graph."""
        return self.train_graph_ui.transpose()

    @staticmethod
    def prep_standardize(x):
        """Column-wise standardize a dense 2-D array, capped at +/-5 std.

        Only rows with at least one nonzero entry contribute to (and receive)
        the scaling; all-zero rows are left untouched. Values with magnitude
        below 1e-5 are snapped to exactly 0.

        Note: despite the original docstring's claim of sparse input, this
        requires a dense ndarray (``np.copy`` / boolean-mask assignment); the
        only caller passes the dense SVD embedding.

        :param x: dense 2-D array to standardize column-wise
        :return: (fitted StandardScaler for reuse at inference, scaled copy of x)
        """
        std = 5
        x_nzrow = x.any(axis=1)
        scaler = prep.StandardScaler().fit(x[x_nzrow, :])
        x_scaled = np.copy(x)
        x_scaled[x_nzrow, :] = scaler.transform(x_scaled[x_nzrow, :])
        x_scaled[x_scaled > std] = std
        x_scaled[x_scaled < -std] = -std
        x_scaled[np.absolute(x_scaled) < 1e-5] = 0
        return scaler, x_scaled

    @staticmethod
    def tfidf(R):
        """tf-idf transform of a sparse count matrix ``R``.

        Counts are shifted by +1 before the log so every stored tf entry is
        positive; idf = log(n_rows / (1 + column document frequency)).
        """
        row = R.shape[0]
        col = R.shape[1]
        Rbin = R.copy()
        Rbin[Rbin != 0] = 1.0
        R = R + Rbin  # shift stored counts so log(tf) > 0
        tf = R.copy()
        tf.data = np.log(tf.data)
        idf = np.sum(Rbin, 0)
        idf = np.log(row / (1 + idf))
        idf = sp.spdiags(idf, 0, col, col)
        return tf * idf

    @staticmethod
    def create_pos_neg_sample_generator(train_graph_ui):
        """Build infinite (positive, negative) per-user item samplers.

        For each user, the positive pool is the items they interacted with
        and the negative pool is its complement over all items. Each
        ``next()`` on a generator yields a list with one sampled item id per
        user.

        :param train_graph_ui: user x item 0/1 matrix, dense or sparse
        :return: (positive sampler, negative sampler) generator pair
        """
        import random

        users = train_graph_ui.shape[0]
        items = train_graph_ui.shape[1]
        item_ids = set(range(items))
        if sp.isspmatrix(train_graph_ui):
            graph_ui = train_graph_ui.tocsr()
            # CSR row i's column indices live in indices[indptr[i]:indptr[i+1]]
            pos_sets = [set(graph_ui.indices[b:e])
                        for b, e in zip(graph_ui.indptr[:-1], graph_ui.indptr[1:])]
        else:
            pos_sets = [set(np.argwhere(train_graph_ui[i] > 0).flatten())
                        for i in range(users)]
        neg_sets = [item_ids - s for s in pos_sets]

        def sampler(sets):
            # random.sample(set, k) was deprecated in Python 3.9 and raises
            # TypeError since 3.11 — materialize each pool once (hoisted out
            # of the infinite loop) and use random.choice instead.
            pools = [tuple(s) for s in sets]
            while True:
                yield [random.choice(pool) for pool in pools]

        return sampler(pos_sets), sampler(neg_sets)

    def get_pos_neg_sample_generator(self):
        """Samplers over this dataset's training interactions."""
        return self.create_pos_neg_sample_generator(self.train_graph_ui)