import random
from collections import defaultdict
import numpy
import torch
from torch import nn as nn
from scipy.sparse import coo_matrix
import numpy as np
from torch.nn import Module
from scipy import sparse
from sklearn.metrics import pairwise_distances

from ARM_TGNCF.new_GNN_layer import new_GNN_Layer
from ARM_TGNCF.Parse_ARM_TGNCF import ARM_args
from ARM_TGNCF.tool import get_sparse_eye

# Seed every RNG source the model touches (CPU torch, CUDA torch, NumPy)
# from the project-wide configured seed so experiments are reproducible.
SEED = ARM_args.seed
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)


class ARM_TGNCF(Module):

    def __init__(self, the_user_num, the_item_num, the_rating_events, the_embed_size=64,
                 layers=None, is_use_cuda=True):

        super(ARM_TGNCF, self).__init__()
        if layers is None:
            layers = [64, 64]

        self.useCuda = is_use_cuda
        self.userNum = int(the_user_num)
        self.itemNum = int(the_item_num)
        self.u_Embed = nn.Embedding(self.userNum, the_embed_size)
        self.i_Embed = nn.Embedding(self.itemNum, the_embed_size)
        self.GNN_layers = torch.nn.ModuleList()
        self.rating_events = the_rating_events
        self.LaplacianMat, self.ui_LaplacianMat = self.syn_build_LaplacianMat(the_rating_events, 1)

        self.leakyRelu = nn.LeakyReLU()
        self.selfLoop = get_sparse_eye(self.userNum + self.itemNum)

        self.transForm1 = nn.Linear(in_features=layers[-1] * (len(layers)) * 2, out_features=64)
        self.transForm2 = nn.Linear(in_features=64, out_features=32)
        self.transForm3 = nn.Linear(in_features=32, out_features=1)

        self.transForm_cat = nn.Linear(in_features=layers[-1] * (len(layers)) * 3, out_features=64)

        for From, To in zip(layers[:-1], layers[1:]):
            gnn_layer = new_GNN_Layer(From, To, is_use_cuda=self.useCuda)
            self.GNN_layers.append(gnn_layer)

    def syn_build_LaplacianMat(self, rating_events, final_embed, uu_flg=False, ii_flg=False):

        rt_item = rating_events['itemId'] + self.userNum
        rt_item_0 = rating_events['itemId']

        ui_mat = coo_matrix((rating_events['rating'], (rating_events['userId'], rt_item_0)))

        ui_mat_upper_part = coo_matrix((rating_events['rating'], (rating_events['userId'], rt_item)))

        ui_mat = ui_mat.transpose()

        ui_mat.resize((self.itemNum, self.userNum + self.itemNum))

        ui_mat_upper_part.resize(self.userNum, self.userNum + self.itemNum)
        A0 = sparse.vstack([ui_mat_upper_part.astype(float), ui_mat.astype(float)])

        uu_tmp = sparse.dok_matrix((self.userNum, self.userNum + self.itemNum), dtype=np.float32)
        if uu_flg:

            u_idx = torch.LongTensor([j for j in range(self.userNum)])
            if self.useCuda:
                u_idx = u_idx.cuda()

            user_embed = final_embed[u_idx].squeeze().cpu().detach().numpy()

            uu = 1 - pairwise_distances(user_embed, metric="cosine")  # cosine

            uu_top_k = np.argsort(uu, axis=1)

            uu_top_k = uu_top_k[:, -ARM_args.u_top_k - 1:]

            for j in range(self.userNum):
                for tk in uu_top_k[j]:
                    uu_tmp[j, tk] = uu[j, tk]

        ii_tmp = sparse.dok_matrix((self.itemNum, self.userNum + self.itemNum), dtype=np.float32)
        if ii_flg:

            if ARM_args.ii_method == "similarity":

                i_idx = torch.LongTensor([j for j in range(self.itemNum)]) + self.userNum
                if self.useCuda:
                    i_idx = i_idx.cuda()

                item_embed = final_embed[i_idx].squeeze().cpu().detach().numpy()

                ii = 1 - pairwise_distances(item_embed, metric="cosine")  # cosine

                ii_top_k = np.argsort(ii, axis=1)

                ii_top_k = ii_top_k[:, -ARM_args.i_top_k - 1:]

                for j in range(self.itemNum):
                    for tp in ii_top_k[j]:
                        ii_tmp[j, tp + self.userNum] = ii[j, tp]

            else:
                ii_tmp = create_ii_interfere(self.rating_events, self.userNum, self.itemNum)

        Aui = sparse.vstack([uu_tmp, ii_tmp])

        selfLoop = sparse.eye(self.userNum + self.itemNum)

        A0 = A0 + selfLoop
        sumArr = A0.sum(axis=1)
        diag_ = list(np.array(sumArr.flatten())[0])
        diag_ = np.power(diag_, -0.5)
        D = sparse.diags(diag_)
        L = D * A0 * D
        L = sparse.coo_matrix(L)
        row = L.row
        col = L.col
        j = torch.LongTensor(np.array([row, col]))
        data_floatTensor = torch.FloatTensor(L.data)
        SparseA0 = torch.sparse.FloatTensor(j, data_floatTensor)

        Aui = Aui + selfLoop
        sumArr = Aui.sum(axis=1)
        diag_ = list(1. / np.array(sumArr.flatten())[0])
        # diag_ = np.power(diag_, -0.5)
        D = sparse.diags(diag_)
        L = D * Aui
        # L = D * Aui * D
        L = sparse.coo_matrix(L)
        row = L.row
        col = L.col
        j = torch.LongTensor(np.array([row, col]))
        data_floatTensor = torch.FloatTensor(L.data)
        SparseAui = torch.sparse.FloatTensor(j, data_floatTensor)

        return SparseA0, SparseAui

    def embed_similarity(self, phase, top_k, final_embed, tmp):

        if phase == 'user':
            idx = torch.LongTensor([j for j in range(self.userNum)])
            num = 0
        else:
            idx = torch.LongTensor([j for j in range(self.itemNum)]) + self.userNum
            num = self.userNum

        embeddings = final_embed[idx].squeeze().cpu().detach().numpy()

        for i in idx.tolist():
            embed_i = embeddings[i - num].T  #
            similarity_i = np.matmul(embeddings, embed_i)
            simi_value_top_k = []
            simi_index_top_k = []

            for cur_simi_index in range(len(similarity_i)):

                if cur_simi_index == i - num:
                    continue

                cur_simi_value = similarity_i[cur_simi_index]

                if len(simi_value_top_k) < top_k:
                    simi_value_top_k.append(cur_simi_value)
                    simi_index_top_k.append(cur_simi_index)
                elif cur_simi_value > min(simi_value_top_k):
                    min_index = simi_value_top_k.index(min(simi_value_top_k))
                    simi_value_top_k[min_index] = cur_simi_value
                    simi_index_top_k[min_index] = cur_simi_index

            for tk in range(len(simi_index_top_k)):
                tk_value = simi_value_top_k[tk]
                tk_index = simi_index_top_k[tk]
                tmp[i - num, tk_index + num] = tk_value

    def getFeatureMat(self):

        u_idx = torch.LongTensor([j for j in range(self.userNum)])
        i_idx = torch.LongTensor([j for j in range(self.itemNum)])
        if self.useCuda:
            u_idx = u_idx.cuda()
            i_idx = i_idx.cuda()

        user_embed = self.u_Embed(u_idx)
        item_embed = self.i_Embed(i_idx)
        features = torch.cat([user_embed, item_embed], dim=0)
        return features

    def forward(self, user_idx, item_idx):

        item_idx += self.userNum
        user_idx = list(user_idx.cpu().data)
        item_idx = list(item_idx.cpu().data)

        features = self.getFeatureMat()

        final_embed = features.clone()

        for gnn in self.GNN_layers:
            features = gnn(self.LaplacianMat, self.selfLoop, self.ui_LaplacianMat, features)
            features = nn.ReLU()(features)
            final_embed = torch.cat([final_embed, features.clone()], dim=1)

        user_embed = final_embed[user_idx]
        item_embed = final_embed[item_idx]
        embed = torch.cat([user_embed, item_embed], dim=1)

        embed = nn.ReLU()(self.transForm1(embed))
        embed = self.transForm2(embed)
        embed = self.transForm3(embed)
        # if args.forecast_method == 'inter':
        #     embed = nn.Sigmoid()(embed)
        predict = embed.flatten()

        return predict, user_embed, item_embed, final_embed


def create_ii_interfere(cur_events, user_num, item_num):
    """Build a sparse item-item association matrix from rating history.

    Two scoring methods are selected by ``ARM_args.ii_method``:
      * "path_length": score item pairs by the summed inverse distance
        between their positions inside common raters' histories.
      * "ARM": association-rule confidence P(item_to | item_from) over
        the sets of users who rated each item, with a minimum support.

    Args:
        cur_events: DataFrame with 'userId', 'itemId', 'rating' columns,
            in chronological order.
        user_num: number of users (item columns are offset by this).
        item_num: number of items.

    Returns:
        scipy.sparse.dok_matrix of shape (item_num, user_num + item_num)
        holding top-k item->item scores in the item columns.
    """
    threshold = -1
    support_threshold = 0.1
    user_history = defaultdict(list)   # user -> items rated, in event order
    item_history = defaultdict(list)   # item -> users who rated it, in order
    ii_tmp = sparse.dok_matrix((item_num, user_num + item_num), dtype=np.float32)  # item association matrix

    if not len(cur_events):
        return ii_tmp

    for _, event in cur_events.iterrows():
        user_id = event['userId']
        item_id = event['itemId']
        if event['rating'] >= threshold:
            # Keep only the most recent occurrence of a repeated rating so
            # each history stays duplicate-free and chronologically ordered.
            u_hist = user_history[user_id]
            if item_id in u_hist:
                u_hist.remove(item_id)
            u_hist.append(item_id)

            i_hist = item_history[item_id]
            if user_id in i_hist:
                i_hist.remove(user_id)
            i_hist.append(user_id)

    if not user_history:
        # BUGFIX: no event passed the rating threshold; bail out instead of
        # dividing by len(user_history) == 0 in the ARM branch below.
        return ii_tmp

    if ARM_args.ii_method == "path_length":
        for item_from in range(item_num):
            item_from_list = []   # current top-k candidate item ids
            item_score = []       # their scores, parallel to item_from_list
            min_score = 0.0
            user_item_from = item_history[item_from]

            if len(user_item_from) == 0:
                continue

            for item_to in range(item_num):

                if item_from == item_to:
                    continue

                user_from_and_to = list(set(user_item_from) & set(item_history[item_to]))
                if not user_from_and_to:
                    continue
                score_ii = 0.0
                max_user = 6  # cap the users examined per pair for speed

                if len(user_from_and_to) > max_user:
                    # BUGFIX: sample over the full index range; the original
                    # started at 1 and could never pick the first user.
                    rand_index = random.sample(range(len(user_from_and_to)), max_user)
                    user_from_and_to = np.array(user_from_and_to)[rand_index]

                for cur_user in user_from_and_to:
                    cur_user_history = user_history[cur_user]
                    if item_to in cur_user_history:
                        from_index = cur_user_history.index(item_from)
                        to_index = from_index

                        # Find the LAST occurrence of item_to after item_from.
                        for pos in range(from_index + 1, len(cur_user_history)):
                            if cur_user_history[pos] == item_to:
                                to_index = pos

                        if to_index > from_index:
                            # Items closer together in a history score higher.
                            score_ii += round(1 / (to_index - from_index), 3)

                if score_ii > min_score or len(item_from_list) < ARM_args.i_top_k:

                    if len(item_from_list) < ARM_args.i_top_k:
                        item_from_list.append(item_to)
                        item_score.append(score_ii)
                        # BUGFIX: refresh the threshold once the list is FULL
                        # (the original compared against i_top_k - 1, leaving
                        # min_score stale after the final fill-in).
                        if len(item_from_list) == ARM_args.i_top_k:
                            min_score = min(item_score)

                    else:
                        index_temp = item_score.index(min(item_score))
                        item_from_list[index_temp] = item_to
                        item_score[index_temp] = score_ii
                        # BUGFIX: the original took min(item_from_list) here,
                        # i.e. the smallest item *id* rather than the
                        # smallest *score*.
                        min_score = min(item_score)
            for k in range(len(item_score)):
                ii_tmp[item_from, item_from_list[k] + user_num] = item_score[k]

    elif ARM_args.ii_method == "ARM":

        old_user_num = len(user_history)
        for item_from in range(item_num):

            item_from_num = len(set(item_history[item_from]))

            # Support = fraction of active users who rated item_from.
            support_item_from = item_from_num / old_user_num

            if support_item_from <= support_threshold:
                continue

            confidences = []
            confidences_item_to = []
            min_score = 0.0
            for item_to in range(item_num):
                if item_from == item_to:
                    continue
                from_to_to_num = 0
                if not ARM_args.ARM_sequential:
                    from_to_to_num = len(set(item_history[item_from]) & set(item_history[item_to]))
                else:
                    # Sequential variant: only count users who rated
                    # item_from before item_to.
                    for user in set(item_history[item_from]) & set(item_history[item_to]):
                        if user_history[user].index(item_from) < user_history[user].index(item_to):
                            from_to_to_num += 1

                # Confidence of the rule item_from -> item_to.
                confidence_from_2_to = from_to_to_num / item_from_num

                if len(confidences) < ARM_args.i_top_k:
                    confidences.append(confidence_from_2_to)
                    confidences_item_to.append(item_to)
                    min_score = min(confidences)
                elif confidence_from_2_to > min_score:
                    index_temp = confidences.index(min_score)
                    confidences_item_to[index_temp] = item_to
                    confidences[index_temp] = confidence_from_2_to
                    min_score = min(confidences)
            for k in range(len(confidences)):
                ii_tmp[item_from, confidences_item_to[k] + user_num] = confidences[k]

    return ii_tmp
