import torch
import torch as t
import torch.nn as nn
from torch.nn import functional as F
from ATT import ATT
from sklearn.metrics import roc_auc_score
import numpy as np
from aggregators import Aggregator
from aggregators import LabelAggregator



class PEKN(nn.Module):
    """Knowledge-graph-enhanced recommender.

    Combines three ideas visible in this file:
      * a KGCN-style item tower (``aggregate``) that enriches the item
        embedding with multi-hop KG neighbours,
      * a RippleNet-style user tower (``_key_addressing``) that attends over
        the user's ripple-set triples (h, R, t),
      * a KGNN-LS label-smoothness regularizer (``_regularization``) that
        propagates interaction labels through the KG.

    NOTE(review): tensor shapes in the comments below are inferred from the
    visible ``view``/``matmul`` calls — confirm against the data pipeline.
    """

    def __init__(self, args, n_user, n_entity, n_relation, adj_entity, adj_relation, interaction_table, offset):
        super(PEKN, self).__init__()
        self._parse_args(args, n_user, n_entity, n_relation, adj_entity, adj_relation)
        # Logits-based BCE; used only for the LS term (see _compute_loss).
        self.criterion = nn.BCEWithLogitsLoss()
        self.transform_matrix = nn.Linear(self.dim, self.dim, bias=False)
        self.user_emb_matrix = nn.Embedding(self.n_user, self.dim)
        self.entity_emb_matrix = nn.Embedding(self.n_entity, self.dim)
        self.relation_emb_matrix = nn.Embedding(self.n_relation, self.dim)
        # Per-relation dim x dim projection matrices for _key_addressing.
        self.relation_emb_matrix_user = nn.Embedding(self.n_relation, self.dim * self.dim)
        self.aggregator = Aggregator(self.batch_size, self.dim, self.n_neighbor, args.aggregator)
        # Fix: build the attention head ONCE so its parameters are registered
        # and trained.  The previous code constructed a fresh ``ATT()`` inside
        # every predict() call, so its weights were re-initialized on each
        # forward pass and never learned anything.
        self.ATT2 = ATT()
        self.interaction_table = interaction_table
        self.offset = offset
        nn.init.xavier_uniform_(self.transform_matrix.weight)
        nn.init.normal_(self.user_emb_matrix.weight, std=0.1)
        nn.init.normal_(self.entity_emb_matrix.weight, std=0.1)
        nn.init.normal_(self.relation_emb_matrix.weight, std=0.1)
        nn.init.normal_(self.relation_emb_matrix_user.weight, std=0.1)

    def _parse_args(self, args, n_user, n_entity, n_relation, adj_entity, adj_relation):
        """Copy hyper-parameters and KG adjacency lookups onto the module."""
        self.n_user = n_user
        self.n_entity = n_entity
        self.n_relation = n_relation
        self.ls_weight = args.ls_weight
        self.n_iter = args.n_iter
        self.batch_size = args.batch_size
        self.n_neighbor = args.neighbor_sample_size
        self.dim = args.dim
        self.l2_weight = args.l2_weight
        self.lr = args.lr
        self.kge_weight = args.kge_weight
        self.adj_entity = adj_entity
        self.adj_relation = adj_relation
        # Store only the aggregator *name* here.  (The old code wrote it to
        # self.aggregator, which __init__ then silently overwrote with the
        # actual Aggregator module.)
        self.aggregator_name = args.aggregator

    def forward(self,
                users: torch.LongTensor,
                items: torch.LongTensor,
                labels: torch.LongTensor,
                memories_h: list,
                memories_r: list,
                memories_t: list,):
        """Run one training/eval step on a batch.

        Args:
            users / items / labels: [batch] index / index / 0-1 label tensors.
            memories_h/r/t: per-hop ripple-set index tensors (n_iter entries).

        Returns:
            dict with 'loss', 'base_loss', 'kge_loss', 'ls_loss' and
            'scores' (predicted click probabilities, [batch]).
        """
        batch_size = items.size(0)
        if batch_size != self.batch_size:
            # The last batch of an epoch may be smaller; the view()/reshape
            # calls below rely on self.batch_size matching the actual batch.
            self.batch_size = batch_size

        user_embeddings = self.user_emb_matrix(users)
        item_embeddings = self.entity_emb_matrix(items)

        h_emb_list = []
        r_emb_list = []
        t_emb_list = []
        for i in range(self.n_iter):
            # [batch, n_memory, dim]
            h_emb_list.append(self.entity_emb_matrix(memories_h[i]))
            # [batch, n_memory, dim, dim] — one projection matrix per triple
            r_emb_list.append(
                self.relation_emb_matrix_user(memories_r[i]).view(-1, self.n_neighbor, self.dim, self.dim)
            )
            t_emb_list.append(self.entity_emb_matrix(memories_t[i]))

        # KGCN item tower.
        n_entities, n_relations = self.get_neighbors(items)
        item_embeddings, self.aggregators = self.aggregate(user_embeddings, item_embeddings, n_entities, n_relations)

        # Label-smoothness propagation; sets self.predicted_labels.
        self._regularization(users, n_entities, n_relations, user_embeddings)

        # RippleNet user tower: per-hop user interest vectors.  (Renamed from
        # 'user_embeddings' — the first return value is a *list* of vectors.)
        cf_list, _ = self._key_addressing(h_emb_list, r_emb_list, t_emb_list, item_embeddings,
                                          user_embeddings)

        scores = self.predict(item_embeddings, cf_list)

        return_dict = self._compute_loss(
            scores, labels, h_emb_list, t_emb_list, r_emb_list
        )
        return_dict['scores'] = scores

        return return_dict

    def aggregate(self, user_embeddings, item_embeddings, entities, relations):
        '''
        Make item embeddings by aggregating neighbor vectors.

        Returns:
            res: aggregated item vectors, [batch, dim].
            aggregators: the aggregator module used at each iteration.
        '''
        aggregators = []
        entity_vectors = [self.entity_emb_matrix(entity) for entity in entities]
        relation_vectors = [self.relation_emb_matrix(relation) for relation in relations]
        for i in range(self.n_iter):
            # tanh on the final layer, relu elsewhere (KGCN convention).
            act = torch.tanh if i == self.n_iter - 1 else torch.relu
            aggregators.append(self.aggregator)
            entity_vectors_next_iter = []
            for hop in range(self.n_iter - i):
                vector = self.aggregator(
                    batch_size=self.batch_size,
                    self_vectors=entity_vectors[hop],
                    neighbor_vectors=entity_vectors[hop + 1].view((self.batch_size, -1, self.n_neighbor, self.dim)),
                    neighbor_relations=relation_vectors[hop].view((self.batch_size, -1, self.n_neighbor, self.dim)),
                    user_embeddings=user_embeddings,
                    item_embeddings=item_embeddings,
                    act=act)
                entity_vectors_next_iter.append(vector)
            entity_vectors = entity_vectors_next_iter
        res = entity_vectors[0].view((self.batch_size, self.dim))
        return res, aggregators

    def predict(self, item_embeddings, cf_list):
        """Score the batch: attention over item*user interaction features.

        Args:
            item_embeddings: [batch, dim] aggregated item vectors.
            cf_list: list of n_iter per-hop user interest vectors, [batch, dim].

        Returns:
            click probabilities in (0, 1), shape [batch].
        """
        # Sum all per-hop interest vectors.  (The old code did
        # ``y = cf_list[-1]; y += cf_list[i]``, which mutated the last list
        # element in place via the in-place add.)
        y = torch.stack(cf_list, dim=0).sum(dim=0)
        interaction = item_embeddings * y
        # Use the registered attention head (see __init__); previously a
        # brand-new untrained ATT() was created here on every call.
        final_logit = self.ATT2(interaction)
        scores = final_logit.sum(dim=1)
        scores = t.squeeze(scores)
        return t.sigmoid(scores)

    def get_neighbors(self, items):
        """Gather the sampled n-hop KG neighbourhood of the batch items.

        Returns (entities, relations): lists of index tensors, hop i shaped
        [batch, n_neighbor^i].  Lookups into the (host-side) adjacency arrays
        are done on CPU; results are moved to the items' device rather than
        hard-coded ``.cuda()`` so CPU runs also work.
        """
        device = items.device
        seeds = items.unsqueeze(dim=1)
        entities = [seeds]
        relations = []
        for h in range(self.n_iter):
            idx = entities[h].cpu()
            n_e = self.adj_entity[idx]
            n_r = self.adj_relation[idx]
            neighbor_entities = torch.LongTensor(n_e).view((self.batch_size, -1)).to(device)
            neighbor_relations = torch.LongTensor(n_r).view((self.batch_size, -1)).to(device)
            entities.append(neighbor_entities)
            relations.append(neighbor_relations)
        return entities, relations

    def _key_addressing(self, h_emb_list, r_emb_list, t_emb_list, item_embeddings, user_embeddings):
        """RippleNet-style preference propagation over the ripple sets.

        At each hop, both the (progressively enriched) item embedding and the
        user embedding act as queries over the relation-projected heads; the
        joint attention weights read out a dim-sized interest vector o_cf.

        Returns:
            cf_list: list of n_iter read-out vectors, each [batch, dim].
            item_embeddings: item embeddings enriched by the hop read-outs.
        """
        cf_list = []
        for hop in range(self.n_iter):
            # [batch, n_memory, dim, 1]
            h_expanded = t.unsqueeze(h_emb_list[hop], dim=3)

            # Relation-projected heads, [batch, n_memory, dim]
            Rh = t.squeeze(t.matmul(r_emb_list[hop], h_expanded), dim=3)

            # Queries, [batch, dim, 1]
            v = t.unsqueeze(item_embeddings, dim=2)
            u = t.unsqueeze(user_embeddings, dim=2)

            # Attention logits over the memories, [batch, n_memory]
            probs_v = t.squeeze(t.matmul(Rh, v), dim=2)
            probs_u = t.squeeze(t.matmul(Rh, u), dim=2)

            # [batch, n_memory]
            probs_normalized_v = F.softmax(probs_v, dim=1)
            probs_normalized_u = F.softmax(probs_u, dim=1)

            # [batch, n_memory, 1]
            probs_expanded_v = t.unsqueeze(probs_normalized_v, dim=2)
            probs_expanded_u = t.unsqueeze(probs_normalized_u, dim=2)

            # Joint (item x user) attention read-out over the tails, [batch, dim]
            o_cf = t.sum(t_emb_list[hop] * probs_expanded_v * probs_expanded_u, dim=1)
            # The next hop attends with the enriched item embedding.
            item_embeddings = o_cf + item_embeddings
            cf_list.append(o_cf)

        return cf_list, item_embeddings

    def _compute_loss(self, scores, labels, h_emb_list, t_emb_list, r_emb_list):
        """Assemble the total loss: CTR BCE + KGE term + LS regularizer."""
        # predict() already applies a sigmoid, so use plain BCE here.  The
        # old code fed the probabilities into BCEWithLogitsLoss, applying
        # sigmoid twice.  Clamp to keep log() finite at saturation.
        eps = 1e-7
        base_loss = F.binary_cross_entropy(scores.clamp(eps, 1.0 - eps), labels.float())

        kge_loss = 0
        for hop in range(self.n_iter):
            # RippleNet KGE term: maximise sigmoid(h^T R t) over the memories.
            # (The previous version cast the float embedding tensors to int64
            # and fed them back through nn.Embedding, i.e. it used embedding
            # *values* as entity indices — meaningless and index-unsafe.)
            # [batch, n_memory, 1, dim]
            h_expanded = t.unsqueeze(h_emb_list[hop], dim=2)
            # [batch, n_memory, dim, 1]
            t_expanded = t.unsqueeze(t_emb_list[hop], dim=3)
            # [batch, n_memory]
            hRt = t.squeeze(
                t.matmul(t.matmul(h_expanded, r_emb_list[hop]), t_expanded)
            )
            kge_loss += t.mean(t.sigmoid(hRt))

        # Negative sign: we *maximise* the triple plausibility.
        kge_loss = -self.kge_weight * kge_loss

        # LS loss on the propagated labels (logits criterion, as in KGNN-LS).
        ls_loss = self.criterion(self.predicted_labels, labels.float())
        ls_loss = self.ls_weight * ls_loss

        loss = base_loss + kge_loss + ls_loss

        loss_dict = dict()
        loss_dict['loss'] = loss
        loss_dict['base_loss'] = base_loss
        loss_dict['kge_loss'] = kge_loss
        loss_dict['ls_loss'] = ls_loss
        return loss_dict

    def evaluate(self, users, items, labels, memories_h, memories_r, memories_t):
        """Return (auc, acc) for a batch; no gradients are tracked."""
        with torch.no_grad():
            return_dict = self.forward(users, items, labels, memories_h, memories_r, memories_t)
        scores = return_dict["scores"].cpu().numpy()
        labels = labels.cpu().numpy()
        auc = roc_auc_score(y_true=labels, y_score=scores)
        predictions = (scores >= 0.5).astype(np.int64)
        acc = np.mean(np.equal(predictions, labels))
        return auc, acc

    def _regularization(self, user, entities, relations, user_embeddings):
        """Label-smoothness regularization (KGNN-LS style).

        Looks up an initial label for every entity reachable from the batch
        items via self.interaction_table, propagates the labels through the
        KG with a LabelAggregator, and stores the batch predictions in
        ``self.predicted_labels`` (read by _compute_loss).

        NOTE(review): assumes interaction_table maps user*offset + entity to
        a 0/1 interaction label, with 0.5 for unseen pairs — confirm with the
        table's builder.
        """
        entity_labels = []
        # True => this entity's label is reset to its initial value during
        # propagation (it is a labelled, non-held-out item).
        reset_masks = []
        holdout_item_for_user = None

        # [batch_size, 1]; loop-invariant, so computed once.
        users = t.unsqueeze(user, 1)

        for entities_per_iter in entities:
            # [batch_size, n_neighbor^i] — unique (user, entity) key
            user_entity_concat = users * self.offset + entities_per_iter

            # The first hop contains the batch items, which are held out.
            if holdout_item_for_user is None:
                holdout_item_for_user = user_entity_concat

            # [batch_size, n_neighbor^i]
            initial_label = self.interaction_table(user_entity_concat)
            # False exactly where the entity is the held-out item itself.
            holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
            # True where the entity carries a real 0/1 label (not 0.5 default).
            reset_mask = (initial_label - 0.5).bool()
            reset_mask = reset_mask & holdout_mask  # remove held-out items
            # Held-out items restart from the uninformative 0.5 label.
            initial_label = holdout_mask.float() * initial_label + (~holdout_mask).float() * 0.5

            reset_masks.append(reset_mask)
            entity_labels.append(initial_label)
        reset_masks = reset_masks[:-1]  # no reset after the last iteration

        # Label propagation.
        relation_vectors = [self.relation_emb_matrix(i) for i in relations]
        # NOTE(review): presumably LabelAggregator has no trainable weights;
        # if it does, it should be created once in __init__ instead.
        aggregator = LabelAggregator(self.batch_size, self.dim, self.n_neighbor)
        for i in range(self.n_iter):
            entity_labels_next_iter = []
            for hop in range(self.n_iter - i):
                vector = aggregator(self_labels=entity_labels[hop],
                                    neighbor_labels=t.reshape(
                                        entity_labels[hop + 1], [self.batch_size, -1, self.n_neighbor]),
                                    neighbor_relations=t.reshape(
                                        relation_vectors[hop], [self.batch_size, -1, self.n_neighbor, self.dim]),
                                    user_embeddings=user_embeddings,
                                    masks=reset_masks[hop])
                entity_labels_next_iter.append(vector)
            entity_labels = entity_labels_next_iter
        # [batch_size] predicted labels for the held-out batch items.
        self.predicted_labels = t.squeeze(entity_labels[0], dim=-1)







