import torch
import torch.nn.functional as F
import torch.nn as nn


class Aggregator(nn.Module):
    """
    Neighborhood aggregator for a KGCN-style layer.

    ``aggregator`` selects how the self vector and the aggregated neighbor
    vector are combined; it must be one of ``'sum'``, ``'concat'`` or
    ``'neighbor'`` (any other value falls through to the neighbor-only path).
    """

    def __init__(self, batch_size, dim, n_neighbor, aggregator):
        """
        Args:
            batch_size: nominal batch size (kept for compatibility; forward
                takes its own ``batch_size`` argument).
            dim: entity/relation embedding dimensionality.
            n_neighbor: number of sampled neighbors per entity.
            aggregator: combination mode, see class docstring.
        """
        super(Aggregator, self).__init__()
        self.batch_size = batch_size
        self.dim = dim
        self.n_neighbor = n_neighbor
        # FIX: the original called .cuda() on the Linear layers here, which
        # crashes on CPU-only machines and hard-codes device placement.
        # Leave placement to the caller (model.cuda() / model.to(device)
        # moves this submodule's parameters as well).
        if aggregator == 'concat':
            # 'concat' doubles the feature dimension before projection.
            self.weights = nn.Linear(2 * dim, dim, bias=True)
        else:
            self.weights = nn.Linear(dim, dim, bias=True)
        nn.init.xavier_normal_(self.weights.weight)
        self.aggregator = aggregator

    def forward(self, batch_size, self_vectors, neighbor_vectors, neighbor_relations, user_embeddings, item_embeddings, act):
        """
        Combine each entity's own vector with its relation-weighted
        neighborhood aggregate.

        Args:
            batch_size: current batch size.
            self_vectors: [batch_size, -1, dim] entity vectors.
            neighbor_vectors: [batch_size, -1, n_neighbor, dim].
            neighbor_relations: [batch_size, -1, n_neighbor, dim].
            user_embeddings: [batch_size, dim] tensor, or a list of such
                tensors which is summed element-wise before use.
            item_embeddings: unused at runtime (kept for interface
                compatibility).
            act: activation — currently unused because the final projection
                is disabled (see commented lines below).

        Returns:
            [batch_size, -1, dim] tensor (the middle dimension doubles in
            'concat' mode, since the 2*dim features are re-viewed as dim).
        """
        if isinstance(user_embeddings, list):
            # A list of per-layer user embeddings is reduced to one tensor
            # by element-wise summation.
            user_embeddings = torch.stack(user_embeddings).sum(dim=0)
        # [batch_size, dim] -> [batch_size, 1, 1, dim] for broadcasting.
        user_embeddings = user_embeddings.view(batch_size, 1, 1, self.dim)
        neighbors_agg = self._mix_neighbor_vectors(batch_size, neighbor_vectors, neighbor_relations, user_embeddings, self_vectors)
        if self.aggregator == 'sum':
            # Add the aggregate onto the entity's own vector.
            output = (self_vectors + neighbors_agg).view((-1, self.dim))
        elif self.aggregator == 'concat':
            output = torch.cat((self_vectors, neighbors_agg), dim=-1)
            output = output.view((-1, 2 * self.dim))
        else:
            # 'neighbor': use the aggregate alone.
            output = neighbors_agg.view((-1, self.dim))

        # NOTE(review): the linear projection + activation are disabled, so
        # self.weights and ``act`` are unused at runtime.
        # output = self.weights(output)
        # return act(output.view((batch_size, -1, self.dim)))

        return output.view(batch_size, -1, self.dim)

    def pruner(self, user_relation_score, keep_ration=0.5):
        """
        Build a 0/1 mask keeping the top ``keep_ration`` fraction of scores
        along the last dimension.

        Args:
            user_relation_score: score tensor; pruning is per-slice along
                the last dimension.
            keep_ration: fraction of entries to keep (floor of n * ratio).

        Returns:
            Float mask of the same shape, 1.0 for kept entries.
        """
        k = int(user_relation_score.shape[-1] * keep_ration)
        values, indices = torch.topk(user_relation_score, k=k, dim=-1, sorted=True)
        # k-th largest score per slice is the cut-off. ``...`` instead of
        # the original [:, :, -1] so the method no longer assumes rank-3.
        threshold = values[..., -1]
        # FIX: use >= so the threshold element itself survives; the original
        # strict > dropped it, keeping only ~k-1 entries instead of k.
        keep_masks = (user_relation_score >= threshold.unsqueeze(-1)).float()
        return keep_masks

    def _mix_neighbor_vectors(self, batch_size, neighbor_vectors, neighbor_relations, user_embeddings, self_vectors):
        """
        Aggregate neighbor vectors, weighting each neighbor by a softmax over
        user-relation affinity scores (optionally pruned to the top half).
        """
        # [batch_size, 1, dim] -> [batch_size, 1, 1, dim]
        user_embeddings = user_embeddings.view((batch_size, 1, 1, self.dim))
        # [batch_size, -1, n_neighbor, dim] -> [batch_size, -1, n_neighbor]
        user_relation_scores = torch.mean(user_embeddings * neighbor_relations, dim=-1)
        user_relation_scores_normalized = F.softmax(user_relation_scores, dim=-1)

        # Prune low-affinity neighbors only for deeper hops or wide
        # neighborhoods; a single-hop, small neighborhood is left intact.
        if user_relation_scores_normalized.shape[1] != 1 or self.n_neighbor > 32:
            keeps_masks = self.pruner(user_relation_scores_normalized)
            user_relation_scores_normalized = user_relation_scores_normalized * keeps_masks
        # [batch_size, -1, n_neighbor] -> [batch_size, -1, n_neighbor, 1]
        user_relation_scores_normalized = user_relation_scores_normalized.unsqueeze(dim=-1)
        # [batch_size, -1, n_neighbor, 1] * [batch_size, -1, n_neighbor, dim] -> [batch_size, -1, dim]
        # NOTE(review): mean (not sum) over the neighbor axis, so softmax
        # weights are additionally divided by n_neighbor — presumably
        # intentional scaling; confirm against training results.
        neighbors_aggregated = torch.mean(user_relation_scores_normalized * neighbor_vectors, dim=2)
        return neighbors_aggregated


class LabelAggregator(nn.Module):
    """
    Label-propagation aggregator: mixes each entity's own label with a
    relation-weighted average of its neighbors' labels, keeping ground-truth
    labels where ``masks`` is True.
    """

    def __init__(self, batch_size, dim, n_neighbor):
        """
        Args:
            batch_size: batch size used to reshape user embeddings.
            dim: embedding dimensionality.
            n_neighbor: number of sampled neighbors per entity.
        """
        # FIX: nn.Module.__init__ must run before attribute assignment; the
        # original assigned attributes first, which only worked by accident
        # for plain (non-Parameter, non-Module) values.
        super(LabelAggregator, self).__init__()
        self.batch_size = batch_size
        self.dim = dim
        self.n_neighbor = n_neighbor

    def forward(self, self_labels, neighbor_labels, neighbor_relations, user_embeddings, masks):
        """
        Args:
            self_labels: [batch_size, -1] labels of the entities themselves.
            neighbor_labels: [batch_size, -1, n_neighbor] neighbor labels.
            neighbor_relations: [batch_size, -1, n_neighbor, dim].
            user_embeddings: [batch_size, dim] tensor, or a list of such
                tensors which is summed element-wise before use.
            masks: bool tensor, [batch_size, -1]; True entries keep their own
                label, False entries take the propagated neighbor estimate.
                Must be bool for ``~`` to work as logical negation.

        Returns:
            [batch_size, -1] mixed labels.
        """
        if isinstance(user_embeddings, list):
            # A list of per-layer user embeddings is reduced to one tensor
            # by element-wise summation.
            user_embeddings = torch.stack(user_embeddings).sum(dim=0)
        # [batch_size, dim] -> [batch_size, 1, 1, dim] for broadcasting.
        user_embeddings = user_embeddings.view(self.batch_size, 1, 1, self.dim)

        # [batch_size, -1, n_neighbor]: affinity of the user to each
        # neighbor's relation, averaged over the feature dimension.
        user_relation_scores = torch.mean(user_embeddings * neighbor_relations, dim=-1)
        user_relation_scores_normalized = F.softmax(user_relation_scores, dim=-1)

        # [batch_size, -1]: weighted mean of neighbor labels (mean, not sum,
        # so weights are additionally divided by n_neighbor — matches the
        # Aggregator class above).
        neighbors_aggregated = torch.mean(user_relation_scores_normalized * neighbor_labels, dim=-1)
        output = masks.float() * self_labels + (~masks).float() * neighbors_aggregated

        return output