import torch
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    """Attention-weighted item-embedding model (EGES-style).

    Each center item's representation is an attention-weighted combination
    of its own item embedding and the embeddings of its side information.
    `forward` returns the element-wise product with the context-item
    embedding; summing that product over the feature dimension (done in
    `loss`) yields the raw dot-product logit for the (center, context) pair.
    """

    def __init__(self, Grapher, Sampler, out_dim):
        """
        Grapher: provides `side_information_dict`, mapping each side-info
            field name to the collection of its possible values (vocabulary).
        Sampler: provides `all_node`, the collection of all item nodes.
        out_dim: dimensionality of every embedding table.
        """
        super().__init__()
        self.out_dim = out_dim
        # Number of item nodes in the graph; input size of the item embedding.
        self.center_node_number = len(Sampler.all_node)
        self.contex_node_number = self.center_node_number
        # Vocabulary size of each side-information field, stored as a list.
        self.side_in_dim = [
            len(values) for values in Grapher.side_information_dict.values()
        ]
        self.center_item_emb = nn.Embedding(self.center_node_number, out_dim)
        self.contex_item_emb = nn.Embedding(self.contex_node_number, out_dim)
        self.all_side_emb = nn.ModuleList(
            [nn.Embedding(in_dim, out_dim) for in_dim in self.side_in_dim]
        )
        # Per-item attention logits: one weight for the item embedding itself
        # plus one per side-information field.
        self.A = nn.Embedding(self.center_node_number, 1 + len(self.all_side_emb))

    def forward(self, center_items, contex_items, side_information):
        """
        center_items: LongTensor [batch] of center item ids.
        contex_items: LongTensor [batch] of context item ids (positive or
            negative samples).
        side_information: list of LongTensor [batch], one tensor per
            side-information field, in the same order as `all_side_emb`.

        Returns: FloatTensor [batch, out_dim] — element-wise product of the
        attention-weighted center embedding and the context embedding.
        """
        batch_len = center_items.shape[0]
        # Softmax over the (1 + num_side) attention logits of each item.
        # F.softmax is numerically stable; the original exp()/sum() form
        # overflows to inf/NaN for large logits.
        weight_a = F.softmax(self.A(center_items), dim=-1)
        weight_a = weight_a.unsqueeze(1)  # [batch, 1, 1 + num_side]
        # Gather all embedding sources: the item itself plus each side field.
        embedding_list = [self.center_item_emb(center_items)]
        for side_emb, side_ids in zip(self.all_side_emb, side_information):
            embedding_list.append(side_emb(side_ids))
        # Stack into [batch, 1 + num_side, out_dim]; the bmm against the
        # [batch, 1, 1 + num_side] weights computes the weighted average.
        new_items_emb = torch.stack(embedding_list, dim=1)
        weight_item_emb = torch.bmm(weight_a, new_items_emb).reshape(
            batch_len, self.out_dim
        )
        contex_items_emb = self.contex_item_emb(contex_items)

        return weight_item_emb * contex_items_emb

    def loss(self, pre, labels):
        """Binary cross-entropy over the pairwise dot-product scores.

        pre: FloatTensor [batch, out_dim], the output of `forward`; summing
            over dim=1 yields the raw logit (dot product) per pair.
        labels: FloatTensor [batch] of 0/1 targets.

        Uses binary_cross_entropy_with_logits, which is numerically stable
        for extreme logits and removes the need for the original
        sigmoid + clamp workaround.
        """
        logits = torch.sum(pre, dim=1)
        return F.binary_cross_entropy_with_logits(logits, labels)