import torch
import torch.nn as nn
import numpy as np

class DeepFM(nn.Module):
    """DeepFM CTR model: a factorization machine (first-order linear term
    plus second-order pairwise interactions) combined with a small MLP over
    the shared feature embeddings; outputs a sigmoid probability per sample.

    Args:
        fields: iterable of per-field vocabulary sizes; input ``x`` is
            expected to hold, for field ``i``, an index in ``[0, fields[i])``.
        laten_dim: dimensionality of the shared latent embedding vectors.
    """

    def __init__(self, fields, laten_dim):
        super(DeepFM, self).__init__()
        # Per-field offsets so each field's local indices map to a disjoint
        # slice of one shared embedding table.
        # NOTE: np.long was removed in NumPy 1.24 — use np.int64 explicitly.
        self.offsets = np.array((0, *np.cumsum(fields)[:-1]), dtype=np.int64)
        feature_dim = sum(fields) + 1  # +1 spare row beyond the last offset range

        # FM first-order (linear) part: one weight per feature id, plus a
        # global bias.
        self.linear = nn.Embedding(feature_dim, 1)
        self.bias = nn.Parameter(torch.zeros((1,)))

        # Shared latent embeddings, used by both the FM second-order term
        # and the deep network.
        self.embedding = nn.Embedding(feature_dim, laten_dim)
        nn.init.xavier_uniform_(self.embedding.weight)
        nn.init.xavier_uniform_(self.linear.weight)

        # Deep part: one hidden layer (Linear -> BN -> ReLU -> Dropout)
        # followed by a scalar output head.
        input_dim = len(fields) * laten_dim
        hidden_dim = input_dim // 2
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(hidden_dim, 1),
        ]
        for layer in layers:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0)
        self.dnn = nn.Sequential(*layers)

    def forward(self, x):
        """Compute per-sample click probabilities.

        Args:
            x: LongTensor of shape ``(batch, num_fields)`` holding each
               field's local category index.

        Returns:
            Tensor of shape ``(batch,)`` with sigmoid outputs in (0, 1).
        """
        # Shift each field's local index into the shared embedding table.
        tmp = x + x.new_tensor(self.offsets).unsqueeze(0)
        embeddings = self.embedding(tmp)

        # FM first-order (linear) term.
        linear_part = torch.sum(self.linear(tmp), dim=1) + self.bias
        # FM second-order term via the (sum)^2 - sum-of-squares identity,
        # which computes all pairwise interactions in O(n * k).
        square_of_sum = torch.sum(embeddings, dim=1) ** 2
        sum_of_square = torch.sum(embeddings ** 2, dim=1)
        inner_part = 0.5 * torch.sum(square_of_sum - sum_of_square, dim=1, keepdim=True)

        fm_part = linear_part + inner_part

        # Deep part runs on the flattened per-sample embeddings.
        dnn_part = self.dnn(embeddings.view(embeddings.size(0), -1))

        # Combine FM and deep scores, squash to a probability.
        x = fm_part + dnn_part
        return torch.sigmoid(x.squeeze(1))
