import torch
import torch.nn as nn
from param import args
import numpy as np

class FFM(nn.Module):
    """Field-aware Factorization Machine (FFM) for binary prediction.

    Each field owns its own embedding table; a feature uses a different
    latent vector for each field it interacts with. The score is
    linear term + sum of field-aware pairwise interactions, squashed
    through a sigmoid.

    Args:
        fields: 1-D numpy array; fields[f] is the number of distinct
            feature values in field f.
        laten_dim: dimensionality of the latent (interaction) vectors.
    """

    def __init__(self, fields, laten_dim):
        super(FFM, self).__init__()
        # All fields share one global index space of this size (+1 slack row).
        field_dim = fields.sum() + 1
        # Offset of each field's local index range inside the shared tables.
        # NOTE: np.long was removed in NumPy 1.24; np.int64 is the equivalent.
        self.offsets = np.array((0, *np.cumsum(fields)[:-1]), dtype=np.int64)

        # First-order term: one scalar weight per feature, plus a global bias.
        self.linear = nn.Embedding(field_dim, 1)
        self.bias = nn.Parameter(torch.zeros(1,))

        self.num_fields = len(fields)
        # embeddings[f] holds the latent vectors every feature uses when
        # interacting with field f (this is what makes the model field-aware).
        self.embeddings = nn.ModuleList([
            nn.Embedding(field_dim, laten_dim) for _ in range(self.num_fields)
        ])
        for embedding in self.embeddings:
            nn.init.xavier_uniform_(embedding.weight)

    def forward(self, x):
        """Compute predicted probabilities.

        Args:
            x: LongTensor of shape (batch, num_fields) holding each field's
               local feature index.

        Returns:
            Tensor of shape (batch,) with values in (0, 1).
        """
        # Shift each field's local indices into the shared global index space.
        tmp = x + x.new_tensor(self.offsets).unsqueeze(0)
        linear_part = torch.sum(self.linear(tmp), dim=1) + self.bias
        # BUG FIX: the interaction embeddings must also be looked up with the
        # offset indices (tmp, not raw x) — the tables are sized for the global
        # index space, so raw per-field indices collide across fields.
        field_x = [embedding(tmp) for embedding in self.embeddings]
        ix = []
        for i in range(self.num_fields - 1):
            for j in range(i + 1, self.num_fields):
                # Field-aware interaction <v_{j, field i}, v_{i, field j}>.
                ix.append(field_x[j][:, i] * field_x[i][:, j])
        ix = torch.stack(ix, dim=1)
        # Sum over all pairs and over the latent dimension -> (batch, 1).
        ffm_part = torch.sum(torch.sum(ix, dim=1), dim=1, keepdim=True)

        x = linear_part + ffm_part
        x = torch.sigmoid(x.squeeze(1))
        return x

