import numpy as np
import torch
from torch.nn.functional import one_hot
import argparse
import torch.nn.functional as F
from utils import *

class PointWiseFeedForward(torch.nn.Module):
    """Residual position-wise feed-forward block.

    Computes ``x + ReLU(Dropout(Linear1(x)))`` on the last dimension.
    """

    def __init__(self, hidden_units, dropout_rate):
        super(PointWiseFeedForward, self).__init__()

        self.Linear1 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        # NOTE(review): Linear2/dropout2 are registered but never used in
        # forward(); kept so existing checkpoints still load.
        self.Linear2 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        # Project, drop, activate — then add the residual connection.
        projected = self.Linear1(inputs)
        projected = self.dropout1(projected)
        activated = self.relu(projected)
        return activated + inputs

class PointWiseFeedForward2(torch.nn.Module):
    """Collapse a [batch, L, H] feature tensor to one logit per batch row.

    Linear1 maps the hidden axis H to 1; after squeezing and ReLU,
    Linear2 maps the length axis L to 1.
    """

    def __init__(self, hidden_units1, hidden_units2, dropout_rate):
        super(PointWiseFeedForward2, self).__init__()

        self.Linear1 = torch.nn.Linear(hidden_units1, 1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.Tanh = torch.nn.Tanh()
        self.ReLU = torch.nn.ReLU()

        # NOTE(review): Tanh and dropout2 are registered but never used in
        # forward(); kept so existing checkpoints still load.
        self.Linear2 = torch.nn.Linear(hidden_units2, 1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        scores = self.dropout1(self.Linear1(inputs)).squeeze(2)  # [batch, L]
        scores = self.ReLU(scores)
        return self.Linear2(scores)  # [batch, 1]
# pls use the following self-made multihead attention layer
# in case your pytorch version is below 1.6 or for other reasons
# https://github.com/pmixer/TiSASRec.pytorch/blob/master/model.py

class TimeAwareMultiHeadAttention(torch.nn.Module):
    # required homebrewed mha layer for Ti/SASRec experiments
    # Multi-head attention whose logits receive a learned additive positional
    # bias and, depending on `Biasmode`, an extra distance-based bias matrix G
    # ('lognormal' | 'normal' | 'abs' | 'learn'; 'None' disables G).
    def __init__(self, hidden_size, head_num, dropout_rate, dev, maxlen, Biasmode):
        """hidden_size: model dim; head_num: attention heads; dev: torch device;
        maxlen: sequence length (bias matrices are maxlen x maxlen);
        Biasmode: one of 'None', 'lognormal', 'normal', 'abs', 'learn'."""
        super(TimeAwareMultiHeadAttention, self).__init__()
        self.Q_w = torch.nn.Linear(hidden_size, hidden_size)
        self.K_w = torch.nn.Linear(hidden_size, hidden_size)
        self.V_w = torch.nn.Linear(hidden_size, hidden_size)

        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.softmax = torch.nn.Softmax(dim=-1)

        self.hidden_size = hidden_size
        self.head_num = head_num
        self.head_size = hidden_size // head_num  # per-head channel size (C / h)
        self.dropout_rate = dropout_rate
        self.dev = dev
        self.maxlen = maxlen
        # Free-form learned positional bias, added to every logit matrix below.
        self.bias = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(maxlen, maxlen).to(self.dev), mean=0, std=1))
        self.Biasmode = Biasmode

        # Projections producing the per-position spread (via Ud) and influence
        # center (via Up) used by the distance-based bias modes.
        self.Wp = torch.nn.Linear(hidden_size, hidden_size)
        self.Ud = torch.nn.Linear(hidden_size, 1)
        self.Up = torch.nn.Linear(hidden_size, 1)

    def forward(self, queries, keys):
        """Attend `queries` over `keys` (values are derived from `keys`).

        Both inputs are [N, T, hidden_size]; assumes T == maxlen since the
        (maxlen, maxlen) bias is added to the (T, T) logits."""
        Q, K, V = self.Q_w(queries), self.K_w(keys), self.V_w(keys)

        # head dim * batch dim for parallelization (h*N, T, C/h)
        Q_ = torch.cat(torch.split(Q, self.head_size, dim=2), dim=0)
        K_ = torch.cat(torch.split(K, self.head_size, dim=2), dim=0)
        V_ = torch.cat(torch.split(V, self.head_size, dim=2), dim=0)


        # batched channel wise matmul to gen attention weights
        attn_weights = Q_.matmul(torch.transpose(K_, 1, 2))

        # seq length adaptive scaling
        attn_weights = attn_weights / (K_.shape[-1] ** 0.5)
        attn_weights += self.bias     # learned positional bias (always applied)

        # calculate the distance-based bias G
        if self.Biasmode != 'None':
            # NOTE(review): Wp is Linear(hidden_size, hidden_size) but Q_'s last
            # dim is head_size = hidden_size // head_num; this only lines up
            # when head_num == 1 — confirm intended configuration.
            Z = self.Ud(torch.tanh(self.Wp(Q_)).to(self.dev))
            P = self.Up(torch.tanh(self.Wp(Q_)).to(self.dev))
            D = self.maxlen * torch.sigmoid(Z)
            D = D.repeat(1, 1, self.maxlen)  # [batchsize, maxlen, maxlen]  spread matrix: one value per row, since behaviour i influences all others with a single distribution
            P = self.maxlen * torch.sigmoid(P)
            P = P.repeat(1, 1, self.maxlen)  # [batchsize, maxlen, maxlen]  influence-center matrix: one value per row, the position of behaviour i's maximal influence
            G = torch.full_like(attn_weights, 0)  # [batchsize, maxlen, maxlen]  initialize the bias matrix G
            I = torch.tensor(range(self.maxlen)).to(self.dev)
            I = I.unsqueeze(-1)
            I = I.repeat(1, self.maxlen)
            I = I.repeat(G.shape[0], 1, 1)    # index matrix for behaviour i: row i is filled with the value i
            J = torch.tensor(range(self.maxlen)).to(self.dev)
            J = J.unsqueeze(0)
            J = J.repeat(self.maxlen, 1)
            J = J.repeat(G.shape[0], 1, 1)  # index matrix for behaviour j: column j is filled with the value j

            if self.Biasmode == 'lognormal':
                # Log-normal-shaped bias over the gap size.
                # G[i, j] is the influence of behaviour i on behaviour j, i.e.
                # row i describes behaviour i's influence on all other behaviours.


                interval = J - I
                # bias_mask_num = torch.eye(self.maxlen)
                # true_matrix = torch.Tensor([[True] * self.maxlen] * self.maxlen)

                # bias_mask = torch.where(bias_mask_num>0, true_matrix, false_matrix)
                # bias_mask = bias_mask.repeat(G.shape[0], 1, 1)
                # NOTE(review): created on CPU; this branch round-trips through
                # numpy, so it runs on CPU and detaches `interval` from autograd.
                paddingBias = torch.ones(attn_weights.shape) * (1)
                interval = torch.FloatTensor(interval.cpu().numpy())
                # Clamp gaps below 1 to 1 so the logarithm is well defined.
                interval = torch.where(interval>=1, interval, paddingBias)


                interval = interval.numpy()
                logInterval = np.log(interval)
                interval = torch.tensor(interval).to(self.dev)
                logInterval = torch.Tensor(logInterval).to(self.dev)
                # center = torch.Tensor(np.log((I + P).detach().numpy()))
                # center = torch.Tensor(np.log(P.detach().numpy()))


                # Gaussian in log-gap space, centered at P with spread D/2.
                G = -(logInterval - P) ** 2 / (2 * (D / 2) ** 2)

                # Transpose G so its orientation matches attn_weights.
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'normal':
                # Normal (Gaussian) shaped bias over the raw position j.
                G = -(J - P) ** 2 / (2 * (D / 2) ** 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'abs':

                # Bias decays linearly with the absolute distance from the center.
                G = -abs(J - P) / (D / 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'learn':
                # NOTE(review): self.bias was already added above, so 'learn'
                # effectively adds the learned bias twice — confirm intended.
                G = self.bias

            attn_weights += G

        attn_weights = self.softmax(attn_weights) # code as below invalids pytorch backward rules
        # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/4
        attn_weights = self.dropout(attn_weights)


        # attn_weights[i, j]: relevance of behaviour i to behaviour j (one of the
        # behaviours before it); each row sums to 1 after the softmax.

        outputs = attn_weights.matmul(V_)

        # (num_head * N, T, C / num_head) -> (N, T, C)
        outputs = torch.cat(torch.split(outputs, Q.shape[0], dim=0), dim=2) # div batch_size

        return outputs

# Sequential recommendation with time-interval-aware self-attention
class SASRec(torch.nn.Module):
    """Self-attentive sequential recommender (SASRec variant).

    Each categorical feature column gets its own embedding table; the
    embedded sequence runs through stacked attention blocks, and an optional
    factorization-machine branch is blended in via a learned sigmoid gate.
    """

    def __init__(self, args):
        """Build the model from an argparse-style namespace.

        `args` must provide: device, emb_list (vocab size per feature column),
        hidden_units, maxlen, dropout_rate, num_blocks, num_heads, Biasmode,
        FMmode, latent_units, maxlen_FM, hidden_units_FM.
        """
        super(SASRec, self).__init__()
        self.args = args
        self.dev = args.device
        # TODO: loss += args.l2_emb for regularizing embedding vectors during training
        # https://stackoverflow.com/questions/42704283/adding-l1-l2-regularization-in-pytorch

        self.item_emb = torch.nn.ModuleList()  # one embedding table per feature column
        self.Linear = torch.nn.Linear(args.maxlen, args.hidden_units)
        self.Sigmoid = torch.nn.Sigmoid()
        self.Tanh = torch.nn.Tanh()

        self.item_emb_FM = torch.nn.Linear(args.maxlen, args.hidden_units)

        self.Linear1 = torch.nn.Linear(args.hidden_units, 1)
        self.Linear2 = torch.nn.Linear(args.maxlen, 1)
        self.dropoutFM = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout1 = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout2 = torch.nn.Dropout(p=args.dropout_rate)
        # Per-position weights for pooling behaviour vectors into an interest vector.
        self.weight = torch.nn.Parameter(torch.randn(args.maxlen, 1))

        self.relu = torch.nn.ReLU()

        self.emb_dropout = torch.nn.ModuleList()  # one dropout per feature column

        # FM-family branches; only the one selected by args.FMmode is used.
        self.FM = FM(args.latent_units, args.maxlen_FM)
        self.AFM = AFM(args.maxlen, args.hidden_units_FM, t=args.hidden_units_FM * (args.hidden_units_FM) // 2,
                       k=args.latent_units)
        self.MFM = MFM(args.latent_units, args.maxlen)
        # NOTE(review): HoAFM is dispatched in _fm_logits but never constructed,
        # so FMmode == 'HoAFM' raises AttributeError at runtime.
        # self.HoAFM = HoAFM(args.hidden_units, args.hidden_units_FM, args.latent_units, args.layernum)
        self.TransFM = TransFM(args.latent_units, args.maxlen)
        self.ATransFM = ATransFM(args.latent_units, args.maxlen, args.hidden_units_FM)
        self.FMmode = args.FMmode

        self.attention_layernorms = torch.nn.ModuleList() # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()
        # Final block's feed-forward collapses [batch, maxlen, hidden] to [batch, 1].
        self.forward_layers2 = PointWiseFeedForward2(args.hidden_units, args.maxlen, args.dropout_rate)

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
        # Learned mixing weight between the attention branch and the FM branch.
        self.alpha = torch.nn.Parameter(torch.tensor([0.2]))
        print('生成模型权重{}'.format(torch.sigmoid(self.alpha)))  # sanity check of the initial gate value

        # Embedding layer: one table + dropout per categorical feature column.
        for i in args.emb_list:
            item_emb = torch.nn.Embedding(i, args.hidden_units, padding_idx=0)
            emb_dropout = torch.nn.Dropout(p=args.dropout_rate)
            self.emb_dropout.append(emb_dropout)
            self.item_emb.append(item_emb)

        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            new_attn_layer = TimeAwareMultiHeadAttention(args.hidden_units,
                                                         args.num_heads,
                                                         args.dropout_rate,
                                                         args.device,
                                                         args.maxlen,
                                                         args.Biasmode)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)

    def log2feats(self, log_seqs):
        """Encode a batch of integer feature rows.

        Args:
            log_seqs: ndarray [batch, num_columns] of integer feature codes.
        Returns:
            (seqs, seqs_FM): the attention-branch output and the concatenated
            one-hot encoding consumed by the FM branch.
        """
        # BUG FIX: the original accumulated embeddings through a module-level
        # `global seqs`, leaking state across calls and instances; accumulate
        # in a local list instead.
        columns = []
        for i in range(log_seqs.shape[1]):
            col = self.item_emb[i](torch.LongTensor(log_seqs[:, i]).unsqueeze(1).to(self.dev))
            if i > 0:
                # Kept from the original: column 0 receives no embedding dropout.
                col = self.emb_dropout[i](col)
            columns.append(col)
        seqs = torch.cat(columns, 1)  # [batch, num_columns, hidden_units]

        # One-hot encode every column for the FM branch.
        onehots = [
            torch.nn.functional.one_hot(torch.LongTensor(log_seqs[:, i]).to(self.dev),
                                        num_classes=self.args.emb_list[i])
            for i in range(log_seqs.shape[1])
        ]
        seqs_FM = torch.cat(onehots, 1)  # [batch, sum(emb_list)]

        for i in range(len(self.attention_layers)):
            Q = self.attention_layernorms[i](seqs)
            mha_outputs = self.attention_layers[i](Q, seqs)
            seqs = Q + mha_outputs  # residual connection

            seqs = self.forward_layernorms[i](seqs)
            if i == len(self.attention_layers) - 1:
                # The last block collapses the sequence to one logit per row.
                seqs = self.forward_layers2(seqs)
            else:
                seqs = self.forward_layers[i](seqs)
        return seqs, seqs_FM

    def _fm_logits(self, sum_seqs):
        """Dispatch the one-hot features to the FM variant chosen by FMmode."""
        if self.FMmode == 'FM':
            return self.FM(sum_seqs)
        if self.FMmode == 'AFM':
            return self.AFM(sum_seqs)
        if self.FMmode == 'MFM':
            return self.MFM(sum_seqs)
        if self.FMmode == 'HoAFM':
            return self.HoAFM(sum_seqs)  # see NOTE in __init__: not constructed
        if self.FMmode == 'TransFM':
            return self.TransFM(sum_seqs)
        if self.FMmode == 'ATransFM':
            return self.ATransFM(sum_seqs)
        # BUG FIX: the original fell through with log_feats2 undefined (NameError).
        raise ValueError('unknown FMmode: {}'.format(self.FMmode))

    def forward(self, log_seqs):  # for training
        """Return raw (pre-sigmoid) logits for a batch of feature rows."""
        log_feats, sum_seqs = self.log2feats(log_seqs)
        if self.FMmode != 'None':
            log_feats2 = self._fm_logits(sum_seqs)
            alpha = torch.sigmoid(self.alpha)
            # Weighted blend of the attention branch and the FM branch.
            log_feats = alpha * log_feats + (1 - alpha) * log_feats2
        return log_feats

    def predict(self, log_seqs):  # for inference
        """Return sigmoid probabilities for a batch of feature rows."""
        log_feats, sum_seqs = self.log2feats(log_seqs)
        if self.FMmode != 'None':
            log_feats2 = self._fm_logits(sum_seqs)
            alpha = torch.sigmoid(self.alpha)
            print('最终attention模型权重{}'.format(alpha))  # report the learned gate value
            log_feats = alpha * log_feats + (1 - alpha) * log_feats2
        return self.Sigmoid(log_feats)

class FM(torch.nn.Module):
    """Factorization Machine (Rendle 2010), one logit per input row.

    output = w0 + x @ w + 0.5 * sum_f[ (x @ v)_f^2 - (x^2 @ v^2)_f ]
    """

    def __init__(self, latent_size, hidden_units):
        super(FM, self).__init__()
        self.k = latent_size  # latent factor dimension
        self.w0 = torch.nn.Parameter(torch.randn(1))  # global bias
        self.w = torch.nn.Parameter(torch.randn(hidden_units, 1))  # first-order weights
        self.v = torch.nn.Parameter(torch.randn(hidden_units, self.k))  # latent factors

    def forward(self, input):  # input: [batch, hidden_units]
        input = input.float()  # one-hot features may arrive as integer tensors

        linear_part = input.matmul(self.w) + self.w0
        # BUG FIX: both exponents were 1, which made the two interaction terms
        # cancel and the pairwise part identically zero. The FM identity
        # 0.5 * ((xV)^2 - (x^2)(V^2)) requires squares:
        inter_part1 = torch.pow(input.matmul(self.v), 2)  # (x @ v)^2, shape (batch, k)
        inter_part2 = torch.pow(input, 2).matmul(torch.pow(self.v, 2))  # x^2 @ v^2, shape (batch, k)
        inter_part = 0.5 * torch.sum(inter_part1 - inter_part2, dim=-1, keepdim=True)  # (batch, 1)

        output = linear_part + inter_part  # (batch, 1)
        return output

class Interaction_layer(torch.nn.Module):
    '''
    Pairwise element-wise products of field embeddings.
    #input shape:[batchsize, feature, k]
    #output shape:[batchsize, feature(feature-1)/2, k]
    '''

    def __init__(self):
        super(Interaction_layer, self).__init__()

    def forward(self, inputs):
        """Return all products inputs[:, i] * inputs[:, j] for i < j.

        Raises:
            ValueError: if `inputs` is not a 3-D tensor.
        """
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))

        pair_products = []
        num_fields = inputs.shape[1]
        for i in range(num_fields):
            for j in range(i + 1, num_fields):
                pair_products.append(torch.mul(inputs[:, i], inputs[:, j]))

        # BUG FIX: the original rebuilt the result via .cpu().detach().numpy(),
        # which severed autograd (upstream layers received no gradients) and
        # forced the output onto the CPU. torch.stack produces the same
        # [batch, t, k] layout while keeping gradients and device.
        return torch.stack(pair_products, dim=1)

class Attention_layer(torch.nn.Module):
    '''
    inputs: second-order interaction matrix [None, t, k], where
    t = feature*(feature-1)/2 and k is the latent dimension.
    output: attention-pooled vector [None, k].
    '''

    def __init__(self, interfield_len, k):
        super(Attention_layer, self).__init__()
        self.attention_w = torch.nn.Linear(k, interfield_len)
        self.attention_h = torch.nn.Linear(interfield_len, 1)
        self.relu = torch.nn.ReLU()

    def forward(self, inputs):  # [None, t, k]
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))
        # Kept from the original: this layer always runs on the CPU.
        inputs = inputs.to("cpu")

        # Score each interaction: [None, t, k] -> [None, t, t] -> [None, t, 1].
        scores = self.attention_h(self.relu(self.attention_w(inputs)))
        # Normalize over the t interactions and pool them.
        weights = torch.transpose(F.softmax(scores, dim=-2), 1, 2)  # [None, 1, t]
        pooled = weights.matmul(inputs)  # [None, 1, k]
        return pooled.reshape(-1, inputs.shape[2])  # [None, k]

class AFM(torch.nn.Module):
    """Attentional FM: embed scalar features, form pairwise interactions,
    attention-pool them, and project to a single logit per row."""

    def __init__(self, hidden_units, hidden_units_FM, t, k):
        super(AFM, self).__init__()
        self.linear1 = torch.nn.Linear(1, k)
        self.linear2 = torch.nn.Linear(k, k)
        self.interaction_layer = Interaction_layer()
        self.attention_layer = Attention_layer(t, k)
        self.output_layer = torch.nn.Linear(k, 1)

    def forward(self, inputs):  # [batchsize * maxlen, feature]
        # Lift each scalar feature to a k-dim latent vector:
        # [batch, feature] -> [batch, feature, 1] -> [batch, feature, k].
        latent = self.linear2(self.linear1(inputs.unsqueeze(-1)))
        # Pairwise element-wise products: [batch, feature*(feature-1)/2, k].
        crossed = self.interaction_layer(latent)
        # Attention-pool the crossed features, then project to one logit.
        pooled = self.attention_layer(crossed)  # [batch, k]
        return self.output_layer(pooled)  # [batch, 1]

class MFM(torch.nn.Module):
    """FM variant with a learnable per-pair modulation factor a[i, j].

    output = w0 + x @ w + sum_{i<j} (v_i . v_j) * a[i, j] * x_i * x_j
    """

    def __init__(self, latent_size, hidden_units):
        super(MFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size  # latent factor dimension
        self.w0 = torch.nn.Parameter(torch.randn(1))  # global bias
        self.w = torch.nn.Parameter(torch.randn(hidden_units, 1))  # first-order weights
        self.v = torch.nn.Parameter(torch.randn(hidden_units, self.k))  # latent factors
        self.a = torch.nn.Parameter(torch.randn(hidden_units, hidden_units))  # pair modulation

    def forward(self, input):
        first_order = input.matmul(self.w) + self.w0
        second_order = 0
        # Explicit O(n^2) pair loop; only the upper triangle (i < j) is used.
        for i in range(self.hidden_units):
            for j in range(i + 1, self.hidden_units):
                dot_ij = torch.sum(torch.mul(self.v[i, :], self.v[j, :]))
                second_order = second_order + dot_ij * self.a[i, j] * torch.mul(input[:, i], input[:, j])
        return first_order + second_order.unsqueeze(-1)

class TransFM(torch.nn.Module):
    """Translation-based FM scoring head; returns one logit per row [B, 1].

    Combines a first-order term, a factor-norm term, and a dense
    pairwise-embedding interaction term.
    """

    def __init__(self, latent_size, hidden_units):
        super(TransFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size  # latent embedding dimension
        self.var_linear = torch.nn.Parameter(torch.randn(self.hidden_units))  # first-order weights
        self.var_emb_factors = torch.nn.Parameter(torch.randn(self.hidden_units, self.k))  # [n, k]

    def forward(self, input):  # [batchsize * maxlen, hidden_units]
        input = input.float()  # one-hot features may arrive as integer tensors
        linear_term = torch.sum(self.var_linear * input, dim=-1, keepdim=True)  # [B, 1]

        var_emb_product = torch.sum(torch.square(self.var_emb_factors), dim=-1)  # [n]
        prod_term = torch.sum(var_emb_product * input, dim=-1, keepdim=True)  # [B, 1]
        # BUG FIX: the original called torch.unsqueeze(...) without the required
        # `dim` argument, so this line always raised a TypeError.
        input_sum = torch.sum(input, dim=-1, keepdim=True)  # [B, 1]
        term_1 = prod_term * input_sum  # [B, 1]

        # BUG FIX: the original reshape(1, n) collapsed the batch axis and only
        # "worked" for a single row; keep the batch dimension explicit so the
        # shapes match the documented [B, n, n] / [B, n, k] layout.
        input_rep = input.unsqueeze(-2).repeat(1, self.hidden_units, 1)  # [B, n, n]
        input_emb_mul = torch.matmul(input_rep, self.var_emb_factors)  # [B, n, k]
        term_2 = torch.zeros_like(linear_term)  # [B, 1]
        for i in range(self.hidden_units):
            for j in range(self.hidden_units):
                term_2 = term_2 + torch.sum(
                    input_emb_mul[:, i, :] * input_emb_mul[:, j, :], dim=-1, keepdim=True)

        return linear_term + term_1 + term_2  # [B, 1]

class ATransFM(torch.nn.Module):
    """Distance-weighted FM variant; returns one logit per row, shape [B, 1].

    Scores a row as linear_term + sum_t( weight_t * d(e_i, e_j) * x_i * x_j )
    over all feature pairs (i, j), where d is `calculate_distance_square`
    imported from utils (presumably a squared embedding distance — TODO confirm).
    """

    def __init__(self, latent_size, hidden_units, hidden_units_FM):
        super(ATransFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size  # latent embedding dimension
        self.hidden_units_FM = hidden_units_FM  # number of feature fields iterated below
        # Number of unordered feature pairs (i < j).
        self.t = self.hidden_units_FM * (self.hidden_units_FM - 1) // 2

        self.linear = torch.nn.Parameter(torch.randn(self.hidden_units_FM))  # first-order weights
        self.get_embed = torch.nn.Linear(1, self.k)  # lifts each scalar feature to k dims
        self.weight = torch.nn.Parameter(torch.randn(self.t))  # per-pair weights
        self.w0 = torch.nn.Parameter(torch.randn(1))  # bias; unused in forward (kept in the commented-out output)

    def forward(self, input):  #[batchsize * maxlen, hidden_units]
        # First-order term: weighted sum over feature fields.
        linear_term = torch.sum(self.linear * input, dim=-1)
        input_ = torch.unsqueeze(input, dim=-1)   #[None, hidden_units, 1]

        embed = self.get_embed(input_)  #[None, hidden_units, k]

        # Pairwise embedding distances for all i < j.
        vector_distance_list = []
        for i in range(self.hidden_units_FM):
            for j in range(i + 1, self.hidden_units_FM):
                vector_distance_list.append(calculate_distance_square(embed[:, i, :], embed[:, j, :]))

        # NOTE(review): rebuilding via .cpu().detach().numpy() severs autograd,
        # so no gradients flow back into get_embed from this term; it also
        # forces the result onto the CPU.
        vector_distance_tensor = torch.tensor([item.cpu().detach().numpy() for item in vector_distance_list])   #[t, None]
        vector_distance_tensor = torch.transpose(vector_distance_tensor, 0, 1)

        # Pairwise raw-feature products for all i < j.
        feature_mul_list = []
        for i in range(self.hidden_units_FM):
            for j in range(i + 1, self.hidden_units_FM):
                feature_mul_list.append(input[:, i] * input[:, j])

        # NOTE(review): same autograd/device caveat as above.
        feature_mul_tensor = torch.tensor([item.cpu().detach().numpy() for item in feature_mul_list])   #[t, None]
        feature_mul_tensor = torch.transpose(feature_mul_tensor, 0, 1)


        # Per-pair weighted interaction term, computed on the CPU.
        inter_term = self.weight * vector_distance_tensor.to("cpu") * feature_mul_tensor.to("cpu")   #[None, t]
        inter_term = torch.sum(inter_term, dim=-1)
        # output = torch.sigmoid(self.w0 + linear_term + inter_term)
        output = (linear_term + inter_term).unsqueeze_(1)

        return output
