import numpy as np
import torch
from torch.nn.functional import one_hot
import argparse
import torch.nn.functional as F
from utils import *
import random
import torchinfo

class PointWiseFeedForward(torch.nn.Module):
    """Per-position feed-forward transform with a residual (skip) connection.

    NOTE(review): Linear2/dropout2 are instantiated but never applied in
    forward — possibly a leftover second FFN layer; kept so checkpoint
    layouts stay compatible.
    """

    def __init__(self, hidden_units, dropout_rate):
        super(PointWiseFeedForward, self).__init__()
        self.Linear1 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        self.Linear2 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        """Apply Linear1 -> dropout -> ReLU, then add the input back."""
        projected = self.Linear1(inputs)
        regularized = self.dropout1(projected)
        activated = self.relu(regularized)
        return activated + inputs

class PointWiseFeedForward2(torch.nn.Module):
    """Projection head: collapse the hidden dim, then the sequence dim.

    Maps [batch, hidden_units2, hidden_units1] -> [batch, 1] via
    Linear1 (hidden -> 1), dropout, squeeze, ReLU, Linear2 (seq -> 1).
    NOTE(review): Tanh and dropout2 are created but never used in forward.
    """

    def __init__(self, hidden_units1, hidden_units2, dropout_rate):
        super(PointWiseFeedForward2, self).__init__()
        self.Linear1 = torch.nn.Linear(hidden_units1, 1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.Tanh = torch.nn.Tanh()
        self.ReLU = torch.nn.ReLU()

        self.Linear2 = torch.nn.Linear(hidden_units2, 1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        """Reduce [batch, seq, hidden] to a single score per batch row."""
        collapsed = self.dropout1(self.Linear1(inputs)).squeeze(2)  # [batch, seq]
        return self.Linear2(self.ReLU(collapsed))                   # [batch, 1]


class TimeAwareMultiHeadAttention(torch.nn.Module):
    """Multi-head self-attention with an additive positional/interval bias.

    The extra bias added to the attention logits is chosen by ``Biasmode``:
    'lognormal' / 'normal' / 'abs' build a distance-based bias from a learned
    centre P and width D, 'learn' uses a freely learned [maxlen, maxlen]
    matrix, any other value ('None') adds no mode-specific bias.

    NOTE(review): the learned ``self.bias`` matrix is added unconditionally
    (even when Biasmode == 'None'), and every bias shape assumes the input
    sequence length equals ``maxlen`` — confirm callers always pad to maxlen.
    """
    def __init__(self, hidden_size, head_num, dropout_rate, dev, maxlen, Biasmode):
        super(TimeAwareMultiHeadAttention, self).__init__()
        # Linear projections for queries, keys and values.
        self.Q_w = torch.nn.Linear(hidden_size, hidden_size)
        self.K_w = torch.nn.Linear(hidden_size, hidden_size)
        self.V_w = torch.nn.Linear(hidden_size, hidden_size)

        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.softmax = torch.nn.Softmax(dim=-1)  # defined but unused — forward applies sigmoid instead
        self.sigmoid = torch.nn.Sigmoid()

        self.hidden_size = hidden_size
        self.head_num = head_num
        self.head_size = hidden_size // head_num  # per-head feature dimensionality
        # print(self.head_size)
        # print(self.Q_w)
        self.dropout_rate = dropout_rate
        self.dev = dev
        self.maxlen = maxlen
        # Freely learned [maxlen, maxlen] additive attention bias.
        self.bias = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(maxlen, maxlen).to(self.dev), mean=0, std=1))
        self.Biasmode = Biasmode

        # Sub-network producing the per-position width (via Ud) and centre
        # (via Up) used by the distance-based bias modes.
        self.Wp = torch.nn.Linear(hidden_size, hidden_size)
        self.Ud = torch.nn.Linear(hidden_size, 1)
        self.Up = torch.nn.Linear(hidden_size, 1)

    def forward(self, queries, keys):
        """Attend ``queries`` over ``keys`` and return the mixed values.

        queries/keys: [batch, maxlen, hidden_size]; returns the same shape.
        """
        Q, K, V = self.Q_w(queries), self.K_w(keys), self.V_w(keys) # Q comes from the layer-normed input; K and V from the raw input

        # Split the hidden dim into heads and fold them into the batch dim:
        # [batch, maxlen, hidden] -> [head_num * batch, maxlen, head_size]
        Q_ = torch.cat(torch.split(Q, self.head_size, dim=2), dim=0)   # [head_num * batch, maxlen, head_size]
        # print(Q.shape)
        K_ = torch.cat(torch.split(K, self.head_size, dim=2), dim=0)
        V_ = torch.cat(torch.split(V, self.head_size, dim=2), dim=0)


        attn_weights = Q_.matmul(torch.transpose(K_, 1, 2)) # raw attention logits: Q times K^T

        attn_weights = attn_weights / (K_.shape[-1] ** 0.5)  # scaled dot-product
        attn_weights += self.bias     # learned additive bias (applied for every Biasmode, including 'None')

        # calculate bias
        if self.Biasmode != 'None':
            # Z -> width logit, P -> centre logit, computed per query position.
            Z = self.Ud(torch.tanh(self.Wp(Q_)).to(self.dev))
            P = self.Up(torch.tanh(self.Wp(Q_)).to(self.dev))
            D = self.maxlen * torch.sigmoid(Z)  # width squashed into (0, maxlen)
            D = D.repeat(1, 1, self.maxlen)  # [batchsize, maxlen, maxlen]
            P = self.maxlen * torch.sigmoid(P)  # centre squashed into (0, maxlen)
            P = P.repeat(1, 1, self.maxlen)  # [batchsize, maxlen, maxlen]
            G = torch.full_like(attn_weights, 0)  # [batchsize, maxlen, maxlen]    # the initialized bias matrix
            I = torch.tensor(range(self.maxlen)).to(self.dev)
            I = I.unsqueeze(-1)
            I = I.repeat(1, self.maxlen)
            I = I.repeat(G.shape[0], 1, 1)    # matrices of row index i
            J = torch.tensor(range(self.maxlen)).to(self.dev)
            J = J.unsqueeze(0)
            J = J.repeat(self.maxlen, 1)
            J = J.repeat(G.shape[0], 1, 1)  # matrices of column index j

            if self.Biasmode == 'lognormal':

                # G[i,j] denotes the effect that behavior i has on behavior j.
                # i.e. Row i is all about the effect that behavior i has on other behaviors.


                interval = J - I  # signed position gap between key and query
                # bias_mask_num = torch.eye(self.maxlen)
                # true_matrix = torch.Tensor([[True] * self.maxlen] * self.maxlen)

                # bias_mask = torch.where(bias_mask_num>0, true_matrix, false_matrix)
                # bias_mask = bias_mask.repeat(G.shape[0], 1, 1)
                # NOTE(review): paddingBias is allocated on the CPU default device;
                # this only works because `interval` is moved to CPU on the next line.
                paddingBias = torch.ones(attn_weights.shape) * (1)
                interval = torch.FloatTensor(interval.cpu().numpy())
                # If the number of intervals is less than 0, set it to 1 to prevent the logarithmic function from reporting errors
                interval = torch.where(interval>=1, interval, paddingBias)


                # NOTE(review): the numpy round-trip below runs on CPU every call
                # and detaches these tensors from autograd (they are index data,
                # so no learnable gradient is lost, but it costs a device copy).
                interval = interval.numpy()
                logInterval = np.log(interval)
                interval = torch.tensor(interval).to(self.dev)
                logInterval = torch.Tensor(logInterval).to(self.dev)
                # center = torch.Tensor(np.log((I + P).detach().numpy()))
                # center = torch.Tensor(np.log(P.detach().numpy()))


                # bias follows a lognormal distribution
                G = -(logInterval - P) ** 2 / (2 * (D / 2) ** 2)

                # Transpose G again to fit the meaning of attn_weights
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'normal':
                # bias follows a normal distribution centred at P with width D/2
                G = -(J - P) ** 2 / (2 * (D / 2) ** 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'abs':

                # absolute-value (Laplacian-style) distance bias
                G = -abs(J - P) / (D / 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'learn':
                # self-learned bias matrix (adds self.bias a second time)
                G = self.bias

            attn_weights += G

        # print(attn_weights.shape)
        # print(type(attn_weights))
        # attn_weights = self.softmax(attn_weights) # code as below invalids pytorch backward rules
        attn_weights = self.sigmoid(attn_weights)
        # print(attn_weights.shape)
        # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/4
        attn_weights = self.dropout(attn_weights)


        # attn_weights[i,j]: correlation of the ith behavior with the jth behavior.
        # NOTE(review): because sigmoid (not softmax) is applied, the rows do
        # NOT sum to 1, despite what earlier revisions of this comment claimed.

        outputs = attn_weights.matmul(V_)
        # print(outputs.shape)
        # print(outputs.shape)
        # Undo the head fold: (num_head * N, T, C / num_head) -> (N, T, C)
        outputs = torch.cat(torch.split(outputs, Q.shape[0], dim=0), dim=2) # div batch_size
        # print(outputs.shape)
        return outputs

# Sequential Recommendations for Time Interval Perception of Self-Attention
class SASRec(torch.nn.Module):
    """Self-attentive sequential recommender blended with a feature-interaction branch.

    ``log2feats`` embeds each feature column and runs the self-attention
    stack; ``forward``/``predict`` optionally compute one of several FM-style
    interaction heads (selected by ``args.FMmode``) and average the two
    branches 50/50. ``predict`` additionally applies a final sigmoid.
    """
    def __init__(self, args):
        super(SASRec, self).__init__()
        self.args = args
        self.dev = args.device
        # TODO: loss += args.l2_emb for regularizing embedding vectors during training
        # https://stackoverflow.com/questions/42704283/adding-l1-l2-regularization-in-pytorch
        # cs-trainning1 dataset:
        # the Embedding parameter needs to correspond to the number of categories in each column +1
        # self.item_emb0 = torch.nn.Embedding(7, args.hidden_units, padding_idx=0)
        # self.item_emb1 = torch.nn.Embedding(6, args.hidden_units, padding_idx=0)
        # self.item_emb2 = torch.nn.Embedding(6, args.hidden_units, padding_idx=0)
        # self.item_emb3 = torch.nn.Embedding(7, args.hidden_units, padding_idx=0)
        # self.item_emb4 = torch.nn.Embedding(6, args.hidden_units, padding_idx=0)
        # self.item_emb5 = torch.nn.Embedding(6, args.hidden_units, padding_idx=0)
        # self.item_emb6 = torch.nn.Embedding(4, args.hidden_units, padding_idx=0)
        # self.item_emb7 = torch.nn.Embedding(4, args.hidden_units, padding_idx=0)
        # self.item_emb8 = torch.nn.Embedding(4, args.hidden_units, padding_idx=0)
        # self.item_emb9 = torch.nn.Embedding(6, args.hidden_units, padding_idx=0)

        # self.item_emb0 = torch.nn.Embedding(args.emb_list[0], args.hidden_units, padding_idx=0)
        # self.item_emb1 = torch.nn.Embedding(args.emb_list[1], args.hidden_units, padding_idx=0)
        # self.item_emb2 = torch.nn.Embedding(args.emb_list[2], args.hidden_units, padding_idx=0)
        # self.item_emb3 = torch.nn.Embedding(args.emb_list[3], args.hidden_units, padding_idx=0)
        # self.item_emb4 = torch.nn.Embedding(args.emb_list[4], args.hidden_units, padding_idx=0)
        # self.item_emb5 = torch.nn.Embedding(args.emb_list[5], args.hidden_units, padding_idx=0)
        # self.item_emb6 = torch.nn.Embedding(args.emb_list[6], args.hidden_units, padding_idx=0)
        # self.item_emb7 = torch.nn.Embedding(args.emb_list[7], args.hidden_units, padding_idx=0)
        # self.item_emb8 = torch.nn.Embedding(args.emb_list[8], args.hidden_units, padding_idx=0)
        # self.item_emb9 = torch.nn.Embedding(args.emb_list[9], args.hidden_units, padding_idx=0)
        self.item_emb = torch.nn.ModuleList()     # one Embedding per feature column
        self.item_onehot = torch.nn.ModuleList()  # unused placeholder
        self.Linear = torch.nn.Linear(args.maxlen, args.hidden_units)
        self.Sigmoid = torch.nn.Sigmoid()
        self.Softmax = torch.nn.Softmax()
        self.Tanh = torch.nn.Tanh()

        self.item_emb_FM = torch.nn.Linear(args.maxlen, args.hidden_units)

        self.Linear_in = torch.nn.Linear(args.hidden_units, 1)  # projects seqs for the CrossNet branch
        # self.Linear_out = torch.nn.Linear(2, 1, bias=True)
        self.Linear_out = torch.nn.Linear(args.maxlen, 1, bias=True)
        self.Linear1 = torch.nn.Linear(args.hidden_units, 1)
        self.Linear2 = torch.nn.Linear(args.maxlen, 1)
        self.dropoutFM = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout1 = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout2 = torch.nn.Dropout(p=args.dropout_rate)
        self.weight = torch.nn.Parameter(torch.randn(args.maxlen, 1))

        self.relu = torch.nn.ReLU()

        self.emb_dropout = torch.nn.ModuleList()


        # Candidate feature-interaction branches; only the one named by
        # args.FMmode is used in forward/predict (NOTE(review): all are
        # instantiated regardless, which wastes parameters).
        # self.FM = FM(args.latent_units, args.maxlen_FM)
        self.FM = FM(args.latent_units, args.maxlen)
        # self.AFM = AFM(args.maxlen_FM, args.hidden_units_FM, t=args.hidden_units_FM * (args.hidden_units_FM) // 2, k=args.latent_units)
        self.AFM = AFM(args.maxlen, args.hidden_units_FM, t=args.hidden_units_FM * (args.hidden_units_FM) // 2, k=args.latent_units)
        self.MFM = MFM(args.latent_units, args.maxlen)
        self.TransFM = TransFM(args.latent_units, args.maxlen)
        self.ATransFM = ATransFM(args.latent_units, args.maxlen, args.hidden_units_FM)
        # self.CrossNet = CrossNet(in_features=args.maxlen_FM,layer_num=args.layer_num,device=args.device)
        self.CrossNet = CrossNet(in_features=args.maxlen, layer_num=args.layer_num, device=args.device, batch_size=args.batch_size)
        self.CIN = CIN(field_size=args.maxlen)
        self.FMmode = args.FMmode

        self.attention_layernorms = torch.nn.ModuleList() # to be Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()
        self.forward_layers2 = PointWiseFeedForward2(args.hidden_units,args.maxlen, args.dropout_rate)

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
        # self.alpha = torch.nn.Parameter(torch.tensor([0.99]))# fixed blend-weight variant (disabled)
        self.alpha = torch.nn.Parameter(torch.tensor([random.random()]))  # learnable branch-blend weight
        print('生成模型权重{}'.format(torch.sigmoid(self.alpha))) # sanity check: report the initial blend weight
        # embedding layers, one per feature column
        for i in args.emb_list:
            # print(i)
            item_emb = torch.nn.Embedding(int(i), args.hidden_units, padding_idx=0) # per-column embedding table
            '''
            torch.nn.Embedding(
            num_embeddings, - The size dimension of the dictionary.
            embedding_dim, - Dimension of the embedding vector, i.e. how many dimensions are used to represent a symbol.
            padding_idx = None, - Padding id, initialized to 0
            max_norm = None, - Maximum paradigm, if the embedding vector's paradigm exceeds this limit, it is renormalized.
            norm_type = 2.0, - Specifies what paradigm to utilize for computation and for comparison to max_norm, defaults to 2 paradigms.
            scale_grad_by_freq = False, - Deflate the gradient based on how often the word appears in the mini - batch. Defaults to False.
            sparse = False, - If True, the gradient associated with the weight matrix is transformed into a sparse tensor.
            _weight = None)
            '''
            emb_dropout = torch.nn.Dropout(p=args.dropout_rate)
            self.emb_dropout.append(emb_dropout)
            self.item_emb.append(item_emb)



        # One (layernorm, attention, layernorm, feed-forward) group per block.
        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            new_attn_layer = TimeAwareMultiHeadAttention(args.hidden_units,
                                                            args.num_heads,
                                                            args.dropout_rate,
                                                            args.device,
                                                            args.maxlen,
                                                            args.Biasmode)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)


    def log2feats(self, log_seqs):
        """Embed each feature column of ``log_seqs`` and run the attention stack.

        Returns ``(seqs, seqs_FM)``: the attended sequence features and the
        features prepared for the FM-style branch (a Linear projection for
        CrossNet, the raw float matrix otherwise).

        NOTE(review): ``global seqs`` leaks the working tensor into module
        scope and makes this method non-reentrant; a local variable would do.
        NOTE(review): the first column's embedding skips emb_dropout — the
        dropout is only applied in the ``else`` branch; confirm intended.
        """
        global seqs
        for i in range(log_seqs.shape[1]):
            # print(i)
            # print(log_seqs.shape[1])
            if i == 0:
                seqs = self.item_emb[0](torch.LongTensor(log_seqs[:,i]).unsqueeze_(1).to(self.dev))
                # print(seqs)
            else:
                _seqs = self.item_emb[i](torch.LongTensor(log_seqs[:,i]).unsqueeze_(1).to(self.dev))
                # print(_seqs)
                _seqs = self.emb_dropout[i](_seqs)

                # Concatenate the per-column embeddings along the sequence axis.
                seqs = torch.cat((seqs,_seqs),1)

        # print(seqs.shape) # Detecting if the word vector encoder is working

        # print(seqs_FM.shape[1]) # Detecting if the one hot encoder is working
        # print(seqs_FM.shape)
        # print(type(seqs_FM)) # tensor
        # seqs_FM = torch.FloatTensor(log_seqs).to(self.dev) # Convert directly to tensor without processing
        # print(seqs_FM.shape)

        # CrossNet expects a 3-D [batch, maxlen, 1] input; every other FM
        # branch consumes the raw [batch, maxlen] float features.
        if self.FMmode == 'CrossNet':
            seqs_FM = self.Linear_in(seqs)
        else:
            seqs_FM = torch.FloatTensor(log_seqs).to(self.dev)

        # seqs_linear = self.Linear3(seqs)

        # print(seqs_FM.shape)
        # print(type(seqs_FM))

        # seqs_FM = seqs

        # for i in range(log_seqs.shape[1]):
        #     _seqs = torch.nn.functional.one_hot(torch.LongTensor(log_seqs[:,i]).to(self.dev),num_classes=self.args)
        #     seqs = torch.cat((seqs,_seqs),1)

        # seqs = torch.cat((seqs0, seqs1,seqs2,seqs3,seqs4,seqs5,seqs6,seqs7,seqs8,seqs9),1)
        # seqs *= self.item_emb.embedding_dim ** 0.5

        # seqs = self.Linear(torch.FloatTensor(log_seqs).unsqueeze(1).to(self.dev))
        # seqs = torch.FloatTensor(log_seqs).unsqueeze(1).to(self.dev)



        # Self-attention stack; the last block swaps the per-position FFN for
        # the score head forward_layers2 (collapses to [batch, 1]).
        for i in range(len(self.attention_layers)):
            # print(len(self.attention_layers))
            Q = self.attention_layernorms[i](seqs)
            K = seqs
            # seqs = torch.transpose(seqs, 1, 2)
            mha_outputs = self.attention_layers[i](Q, K)

            # print(mha_outputs.shape)
            # mha_outputs = torch.transpose(mha_outputs, 1, 2)

            # Residual connection around the attention layer.
            seqs = Q + mha_outputs
            # print(seqs.shape)
            # seqs = self.forward_layers2(seqs)
            # print(seqs.shape)

            seqs = self.forward_layernorms[i](seqs)

            if i == len(self.attention_layers)-1:
                seqs = self.forward_layers2(seqs)
            else:
                seqs = self.forward_layers[i](seqs)
                # seqs = self.last_layernorm(seqs)

        # log_seqs = log_seqs
        return seqs, seqs_FM

    def forward(self, log_seqs): # for training
        """Return the blended (attention + FM-branch) logits; no sigmoid."""
        # print(type(log_seqs)) # ndarray
        log_feats, sum_seqs = self.log2feats(log_seqs) # user_ids hasn't been used yet
        # global log_feats2
        # print('shape of the self-attention output: {}'.format(log_feats.shape))
        # log_feats-seqs  sum_seqs-seqs_FM
        # print(type(sum_seqs)) # Tensor

        # log_feats3 = self.LR(sum_seqs)
        if self.FMmode != 'None':
            # sum_seqs = self.dropoutFM(self.item_emb_FM(torch.FloatTensor(sum_seqs).to(self.dev)))
            # sum_seqs = se lf.relu(sum_seqs)
            # Dispatch to the configured feature-interaction branch.
            if self.FMmode == 'FM':
                log_feats2 = self.FM(sum_seqs)
            elif self.FMmode == 'AFM':
                log_feats2 = self.AFM(sum_seqs)
            elif self.FMmode == 'MFM':
                log_feats2 = self.MFM(sum_seqs)
            elif self.FMmode == 'HoAFM':
                # NOTE(review): self.HoAFM is never defined in __init__ —
                # selecting this mode raises AttributeError.
                log_feats2 = self.HoAFM(sum_seqs)
            elif self.FMmode == 'TransFM':
                log_feats2 = self.TransFM(sum_seqs)
            elif self.FMmode == 'ATransFM':
                log_feats2 = self.ATransFM(sum_seqs)
            elif self.FMmode == 'CrossNet':
                log_feats2 = self.CrossNet(sum_seqs)
            elif self.FMmode == 'CIN':
                log_feats2 = self.CIN(sum_seqs)
            # log_feats3 = self.LR(sum_seqs)
            # print('shape of the FM branch: {}'.format(log_feats2.shape))
            # print('__________')
            alpha = torch.sigmoid(self.alpha)  # NOTE(review): computed but unused — blend below is hard-coded 0.5/0.5
            # log_feats = log_feats2
            # print('shape of the SACN output: {}'.format(log_feats.shape))
            # print(log_feats.shape)
            # print(log_feats2.shape)
            # log_feats = alpha * log_feats + (1 - alpha) * log_feats2# weighted sum of the attention and crossing branches
            # log_feats = torch.cat((log_feats,log_feats2), dim=-1)
            # seqs = torch.cat((log_feats, sum_seqs))
            log_feats = 0.5 * log_feats + 0.5 * log_feats2
            # log_feats = log_feats + log_feats2

        return log_feats

    def predict(self, log_seqs): # for inference
        """Like ``forward`` but applies a final sigmoid to yield probabilities."""
        log_feats, sum_seqs = self.log2feats(log_seqs) # user_ids hasn't been used yet
        # log_feats3 = self.LR(sum_seqs)
        if self.FMmode != 'None':
            # sum_seqs = self.dropoutFM(self.item_emb_FM(torch.FloatTensor(sum_seqs).to(self.dev)))
            # sum_seqs = self.relu(sum_seqs)

            # Same branch dispatch as forward (see the notes there).
            if self.FMmode == 'FM':
                log_feats2 = self.FM(sum_seqs)
            elif self.FMmode == 'AFM':
                log_feats2 = self.AFM(sum_seqs)
            elif self.FMmode == 'MFM':
                log_feats2 = self.MFM(sum_seqs)
            elif self.FMmode == 'HoAFM':
                # NOTE(review): self.HoAFM is never defined — see forward().
                log_feats2 = self.HoAFM(sum_seqs)
            elif self.FMmode == 'TransFM':
                log_feats2 = self.TransFM(sum_seqs)
            elif self.FMmode == 'ATransFM':
                log_feats2 = self.ATransFM(sum_seqs)
            elif self.FMmode == 'CrossNet':
                log_feats2 = self.CrossNet(sum_seqs)
            elif self.FMmode == 'CIN':
                log_feats2 = self.CIN(sum_seqs)
            # log_feats3 = self.LR(sum_seqs)
            alpha = torch.sigmoid(self.alpha)  # NOTE(review): only printed, not used in the blend below
            # print(log_feats2)
            print('Final attention model weights{}'.format(alpha))
            # log_feats = log_feats2
            # print('shape of the SACN output: {}'.format(log_feats.shape))
            # log_feats = alpha * log_feats + (1 - alpha) * log_feats2 # weighted sum of the attention and FM branches
            # print(log_feats)
            log_feats = 0.5 * log_feats + 0.5 * log_feats2
            # log_feats = log_feats + log_feats2


        return self.Sigmoid(log_feats)


class FM(torch.nn.Module):
    """Factorization Machine (Rendle, 2010).

    Computes w0 + <w, x> + sum_{i<j} <v_i, v_j> x_i x_j using the O(kn)
    identity 0.5 * sum_f [ (sum_i v_if x_i)^2 - sum_i v_if^2 x_i^2 ].
    """
    def __init__(self, latent_size, hidden_units):
        super(FM, self).__init__()
        self.k = latent_size                                          # latent factor dimension
        self.w0 = torch.nn.Parameter(torch.randn(1))                  # global bias
        self.w = torch.nn.Parameter(torch.randn(hidden_units, 1))     # first-order weights
        self.v = torch.nn.Parameter(torch.randn(hidden_units, self.k))  # factor vectors

    def forward(self, input): # input-X, shape [batch, hidden_units]
        """Return the FM score, shape [batch, 1]."""
        input=input.float()
        linear_part = input.matmul(self.w) + self.w0
        # (sum_i x_i v_i)^2 per latent factor -- shape (batchsize, self.k)
        # BUGFIX: both exponents below were 1, which made inter_part1 ==
        # inter_part2 and the whole interaction term identically zero.
        inter_part1 = torch.pow(input.matmul(self.v), 2)  # shape:(batchsize, self.k)
        # sum_i x_i^2 v_i^2 per latent factor -- shape (batchsize, self.k)
        inter_part2 = torch.pow(input, 2).matmul(torch.pow(self.v, 2))  # shape:(batchsize, self.k)
        inter_part = 0.5 * torch.sum(inter_part1 - inter_part2, dim=-1, keepdim=True)  # shape:(batchsize, 1)

        output = linear_part + inter_part # out_size = (batch, 1)
        return output

class Interaction_layer(torch.nn.Module):
    '''
    Element-wise product of every unordered feature pair.
    #input shape:[batchsize, feature, k]
    #output shape:[batchsize, feature(feature-1)/2, k]
    '''

    def __init__(self):
        super(Interaction_layer, self).__init__()

    def forward(self, inputs):
        """Return the stacked pairwise products inputs[:, i] * inputs[:, j], i < j.

        Raises ValueError if ``inputs`` is not 3-dimensional.
        """
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))

        elements_wise_product_list = []
        for i in range(inputs.shape[1]):
            for j in range(i + 1, inputs.shape[1]):
                elements_wise_product_list.append(torch.mul(inputs[:, i], inputs[:, j]))

        # BUGFIX: stack the tensors directly instead of round-tripping through
        # numpy, which detached the result from autograd (no gradients flowed
        # back through this layer) and silently forced the output onto the CPU.
        return torch.stack(elements_wise_product_list, dim=1)

class Attention_layer(torch.nn.Module):
    '''
        inputs: second-order cross-term matrix with dimensions [None, t, k],
                t is feature*(feature-1)/2, k is the hidden vector dimension
        output: attention-pooled vector of shape [None, k]
    '''

    def __init__(self, interfield_len, k):
        super(Attention_layer, self).__init__()
        self.attention_w = torch.nn.Linear(k, interfield_len)
        self.attention_h = torch.nn.Linear(interfield_len, 1)
        self.relu = torch.nn.ReLU()

    def forward(self, inputs):  # [None, t, k]
        """Score each cross term, softmax over terms, and pool them."""
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))
        # BUGFIX: follow this layer's own device instead of the hard-coded
        # "cuda", which crashed on CPU-only machines. On a CUDA model the
        # behaviour is unchanged (parameters live on the GPU).
        inputs = inputs.to(self.attention_w.weight.device)

        x = self.attention_w(inputs)  # [None, t, t]
        x = self.relu(x)
        x = self.attention_h(x)  # [None, t, 1]
        a_score = F.softmax(x, dim=-2)  # normalize across the t cross terms
        a_score = torch.transpose(a_score, 1, 2)  # [None, 1, t]
        output = (a_score.matmul(inputs)).reshape(-1, inputs.shape[2])  # [None, k]
        return output

class AFM(torch.nn.Module):
    """Attentional Factorization Machine head.

    Pipeline: project raw features, embed each scalar feature, form all
    pairwise element-wise products, attention-pool them, and squash the
    final score through a sigmoid.
    """

    def __init__(self, hidden_units, hidden_units_FM, t, k):
        super(AFM, self).__init__()
        self.linear1 = torch.nn.Linear(1, k)
        self.linear2 = torch.nn.Linear(k, k)
        self.interaction_layer = Interaction_layer()
        self.attention_layer = Attention_layer(t, k)
        self.output_layer = torch.nn.Linear(k, 1)
        self.start_linear = torch.nn.Linear(hidden_units, hidden_units_FM)
        self.Sigmoid = torch.nn.Sigmoid()

    def forward(self, inputs):  # [batchsize * maxlen, feature]
        """Return a sigmoid-squashed score per input row, shape [N, 1]."""
        projected = self.start_linear(inputs.float())
        # Treat each projected feature as a scalar and embed it into k dims.
        embedded = self.linear2(self.linear1(projected.unsqueeze(-1)))  # [N, feature, k]
        # All pairwise products, then attention pooling over the pairs.
        crossed = self.interaction_layer(embedded)   # [N, t, k]
        pooled = self.attention_layer(crossed)       # [N, k]
        return self.Sigmoid(self.output_layer(pooled))  # [N, 1]

class MFM(torch.nn.Module):
    """FM variant with an extra learned per-pair weight matrix ``a``.

    Output: w0 + <w, x> + sum_{i<j} a[i,j] * <v_i, v_j> * x_i * x_j.
    """
    def __init__(self, latent_size, hidden_units):
        super(MFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size                                          # latent factor dimension
        self.w0 = torch.nn.Parameter(torch.randn(1))                  # global bias
        self.w = torch.nn.Parameter(torch.randn(hidden_units, 1))     # first-order weights
        self.v = torch.nn.Parameter(torch.randn(hidden_units, self.k))  # factor vectors
        self.a = torch.nn.Parameter(torch.randn(hidden_units, hidden_units))  # per-pair interaction weights

    def forward(self, input):
        """input: [batch, hidden_units] -> score [batch, 1]."""
        linear_part = input.matmul(self.w) + self.w0

        # PERF: vectorized replacement of the original O(n^2) Python double
        # loop — same math, one fused tensor expression.
        # gram[i, j] = <v_i, v_j>; pair[b, i, j] = x_i * x_j.
        gram = self.v.matmul(self.v.t())
        pair = input.unsqueeze(2) * input.unsqueeze(1)  # [batch, n, n]
        # Strict upper triangle keeps only i < j, matching the loop bounds.
        mask = torch.triu(torch.ones_like(self.a), diagonal=1)
        inter_part = torch.sum(pair * (gram * self.a * mask), dim=(1, 2))
        inter_part = inter_part.unsqueeze(-1)

        output = linear_part + inter_part
        return output

class TransFM(torch.nn.Module):
    """Translation-based FM head (presumably after Pasricha & McAuley's TransFM).

    NOTE(review): this forward cannot run as written — a required ``dim``
    argument is missing and the reshape assumes batch size 1 (see inline
    notes). Left untouched pending a decision on the intended semantics.
    """
    def __init__(self, latent_size, hidden_units):
        super(TransFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size
        self.var_linear = torch.nn.Parameter(torch.randn(self.hidden_units))          # first-order weights
        self.var_emb_factors = torch.nn.Parameter(torch.randn(self.hidden_units, self.k))    #[None, n, k]

    def forward(self, input):  #[batchsize * maxlen, hidden_units]
        # First-order term <w, x> per sample.
        linear_term = torch.sum(self.var_linear * input, dim=-1)

        # Squared norm of each factor vector.
        var_emb_product = torch.sum(torch.square(self.var_emb_factors), dim=-1)    #[None, n]
        prod_term = torch.unsqueeze(torch.sum(var_emb_product * input, dim=-1), dim=-1)   #[None, 1]
        # NOTE(review): torch.unsqueeze requires a dim argument — the next
        # line raises TypeError at runtime; probably meant dim=-1.
        input_sum = torch.unsqueeze(torch.sum(input, dim=-1))     #[None, 1]
        term_1 = prod_term * input_sum    #[None, 1]

        # NOTE(review): reshape(1, hidden_units) flattens away the batch dim —
        # only valid when the effective batch size is 1; confirm intent.
        input_rep = torch.unsqueeze(input, dim=-1)     #[None, n, 1]
        input_rep = input_rep.reshape(1, self.hidden_units)     #[None, 1, n]
        input_rep = input_rep.repeat(self.hidden_units, 1)   #[None, n, n]
        input_emb_mul = torch.matmul(input_rep, self.var_emb_factors)   #[None, n, k]
        term_2 = 0

        # O(n^2) accumulation of pairwise embedding dot products.
        for i in range(input_emb_mul.shape[-2]):
            for j in range(input_emb_mul.shape[-2]):
                term_2 += torch.sum(input_emb_mul[i, :] * input_emb_mul[j, :], dim=-1)

        output = linear_term + term_1 + term_2

        return output

class ATransFM(torch.nn.Module):
    """Translation-style FM head weighting pairwise embedding distances.

    NOTE(review): depends on ``calculate_distance_square`` from utils —
    presumably the squared distance between two embedding batches; confirm.
    """
    def __init__(self, latent_size, hidden_units, hidden_units_FM):
        super(ATransFM, self).__init__()
        self.hidden_units = hidden_units
        self.k = latent_size
        self.hidden_units_FM = hidden_units_FM
        # t = number of unordered feature pairs.
        self.t = self.hidden_units_FM * (self.hidden_units_FM - 1) // 2

        self.linear = torch.nn.Parameter(torch.randn(self.hidden_units_FM))  # first-order weights
        self.get_embed = torch.nn.Linear(1, self.k)                          # scalar feature -> k-dim embedding
        self.weight = torch.nn.Parameter(torch.randn(self.t))                # per-pair weights
        self.w0 = torch.nn.Parameter(torch.randn(1))                         # bias (unused in forward)

    def forward(self, input):  #[batchsize * maxlen, hidden_units]
        # First-order term <w, x> per sample.
        linear_term = torch.sum(self.linear * input, dim=-1)
        input_ = torch.unsqueeze(input, dim=-1)   #[None, hidden_units, 1]

        embed = self.get_embed(input_)  #[None, hidden_units, k]

        # Pairwise distances between feature embeddings (i < j).
        vector_distance_list = []
        for i in range(self.hidden_units_FM):
            for j in range(i + 1, self.hidden_units_FM):
                vector_distance_list.append(calculate_distance_square(embed[:, i, :], embed[:, j, :]))

        # NOTE(review): the numpy round-trip below detaches these terms from
        # autograd, so no gradients flow back through the distance factors.
        vector_distance_tensor = torch.tensor([item.cpu().detach().numpy() for item in vector_distance_list])   #[t, None]
        vector_distance_tensor = torch.transpose(vector_distance_tensor, 0, 1)

        # Pairwise raw-feature products x_i * x_j (i < j).
        feature_mul_list = []
        for i in range(self.hidden_units_FM):
            for j in range(i + 1, self.hidden_units_FM):
                feature_mul_list.append(input[:, i] * input[:, j])

        feature_mul_tensor = torch.tensor([item.cpu().detach().numpy() for item in feature_mul_list])   #[t, None]
        feature_mul_tensor = torch.transpose(feature_mul_tensor, 0, 1)


        # NOTE(review): hard-coded .to("cpu") — on a CUDA model self.weight
        # lives on the GPU, so this multiply would mix devices; verify.
        inter_term = self.weight * vector_distance_tensor.to("cpu") * feature_mul_tensor.to("cpu")   #[None, t]
        inter_term = torch.sum(inter_term, dim=-1)
        # output = torch.sigmoid(self.w0 + linear_term + inter_term)
        output = (linear_term + inter_term).unsqueeze_(1)

        return output


class CrossNet(torch.nn.Module):
    '''
      Explicit feature-crossing network.
      Input shape:(batch_size, units, 1) — 3-D, as produced by SASRec.Linear_in.
      Output shape:(batch_size, 1).
      Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **layer_num**: Positive integer, the cross layer number
      References
        - [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click predictions[C]//Proceedings of the ADKDD'17. ACM, 2017: 12.](https://arxiv.org/abs/1708.05123)
    '''

    def __init__(self, in_features, layer_num, device, batch_size):
        super(CrossNet, self).__init__()
        self.layer_num = layer_num
        self.batch = batch_size
        self.in_features = in_features
        # One (w, b) pair per cross layer.
        self.kernels = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        self.bias = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.nn.init.zeros_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        # self.linear = torch.nn.Linear(in_features, 10, bias=True)
        self.linear = torch.nn.Linear(in_features, 1, bias=True)
        self.Sigmoid = torch.nn.Sigmoid()
        # BUGFIX: self.to(device) previously ran BEFORE self.linear was
        # created, so the final linear layer was never moved to the target
        # device (device-mismatch crash on CUDA). Move it after all submodules.
        self.to(device)

    def forward(self, inputs):
        """inputs: [batch, in_features, 1] -> score [batch, 1]."""
        x_0 = inputs.float()
        x_l = x_0
        for i in range(self.layer_num):
            # Cross layer: x_{l+1} = sigmoid(x_0 * (x_l . w_i) + b_i + x_l)
            xl_w = torch.tensordot(x_l, self.kernels[i], dims=([1], [0]))
            dot_ = torch.matmul(x_0, xl_w)
            x_l = dot_ + self.bias[i] + x_l
            x_l = self.Sigmoid(x_l)
        x_l = torch.squeeze(x_l, dim=2)
        output = self.linear(x_l)
        return output

class CIN(torch.nn.Module):
    """Compressed Interaction Network — the explicit component of xDeepFM.

    Input shape:
        3-D tensor ``(batch_size, field_size, embedding_size)``.
    Output shape:
        2-D tensor ``(batch_size, featuremap_num)`` where ``featuremap_num``
        is ``sum(layer_size[:-1]) // 2 + layer_size[-1]`` when
        ``split_half=True``, else ``sum(layer_size)``.
    Arguments:
        - **field_size**: positive integer, number of feature groups.
        - **layer_size**: feature-map count per CIN layer.
        - **activation**: name of the activation applied to each feature map.
        - **split_half**: when True, half of each hidden layer's maps feed the
          next layer and the other half connect directly to the output.
        - **l2_reg**: stored on the module; not applied inside it.
        - **seed**: stored on the module; not used inside it.
    References:
        - [Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.] (https://arxiv.org/pdf/1803.05170.pdf)
    """

    def __init__(self, field_size, layer_size=(128, 128), activation='relu', split_half=True, l2_reg=1e-5, seed=2022,
                 device='cuda'):
        super(CIN, self).__init__()
        if not layer_size:
            raise ValueError(
                "layer_size must be a list(tuple) of length greater than 1")

        self.layer_size = layer_size
        self.field_nums = [field_size]
        self.split_half = split_half
        self.activation = activation_layer(activation)
        self.Sigmoid = torch.nn.Sigmoid()
        self.Tanh = torch.nn.Tanh()
        self.l2_reg = l2_reg
        self.seed = seed

        self.conv1ds = torch.nn.ModuleList()
        # NOTE(review): forward_layers2 and linear are registered but never
        # used in forward(); kept so the parameter set / state_dict is
        # unchanged for existing checkpoints.
        self.forward_layers2 = PointWiseFeedForward2(192, 10, 0.2)
        self.linear = torch.nn.Linear(192, 1, bias=True)
        last_index = len(self.layer_size) - 1
        for i, size in enumerate(self.layer_size):
            # A 1x1 conv mixes the (h_i * m) pairwise-interaction channels
            # down to `size` feature maps.
            self.conv1ds.append(
                torch.nn.Conv1d(self.field_nums[-1] * self.field_nums[0], size, 1))
            if not self.split_half:
                self.field_nums.append(size)
                continue
            if i != last_index and size % 2 > 0:
                raise ValueError(
                    "layer_size must be even number except for the last layer when split_half=True")
            self.field_nums.append(size // 2)

        self.to(device)

    def forward(self, inputs):
        """Run the CIN over ``inputs`` of shape (batch, fields, embed_dim).

        Returns a 2-D tensor (batch, featuremap_num): each direct-connect
        feature map sum-pooled over the embedding dimension.
        """
        ndim = len(inputs.shape)
        if ndim != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (ndim))
        batch_size = inputs.shape[0]
        dim = inputs.shape[-1]
        feature_maps = [inputs]
        direct_outputs = []

        last_index = len(self.layer_size) - 1
        for i, size in enumerate(self.layer_size):
            prev, base = feature_maps[-1], feature_maps[0]
            # Outer product x^(k-1) * x^0, kept per embedding slot ...
            z = torch.einsum('bhd,bmd->bhmd', prev, base)
            # ... flattened to (batch, h_i * m, dim) so the 1x1 conv can act
            # on the channel pairs, producing (batch, size, dim).
            z = z.reshape(batch_size, prev.shape[1] * base.shape[1], dim)
            z = self.conv1ds[i](z)

            if self.activation is None or self.activation == 'linear':
                curr_out = z
            else:
                curr_out = self.activation(z)

            if not self.split_half:
                direct_connect, next_hidden = curr_out, curr_out
            elif i == last_index:
                # Last layer: everything goes straight to the output.
                direct_connect, next_hidden = curr_out, 0
            else:
                next_hidden, direct_connect = torch.split(
                    curr_out, 2 * [size // 2], 1)

            direct_outputs.append(direct_connect)
            feature_maps.append(next_hidden)

        result = torch.cat(direct_outputs, dim=1)
        result = torch.sum(result, -1)
        return result



class Dice(torch.nn.Module):
    """The Data Adaptive Activation Function in DIN, which can be viewed as a generalization of PReLu and can adaptively adjust the rectified point according to distribution of input data.

    Input shape:
        - 2 dims: [batch_size, embedding_size(features)]
        - 3 dims: [batch_size, num_features, embedding_size(features)]

    Output shape:
        - Same shape as input.

    References
        - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
        - https://github.com/zhougr1993/DeepInterestNetwork, https://github.com/fanoping/DIN-pytorch
    """

    def __init__(self, emb_size, dim=2, epsilon=1e-8, device='cpu'):
        super(Dice, self).__init__()
        assert dim == 2 or dim == 3

        self.bn = torch.nn.BatchNorm1d(emb_size, eps=epsilon)
        self.sigmoid = torch.nn.Sigmoid()
        self.dim = dim

        # BUG FIX: alpha was a plain tensor (torch.zeros(...).to(device)),
        # so it was never registered as a parameter — invisible to the
        # optimizer, to state_dict(), and to later Module.to() calls. DIN
        # defines alpha as a *learned* per-channel coefficient, so register
        # it as an nn.Parameter.
        if self.dim == 2:
            self.alpha = torch.nn.Parameter(torch.zeros((emb_size,)))
        else:
            self.alpha = torch.nn.Parameter(torch.zeros((emb_size, 1)))
        # Preserve the original constructor's device placement.
        self.to(device)

    def forward(self, x):
        """Apply Dice; x's rank must equal the `dim` given at construction."""
        assert x.dim() == self.dim
        if self.dim == 2:
            x_p = self.sigmoid(self.bn(x))
            out = self.alpha * (1 - x_p) * x + x_p * x
        else:
            # BatchNorm1d normalizes over dim 1, so move the embedding axis
            # there and move it back afterwards.
            x = torch.transpose(x, 1, 2)
            x_p = self.sigmoid(self.bn(x))
            out = self.alpha * (1 - x_p) * x + x_p * x
            out = torch.transpose(out, 1, 2)

        return out


class Identity(torch.nn.Module):
    """No-op module: returns its input unchanged (the 'linear' activation)."""

    def __init__(self, **kwargs):
        # Extra keyword arguments are accepted and ignored so the
        # activation factory can pass options uniformly.
        super(Identity, self).__init__()

    def forward(self, X):
        """Pass ``X`` straight through."""
        return X


def activation_layer(act_name, hidden_size=None, dice_dim=2):
    """Construct an activation layer from a name or an nn.Module subclass.

    Args:
        act_name: str name of the activation ('sigmoid', 'linear', 'relu',
            'dice', 'prelu') or an nn.Module subclass to instantiate.
        hidden_size: int, embedding size — required for 'dice'.
        dice_dim: int, input rank handled by Dice (2 or 3).
    Return:
        act_layer: an instantiated activation module.
    Raises:
        NotImplementedError: if act_name is an unrecognized string or is
            neither a string nor an nn.Module subclass.
    """
    if isinstance(act_name, str):
        name = act_name.lower()
        if name == 'sigmoid':
            return torch.nn.Sigmoid()
        if name == 'linear':
            return Identity()
        if name == 'relu':
            return torch.nn.ReLU(inplace=True)
        if name == 'dice':
            assert dice_dim
            return Dice(hidden_size, dice_dim)
        if name == 'prelu':
            return torch.nn.PReLU()
        # BUG FIX: an unknown string used to fall through every branch and
        # raise UnboundLocalError on `return act_layer`; fail explicitly.
        raise NotImplementedError
    # BUG FIX: guard with isinstance(act_name, type) — issubclass() raises
    # TypeError on non-class inputs, masking the intended NotImplementedError.
    if isinstance(act_name, type) and issubclass(act_name, torch.nn.Module):
        return act_name()
    raise NotImplementedError


# model = CrossNet(in_features=15, layer_num = 2, device = 'cuda')
# layer_num = 1
# torchinfo.summary(model)

