import numpy as np
import torch
from torch.nn.functional import one_hot
import argparse
import torch.nn.functional as F
from utils import *
import random


class PointWiseFeedForward(torch.nn.Module):
    """Two-layer position-wise feed-forward block with a residual connection.

    Structure: Linear -> Dropout -> ReLU -> Linear -> Dropout, then add the
    input back (residual).  Hidden width is preserved, so output shape equals
    input shape.
    """

    def __init__(self, hidden_units, dropout_rate):
        # hidden_units: feature width of the input/output.
        # dropout_rate: dropout probability applied after each linear layer.
        super(PointWiseFeedForward, self).__init__()

        self.Linear1 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        self.Linear2 = torch.nn.Linear(hidden_units, hidden_units)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        # Bug fix: the original forward used only Linear1/dropout1, leaving
        # Linear2/dropout2 as dead parameters; the commented-out conv version
        # it was ported from applies both layers.
        outputs = self.dropout2(self.Linear2(self.relu(self.dropout1(self.Linear1(inputs)))))
        outputs += inputs  # residual connection
        return outputs

class PointWiseFeedForward2(torch.nn.Module):
    """Collapse a sequence of hidden vectors into one logit per example.

    Pipeline: per-position projection [B, L, H] -> [B, L], ReLU, then a
    sequence-level projection [B, L] -> [B, 1] (hidden_units2 must equal the
    sequence length L).
    """

    def __init__(self, hidden_units1, hidden_units2, dropout_rate):
        # hidden_units1: per-position feature width H.
        # hidden_units2: sequence length L consumed by the second projection.
        super(PointWiseFeedForward2, self).__init__()

        # Per-position projection H -> 1.
        self.Linear1 = torch.nn.Linear(hidden_units1, 1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        # Tanh and dropout2 are constructed but unused in forward(); kept so
        # the module's attribute set matches the original.
        self.Tanh = torch.nn.Tanh()
        self.ReLU = torch.nn.ReLU()

        # Sequence-level projection L -> 1.
        self.Linear2 = torch.nn.Linear(hidden_units2, 1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        per_position = self.dropout1(self.Linear1(inputs))  # [B, L, 1]
        per_position = per_position.squeeze(2)              # [B, L]
        activated = self.ReLU(per_position)
        return self.Linear2(activated)                      # [B, 1]


class TimeAwareMultiHeadAttention(torch.nn.Module):
    """Multi-head self-attention whose logits are offset by positional biases.

    ``Biasmode`` selects how an extra bias matrix G is built from the queries:
      - 'lognormal' / 'normal' / 'abs': a decay around a predicted influence
        centre P with spread D, both regressed from the query vectors;
      - 'learn': G is the freely learned (maxlen, maxlen) parameter;
      - 'None': no G at all.
    Note: the learned ``self.bias`` is *always* added to the logits, so in
    'learn' mode it is effectively added twice — NOTE(review): confirm this
    double-add is intentional.  Attention scores are squashed with sigmoid,
    not softmax (see comment in forward()).
    """

    def __init__(self, hidden_size, head_num, dropout_rate, dev, maxlen, Biasmode):
        # hidden_size must be divisible by head_num; head_size is the
        # per-head feature width.
        super(TimeAwareMultiHeadAttention, self).__init__()
        self.Q_w = torch.nn.Linear(hidden_size, hidden_size)
        self.K_w = torch.nn.Linear(hidden_size, hidden_size)
        self.V_w = torch.nn.Linear(hidden_size, hidden_size)

        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.softmax = torch.nn.Softmax(dim=-1)
        self.sigmoid = torch.nn.Sigmoid()

        self.hidden_size = hidden_size
        self.head_num = head_num
        self.head_size = hidden_size // head_num
        self.dropout_rate = dropout_rate
        self.dev = dev
        self.maxlen = maxlen
        # Learned additive bias over (query position, key position).
        self.bias = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(maxlen, maxlen).to(self.dev), mean=0, std=1))
        self.Biasmode = Biasmode

        # Projections used to predict the influence centre (P, via Up) and the
        # spread (D, via Ud) of each behaviour's positional-decay bias.
        self.Wp = torch.nn.Linear(hidden_size, hidden_size)
        self.Ud = torch.nn.Linear(hidden_size, 1)
        self.Up = torch.nn.Linear(hidden_size, 1)

    def forward(self, queries, keys):
        """Return the attended values for ``queries`` against ``keys``.

        Both arguments are presumably [batch, maxlen, hidden_size] — TODO
        confirm against the caller (SASRec.log2feats passes the layer-normed
        sequence as queries and the raw sequence as keys).
        """
        Q, K, V = self.Q_w(queries), self.K_w(keys), self.V_w(keys) # Q from the normalized queries; K, V from the raw sequence

        # Split the feature dim per head and fold heads into the batch dim:
        # [batch, maxlen, hidden] -> [head_num * batch, maxlen, head_size].
        Q_ = torch.cat(torch.split(Q, self.head_size, dim=2), dim=0)   # [head_num * batch, maxlen, head_size]
        K_ = torch.cat(torch.split(K, self.head_size, dim=2), dim=0)
        V_ = torch.cat(torch.split(V, self.head_size, dim=2), dim=0)


        attn_weights = Q_.matmul(torch.transpose(K_, 1, 2)) # dot-product logits: Q times K-transpose

        attn_weights = attn_weights / (K_.shape[-1] ** 0.5)  # scale by sqrt(head_size)
        attn_weights += self.bias     # learned positional bias (applied in every mode)

        # Build the mode-specific bias matrix G.
        if self.Biasmode != 'None':
            Z = self.Ud(torch.tanh(self.Wp(Q_)).to(self.dev))
            P = self.Up(torch.tanh(self.Wp(Q_)).to(self.dev))
            D = self.maxlen * torch.sigmoid(Z)
            D = D.repeat(1, 1, self.maxlen)  # [batch, maxlen, maxlen]; spread matrix — each row is constant because behaviour i's influence on all others follows one distribution
            P = self.maxlen * torch.sigmoid(P)
            P = P.repeat(1, 1, self.maxlen)  # [batch, maxlen, maxlen]; influence-centre matrix — each row is constant, giving the position of behaviour i's peak influence
            G = torch.full_like(attn_weights, 0)  # [batch, maxlen, maxlen]; bias matrix G, initialised to zero
            I = torch.tensor(range(self.maxlen)).to(self.dev)
            I = I.unsqueeze(-1)
            I = I.repeat(1, self.maxlen)
            I = I.repeat(G.shape[0], 1, 1)    # behaviour-index matrix for i: row r holds the value r everywhere
            J = torch.tensor(range(self.maxlen)).to(self.dev)
            J = J.unsqueeze(0)
            J = J.repeat(self.maxlen, 1)
            J = J.repeat(G.shape[0], 1, 1)  # behaviour-index matrix for j: column c holds the value c everywhere

            if self.Biasmode == 'lognormal':
                # Log-normal decay: G[i, j] is the influence of behaviour i on
                # behaviour j, i.e. row i describes what behaviour i exerts on
                # the others.


                interval = J - I
                # Hedged: the CPU round-trip below detaches `interval` from
                # any graph (it is index-derived, so no gradients are lost).
                paddingBias = torch.ones(attn_weights.shape) * (1)
                interval = torch.FloatTensor(interval.cpu().numpy())
                # Clamp intervals below 1 up to 1 so the logarithm is defined.
                interval = torch.where(interval>=1, interval, paddingBias)


                interval = interval.numpy()
                logInterval = np.log(interval)
                interval = torch.tensor(interval).to(self.dev)
                logInterval = torch.Tensor(logInterval).to(self.dev)


                # Bias follows a log-normal shape around centre P with spread D/2.
                G = -(logInterval - P) ** 2 / (2 * (D / 2) ** 2)

                # Transpose G so its orientation matches attn_weights
                # (row = query position, column = key position).
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'normal':
                # Bias follows a normal (Gaussian) decay around centre P.
                G = -(J - P) ** 2 / (2 * (D / 2) ** 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'abs':

                # Bias follows an absolute-distance (Laplace-like) decay.
                G = -abs(J - P) / (D / 2)
                G = torch.transpose(G, 1, 2)
            elif self.Biasmode == 'learn':
                # Bias is the freely learned parameter (already added above —
                # see the class docstring note).
                G = self.bias

            attn_weights += G

        # Sigmoid instead of softmax: each (query, key) score is squashed
        # independently.  (Original note: softmax here broke backward rules.)
        attn_weights = self.sigmoid(attn_weights)
        # https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/4
        attn_weights = self.dropout(attn_weights)


        # attn_weights[i, j]: relevance of behaviour i to behaviour j.
        # NOTE(review): with sigmoid the rows do NOT sum to 1, despite the
        # original comment claiming otherwise.

        outputs = attn_weights.matmul(V_)
        # Un-fold the heads: (head_num * N, T, C / head_num) -> (N, T, C).
        outputs = torch.cat(torch.split(outputs, Q.shape[0], dim=0), dim=2) # div batch_size
        return outputs

# Time-interval-aware self-attentive sequential recommendation model
class SASRec(torch.nn.Module):
    """Self-attentive sequential recommender with an optional feature-
    interaction branch (FM / AFM / CrossNet) blended 50/50 into the attention
    output.

    ``forward`` returns raw logits for training; ``predict`` applies a final
    sigmoid for inference.  All submodule attribute names and the public
    method signatures match the original implementation.
    """

    def __init__(self, args):
        super(SASRec, self).__init__()
        self.args = args
        self.dev = args.device
        # One embedding table per feature column of the behaviour log.
        self.item_emb = torch.nn.ModuleList()
        self.item_onehot = torch.nn.ModuleList()
        self.Linear = torch.nn.Linear(args.maxlen, args.hidden_units)
        self.Sigmoid = torch.nn.Sigmoid()
        self.Softmax = torch.nn.Softmax()
        self.Tanh = torch.nn.Tanh()

        self.item_emb_FM = torch.nn.Linear(args.maxlen, args.hidden_units)

        self.Linear_in = torch.nn.Linear(args.hidden_units, 1)
        self.Linear_out = torch.nn.Linear(args.maxlen, 1, bias=True)
        self.Linear1 = torch.nn.Linear(args.hidden_units, 1)
        self.Linear2 = torch.nn.Linear(args.maxlen, 1)
        self.dropoutFM = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout1 = torch.nn.Dropout(p=args.dropout_rate)
        self.dropout2 = torch.nn.Dropout(p=args.dropout_rate)
        # Per-position weights for collapsing behaviour vectors into a single
        # interest vector.
        self.weight = torch.nn.Parameter(torch.randn(args.maxlen, 1))

        self.relu = torch.nn.ReLU()

        self.emb_dropout = torch.nn.ModuleList()

        # Feature-interaction branch candidates; which one runs is chosen by
        # args.FMmode at forward time (see _interaction_feats).
        self.FM = FM(args.latent_units, args.maxlen)
        self.AFM = AFM(args.maxlen, args.hidden_units_FM, t=args.hidden_units_FM * (args.hidden_units_FM) // 2, k=args.latent_units)
        self.CrossNet = CrossNet(in_features=args.maxlen, layer_num=args.layer_num, device=args.device, batch_size=args.batch_size)
        self.FMmode = args.FMmode

        self.attention_layernorms = torch.nn.ModuleList()  # produce Q for self-attention
        self.attention_layers = torch.nn.ModuleList()
        self.forward_layernorms = torch.nn.ModuleList()
        self.forward_layers = torch.nn.ModuleList()
        self.forward_layers2 = PointWiseFeedForward2(args.hidden_units, args.maxlen, args.dropout_rate)

        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
        # Learnable blend weight between the two branches; currently unused in
        # forward() (the blend is fixed at 0.5/0.5).
        self.alpha = torch.nn.Parameter(torch.tensor([random.random()]))
        print('生成模型权重{}'.format(torch.sigmoid(self.alpha)))  # sanity check that the learnable blend weight initialises correctly

        # Embedding layer: args.emb_list holds the vocabulary size of each
        # feature column.
        for i in args.emb_list:
            item_emb = torch.nn.Embedding(int(i), args.hidden_units, padding_idx=0)
            emb_dropout = torch.nn.Dropout(p=args.dropout_rate)
            self.emb_dropout.append(emb_dropout)
            self.item_emb.append(item_emb)

        for _ in range(args.num_blocks):
            new_attn_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.attention_layernorms.append(new_attn_layernorm)

            new_attn_layer = TimeAwareMultiHeadAttention(args.hidden_units,
                                                            args.num_heads,
                                                            args.dropout_rate,
                                                            args.device,
                                                            args.maxlen,
                                                            args.Biasmode)
            self.attention_layers.append(new_attn_layer)

            new_fwd_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            self.forward_layernorms.append(new_fwd_layernorm)

            new_fwd_layer = PointWiseFeedForward(args.hidden_units, args.dropout_rate)
            self.forward_layers.append(new_fwd_layer)

    def log2feats(self, log_seqs):
        """Embed the behaviour log and run it through the attention stack.

        Returns ``(seqs, seqs_FM)``: the attended features and the raw input
        reserved for the feature-interaction branch.  The working sequence is
        a local variable here — the original kept it in a module-level
        ``global``, which leaked state across calls and instances.
        """
        # Column 0 seeds the sequence; subsequent feature columns are
        # embedded, dropout-regularised and concatenated along dim 1.
        # (Column 0 is not passed through dropout, matching the original.)
        seqs = self.item_emb[0](torch.LongTensor(log_seqs[:, 0]).unsqueeze_(1).to(self.dev))
        for i in range(1, log_seqs.shape[1]):
            _seqs = self.item_emb[i](torch.LongTensor(log_seqs[:, i]).unsqueeze_(1).to(self.dev))
            _seqs = self.emb_dropout[i](_seqs)
            seqs = torch.cat((seqs, _seqs), 1)

        # CrossNet consumes a projected [batch, maxlen, 1] tensor; the other
        # modes take the raw log as floats.
        if self.FMmode == 'CrossNet':
            seqs_FM = self.Linear_in(seqs)
        else:
            seqs_FM = torch.FloatTensor(log_seqs).to(self.dev)

        for i in range(len(self.attention_layers)):
            Q = self.attention_layernorms[i](seqs)
            K = seqs
            mha_outputs = self.attention_layers[i](Q, K)

            seqs = Q + mha_outputs  # residual around attention
            seqs = self.forward_layernorms[i](seqs)

            # The last block uses the sequence-collapsing head; earlier blocks
            # use the standard point-wise feed-forward.
            if i == len(self.attention_layers) - 1:
                seqs = self.forward_layers2(seqs)
            else:
                seqs = self.forward_layers[i](seqs)

        return seqs, seqs_FM

    def _interaction_feats(self, sum_seqs):
        """Dispatch ``sum_seqs`` to the configured feature-interaction module.

        Raises ValueError for an unknown mode (the original duplicated this
        dispatch in forward/predict and raised UnboundLocalError instead).
        """
        if self.FMmode == 'FM':
            return self.FM(sum_seqs)
        elif self.FMmode == 'AFM':
            return self.AFM(sum_seqs)
        elif self.FMmode == 'CrossNet':
            return self.CrossNet(sum_seqs)
        elif self.FMmode == 'CIN':
            # NOTE(review): self.CIN is never defined in this file, so this
            # branch raised AttributeError in the original too — confirm
            # whether CIN support was ever completed.
            return self.CIN(sum_seqs)
        raise ValueError('unknown FMmode: {}'.format(self.FMmode))

    def forward(self, log_seqs):  # for training
        """Return prediction logits for a batch of behaviour logs."""
        log_feats, sum_seqs = self.log2feats(log_seqs)
        if self.FMmode != 'None':
            # Fixed 50/50 blend of the attention branch and the interaction
            # branch (self.alpha exists for a learned blend but is unused).
            log_feats = 0.5 * log_feats + 0.5 * self._interaction_feats(sum_seqs)
        return log_feats

    def predict(self, log_seqs):  # for inference
        """Same computation as forward() with a final sigmoid."""
        return self.Sigmoid(self.forward(log_seqs))


class FM(torch.nn.Module):
    """Factorization Machine scoring head (Rendle, 2010).

    score(x) = w0 + x.w + 0.5 * sum_f[ (x.v)_f^2 - (x^2).(v^2)_f ]

    Bug fix: the original used ``torch.pow(..., 1)`` in both interaction
    terms, making them identical — the pairwise-interaction part was always
    exactly zero and the model degenerated to a linear regression.  The FM
    identity requires squares (as the original's own shape comment noted).
    """

    def __init__(self, latent_size, hidden_units):
        # latent_size: dimension k of the latent factor vectors.
        # hidden_units: number of input features p.
        super(FM, self).__init__()
        self.k = latent_size
        self.w0 = torch.nn.Parameter(torch.randn(1))               # global bias
        self.w = torch.nn.Parameter(torch.randn(hidden_units, 1))  # first-order weights
        self.v = torch.nn.Parameter(torch.randn(hidden_units, self.k))  # latent factors

    def forward(self, input):  # input: [batch, p]
        input = input.float()
        # First-order term: [batch, 1].
        linear_part = input.matmul(self.w) + self.w0
        # (x.v)^2: (batch*p) * (p*k) then squared -> [batch, k].
        inter_part1 = torch.pow(input.matmul(self.v), 2)
        # (x^2).(v^2): [batch, k].
        inter_part2 = torch.pow(input, 2).matmul(torch.pow(self.v, 2))
        # O(p*k) pairwise-interaction term -> [batch, 1].
        inter_part = 0.5 * torch.sum(inter_part1 - inter_part2, dim=-1, keepdim=True)

        output = linear_part + inter_part  # [batch, 1]
        return output

class Interaction_layer(torch.nn.Module):
    '''
    Pairwise element-wise product layer (the interaction step of AFM).
    input shape:  [batchsize, feature, k]
    output shape: [batchsize, feature*(feature-1)/2, k]
    '''

    def __init__(self):
        super(Interaction_layer, self).__init__()

    def forward(self, inputs):
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))

        # All ordered pairs i < j of feature rows, multiplied element-wise.
        elements_wise_product_list = [
            torch.mul(inputs[:, i], inputs[:, j])
            for i in range(inputs.shape[1])
            for j in range(i + 1, inputs.shape[1])
        ]

        # Bug fix: the original rebuilt the result via numpy
        # (torch.tensor([t.cpu().detach().numpy() ...])), which detached the
        # autograd graph and forced a CPU copy.  torch.stack keeps gradients
        # and device intact and yields the same [batch, t, k] layout directly.
        return torch.stack(elements_wise_product_list, dim=1)

class Attention_layer(torch.nn.Module):
    '''
    Attention pooling over the second-order interaction matrix (AFM).
    inputs: [None, t, k] where t = feature*(feature-1)/2 and k is the latent
    dimension; output: [None, k], the attention-weighted sum over the t pairs.
    '''

    def __init__(self, interfield_len, k):
        # interfield_len: number of interaction pairs t.
        # k: latent dimension of each pair vector.
        super(Attention_layer, self).__init__()
        self.attention_w = torch.nn.Linear(k, interfield_len)
        self.attention_h = torch.nn.Linear(interfield_len, 1)
        self.relu = torch.nn.ReLU()

    def forward(self, inputs):  # [None, t, k]
        if inputs.ndim != 3:
            raise ValueError("Unexpected inputs dimensions %d, expected to be 3 dimensions" % (inputs.ndim))
        # Bug fix: the original hard-coded inputs.to("cuda"), which crashed on
        # CPU-only machines.  Moving to the module's own parameter device is
        # identical when the module lives on CUDA and also works on CPU.
        inputs = inputs.to(self.attention_w.weight.device)

        x = self.attention_w(inputs)  # [None, t, t]
        x = self.relu(x)
        x = self.attention_h(x)  # [None, t, 1]
        # Normalise attention over the t interaction pairs.
        a_score = F.softmax(x, dim=-2)
        a_score = torch.transpose(a_score, 1, 2)  # [None, 1, t]
        output = (a_score.matmul(inputs)).reshape(-1, inputs.shape[2])  # [None, k]
        return output

class AFM(torch.nn.Module):
    """Attentional Factorization Machine head.

    Projects the raw input to ``hidden_units_FM`` features, embeds each
    feature into a k-dimensional latent vector, forms all pairwise
    element-wise products, attention-pools them, and emits one sigmoid score
    per example.
    """

    def __init__(self, hidden_units, hidden_units_FM, t, k):
        # hidden_units: raw input width; hidden_units_FM: feature count after
        # the initial projection; t: number of pairs; k: latent dimension.
        super(AFM, self).__init__()
        self.linear1 = torch.nn.Linear(1, k)
        self.linear2 = torch.nn.Linear(k, k)
        self.interaction_layer = Interaction_layer()
        self.attention_layer = Attention_layer(t, k)
        self.output_layer = torch.nn.Linear(k, 1)
        self.start_linear = torch.nn.Linear(hidden_units, hidden_units_FM)
        self.Sigmoid = torch.nn.Sigmoid()

    def forward(self, inputs):  # [N, hidden_units]
        features = self.start_linear(inputs.float())  # [N, feature]
        features = features.unsqueeze(-1)             # [N, feature, 1] — raw FM-style input
        embedded = self.linear1(features)             # [N, feature, k] — latent vectors per feature
        embedded = self.linear2(embedded)             # [N, feature, k] — extra linear layer, same shape
        crossed = self.interaction_layer(embedded)    # [N, t, k] — second-order cross terms
        pooled = self.attention_layer(crossed)        # [N, k]
        score = self.output_layer(pooled)             # [N, 1]
        return self.Sigmoid(score)


class CrossNet(torch.nn.Module):
    '''
      Input shape:  (batch_size, units, 1)  — a 3-D column-vector per example.
      Output shape: (batch_size, 1).
      Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **layer_num**: Positive integer, the cross layer number
      References
        - [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click predictions[C]//Proceedings of the ADKDD'17. ACM, 2017: 12.](https://arxiv.org/abs/1708.05123)
      Note: unlike the paper, each cross layer here is followed by a sigmoid,
      and a final linear layer maps the crossed features to a single score.
    '''

    def __init__(self, in_features, layer_num, device, batch_size):
        super(CrossNet, self).__init__()
        self.layer_num = layer_num
        self.batch = batch_size
        self.in_features = in_features
        # One (in_features, 1) kernel and bias per cross layer.
        self.kernels = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        self.bias = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.nn.init.zeros_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        self.linear = torch.nn.Linear(in_features, 1, bias=True)
        self.Sigmoid = torch.nn.Sigmoid()
        # Bug fix: the original called self.to(device) BEFORE creating
        # self.linear, so the final layer was never moved to the target
        # device; moving the call to the end places every submodule.
        self.to(device)

    def forward(self, inputs):  # [batch, in_features, 1]
        x_0 = inputs.float()
        x_l = x_0
        for i in range(self.layer_num):
            # x_l . w_i : [batch, 1, 1]
            xl_w = torch.tensordot(x_l, self.kernels[i], dims=([1], [0]))
            # x_0 (x_l . w_i) : [batch, in_features, 1]
            dot_ = torch.matmul(x_0, xl_w)
            x_l = dot_ + self.bias[i] + x_l  # cross + residual
            x_l = self.Sigmoid(x_l)
        x_l = torch.squeeze(x_l, dim=2)  # [batch, in_features]
        output = self.linear(x_l)        # [batch, 1]
        return output

# model = CrossNet(in_features=15, layer_num = 2, device = 'cuda')
# layer_num = 1
# torchinfo.summary(model)

