import torch
import torch.nn as nn
import torch.nn.functional as F
from performer_pytorch.performer_pytorch import PerformerLM
import numpy as np


class Model(nn.Module):
    """Causal Performer-based sequence model over an item vocabulary.

    Wraps ``PerformerLM`` and supplies learned absolute positional key/value
    embeddings that are forwarded to the attention via the ``abs_pos_K`` /
    ``abs_pos_V`` kwargs (this requires a modified performer_pytorch that
    accepts those arguments — the upstream package does not).
    """

    def __init__(self, hyper_params):
        """Build the performer and the positional K/V embedding tables.

        Args:
            hyper_params: dict with keys 'total_items', 'seq_len',
                'item_embed_size', 'depth', 'num_heads', 'dropout_rate',
                and 'device'.
        """
        super().__init__()

        self.hyper_params = hyper_params
        # NOTE(review): the performer's dropouts are hard-coded to 0.5 while
        # the positional-embedding dropouts below use
        # hyper_params['dropout_rate'] — confirm this asymmetry is intentional.
        self.performer = PerformerLM(
            # +1 reserves one extra token id (presumably for padding) on top
            # of the item vocabulary — TODO confirm against the data pipeline.
            num_tokens=hyper_params['total_items'] + 1,
            max_seq_len=hyper_params['seq_len'],
            dim=hyper_params['item_embed_size'],
            depth=hyper_params['depth'],
            heads=hyper_params['num_heads'],
            causal=True,  # autoregressive: position t attends only to <= t
            emb_dropout=0.5,
            ff_dropout=0.5,
            attn_dropout=0.5,
        )
        # Learned absolute positional embeddings injected into attention K/V.
        self.abs_pos_K_emb = nn.Embedding(
            hyper_params['seq_len'], hyper_params['item_embed_size'])
        self.abs_pos_V_emb = nn.Embedding(
            hyper_params['seq_len'], hyper_params['item_embed_size'])
        self.abs_pos_K_emb_dropout = nn.Dropout(p=hyper_params['dropout_rate'])
        self.abs_pos_V_emb_dropout = nn.Dropout(p=hyper_params['dropout_rate'])

    def forward(self, x, padding):
        """Run the performer over a batch of item-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len) of item ids.
            padding: boolean tensor of shape (batch, seq_len); presumably
                True marks padded slots (it is inverted into the attention
                mask) — verify against the caller.

        Returns:
            Tensor of shape (batch, seq_len, num_tokens) of per-position
            probabilities over the vocabulary.
        """
        batch_size, seq_len = x.shape
        # Position ids [0, seq_len) built directly on the target device and
        # broadcast over the batch. Replaces the original numpy np.tile +
        # torch.LongTensor round-trip, which allocated on the CPU and paid a
        # host->device copy on every forward pass; the resulting values are
        # identical.
        positions = torch.arange(
            seq_len, dtype=torch.long, device=self.hyper_params['device']
        ).unsqueeze(0).expand(batch_size, -1)
        abs_pos_K = self.abs_pos_K_emb_dropout(self.abs_pos_K_emb(positions))
        abs_pos_V = self.abs_pos_V_emb_dropout(self.abs_pos_V_emb(positions))

        # Performer mask convention: True = attend, so invert the padding flags.
        mask = ~padding
        y = self.performer(x, mask=mask, abs_pos_K=abs_pos_K,
                           abs_pos_V=abs_pos_V)
        # NOTE(review): softmax is applied here, so the model emits
        # probabilities. If the training loss is nn.CrossEntropyLoss (which
        # expects raw logits) this double-normalizes — confirm the loss used
        # downstream expects probabilities (e.g. NLL on log(out)).
        out = torch.softmax(y, dim=-1)

        return out
