# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import numpy as np


class MultiHeadSelfAttention(nn.Module):
    """Multi-head scaled dot-product attention (Vaswani et al. style).

    ``hidden_size`` is split across ``heads`` sub-spaces of ``d_k`` units
    each; attention runs per head and the heads are recombined with a
    final linear projection.

    Args:
        heads: number of attention heads; must evenly divide hidden_size.
        hidden_size: total model dimension of Q/K/V and the output.
        keep_prob: dropout probability on the attention weights.
            NOTE(review): despite the name, this is the *drop* probability
            passed straight to ``nn.Dropout`` — confirm with callers.
    """

    def __init__(self, heads, hidden_size, keep_prob=0.1):
        super(MultiHeadSelfAttention, self).__init__()
        assert hidden_size % heads == 0
        self.heads = heads
        self.d_k = hidden_size // heads
        # Four projections: query, key, value, and the final output layer.
        self.linears = nn.ModuleList([nn.Linear(hidden_size, hidden_size) for _ in range(4)])
        self.dropout = nn.Dropout(keep_prob)

    def attention(self, query, key, value, masks=None, dropout=None):
        """Scaled dot-product attention.

        Args:
            query/key/value: (batch, heads, seq, d_k) tensors.
            masks: optional tensor broadcastable to the score shape
                (batch, heads, seq_q, seq_k); positions where masks == 0
                are excluded from attention.
            dropout: optional dropout module applied to the weights.

        Returns:
            Tuple of (attended values, attention weight matrix).
        """
        d_k = query.size(-1)
        score = torch.matmul(query, key.transpose(-2, -1)) / np.sqrt(d_k)
        if masks is not None:
            # BUG FIX: the fill value must be a large *negative* number so
            # masked positions get ~0 weight after softmax. The original
            # used -1e-9, which is essentially zero, so padding positions
            # were still attended to almost normally.
            score = score.masked_fill(masks == 0, -1e9)

        p_att = torch.softmax(score, dim=-1)
        if dropout is not None:
            p_att = dropout(p_att)

        return torch.matmul(p_att, value), p_att

    def forward(self, Q, K, V, masks=None):
        """Project Q/K/V, attend per head, and merge the heads back.

        Args:
            Q, K, V: (batch, seq, hidden_size) tensors.
            masks: optional (batch, seq_k) mask; 0 marks padding.

        Returns:
            (batch, seq, hidden_size) attended representation.
        """
        if masks is not None:
            # (batch, seq) -> (batch, 1, 1, seq) to broadcast over heads
            # and query positions.
            masks = masks.unsqueeze(1).unsqueeze(1)
        nbatches = Q.size(0)
        q, k, v = [linear(x).view(nbatches, -1, self.heads, self.d_k).transpose(1, 2)
                   for linear, x in zip(self.linears, (Q, K, V))]
        x, _ = self.attention(q, k, v, masks, self.dropout)
        # Undo the head split: (batch, heads, seq, d_k) -> (batch, seq, hidden).
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.heads * self.d_k)
        x = self.linears[-1](x)
        return x


class FeedForward(nn.Module):
    """Position-wise feed-forward head: Linear -> ReLU -> Dropout -> Linear.

    Args:
        hidden_size: input feature dimension.
        d_ff: inner (expansion) dimension.
        tgt_size: output feature dimension.
        keep_prob: dropout probability applied after the ReLU.
    """

    def __init__(self, hidden_size, d_ff, tgt_size, keep_prob):
        super(FeedForward, self).__init__()
        self.w_1 = nn.Linear(hidden_size, d_ff)
        self.w_2 = nn.Linear(d_ff, tgt_size)
        self.dropout = nn.Dropout(keep_prob)

    def forward(self, x):
        # Expand, activate, regularize, then project to the target size.
        hidden = torch.relu(self.w_1(x))
        hidden = self.dropout(hidden)
        return self.w_2(hidden)


class LSTM_Attn(nn.Module):
    """Sequence classifier: embeddings + sinusoidal positions -> BiLSTM ->
    multi-head self-attention -> feed-forward projection.

    The prediction for each sequence is read from the hidden state at its
    last valid (non-padded) time step.
    """

    def __init__(self,
                 vocab_size: int,
                 tgt_size: int,
                 emb_dim: int,
                 max_len: int,
                 hidden_size: int,
                 num_layers: int = 2,
                 keep_prob: float = 0.5,
                 num_heads: int = 2,
                 attn_keep_prob: float = 0.1,
                 d_ff: int = 128,
                 pad_idx: int = 0,
                 use_cuda: bool = True):
        super(LSTM_Attn, self).__init__()
        self.device = torch.device("cuda:0" if use_cuda and torch.cuda.is_available() else "cpu:0")
        self.max_len = max_len
        self.pad_idx = pad_idx
        self.embeddings = nn.Embedding(vocab_size, emb_dim, pad_idx)
        # Pre-computed (max_len, emb_dim) sinusoidal position table;
        # plain attribute (not a buffer), so it is rebuilt rather than
        # saved in the state dict.
        self.pe = self.positional_encoding(max_len, emb_dim, self.device)
        self.layerNorm = nn.LayerNorm(emb_dim)
        # Bidirectional LSTM: hidden_size is split across the two directions.
        # NOTE(review): `keep_prob` is handed to nn.LSTM/nn.Dropout as the
        # *drop* probability despite its name — confirm with callers.
        self.lstm = nn.LSTM(emb_dim, hidden_size // 2, num_layers, batch_first=True, dropout=keep_prob, bidirectional=True)
        self.attn = MultiHeadSelfAttention(num_heads, hidden_size, attn_keep_prob)
        self.ffn = FeedForward(hidden_size, d_ff, tgt_size, keep_prob)
        self.to(self.device)

    @staticmethod
    def positional_encoding(max_len, emb_dim, device):
        """Build the (max_len, emb_dim) sinusoidal positional-encoding table.

        BUG FIX: the table must be a *float* tensor. The original allocated
        an uninitialized ``torch.LongTensor``, so the sin/cos values were
        truncated to integers (mostly zeros / memory garbage).

        Assumes emb_dim is even (odd dims would make the cos slice
        mismatch) — TODO confirm with callers.
        """
        pe = torch.zeros(max_len, emb_dim)
        pos = torch.arange(0, max_len, 1.0).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/emb_dim).
        k = torch.exp(-np.log(10000) * torch.arange(0, emb_dim, 2.) / emb_dim)
        pe[:, 0::2] = torch.sin(pos * k)
        pe[:, 1::2] = torch.cos(pos * k)
        return pe.to(device)

    def forward(self, entities, masks):
        """Classify a batch of padded token-id sequences.

        Args:
            entities: (batch, max_len) LongTensor of token ids.
            masks: (batch, max_len) tensor; 1 for real tokens, 0 for padding.
                Assumes every row has at least one real token — pack fails
                on zero-length sequences (TODO confirm with callers).

        Returns:
            (batch, tgt_size) logits.
        """
        batch_size, max_len = entities.size()
        seq_len = masks.sum(dim=1).long()  # true (unpadded) length per row

        emb = self.embeddings(entities)
        emb = emb + self.pe[:max_len]
        emb = self.layerNorm(emb)

        # BUG FIX: lengths must live on the CPU for packing, and
        # enforce_sorted=False lets batches arrive in any length order
        # (the original implicitly required descending-length-sorted
        # batches and raised otherwise).
        out = nn.utils.rnn.pack_padded_sequence(emb, seq_len.cpu(), batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(out)
        out, _ = nn.utils.rnn.pad_packed_sequence(out, True, self.pad_idx, max_len)

        out = torch.tanh(out)
        seq_ends = (seq_len - 1)  # index of each row's last real token

        out = self.attn(out, out, out, masks)
        # Read out the representation at the last valid position per row.
        out = self.ffn(out[torch.arange(batch_size), seq_ends, :])
        return out

