"""
Pytorch implementation of Paper
'A Bi-model based RNN Semantic Frame Parsing Model for Intent Detection and Slot Filling'
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class IntentEncoder(nn.Module):
    """Two-layer BiLSTM encoder for the intent-detection network.

    Consumes pre-embedded word vectors and returns contextual hidden
    states; forward/backward directions are concatenated, so the output
    feature size is 2 * lstm_hidden_size.
    """

    def __init__(self, embedding_dim, lstm_hidden_size, dropout_rate):
        """
        :param embedding_dim: feature size of the pre-trained word embeddings
        :param lstm_hidden_size: hidden size per LSTM direction
        :param dropout_rate: dropout probability (inputs, between layers, outputs)
        """
        super(IntentEncoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.lstm_hidden_size = lstm_hidden_size
        self.dropout_rate = dropout_rate

        # Embeddings are pre-trained and applied by the caller; learning them
        # from scratch would require an nn.Embedding layer here instead.
        self.lstm = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.lstm_hidden_size,
                            num_layers=2, bidirectional=True, batch_first=True, dropout=self.dropout_rate)

    def forward(self, x):
        """
        :param x: (batch, seq_len, embedding_dim) pre-embedded inputs
        :return: (batch, seq_len, 2 * lstm_hidden_size) BiLSTM states
        """
        # Pass training=self.training so dropout is disabled in eval mode;
        # F.dropout defaults to training=True, which would corrupt inference.
        x = F.dropout(x, self.dropout_rate, training=self.training)
        x, _ = self.lstm(x)
        x = F.dropout(x, self.dropout_rate, training=self.training)
        return x


class IntentDecoder(nn.Module):
    """LSTM decoder that predicts one intent label per utterance."""

    def __init__(self, lstm_hidden_size, dropout_rate, label_size):
        """
        :param lstm_hidden_size: hidden size of the decoder LSTM
        :param dropout_rate: dropout probability on inputs and outputs
        :param label_size: number of intent classes
        """
        super(IntentDecoder, self).__init__()
        self.lstm_hidden_size = lstm_hidden_size
        self.label_size = label_size
        self.dropout_rate = dropout_rate

        # Input is the concatenation of the intent encoder's bidirectional
        # state (2 * hidden) and the shared slot-side state (2 * hidden).
        self.lstm = nn.LSTM(input_size=self.lstm_hidden_size*4, hidden_size=lstm_hidden_size,
                            batch_first=True, num_layers=1)

        # Projects the state at the last real time step onto the intent labels.
        self.fc = nn.Linear(in_features=self.lstm_hidden_size, out_features=self.label_size)

    def forward(self, x, hs, real_len):
        """
        :param x: (batch, seq_len, 2 * hidden) intent-encoder states
        :param hs: (batch, seq_len, 2 * hidden) shared slot-encoder states
        :param real_len: true (unpadded) length of each sequence
        :return: (batch, label_size) intent logits
        """
        batch_size = x.size(0)
        # as_tensor avoids torch.tensor's copy warning when real_len is
        # already a tensor; x.device keeps the module device-agnostic.
        real_len = torch.as_tensor(real_len, dtype=torch.long, device=x.device)

        x = torch.cat((x, hs), dim=-1)
        # training=self.training disables dropout in eval mode.
        x = F.dropout(x, self.dropout_rate, training=self.training)
        x, _ = self.lstm(x)
        x = F.dropout(x, self.dropout_rate, training=self.training)

        # Pick each sequence's state at its last real (non-padding) step.
        index = torch.arange(batch_size, dtype=torch.long, device=x.device)
        state = x[index, real_len - 1, :]  # (batch, hidden)
        # No squeeze(): the original squeeze() collapsed the batch dimension
        # whenever batch_size == 1, breaking downstream loss computation.
        return self.fc(state)


class SlotEncoder(nn.Module):
    """Two-layer BiLSTM encoder for the slot-filling network.

    Same contract as IntentEncoder: pre-embedded inputs in, concatenated
    forward/backward states (2 * lstm_hidden_size features) out.
    """

    def __init__(self, embedding_dim, lstm_hidden_size, dropout_rate):
        """
        :param embedding_dim: feature size of the pre-trained word embeddings
        :param lstm_hidden_size: hidden size per LSTM direction
        :param dropout_rate: dropout probability on inputs and outputs
        """
        super(SlotEncoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.lstm_hidden_size = lstm_hidden_size
        self.dropout_rate = dropout_rate

        # Embeddings are pre-trained and applied by the caller.
        self.lstm = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.lstm_hidden_size,
                            num_layers=2, bidirectional=True, batch_first=True)

    def forward(self, x):
        """
        :param x: (batch, seq_len, embedding_dim) pre-embedded inputs
        :return: (batch, seq_len, 2 * lstm_hidden_size) BiLSTM states
        """
        # training=self.training disables dropout in eval mode; F.dropout
        # defaults to training=True, which would corrupt inference.
        x = F.dropout(x, self.dropout_rate, training=self.training)
        x, _ = self.lstm(x)
        x = F.dropout(x, self.dropout_rate, training=self.training)
        return x


class SlotDecoder(nn.Module):
    """Unidirectional LSTM decoder for slot filling.

    At each time step t the input concatenates:
      - the slot encoder's bidirectional state at t    (2 * lstm_hidden_size)
      - the intent encoder's bidirectional state at t  (2 * lstm_hidden_size)
      - the decoder's own output at t-1                (1 * lstm_hidden_size)
    hence input_size = 5 * lstm_hidden_size.
    """

    def __init__(self, lstm_hidden_size, dropout_rate, label_size):
        """
        :param lstm_hidden_size: hidden size of the decoder LSTM
        :param dropout_rate: dropout probability on inputs and outputs
        :param label_size: number of slot labels
        """
        super(SlotDecoder, self).__init__()
        self.lstm_hidden_size = lstm_hidden_size
        self.dropout_rate = dropout_rate

        # batch_first=True so the per-step inputs of shape (batch, 1, feature)
        # are read as one time step per batch element.  The original default
        # (seq-first) layout made the LSTM unroll over the batch dimension,
        # leaking hidden state between samples within a batch.
        self.lstm = nn.LSTM(input_size=self.lstm_hidden_size*5, hidden_size=self.lstm_hidden_size,
                            num_layers=1, batch_first=True)
        # Per-step projection onto the slot label space.
        self.fc = nn.Linear(in_features=self.lstm_hidden_size, out_features=label_size)

    def forward(self, x, hi):
        """
        :param x: (batch, seq_len, 2 * hidden) slot-encoder states
        :param hi: (batch, seq_len, 2 * hidden) intent-encoder states
        :return: (batch, seq_len, label_size) per-token slot logits
        """
        batch = x.size(0)
        length = x.size(1)
        # Previous decoder output; zeros at t=0.
        prev_out = torch.zeros(batch, 1, self.lstm_hidden_size, device=x.device)
        # Initial (h, c), shaped (num_layers, batch, hidden): one state per
        # sample (the original used batch dim 1, shared across the batch).
        hidden_state = (torch.zeros(1, batch, self.lstm_hidden_size, device=x.device),
                        torch.zeros(1, batch, self.lstm_hidden_size, device=x.device))

        x = torch.cat((x, hi), dim=-1)  # (batch, seq_len, 4 * hidden)
        # training=self.training disables dropout in eval mode.
        x = F.dropout(x, self.dropout_rate, training=self.training)

        all_out = []
        for t in range(length):
            # One decoding step: encoder features at t plus previous output.
            step_in = torch.cat((x[:, t:t + 1, :], prev_out), dim=-1)
            prev_out, hidden_state = self.lstm(step_in, hidden_state)
            all_out.append(prev_out)
        output = torch.cat(all_out, dim=1)  # (batch, seq_len, hidden)
        # Regularize the decoder outputs; the original applied this dropout
        # to the already-consumed input tensor, leaving it a dead statement.
        output = F.dropout(output, self.dropout_rate, training=self.training)
        return self.fc(output)


class Intent(nn.Module):
    """Container for the intent-detection half of the bi-model.

    Bundles the intent encoder/decoder with the hidden-state tensor that is
    exchanged with the slot network (presumably once per training step —
    the update site is outside this file; TODO confirm against the trainer).
    """

    def __init__(self, embedding_dim, lstm_hidden_size, dropout_rate, batch_size, max_len, num_classes):
        super(Intent, self).__init__()

        self.encoder = IntentEncoder(embedding_dim, lstm_hidden_size, dropout_rate).to(device)
        self.decoder = IntentDecoder(lstm_hidden_size, dropout_rate, num_classes).to(device)
        # NOTE(review): plain tensor attribute, not a registered buffer, so it
        # is excluded from state_dict() and later .to()/.cuda() moves; it also
        # shadows nn.Module.share_memory().  Kept as-is because external code
        # likely accesses it by this name — verify before renaming.
        self.share_memory = torch.zeros(batch_size, max_len, lstm_hidden_size*2).to(device)


class Slot(nn.Module):
    """Container for the slot-filling half of the bi-model.

    Bundles the slot encoder/decoder with the hidden-state tensor that is
    exchanged with the intent network (presumably once per training step —
    the update site is outside this file; TODO confirm against the trainer).
    """

    def __init__(self, embedding_dim, lstm_hidden_size, dropout_rate, batch_size, max_len, num_classes):
        super(Slot, self).__init__()

        self.encoder = SlotEncoder(embedding_dim, lstm_hidden_size, dropout_rate).to(device)
        self.decoder = SlotDecoder(lstm_hidden_size, dropout_rate, num_classes).to(device)
        # NOTE(review): plain tensor attribute, not a registered buffer, so it
        # is excluded from state_dict() and later .to()/.cuda() moves; it also
        # shadows nn.Module.share_memory().  Kept as-is because external code
        # likely accesses it by this name — verify before renaming.
        self.share_memory = torch.zeros(batch_size, max_len, lstm_hidden_size*2).to(device)
