# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F


class MultiChannelMatcher(nn.Module):
    """Score query/candidate pairs from two word-interaction channels.

    Channel 1 is the matrix of pairwise dot products between the raw word
    embeddings of the two sequences; channel 2 is the same for the BiLSTM
    encodings (with the query side linearly projected first). A bank of 2-D
    convolutions followed by global max pooling summarizes the stacked maps,
    and a final linear layer projects the pooled features to one score per
    pair.
    """

    def __init__(self, emb_vectors, hidden_size, kernels, num_filters, dropout_p=0.0):
        """
        Args:
            emb_vectors: (vocab_size, emb_size) pretrained embedding matrix.
                NOTE(review): ``from_pretrained`` freezes the weights by
                default — confirm that non-trainable embeddings are intended.
            hidden_size: total BiLSTM output size; must be even because it is
                split across the two directions.
            kernels: iterable of 2-D kernel sizes, e.g. ``[(3, 3), (4, 4)]``.
                At forward time each kernel must fit inside the
                (query_len, candidate_len) interaction map, i.e. both
                sequence lengths must be >= the largest kernel dimension.
            num_filters: output channels per convolution branch.
            dropout_p: dropout applied to the embedded inputs.
        """
        super(MultiChannelMatcher, self).__init__()
        assert hidden_size % 2 == 0
        self.vocab_size = emb_vectors.size(0)
        self.emb_size = emb_vectors.size(1)
        self.hidden_size = hidden_size
        self.num_filters = num_filters

        self.embeddings = nn.Embedding.from_pretrained(emb_vectors)

        self.lstm = nn.LSTM(self.emb_size, self.hidden_size // 2,
                            num_layers=1,
                            batch_first=True,
                            bidirectional=True)

        # One Conv2d+ReLU branch per kernel size, over the 2 stacked
        # interaction channels. Branches are registered by name (rather than
        # appended) so state_dict keys stay 'conv_layers.conv-<kernel>...',
        # keeping existing checkpoints loadable.
        self.conv_layers = nn.ModuleList([])
        for kernel in kernels:
            conv_layer_i = nn.Sequential(
                nn.Conv2d(2, num_filters, kernel_size=kernel),
                nn.ReLU()
            )
            self.conv_layers.add_module(name='conv-{}'.format(kernel),
                                        module=conv_layer_i)

        self.conv_output_size = self.num_filters * len(kernels)
        self.linear_lstm = nn.Linear(self.hidden_size, self.hidden_size)

        self.similarity_project = nn.Linear(self.conv_output_size, 1)
        self.dropout = nn.Dropout(p=dropout_p)

    def conv_and_max_pooling(self, inputs):
        """Run each conv branch on `inputs` and global-max-pool its output.

        Args:
            inputs: (batch, 2, query_len, candidate_len) stacked interaction maps.

        Returns:
            (batch, num_filters * len(kernels)) concatenated pooled features.
        """
        conv_outputs = []
        for conv_layer in self.conv_layers:
            output_i = conv_layer(inputs)
            # Global max pooling over the whole feature map -> (B, num_filters, 1, 1).
            output_i = F.max_pool2d(output_i, (output_i.size(2), output_i.size(3)))
            conv_outputs.append(output_i.flatten(start_dim=1))
        return torch.cat(conv_outputs, dim=1)

    def forward(self, query_inputs, candidate_inputs, **kwargs):
        """Compute a similarity score for each query/candidate pair.

        Args:
            query_inputs: (batch, query_len) LongTensor of token ids.
            candidate_inputs: (batch, candidate_len) LongTensor of token ids.

        Returns:
            (batch,) tensor of unnormalized similarity scores.
        """
        query_emb_inputs = self.embeddings(query_inputs)
        query_emb_inputs = self.dropout(query_emb_inputs)

        candidate_emb_inputs = self.embeddings(candidate_inputs)
        candidate_emb_inputs = self.dropout(candidate_emb_inputs)

        # Both sequences share the same BiLSTM encoder (siamese encoding).
        query_rnn_outputs, _ = self.lstm(query_emb_inputs)
        candidate_rnn_outputs, _ = self.lstm(candidate_emb_inputs)

        # Channel 1: token-embedding interactions -> (B, query_len, candidate_len).
        embedded_interactions = torch.einsum('bik,bjk->bij', (query_emb_inputs, candidate_emb_inputs))

        # Channel 2: LSTM-state interactions, query side projected first.
        query_rnn_outputs_ = self.linear_lstm(query_rnn_outputs)
        rnn_interactions = torch.einsum('bik,bjk->bij', (query_rnn_outputs_, candidate_rnn_outputs))

        conv_output = self.conv_and_max_pooling(torch.stack([embedded_interactions, rnn_interactions], dim=1))

        similarity = self.similarity_project(conv_output).squeeze(1)
        return similarity


if __name__ == '__main__':
    # Smoke test: build a small matcher and score two query/candidate pairs.
    embs = torch.randn(100, 64)
    matcher = MultiChannelMatcher(embs, hidden_size=256, kernels=[(3, 3), (4, 4), (5, 5)], num_filters=64, dropout_p=0.2)
    print(matcher)

    # Both sequences must be at least as long as the largest kernel (5x5).
    # The original candidate batch had length 4, which made the (5, 5) conv's
    # output width 4 - 5 + 1 = 0 and crashed at runtime; pad to length 5.
    input1 = torch.LongTensor([[1, 3, 4, 0, 0], [20, 30, 50, 60, 0]])
    input2 = torch.LongTensor([[1, 30, 40, 10, 0], [2, 3, 5, 0, 0]])
    print(matcher.forward(input1, input2))
