from torch import bmm, mul, matmul, no_grad
from torch import softmax as torch_softmax
from torch import max as torch_max
from torch import cat as torch_cat
from torch import sum as torch_sum
from torch import argmax as torch_argmax
from torch.nn import Module, Conv1d, Sequential, Linear, Softmax, CrossEntropyLoss
from pytorch_pretrained_bert import BertModel
from numpy import inf as np_inf


class Model(Module):
    """Sentence/node matching network.

    Token-id sequences for a sentence and a node description are embedded with
    a frozen Chinese BERT, cross-attended against each other, passed through a
    shared 1-D CNN, max-pooled over channels, concatenated and classified into
    two classes.
    """

    def __init__(self, config):
        """Build the model.

        Args:
            config: object providing ``embedding_dim`` (must be 768, the
                BERT-base hidden size), ``max_seq_len``, ``cnn_out_channels``
                and ``cnn_kernel_size``.
        """
        super(Model, self).__init__()
        self.config = config
        self.embedding_dim = config.embedding_dim
        self.max_seq_len = config.max_seq_len

        # The frozen encoder below emits 768-dim token vectors (BERT-base).
        assert self.embedding_dim == 768
        # One CNN shared by both attention branches; expects channels-first
        # input of shape (batch, embedding_dim, max_seq_len).
        self.feature_extractor = Conv1d(in_channels=self.embedding_dim,
                                        out_channels=self.config.cnn_out_channels,
                                        kernel_size=self.config.cnn_kernel_size)
        self.bert_model = BertModel.from_pretrained("bert-base-chinese")
        # Freeze BERT; only the CNN and the classifier head are trained.
        for param in self.bert_model.parameters():
            param.requires_grad = False
        # Each branch contributes (max_seq_len - kernel_size + 1) pooled
        # features; the two branches are concatenated before the head.
        # NOTE(review): Softmax here, combined with CrossEntropyLoss in
        # train_step, applies log-softmax on top of softmax. Kept to preserve
        # behavior, but the Softmax layer should ideally be dropped and raw
        # logits fed to the loss.
        self.classification = Sequential(
            Linear(2 * (self.config.max_seq_len - self.config.cnn_kernel_size + 1), 2),
            Softmax(dim=1)
        )

    def embedding(self, inputs):
        """Embed token ids with the frozen BERT encoder.

        Args:
            inputs: (batch, max_seq_len) token-id tensor.
        Returns:
            (batch, max_seq_len, embedding_dim) tensor — the last encoder
            layer (BertModel returns (encoded_layers, pooled_output)).
        """
        embeddings, _ = self.bert_model(inputs)
        embeddings = embeddings[-1]
        return embeddings

    def attention(self, p, q, mask):
        """Dot-product attention weights of each position of ``p`` over ``q``.

        Args:
            p: (batch, max_seq_len, embedding_dim) query sequence.
            q: (batch, max_seq_len, embedding_dim) key sequence.
            mask: (batch, max_seq_len, max_seq_len) boolean mask, True at
                padding positions of ``q`` (see build_mask).
        Returns:
            (batch, max_seq_len, max_seq_len) row-normalized weights; masked
            positions receive zero weight.
        """
        attention = bmm(p, q.permute(0, 2, 1))   # raw scores, batch * L * L
        # BUG FIX: masked_fill is out-of-place and the original discarded its
        # result, so padding positions still received attention weight.
        attention = attention.masked_fill(mask, -np_inf)
        return torch_softmax(attention, dim=2)

    def build_mask(self, input_q):
        """Boolean mask marking padding (token id 0) positions of ``input_q``.

        Args:
            input_q: (batch, max_seq_len) token-id tensor.
        Returns:
            (batch, max_seq_len, max_seq_len) mask, broadcast so every query
            row masks the same padded key columns.
        """
        mask = (input_q.eq(0)).unsqueeze(dim=1).expand(-1, self.max_seq_len, -1)
        return mask

    def similarity(self, p, q):
        """Multi-perspective similarity of ``p`` and ``q``.

        NOTE(review): dead code — ``self.W`` is never created in __init__ (its
        definition is commented out), so calling this raises AttributeError.
        Kept for interface compatibility.
        """
        output = mul(p, q)
        output = matmul(output, self.W.t())   # batch * max_seq_len * num_perspective
        return output

    def forward(self, sentences, nodes, lens_s, lens_n):
        """Score a batch of (sentence, node) pairs.

        Args:
            sentences: (batch, max_seq_len) sentence token ids.
            nodes: (batch, max_seq_len) node token ids.
            lens_s, lens_n: sequence lengths — currently unused; padding is
                handled via the id-0 masks instead.
        Returns:
            (batch, 2) class probabilities (Softmax output).
        """
        sentences_embeddings = self.embedding(sentences)    # batch * L * dim
        nodes_embeddings = self.embedding(nodes)            # batch * L * dim

        s2n_mask = self.build_mask(nodes)
        n2s_mask = self.build_mask(sentences)
        attention_n = self.attention(sentences_embeddings, nodes_embeddings, s2n_mask)
        attention_s = self.attention(nodes_embeddings, sentences_embeddings, n2s_mask)

        # Attention-weighted summaries of each side, batch * L * dim.
        attention_n = bmm(attention_n, nodes_embeddings)
        attention_s = bmm(attention_s, sentences_embeddings)

        # Shared CNN (channels-first), then max over channels -> one feature
        # per convolved position: batch * (L - kernel_size + 1).
        node_features, _ = torch_max(self.feature_extractor(attention_n.permute(0, 2, 1)).permute(0, 2, 1), dim=-1)
        sentence_features, _ = torch_max(self.feature_extractor(attention_s.permute(0, 2, 1)).permute(0, 2, 1), dim=-1)
        features = torch_cat([node_features, sentence_features], dim=-1)
        predicts = self.classification(features)
        return predicts

    @staticmethod
    def train_step(model, optimizer, data_iterator, cuda=True):
        """Run one optimization step on the next batch of ``data_iterator``.

        Returns:
            dict with "accuracy" and "<part> loss" entries for logging.
        """
        model.train()

        optimizer.zero_grad()
        sentences, nodes, labels, lens_s, lens_n, part = next(data_iterator)
        if cuda:
            sentences = sentences.cuda()
            nodes = nodes.cuda()
            labels = labels.cuda()

        predicts = model(sentences, nodes, lens_s, lens_n)

        # NOTE(review): the model already ends in Softmax, so this loss
        # re-applies log-softmax; preserved as-is (see __init__ note).
        loss_fn = CrossEntropyLoss()
        loss = loss_fn(predicts, labels)

        log = {}
        loss.backward()
        optimizer.step()

        predicts = torch_argmax(predicts, dim=1)
        print('predicts: ', predicts.cpu().numpy())
        accuracy = torch_sum(predicts == labels)
        accuracy = accuracy.item() / predicts.shape[0]
        log["accuracy"] = accuracy
        log[part + ' loss'] = loss.item()
        return log

    @staticmethod
    def _accuracy(model, data_iterator, cuda):
        """Mean per-batch accuracy of ``model`` over ``data_iterator``."""
        total, step = 0., 0
        for step, batch in enumerate(data_iterator):
            sentences, nodes, labels, lens_s, lens_n, part = batch
            if cuda:
                sentences = sentences.cuda()
                nodes = nodes.cuda()
                labels = labels.cuda()

            predicts = model(sentences, nodes, lens_s, lens_n)

            predicts = torch_argmax(predicts, dim=1)
            total += torch_sum(predicts == labels).item() / predicts.shape[0]
        return total / (step + 1)

    @staticmethod
    def valid_step(model, data_iterator_entity, data_iterator_relation, cuda=True):
        """Evaluate ``model`` on the entity and relation iterators.

        Returns:
            (entity_accuracy, relation_accuracy) — mean per-batch accuracies.
            The duplicated loops were factored into _accuracy, which also
            fixes the ``step`` counter leaking from the first loop into the
            second loop's divisor.
        """
        model.eval()

        with no_grad():
            accuracy_entity = Model._accuracy(model, data_iterator_entity, cuda)
            accuracy_relation = Model._accuracy(model, data_iterator_relation, cuda)
        return accuracy_entity, accuracy_relation
