import torch
import torch.nn as nn
import sys

sys.path.append("./")

from model.config_layer import config_classifier


class Classifier(nn.Module):
    """MLP classifier over a pooled sequence representation.

    Pools a [batch, seq, dim] hidden state into [batch, dim] using the
    configured pooling strategy, then projects it to label logits through
    a two-layer MLP.

    Args:
        data: object exposing ``label_alphabet_size`` (number of classes).
        pooling_type: one of "mean", "max", "min", "logsum", "CLS".
    """

    def __init__(self, data, pooling_type="logsum") -> None:
        super(Classifier, self).__init__()

        assert pooling_type in ["mean", "max", "min", "logsum", "CLS"]

        # BUG FIX: the validated `pooling_type` argument was previously
        # discarded in favor of config_classifier["pooling_type"], making
        # the assert above pointless; honor the argument.
        self.pooling_type = pooling_type
        self.hidden_dim = config_classifier["hidden_dim"]

        self.input_dim = config_classifier["input_dim"]
        self.labels = data.label_alphabet_size

        self.linear = nn.Sequential(
            nn.Linear(self.input_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Linear(self.hidden_dim, self.labels),
        )

    def forward(self, hidden_out):
        """Pool [bs, seq, dim] -> [bs, dim], then return [bs, labels] logits."""

        if self.pooling_type == "max":
            # BUG FIX: torch.max(..., dim=1) returns a (values, indices)
            # namedtuple; feeding the tuple to nn.Linear would crash.
            hidden_out = torch.max(hidden_out, dim=1).values
        elif self.pooling_type == "min":
            # BUG FIX: "min" passed the assert but had no branch and
            # silently fell through to the CLS path.
            hidden_out = torch.min(hidden_out, dim=1).values
        elif self.pooling_type == "logsum":
            hidden_out = torch.logsumexp(hidden_out, dim=1)
        elif self.pooling_type == "mean":
            hidden_out = torch.mean(hidden_out, dim=1)
        else:
            # "CLS": take the first token's representation.
            hidden_out = hidden_out[:, 0, :]

        logit = self.linear(hidden_out)
        return logit


class AttClassifier(nn.Module):
    """Bag-level relation classifier with selective attention.

    A learned weight vector (``attention_w``) pools each sentence's token
    states into a sentence vector h*; relation embeddings then attend over
    the sentences of each bag (bags delimited by ``scope``) to build a bag
    representation that is scored against every relation.

    NOTE(review): ``pooling_type`` is validated but never stored or used
    below — confirm whether it was meant to select a pooling strategy.
    """

    def __init__(self, data, pooling_type="logsum"):
        super(AttClassifier, self).__init__()

        assert pooling_type in ["mean", "max", "min", "logsum", "CLS"]

        self.hidden_dim = config_classifier["hidden_dim"]
        self.input_dim = config_classifier["input_dim"]

        # Number of relation classes, taken from the data's label alphabet.
        self.labels = data.label_alphabet_size

        # Parameters for attention classifier
        # Weighted matrix W
        self.attention_w = nn.Parameter(
            torch.FloatTensor(self.input_dim, 1), requires_grad=True
        )  # w
        # Embeddings of labels
        self.relation_embedding = nn.Parameter(
            torch.FloatTensor(self.labels, self.input_dim), requires_grad=True
        )  # W
        # Bias term b
        self.sen_b = nn.Parameter(
            torch.FloatTensor(self.labels), requires_grad=True
        )  # b

        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()

        # torch.FloatTensor(...) leaves memory uninitialized, so an explicit
        # init pass is required before the parameters are usable.
        self.reset_parameters()

    def show_dim(self):
        """Print a short summary of the classifier's dimensions."""
        fmt = "Summary\nhidden_dim:{}\ninput_dim:{}\nlabels:{}".format(
            self.hidden_dim, self.input_dim, self.labels
        )
        print(fmt)

    def reset_parameters(self):
        """Initialize attention/relation weights (normal) and bias (zeros)."""
        nn.init.normal_(self.attention_w)
        nn.init.constant_(self.sen_b, val=0)
        nn.init.normal_(self.relation_embedding)

    def __attention_train_logit__(self, x, query):
        """Dot-product score of each row of ``x`` against its gold label's
        embedding; ``query`` holds one label index per row of ``x``."""
        # query: ins_label
        current_relation = self.relation_embedding[query]
        attention_logit = torch.sum(current_relation * x, -1)
        return attention_logit

    def __attention_test_logit__(self, x):
        """Dot-product score of each row of ``x`` against ALL relation
        embeddings (no gold label at test time); returns [rows, labels]."""
        attention_logit = torch.matmul(
            x, torch.transpose(self.relation_embedding, 0, 1)
        )
        return attention_logit

    def __logit__(self, x):
        """Project representations onto the relation space: x @ W^T + b."""

        logit = (
            torch.matmul(x, torch.transpose(self.relation_embedding, 0, 1)) + self.sen_b
        )
        logit = logit.view([x.size(0), self.labels])
        return logit

    def forward(self, feature, label, scope):
        """Compute relation logits from sentence features.

        Args:
            feature: token-level features; treated as
                [batch_size, seq_len, feature_dim] (see .size() unpack below).
            label: per-sentence gold label indices (used only in training
                for selective attention).
            scope: indexable of (start, end) pairs delimiting each bag's
                sentence rows within the batch.

        Returns:
            Training: [batch_size, labels] raw logits (after dropout).
            Eval: [batch_size, labels] per-relation softmax scores
                (dropout is an identity in eval mode).
        """

        batch_size, seq_len, feature_dim = feature.size()
        # NOTE(review): squeeze() on a size-1 batch would collapse the label
        # tensor to 0-dim — confirm batch sizes > 1 are guaranteed upstream.
        label = label.squeeze()

        feature = self.relu(feature)

        # alpha
        # Token-level attention weights over each sentence's positions.
        alpha = torch.matmul(feature, self.attention_w)
        alpha = alpha.view([batch_size, seq_len])
        alpha = torch.nn.functional.softmax(alpha, dim=1)
        alpha = alpha.view([batch_size, 1, seq_len])
        # h* in the paper
        h_star = torch.matmul(alpha, feature)
        h_star = h_star.view([batch_size, feature_dim])

        if self.training:

            # Selective attention guided by the gold label of each sentence.
            attention_logit = self.__attention_train_logit__(h_star, label)
            batch_rep = []
            for i in range(batch_size):
                hidden_mat = torch.tanh(h_star[scope[i][0] : scope[i][1]])
                attention_score = torch.softmax(
                    attention_logit[scope[i][0] : scope[i][1]], dim=-1
                )
                # Weighted sum of the bag's sentence vectors -> bag vector.
                batch_rep.append(
                    torch.matmul(attention_score, hidden_mat).view(feature_dim)
                )
            batch_rep = torch.stack(batch_rep)
            logit = self.__logit__(batch_rep)
        else:
            # No gold label at eval time: attend once per candidate relation.
            attention_logit = self.__attention_test_logit__(h_star)
            bag_logit = []
            for i in range(batch_size):
                bag_hidden_mat = torch.tanh(h_star[scope[i][0] : scope[i][1]])
                # NOTE(review): the transposed softmax is [labels, bag_size];
                # .view(self.labels, 1) only reshapes successfully when the
                # bag slice holds a single sentence — confirm bag sizes.
                attention_score = torch.softmax(
                    torch.transpose(
                        attention_logit[scope[i][0] : scope[i][1], :], 0, 1
                    ),
                    dim=-1,
                ).view(self.labels, 1)
                # Merge multiple sentence representations
                bag_repre_for_each_rel = torch.matmul(attention_score, bag_hidden_mat)

                # [num_classes * num_classes]
                # Diagonal: the score of relation r under r's own attention.
                bag_logit_for_each_rel = self.__logit__(bag_repre_for_each_rel)
                bag_logit.append(torch.diag(torch.softmax(bag_logit_for_each_rel, -1)))
            logit = torch.stack(bag_logit)

        # logit = logit.view(batch_size, -1)
        # NOTE(review): dropout on final logits is unusual (it rescales
        # training logits by 1/(1-p)); identity in eval mode — confirm intent.
        logit = self.dropout(logit)
        return logit
