import mxnet.gluon.nn as nn
import mxnet.gluon as gluon
# import gluonnlp as nlp
import mxnet.ndarray as nd
import mxnet as mx
from mxnet.gluon.loss import Loss


class MatchLSTM(nn.Block):
    """Match-LSTM encoder (Wang & Jiang, 2016).

    Encodes question and passage with separate LSTMs, then runs an
    attention-augmented LSTM cell over the passage, attending to the
    whole question at every passage position.
    """

    def __init__(self, vocab_size, emb_size, hidden_size, num_layers=1, drop_rate=0.5):
        super(MatchLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, emb_size)
        # Attention projections: W^p h_p + W^q h_q + W^r h_{i-1}
        self.linear_p = nn.Dense(hidden_size, in_units=hidden_size)
        self.linear_q = nn.Dense(hidden_size, in_units=hidden_size)
        self.linear_r = nn.Dense(hidden_size, in_units=hidden_size)
        # Scores one attention energy per question position.
        self.linear_attn = nn.Dense(1, in_units=hidden_size)

        self.question_encoder = gluon.rnn.LSTM(self.hidden_size, num_layers, dropout=drop_rate)
        self.passage_encoder = gluon.rnn.LSTM(self.hidden_size, num_layers, dropout=drop_rate)

        # Stepped manually over passage positions in forward().
        self.match_lstm = gluon.rnn.LSTMCell(self.hidden_size)

    def get_attn_i(self, Q, p, hidden_r):
        """Attention weights over the question for one passage position.

        :param Q: question encodings, T x N x C (time, batch, features)
        :param p: i-th passage hidden representation, N x C
        :param hidden_r: (i-1)-th match-LSTM hidden state, N x C
        :return: attention weights, N x 1 x T
        """
        # Both terms are invariant across question steps — compute once.
        lp = self.linear_p(p)          # N x C
        lr = self.linear_r(hidden_r)   # N x C
        scores = []
        for hidden_q in Q:
            g_i = nd.tanh(self.linear_q(hidden_q) + lp + lr)  # N x C
            scores.append(self.linear_attn(g_i))              # N x 1
        o_i = nd.stack(*scores)        # T x N x 1
        # BUG FIX: normalize over the question-time axis (axis 0). The old
        # default (last axis, size 1) made every weight exactly 1.0.
        alpha_i = nd.softmax(o_i, axis=0)    # T x N x 1
        # BUG FIX: transpose, not reshape — reshape reinterprets memory and
        # scrambles (time, batch) pairs whenever batch size > 1.
        return alpha_i.transpose((1, 2, 0))  # N x 1 x T

    def forward(self, question, passage, q_lens, p_lens, hidden_q, hidden_p, hidden_r):
        """Encode a batch and run the match-LSTM over the passage.

        :param question: token ids, N x Tq
        :param passage: token ids, N x Tp
        :param q_lens, p_lens: sequence lengths (currently unused)
        :param hidden_q, hidden_p: initial encoder states (see init_hidden_*)
        :param hidden_r: initial match-LSTM cell state
        :return: match encodings, (Tp+1) x N x C — the extra zero row is a
                 "stop" position for the downstream answer pointer
        """
        # Embedding output is N x T x C; encoders expect TNC layout.
        emb_question = self.embedding(question).swapaxes(0, 1)
        emb_passage = self.embedding(passage).swapaxes(0, 1)

        enc_p, hidden_p = self.passage_encoder(emb_passage, hidden_p)
        enc_q, hidden_q = self.question_encoder(emb_question, hidden_q)

        output = []
        out = None
        for p in enc_p:
            alpha_i = self.get_attn_i(enc_q, p, hidden_r[-1])   # N x 1 x T
            z_i = nd.batch_dot(alpha_i, enc_q.swapaxes(0, 1))   # N x 1 x C
            # Concatenate passage state with attended question summary.
            z_i = nd.concat(p, z_i.swapaxes(0, 1)[0], dim=1)    # N x 2C
            out, hidden_r = self.match_lstm(z_i, hidden_r)
            output.append(out)
        # zeros_like keeps context/dtype consistent with the data
        # (get_zero_pad defaulted to CPU regardless of input context).
        output.append(nd.zeros_like(out))
        return nd.stack(*output)   # (Tp+1) x N x C

    def init_hidden_p(self, batch_size, ctx=mx.cpu()):
        """Initial state for the passage encoder."""
        return self.passage_encoder.begin_state(batch_size=batch_size, ctx=ctx)

    def init_hidden_q(self, batch_size, ctx=mx.cpu()):
        """Initial state for the question encoder."""
        return self.question_encoder.begin_state(batch_size=batch_size, ctx=ctx)

    def init_hidden_r(self, batch_size, ctx=mx.cpu()):
        """Initial state for the match-LSTM cell."""
        return self.match_lstm.begin_state(batch_size=batch_size, ctx=ctx)

    @staticmethod
    def get_zero_pad(shape, ctx=mx.cpu()):
        """Zero tensor of the given shape (kept for backward compatibility)."""
        return nd.zeros(shape=shape, ctx=ctx)


class AnsPtrNet(nn.Block):
    """Answer decoder over match-LSTM encodings.

    At each decoding step, attends over all match encodings, feeds the
    attended summary through an LSTM cell, and classifies the cell output.
    """

    def __init__(self, hidden_size, vocab_size):
        super(AnsPtrNet, self).__init__()
        self.hidden_size = hidden_size
        self.answer_lstm = gluon.rnn.LSTMCell(hidden_size)
        # Attention projections: V r_j + W^a a_{k-1}
        self.linear_a = nn.Dense(hidden_size)
        self.linear_r = nn.Dense(hidden_size)
        self.linear_attn = nn.Dense(1)
        self.classifier = nn.Dense(vocab_size)

    def get_attn_k(self, R, ak):
        """Attention weights over match encodings for decoding step k.

        :param R: match encodings, T x N x C (time, batch, features)
        :param ak: previous answer-LSTM hidden state, N x C
        :return: attention weights, N x 1 x T
        """
        # Invariant across passage positions — hoisted out of the loop.
        la = self.linear_a(ak)   # N x C
        scores = []
        for r in R:
            f_k = nd.tanh(self.linear_r(r) + la)   # N x C
            scores.append(self.linear_attn(f_k))   # N x 1
        o_k = nd.stack(*scores)   # T x N x 1
        # BUG FIX: normalize over the time axis (axis 0); the old default
        # (last axis, size 1) produced uniform weights of 1.0.
        beta_k = nd.softmax(o_k, axis=0)     # T x N x 1
        # BUG FIX: transpose, not reshape — reshape scrambles (time, batch)
        # pairs for batch size > 1.
        return beta_k.transpose((1, 2, 0))   # N x 1 x T

    def forward(self, match_enc, hidden_a, ans_lens):
        """Decode up to max(ans_lens) steps.

        :param match_enc: match encodings, T x N x C
        :param hidden_a: initial answer-LSTM state
        :param ans_lens: per-sample answer lengths; only the max is used here
        :return: list of per-step classifier outputs, each N x vocab_size
        """
        output = []
        for _ in range(int(nd.max(ans_lens).asscalar())):
            beta_k = self.get_attn_k(match_enc, hidden_a[-1])         # N x 1 x T
            z_k = nd.batch_dot(beta_k, match_enc.swapaxes(0, 1))      # N x 1 x C
            z_k = z_k.swapaxes(0, 1)[0]                               # N x C
            a, hidden_a = self.answer_lstm(z_k, hidden_a)
            output.append(self.classifier(a))
        return output


class MatchLstmAnsPtr(nn.Block):
    """End-to-end model: MatchLSTM encoder followed by AnsPtrNet decoder."""

    def __init__(self, vocab_size, emb_size, hidden_size, num_layers=1, drop_rate=0.5):
        super(MatchLstmAnsPtr, self).__init__()
        self.match_lstm = MatchLSTM(vocab_size, emb_size, hidden_size, num_layers, drop_rate)
        self.ans_ptr = AnsPtrNet(hidden_size, vocab_size)

    def forward(self, question, passage, q_lens, p_lens, ans_lens, h_q, h_p, h_r, h_a):
        """Run encoder then decoder; see the sub-blocks for argument shapes."""
        # Call the blocks, not .forward(), so Gluon's __call__ machinery
        # (hooks, scopes) is not bypassed.
        out = self.match_lstm(question, passage, q_lens, p_lens, h_q, h_p, h_r)
        return self.ans_ptr(out, h_a, ans_lens)


class SoftmaxSeqCELoss(Loss):
    """Masked softmax cross-entropy averaged over valid sequence positions.

    :param axis: class axis of the predictions
    :param weight: global loss weight (see gluon.loss.Loss)
    :param batch_axis: axis that indexes samples
    """

    def __init__(self, axis=-1, weight=None,
                 batch_axis=0, **kwargs):
        super(SoftmaxSeqCELoss, self).__init__(weight, batch_axis, **kwargs)
        self._axis = axis

    def hybrid_forward(self, F, pred, label, masks, sample_weight=None):
        """Per-sample mean CE over positions where ``masks`` is nonzero.

        ``pred`` carries class scores on ``self._axis``; ``label`` holds the
        target indices and ``masks`` has label's shape (1 = keep, 0 = pad).
        """
        pred = F.log_softmax(pred, self._axis)
        loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
        # reshape_like instead of .reshape(label.shape): concrete shapes are
        # unavailable for symbols, so this keeps the block hybridizable.
        loss = F.reshape_like(loss, label)
        loss = loss * masks
        # Sum/normalize over all non-batch axes, consistently driven by
        # self._batch_axis (the mask sum previously hardcoded axis=1).
        return (F.sum(loss, axis=self._batch_axis, exclude=True)
                / F.sum(masks, axis=self._batch_axis, exclude=True))


def test_net():
    """Smoke-test the three networks on a tiny hand-made batch of 2."""
    match_net = MatchLSTM(100, 20, 10)
    match_net.initialize()

    ptr_net = AnsPtrNet(10, 100)
    ptr_net.initialize()

    question = nd.array([[1, 2, 3, 4], [1, 2, 3, 4]])
    passage = nd.array([[1, 3, 4, 7, 9], [1, 3, 4, 7, 9]])

    state_q = match_net.init_hidden_q(2)
    state_p = match_net.init_hidden_p(2)
    state_r = match_net.init_hidden_r(2)
    match_out = match_net(question, passage, [], [], state_q, state_p, state_r)
    print(ptr_net(match_out, state_r, nd.array([3, 2])))

    combined = MatchLstmAnsPtr(100, 20, 10)
    combined.initialize()
    out = combined(question, passage, [], [], nd.array([3, 2]),
                   state_q, state_p, state_r, state_r)
    print(out)

