from collections import defaultdict
# import math
# import time
import mxnet as mx
import random
import numpy as np
from mxnet import nd
from mxnet import autograd
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.gluon.loss import Loss
# import gluonnlp as nlp
# from mxnet import initializer
# nlp.model.AttentionCell


BATCH_SIZE = 50  # sequences per minibatch
EMB_SIZE = 64  # word-embedding width
HIDDEN_SIZE = 128  # LSTM hidden-state width
ATTENTION_SIZE = 128  # attention projection width (original comment said "max sequence length" — TODO confirm intent; train_model passes EMB_SIZE instead)


class Attn(nn.Block):
    """Attention scorer: produces normalized alignment weights between a
    decoder hidden state and every encoder time step.

    Supported scoring methods: 'dot', 'general', 'concat'.
    """

    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()
        self.method = method
        self.hidden_size = hidden_size
        # one name_scope is enough; the original opened two back to back
        with self.name_scope():
            self.attn_general = nn.Dense(self.hidden_size, in_units=self.hidden_size)
            self.attn_concat = nn.Dense(self.hidden_size, in_units=self.hidden_size * 2)
            self.v = gluon.Parameter(name='attn', shape=(1, self.hidden_size))

    def forward(self, hidden, encoder_output):
        """Compute attention weights.

        :param hidden: decoder state, 1 x N x C
        :param encoder_output: encoder states, T x N x C
        :return: softmax-normalized weights, N x 1 x T
        """
        max_len = encoder_output.shape[0]
        batch_size = encoder_output.shape[1]
        # allocate the energy matrix on the same device as the inputs so the
        # per-element writes below do not fail on non-CPU contexts
        attn_energies = nd.zeros(shape=(batch_size, max_len), ctx=encoder_output.context)
        for b in range(batch_size):
            for i in range(max_len):
                attn_energies[b, i] = self.score(hidden[:, b], encoder_output[i, b])

        # normalize energies to weights in range 0 to 1, resize to N x 1 x T
        return nd.softmax(attn_energies).expand_dims(1)

    def score(self, hidden, encoder_output):
        """Alignment energy between one decoder state (1 x C) and one encoder
        state (C,); returns a size-1 NDArray.

        :raises ValueError: if the configured method is unknown.
        """
        if self.method == 'dot':
            return nd.dot(hidden, encoder_output)
        elif self.method == 'general':
            energy = self.attn_general(encoder_output.expand_dims(0))  # 1 x C
            return nd.dot(hidden, energy[0])
        elif self.method == 'concat':
            energy = self.attn_concat(nd.concat(hidden, encoder_output.expand_dims(0), dim=1))  # 1 x C
            # bug fix: the original nd.dot(self.v.data(), energy) multiplied
            # (1, C) by (1, C), which is shape-incompatible; score against the
            # row vector instead, mirroring the 'general' branch.
            return nd.dot(self.v.data(), energy[0])
        # bug fix: the original fell through and silently returned None for an
        # unrecognized method name.
        raise ValueError('unknown attention method: %r' % self.method)


class Encoder(nn.Block):
    """Single-layer LSTM encoder over embedded source tokens."""

    def __init__(self, vocab_size, emb_size, hidden_size, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.lstm = gluon.rnn.LSTM(hidden_size, num_layers=1)

    def forward(self, x, hidden, seq_lens_src):
        """Encode a batch of token ids.

        :param x: token ids, N x T
        :param hidden: initial LSTM state from init_hidden()
        :param seq_lens_src: per-sequence lengths (currently unused; kept for
            interface compatibility — TODO use it to mask padded steps)
        :return: (outputs T x N x C, final state [1 x N x C, 1 x N x C])
        """
        # removed dead code: the original computed the max sequence length
        # here but never used it (the asscalar() also forced a device sync)
        x = self.embedding(x).swapaxes(0, 1)  # N x T x E -> T x N x E
        x, hidden = self.lstm(x, hidden)
        return x, hidden

    def init_hidden(self, batch_size, ctx):
        """Zero-initialized LSTM state for a batch on the given device."""
        return self.lstm.begin_state(batch_size=batch_size, ctx=ctx)


class BahdanauAttnDecoder(nn.Block):
    """Attention decoder that consumes one target token per call.

    NOTE(review): despite the name, it is wired to Attn's 'general'
    (Luong-style) scoring method — confirm the intended variant.
    """

    def __init__(self, vocab_size, emb_size, hidden_size, **kwargs):
        super(BahdanauAttnDecoder, self).__init__(**kwargs)
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.attn = Attn('general', self.hidden_size)
        self.lstm = gluon.rnn.LSTM(hidden_size, num_layers=1)
        self.out = nn.Dense(vocab_size)

    def forward(self, x, hidden, encoder_output):
        """One decoding step.

        :param x: batch of a single target token each, N x 1
        :param hidden: LSTM state [1 x N x C, 1 x N x C]
        :param encoder_output: T x N x C
        :return: (log-probabilities N x V, new hidden state,
            attention weights N x 1 x T)
        """
        x = self.embedding(x).swapaxes(0, 1)  # reshape from Nx1xC to 1xNxC
        # removed stray debug print(hidden, encoder_output)

        attn_weights = self.attn(hidden[-1], encoder_output)  # N x 1 x T
        context = nd.batch_dot(attn_weights, encoder_output.swapaxes(0, 1))  # N x 1 x C = batch_dot(Nx1xT, NxTxC)
        context = context.swapaxes(0, 1)  # reshape to 1xNxC

        # feed [embedding; context] into the LSTM
        x = nd.concat(x, context, dim=2)

        x, hidden = self.lstm(x, hidden)

        x = x[0]  # N x C

        # project [lstm output; context] to vocabulary log-probabilities
        x = nd.log_softmax(self.out(nd.concat(x, context[0], dim=1)))

        return x, hidden, attn_weights

    def init_hidden(self, batch_size, ctx):
        """Zero-initialized LSTM state for a batch on the given device."""
        return self.lstm.begin_state(batch_size=batch_size, ctx=ctx)


# todo add generate sentence function
# latent feature for sentence
class EncoderDecoderAttn(nn.Block):
    """Bidirectional-LSTM encoder/decoder with attention.

    TODO: the decoder pass is unimplemented — forward() currently returns the
    raw encoder output plus the target padding masks, and never uses the
    target embedding, target LSTM, attention key, or output projection
    declared in __init__.
    """

    def __init__(self, vocab_size_src, vocab_size_trg, emb_size, attention_size, hidden_size, num_layers, **kwargs):
        super(EncoderDecoderAttn, self).__init__(**kwargs)
        # halve the per-direction size so the bidirectional output is
        # hidden_size wide in total
        self.hidden_size = hidden_size // 2

        with self.name_scope():
            self.encoder = nn.Sequential()
            self.encoder.add(nn.Embedding(vocab_size_src, emb_size))
            self.encoder.add(gluon.rnn.LSTM(self.hidden_size, num_layers=num_layers, bidirectional=True))

        with self.name_scope():
            self.embedding_trg = nn.Embedding(vocab_size_trg, emb_size)
            self.lstm_trg = gluon.rnn.LSTM(self.hidden_size, num_layers=num_layers, bidirectional=True)

        with self.name_scope():
            self.attn_key = nn.Dense(attention_size, in_units=self.hidden_size * 2)

        self.linear = nn.Dense(vocab_size_trg)
        self.tanh = nn.Activation('tanh')

    def forward(self, x_src, x_trg, seq_lens_src, seq_lens_trg):
        """Encode the source batch and build the target padding masks.

        NOTE(review): the embedding emits N x T x E while the LSTM's default
        layout is TNC — confirm the intended axis order before relying on x.
        """
        x = self.encoder(x_src)
        # representation of each sequence at its last valid time step
        # (currently unused — presumably for the planned decoder; TODO confirm)
        src_output = nd.stack(*[x[i, l - 1] for i, l in enumerate(seq_lens_src)])
        max_len = max(seq_lens_trg)
        # masks[k][i] == 1 for real target positions, 0 for padding
        masks = nd.array([[1 if i < k else 0 for i in range(max_len)] for k in seq_lens_trg])
        # bug fix: train() unpacks two values (`out, masks = net(...)`), but
        # the original returned only x; also removed the two debug prints.
        return x, masks


class SoftmaxSeqCELoss(Loss):
    """Masked softmax cross-entropy over padded token sequences.

    Per sequence: the sum of negative log-likelihoods at unmasked positions,
    divided by that sequence's number of unmasked positions.
    """

    def __init__(self, axis=-1, weight=None,
                 batch_axis=0, **kwargs):
        super(SoftmaxSeqCELoss, self).__init__(weight, batch_axis, **kwargs)
        self._axis = axis

    def hybrid_forward(self, F, pred, label, masks, sample_weight=None):
        log_probs = F.log_softmax(pred, self._axis)
        # negative log-likelihood of each gold label
        nll = -F.pick(log_probs, label, axis=self._axis, keepdims=True)
        # drop the trailing singleton axis so the mask broadcasts element-wise
        # NOTE(review): label.shape only exists in imperative (NDArray) mode,
        # so this loss cannot be hybridized as written — confirm
        nll = nll.reshape(label.shape)
        masked = nll * masks
        token_counts = F.sum(masks, axis=1)
        return F.sum(masked, axis=self._batch_axis, exclude=True) / token_counts


# def test_net():
#     # vocab_size, emb_size, hidden_size
#     enc = Encoder(20, 16, 10)
#     # vocab_size, emb_size, hidden_size
#     attn_dec = BahdanauAttnDecoder(30, 20, 10)
#
#     ctx = mx.cpu()
#     enc.initialize()
#     attn_dec.initialize()
#
#     hidden = enc.init_hidden(3, ctx)
#     # batch, seq len
#     data = nd.zeros((3, 15))
#     enc_output, hiddend = enc(data, hidden, nd.array([15, 4, 3]))
#
#     data_dec = nd.zeros((3, 1))
#     print(attn_dec(data_dec, hidden, enc_output))


def read_train_data(text_file):
    """Read a lowercased, whitespace-tokenized corpus and build its vocabulary.

    :param text_file: path to a UTF-8 text file, one sentence per line
    :return: (data, w2i) — data is a list of id-lists (one per line); w2i maps
        word -> id and returns the <unk> id for words not seen here
    """
    w2i = defaultdict(lambda: len(w2i))
    w2i["<s>"]  # reserve id 0 for the sentence-start marker
    UNK = w2i["<unk>"]  # reserve id 1 for unknown words
    data = []
    # bug fix: the original leaked the file handle (open() without close)
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            # str.split() never yields empty or whitespace tokens, so the
            # original's per-token strip/filter was redundant
            data.append([w2i[w] for w in line.lower().split()])
    # freeze the vocabulary: unseen words now map to <unk> instead of growing it
    w2i = defaultdict(lambda: UNK, w2i)
    return data, w2i


def read_dev_data(text_file, w2i):
    """Read a dev corpus using an existing vocabulary.

    :param text_file: path to a UTF-8 text file, one sentence per line
    :param w2i: word -> id mapping (typically the defaultdict returned by
        read_train_data, which maps unseen words to <unk>)
    :return: list of id-lists, one per line
    """
    data = []
    # bug fix: the original leaked the file handle (open() without close)
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            # str.split() never yields empty or whitespace tokens, so the
            # original's per-token strip/filter was redundant
            data.append([w2i[w] for w in line.lower().split()])
    return data


def padding(data, pad):
    """Right-pad every sequence to the length of the longest one.

    :param data: list of lists (token ids)
    :param pad: fill value appended to shorter sequences
    :return: (padded sequences, original lengths)
    """
    seq_lens = [len(seq) for seq in data]
    longest = max(seq_lens)
    padded_data = [seq + [pad] * (longest - len(seq)) for seq in data]
    return padded_data, seq_lens


class DataIter:
    """Batched iterator over a (source, optional target) parallel corpus.

    The index list is shuffled once at construction to carve out a train/dev
    split; data_iter() then yields padded minibatches, topping up a final
    partial batch with indices wrapped from the front so every batch is full
    size.
    """

    def __init__(self, data, trg, batch_size, pad_src, pad_trg, sos_src, sos_trg, eos_src, eos_trg,
                 split=0.8, ctx=mx.cpu()):
        # data / trg: lists of token-id lists; trg may be falsy for
        # source-only iteration (see data_iter)
        self.data = data
        self.trg = trg
        self.batch_size = batch_size
        self.length = len(self.data)
        # padding / start-of-sentence / end-of-sentence token ids per side
        self.pad_src = pad_src
        self.pad_trg = pad_trg
        self.sos_src = sos_src
        self.sos_trg = sos_trg
        self.eos_src = eos_src
        self.eos_trg = eos_trg

        self.index = list(range(self.length))

        # random split train dev
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[0: self.train_size]
        self.dev_index = self.index[self.train_size:]
        self.ctx = ctx

    def data_iter(self, dataset_type='train'):
        """Yield minibatches for 'train', 'dev', or (any other value) the
        full corpus.

        With targets each batch is (src NxT, trg NxT, src lengths, trg
        lengths); without targets it is (src NxT, src lengths). Lengths
        include the added sos/eos tokens.
        """
        if dataset_type == 'train':
            length = self.train_size
            index = self.train_index
            # NOTE: index aliases self.train_index, so this in-place shuffle
            # gives each epoch a fresh order
            random.shuffle(index)
        elif dataset_type == 'dev':
            length = self.dev_size
            index = self.dev_index
        else:
            length = self.length
            index = self.index

        # print(index)
        n_batch = length // self.batch_size
        # rest == batch_size exactly when length divides evenly (no extra batch)
        rest = (n_batch + 1) * self.batch_size - length

        batch_index = [index[i*self.batch_size:(i+1)*self.batch_size] for i in range(n_batch)]
        if rest < self.batch_size:
            # top up the trailing partial batch with indices wrapped from the front
            batch_index.append(index[self.batch_size * n_batch:length] + index[0:rest])
        for bi in batch_index:
            # wrap each sequence in sos/eos, then pad the batch to equal length
            d, seq_lens_data = padding([[self.sos_src] + self.data[i] + [self.eos_src] for i in bi], self.pad_src)

            if self.trg:
                t, seq_lens_tag = padding([[self.sos_trg] + self.trg[i] + [self.eos_trg] for i in bi], self.pad_trg)
                # print(d, t)
                yield (nd.array(d, dtype=np.int32, ctx=self.ctx), nd.array(t, dtype=np.int32, ctx=self.ctx),
                       seq_lens_data, seq_lens_tag)
            else:
                yield nd.array(d, dtype=np.int32, ctx=self.ctx), seq_lens_data

    def get_iter(self, dataset_type='train'):
        """Convenience wrapper around data_iter()."""
        return self.data_iter(dataset_type)


def retrieve(src, db_mtx):
    """Rank database rows by dot-product similarity to a query vector.

    :param src: query vector, shape (C,)
    :param db_mtx: database matrix, shape (R, C)
    :return: (row indices from best to worst, raw similarity scores)
    """
    similarity = db_mtx.dot(src)
    order = np.argsort(-similarity)  # descending by score
    return order, similarity


def index_corpus(src_net, data_iter):
    """Run every batch through src_net and yield one (source, target)
    representation pair per example, converted to numpy arrays."""
    for src_batch, trg_batch, src_lens, trg_lens in data_iter:
        src_rep, trg_rep = src_net(src_batch, trg_batch, src_lens, trg_lens)
        for src_vec, trg_vec in zip(src_rep, trg_rep):
            yield src_vec.asnumpy(), trg_vec.asnumpy()


def train(net, data_iter, data_iter_dev):
    """Train net with Adam on masked sequence cross-entropy for 100 epochs.

    :param net: model whose forward returns (predictions, masks)
    :param data_iter: DataIter over the training corpus
    :param data_iter_dev: DataIter over the dev corpus (evaluation is
        currently disabled — see the commented block below)
    """
    trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 1e-3})
    loss_func = SoftmaxSeqCELoss()

    for epoch in range(100):
        train_data = data_iter.get_iter('train')
        # NOTE(review): asks the *dev* iterator for its 'train' split; with
        # split=1 (see train_model) that is the whole dev set — confirm intent
        dev_data = data_iter_dev.get_iter('train')
        train_loss = 0
        data_len = 0
        # removed unused train_acc / test_acc accumulators
        for i, (data_src, data_trg, seq_lens_src, seq_lens_trg) in enumerate(train_data):
            with autograd.record():
                out, masks = net(data_src, data_trg, seq_lens_src, seq_lens_trg)
                loss = loss_func(out, data_trg, masks)
            loss.backward()

            trainer.step(BATCH_SIZE)
            train_loss += nd.sum(loss).asscalar()
            data_len += BATCH_SIZE

        # total and per-example loss for this epoch
        print(train_loss, ' ', train_loss / data_len)

        # test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        # print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
        #       (epoch, train_loss / data_iter.train_size, train_acc / data_iter.train_size, test_acc))


# todo finish training
def train_model(text_src_file, text_trg_file, src_dev_file, trg_dev_file, out_model_file, w2i_file):
    """Build vocabularies, wrap corpora in DataIters, and launch training.

    :param out_model_file: unused — TODO persist the trained model here
    :param w2i_file: unused — TODO persist the vocabulary here (see the
        commented pickle call)
    """
    src_data, w2i_src = read_train_data(text_src_file)
    trg_data, w2i_trg = read_train_data(text_trg_file)

    dev_src_data = read_dev_data(src_dev_file, w2i_src)
    dev_trg_data = read_dev_data(trg_dev_file, w2i_trg)

    # pickle.dump(w2i, open(w2i_file, 'w+'))
    data_len = len(src_data)
    # removed unused unk_src / unk_trg lookups (no side effect: '<unk>' is
    # already registered by read_train_data)
    # NOTE(review): '</s>' is NOT registered by read_train_data, so these
    # defaultdict lookups return (and cache) the <unk> id — i.e. eos == unk.
    # Confirm whether a distinct </s> id was intended.
    sos_src = w2i_src['<s>']
    eos_src = w2i_src['</s>']
    sos_trg = w2i_trg['<s>']
    eos_trg = w2i_trg['</s>']

    ctx = mx.cpu()
    print(data_len)
    # the EOS id doubles as the padding id for both iterators
    train_data_iter = DataIter(src_data, trg_data, BATCH_SIZE, eos_src, eos_trg, sos_src, sos_trg, eos_src, eos_trg,
                               split=1, ctx=ctx)
    test_data_iter = DataIter(dev_src_data, dev_trg_data, BATCH_SIZE, eos_src, eos_trg, sos_src, sos_trg, eos_src,
                              eos_trg,
                              split=1, ctx=ctx)

    num_layers = 1
    # NOTE(review): passes attention_size=EMB_SIZE although a dedicated
    # ATTENTION_SIZE constant exists — confirm which was intended
    enc_dec_net = EncoderDecoderAttn(len(w2i_src), len(w2i_trg), emb_size=EMB_SIZE, attention_size=EMB_SIZE, hidden_size=HIDDEN_SIZE, num_layers=num_layers)
    enc_dec_net.initialize()

    train(enc_dec_net, train_data_iter, test_data_iter)


def generate(net, sent, i2w_src, i2w_trg):
    """Generate a translation for a single source sentence.

    TODO: incomplete — only runs the encoder; no decoding is performed and
    nothing is returned. i2w_src / i2w_trg are presumably placeholders for
    the eventual id -> word conversion (unused so far).
    """
    data = nd.array([sent])
    out = net.encoder(data)


if __name__ == '__main__':
    # parallel Japanese -> English corpus paths
    train_src_file = "../data/parallel/train.ja"
    train_trg_file = "../data/parallel/train.en"
    dev_src_file = "../data/parallel/dev.ja"
    dev_trg_file = "../data/parallel/dev.en"

    # train_src_data, w2i_src = read_train_data(train_src_file)
    # train_trg_data, w2i_trg = read_train_data(train_trg_file)

    # out_model_file / w2i_file are currently unused by train_model, hence ''
    train_model(train_src_file, train_trg_file, dev_src_file, dev_trg_file, '', '')
