from collections import defaultdict
# import math
# import time
import mxnet as mx
import random
import numpy as np
from mxnet import nd
from mxnet import autograd
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.gluon.loss import Loss


# Training hyper-parameters.
BATCH_SIZE = 50     # sentences per mini-batch
EMB_SIZE = 64       # word-embedding dimension
HIDDEN_SIZE = 128   # total BiLSTM output width (split over the two directions)


# latent feature for sentence
class BiLSTM(nn.Block):
    """Bidirectional-LSTM sentence encoder for a (src, trg) parallel pair.

    Each side has its own embedding table and BiLSTM; a sentence is
    represented by the BiLSTM output at its last real (non-pad) token.
    """

    def __init__(self, vocab_size_src, vocab_size_trg, emb_size, hidden_size, num_layers, **kwargs):
        super(BiLSTM, self).__init__(**kwargs)
        self.embedding_src = nn.Embedding(vocab_size_src, emb_size)
        self.embedding_trg = nn.Embedding(vocab_size_trg, emb_size)
        # layout='NTC': batches arrive as (batch, seq) from DataIter.  gluon's
        # default is 'TNC', which would silently treat the batch axis as time
        # and make the (i, l-1) indexing in forward() mix tokens across
        # sentences.  hidden_size is halved per direction so the concatenated
        # bidirectional output is hidden_size wide.
        self.lstm_src = gluon.rnn.LSTM(hidden_size // 2, num_layers=num_layers,
                                       bidirectional=True, layout='NTC')
        self.lstm_trg = gluon.rnn.LSTM(hidden_size // 2, num_layers=num_layers,
                                       bidirectional=True, layout='NTC')

    def forward(self, x_src, x_trg, seq_lens_src, seq_lens_trg):
        """Encode one batch.

        x_src, x_trg -- int arrays of shape (batch, max_seq_len)
        seq_lens_*   -- true (unpadded) length of each sentence

        Returns (src_reps, trg_reps): for each side, the LSTM output at the
        final real token of every sentence, stacked to (batch, hidden_size).
        """
        x_src = self.embedding_src(x_src)
        x_trg = self.embedding_trg(x_trg)
        x_src = self.lstm_src(x_src)
        x_trg = self.lstm_trg(x_trg)

        # Select each sentence's state at its last non-pad position.
        return (nd.stack(*[x_src[i, l - 1] for i, l in enumerate(seq_lens_src)]),
                nd.stack(*[x_trg[i, l - 1] for i, l in enumerate(seq_lens_trg)]))


# def test_net():
#     net = BiLSTM(20, 10, 10, 1)
#     net.initialize()
#     # batch, seq len
#     data = nd.zeros((1, 5))
#     print(net(data))


def read_train_data(text_file):
    """Read one lowercased, whitespace-tokenized sentence per line and build
    a word-to-index vocabulary as a side effect.

    Indices are assigned in order of first appearance; "<s>" is reserved at
    index 0 and "<unk>" at index 1.  The returned mapping resolves any unseen
    word to the <unk> index.

    Returns (data, w2i): data is a list of sentences, each a list of word ids.
    """
    w2i = defaultdict(lambda: len(w2i))
    w2i["<s>"]            # reserve index 0 for the sentence marker
    unk = w2i["<unk>"]    # reserve index 1 for unknown words
    data = []
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            tokens = line.lower().split()
            data.append([w2i[w.strip()] for w in tokens if w.strip()])
    # Freeze the vocabulary: from now on unknown words map to <unk>.
    w2i = defaultdict(lambda: unk, w2i)
    return data, w2i


def read_test_data(text_file, w2i):
    """Read one lowercased, whitespace-tokenized sentence per line, mapping
    words through an existing vocabulary *w2i*.

    Unknown words resolve via w2i's default factory (the <unk> id when w2i
    comes from read_train_data).

    Returns a list of sentences, each a list of word ids.
    """
    data = []
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            tokens = line.lower().split()
            data.append([w2i[w.strip()] for w in tokens if w.strip()])
    return data


def padding(data, pad):
    """Right-pad every sequence in *data* with *pad* to the longest length.

    Returns (padded_data, seq_lens): the equal-length sequences and the
    original length of each one.
    """
    seq_lens = [len(seq) for seq in data]
    longest = max(seq_lens)
    padded = [seq + [pad] * (longest - len(seq)) for seq in data]
    return padded, seq_lens


class DataIter:
    """Shuffles a parallel (src, trg) corpus, splits it into train/dev parts,
    and serves fixed-size padded mini-batches as mxnet NDArrays.

    data / trg        -- lists of id sequences (trg may be falsy for src-only use)
    pad_src / pad_trg -- padding symbol ids for each side
    split             -- fraction of the corpus assigned to the train split
    """

    def __init__(self, data, trg, batch_size, pad_src, pad_trg, split=0.8, ctx=mx.cpu()):
        self.data = data
        self.trg = trg
        self.batch_size = batch_size
        self.pad_src = pad_src
        self.pad_trg = pad_trg
        self.ctx = ctx
        self.length = len(data)
        self.index = list(range(self.length))

        # Shuffle once, then carve off the first `split` fraction as train.
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[:self.train_size]
        self.dev_index = self.index[self.train_size:]

    def data_iter(self, dataset_type='train'):
        """Yield mini-batches for the chosen split.

        'train' reshuffles its indices on every call; 'dev' (and any other
        value, which means the whole corpus) iterates in fixed order.  A
        short final batch is topped up with indices recycled from the front
        so every batch has exactly batch_size rows.
        """
        if dataset_type == 'train':
            index = self.train_index
            random.shuffle(index)  # in place: epoch order changes each call
            length = self.train_size
        elif dataset_type == 'dev':
            index = self.dev_index
            length = self.dev_size
        else:
            index = self.index
            length = self.length

        bs = self.batch_size
        n_full = length // bs
        shortfall = (n_full + 1) * bs - length

        batches = [index[start:start + bs] for start in range(0, n_full * bs, bs)]
        if shortfall < bs:
            # Partial tail batch: wrap around to the front to fill it.
            batches.append(index[n_full * bs:length] + index[:shortfall])

        for batch in batches:
            src_rows, src_lens = padding([self.data[i] for i in batch], self.pad_src)
            if self.trg:
                trg_rows, trg_lens = padding([self.trg[i] for i in batch], self.pad_trg)
                yield (nd.array(src_rows, dtype=np.int32, ctx=self.ctx),
                       nd.array(trg_rows, dtype=np.int32, ctx=self.ctx),
                       src_lens, trg_lens)
            else:
                yield nd.array(src_rows, dtype=np.int32, ctx=self.ctx), src_lens

    def get_iter(self, dataset_type='train'):
        """Convenience alias for data_iter."""
        return self.data_iter(dataset_type)


def retrieve(src, db_mtx):
    """Score a query vector *src* against every row of *db_mtx* by dot product.

    Returns (ranks, scores): row indices sorted best-first, and the raw
    similarity score of every row.
    """
    scores = db_mtx.dot(src)
    ranks = np.argsort(-scores)  # negate so highest score comes first
    return ranks, scores


def index_corpus(src_net, data_iter):
    """Run the encoder over every batch of *data_iter* and yield one
    (src_vector, trg_vector) numpy pair per sentence."""
    for batch_src, batch_trg, lens_src, lens_trg in data_iter:
        src_rep, trg_rep = src_net(batch_src, batch_trg, lens_src, lens_trg)
        for src_vec, trg_vec in zip(src_rep, trg_rep):
            yield src_vec.asnumpy(), trg_vec.asnumpy()


class HingeLossDim(Loss):
    """Element-wise hinge loss: mean(relu(margin - pred * label)), averaged
    over every axis except the batch axis.

    Positive targets push predictions above the margin; negative targets push
    them below it.
    """

    def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
        super(HingeLossDim, self).__init__(weight, batch_axis, **kwargs)
        self._margin = margin

    def hybrid_forward(self, F, pred, label, dim=0, sample_weight=None):
        # A term contributes only while pred * label is still below the margin.
        violations = F.relu(self._margin - pred * label)
        return F.mean(violations, axis=self._batch_axis, exclude=True)


def train(net, data_iter, data_iter_dev):
    """Train *net* with a batch-contrastive hinge loss; after each epoch,
    encode the dev corpus and report retrieval Recall@{1,5,10} of src
    sentences against the target-side index.

    net           -- encoder: (src, trg, lens_src, lens_trg) -> (src_rep, trg_rep)
    data_iter     -- DataIter over the training corpus
    data_iter_dev -- DataIter over the dev corpus (built with split=1 in
                     train_model, so its 'train' split is the whole dev set)
    """
    trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 1e-3})
    # loss_func_l2 = gluon.loss.L2Loss()
    loss_func = HingeLossDim()

    for epoch in range(100):
        train_data = data_iter.get_iter('train')
        dev_data = data_iter_dev.get_iter('train')
        train_loss = 0
        train_acc = 0   # NOTE(review): never updated below
        test_acc = 0    # NOTE(review): never updated below
        data_len = 0
        for i, (data_src, data_trg, seq_lens_src, seq_lens_trg) in enumerate(train_data):
            with autograd.record():
                out_src, out_trg = net(data_src, data_trg, seq_lens_src, seq_lens_trg)
                # Similarity of every src sentence to every trg sentence in the batch.
                sim_mtx = nd.dot(out_src, out_trg.transpose())
                # if i % 1000 == 0:
                #     print(sim_mtx)
                size = sim_mtx.shape[0]
                # 1 * (size - 1) is the key
                # Target matrix: +(size-1) on the diagonal (the matching pair),
                # -1 off-diagonal, so the single positive term weighs as much
                # as the (size-1) negatives in the hinge loss.
                target = [[1 * (size - 1) if j == k else -1 for j in range(size)] for k in range(size)]
                # NOTE(review): created on the default context, unlike the
                # ctx-aware arrays from DataIter — fine on CPU; confirm for GPU.
                target = nd.array(target)

                loss = loss_func(sim_mtx, target)
            loss.backward()

            trainer.step(BATCH_SIZE)
            train_loss += nd.sum(loss).asscalar()
            data_len += BATCH_SIZE

            # if i % 50 == 0:
            #     # print(data)
            #     print('output: ', output)
            #     print('label: ', label)
            #     print('out: ', output.argmax(axis=1))
            #     print(loss)
            #     # print(t2i)
            # train_acc += accuracy(output, label)

        print(train_loss, ' ', train_loss / data_len)

        # Dev evaluation: encode the dev corpus, then for each src vector rank
        # all trg vectors by dot product and count recall at cutoffs 1/5/10.
        rec_at_1, rec_at_5, rec_at_10 = 0, 0, 0
        reps = list(index_corpus(net, dev_data))

        # All target-side vectors stacked into a (dev_size, hidden) matrix.
        trg_mtx = np.stack([trg for src, trg in reps])

        len_dev = 0

        for i, (src, trg) in enumerate(reps):
            ranks, scores = retrieve(src, trg_mtx)
            # if i % 100 == 0:
            #     print(ranks[0:100])
            if ranks[0] == i:  # the i-th trg is the true translation of the i-th src
                rec_at_1 += 1
            if i in ranks[0:5]:
                rec_at_5 += 1
            if i in ranks[0:10]:
                rec_at_10 += 1
            len_dev += 1
        # print(len_dev, len(ranks))

        print("Epoch: %d, Loss: %f, Recall@1: %f, Recall@5: %f, Recall@10: %f" %
              (epoch, train_loss / data_iter.train_size, rec_at_1/len_dev * 100,
               rec_at_5/len_dev * 100, rec_at_10/len_dev * 100))

        # test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        # print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
        #       (epoch, train_loss / data_iter.train_size, train_acc / data_iter.train_size, test_acc))


def train_model(text_src_file, text_trg_file, src_dev_file, trg_dev_file, out_model_file, w2i_file):
    """Build vocabularies from the training files, wrap all corpora in
    DataIters, and train a BiLSTM encoder.

    out_model_file and w2i_file are accepted but not used yet (persistence
    is still commented out).
    """
    src_sents, vocab_src = read_train_data(text_src_file)
    trg_sents, vocab_trg = read_train_data(text_trg_file)

    dev_src_sents = read_test_data(src_dev_file, vocab_src)
    dev_trg_sents = read_test_data(trg_dev_file, vocab_trg)

    # pickle.dump(w2i, open(w2i_file, 'w+'))
    print(len(src_sents))

    # NOTE(review): <unk> doubles as the padding symbol — confirm intended.
    pad_src = vocab_src['<unk>']
    pad_trg = vocab_trg['<unk>']

    ctx = mx.cpu()
    train_iter = DataIter(src_sents, trg_sents, BATCH_SIZE, pad_src, pad_trg, split=1, ctx=ctx)
    dev_iter = DataIter(dev_src_sents, dev_trg_sents, BATCH_SIZE, pad_src, pad_trg, split=1, ctx=ctx)

    encoder = BiLSTM(len(vocab_src), len(vocab_trg), EMB_SIZE, HIDDEN_SIZE, num_layers=1)
    encoder.initialize()

    train(encoder, train_iter, dev_iter)


if __name__ == '__main__':
    # Japanese-English parallel corpus, one pre-tokenized sentence per line.
    # Model/vocabulary output paths are unused for now.
    train_model("../data/parallel/train.ja",
                "../data/parallel/train.en",
                "../data/parallel/dev.ja",
                "../data/parallel/dev.en",
                '', '')
