import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
import mxnet.gluon.nn as nn
import mxnet.gluon as gluon
import mxnet.autograd as autograd
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from mxnet.gluon.trainer import Trainer
import tqdm
from collections import defaultdict
import random


# Sequences per mini-batch; also the normalization factor passed to Trainer.step().
BATCH_SIZE = 16


def read_train_data(text_file):
    """Read "word|tag" training data, building word/tag vocabularies on the fly.

    Each line of *text_file* holds whitespace-separated "word|tag" tokens.
    Words and tags are assigned consecutive integer ids in first-seen order;
    "<s>" and "<unk>" are reserved first (ids 0 and 1 for words, "<unk>" id 0
    for tags).

    Returns:
        (data, tags, w2i, t2i): data/tags are lists of per-sentence id lists;
        w2i/t2i are frozen so that unseen entries map to the "<unk>" id.
    """
    w2i = defaultdict(lambda: len(w2i))
    t2i = defaultdict(lambda: len(t2i))
    # Reserve fixed ids for the sentence-start and unknown symbols.
    w2i["<s>"]
    UNK = w2i["<unk>"]
    UNK_TAG = t2i["<unk>"]
    data = []
    tags = []
    # Fix: use a context manager so the file handle is always closed.
    with open(text_file, encoding='utf-8') as f:
        for l in f:
            word_tags = [wt.split('|') for wt in l.strip().split()]
            data.append([w2i[wt[0].strip()] for wt in word_tags])
            tags.append([t2i[wt[1].strip()] for wt in word_tags])
    # Freeze the vocabularies: new words/tags now map to "<unk>" instead of growing.
    w2i = defaultdict(lambda: UNK, w2i)
    t2i = defaultdict(lambda: UNK_TAG, t2i)
    return data, tags, w2i, t2i


def read_dev_data(text_file, w2i, t2i):
    """Read "word|tag" data using existing vocabularies.

    Same line format as read_train_data; *w2i*/*t2i* are expected to map
    unknown entries to an "<unk>" id (as returned by read_train_data).

    Returns:
        (data, tags): parallel lists of per-sentence id lists.
    """
    data = []
    tags = []
    # Fix: use a context manager so the file handle is always closed.
    with open(text_file, encoding='utf-8') as f:
        for l in f:
            word_tags = [wt.split('|') for wt in l.strip().split()]
            data.append([w2i[wt[0].strip()] for wt in word_tags])
            tags.append([t2i[wt[1].strip()] for wt in word_tags])
    return data, tags


def padding(d, padding_size, pad):
    """Truncate or right-pad list *d* to exactly *padding_size* items using *pad*."""
    clipped = d[:padding_size]
    return clipped + [pad] * (padding_size - len(clipped))


class DataIter:
    """Mini-batch iterator over parallel (sentence, tag) id sequences.

    Shuffles the example indices once, splits them into train/dev partitions
    by *split*, and yields padded NDArray batches with true lengths.
    """

    def __init__(self, data, tag, batch_size, pad, tag_pad, split=0.8, ctx=mx.cpu()):
        # data/tag: parallel lists of id lists; pad/tag_pad: ids used as padding.
        self.data = data
        self.tag = tag
        self.batch_size = batch_size
        self.pad = pad
        self.tag_pad = tag_pad
        self.length = len(self.data)
        self.index = list(range(self.length))

        # random split train dev
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[0: self.train_size]
        self.dev_index = self.index[self.train_size:]
        self.ctx = ctx

    def data_iter(self, dataset_type='train'):
        """Yield batches from 'train', 'dev', or (anything else) the whole set.

        Yields (data, tags, lens, tag_lens) when tags are present, else
        (data, lens).  The trailing partial batch is topped up by wrapping
        around to the front of the index list, so every batch is full-sized.
        """
        if dataset_type == 'train':
            length = self.train_size
            index = self.train_index
            # Reshuffle the training partition in place on every pass.
            random.shuffle(index)
        elif dataset_type == 'dev':
            length = self.dev_size
            index = self.dev_index
        else:
            length = self.length
            index = self.index

        # print(index)
        n_batch = length // self.batch_size
        # Number of wrap-around fillers needed to complete one extra batch.
        rest = (n_batch + 1) * self.batch_size - length

        batch_index = [index[i * self.batch_size:(i + 1) * self.batch_size] for i in range(n_batch)]
        if rest < self.batch_size:
            # Leftover examples plus fillers from the front form the last batch.
            batch_index.append(index[self.batch_size * n_batch:length] + index[0:rest])
        for bi in batch_index:
            lens = [len(self.data[i]) for i in bi]
            max_len = max(lens)
            # print(max_len)
            # Pad every sentence to this batch's longest length.
            d = [padding(self.data[i], max_len, self.pad) for i in bi]

            if self.tag:
                tag_lens = [len(self.tag[i]) for i in bi]
                tag_max_len = max(tag_lens)
                t = [padding(self.tag[i], tag_max_len, self.tag_pad) for i in bi]
                # NOTE(review): uint8 caps tag ids at 255 — verify the tagset is smaller.
                yield nd.array(d, dtype=np.int32, ctx=self.ctx), nd.array(t, dtype=np.uint8, ctx=self.ctx), lens, tag_lens
            else:
                yield nd.array(d, dtype=np.int32, ctx=self.ctx), lens

    def get_iter(self, dataset_type):
        """Return a fresh generator over the requested partition."""
        return self.data_iter(dataset_type)


class BiLstmTagger(nn.Block):
    """Bidirectional-LSTM sequence tagger: embedding -> BiLSTM -> per-step dense."""

    def __init__(self, vocab_size, n_tags, emb_size, hidden_size):
        super(BiLstmTagger, self).__init__()
        with self.name_scope():
            # Word-id -> dense vector lookup table.
            self.emb = nn.Embedding(vocab_size, emb_size)
            # Forward and backward LSTM cells wrapped into one bidirectional cell.
            forward_cell = gluon.rnn.LSTMCell(hidden_size=hidden_size)
            backward_cell = gluon.rnn.LSTMCell(hidden_size=hidden_size)
            self.tagger = gluon.rnn.BidirectionalCell(forward_cell, backward_cell)
            # One shared classifier, applied independently at every time step.
            self.classify = nn.Dense(n_tags)

    def init_state(self, batch_size, ctx):
        """Return zero-initialized recurrent states for *batch_size* on *ctx*."""
        return self.tagger.begin_state(batch_size=batch_size, ctx=ctx)

    def forward(self, x, length, lens, begin_state=None):
        """Tag a batch; returns a list of per-step (batch, n_tags) score arrays."""
        embedded = self.emb(x)
        outputs, _ = self.tagger.unroll(length, embedded,
                                        valid_length=lens,
                                        merge_outputs=False,
                                        begin_state=begin_state)
        # Classify each time step independently.
        return [self.classify(step) for step in outputs]


def test_net():
    """Smoke-test: run a tiny randomly-initialized tagger on two toy sentences."""
    tagger = BiLstmTagger(10, 4, 8, 16)
    tagger.initialize()
    batch = nd.array([[1, 2, 3, 4], [4, 5, 0, 0]])
    state = tagger.init_state(2, ctx=mx.cpu())
    valid = nd.array([4, 2], dtype=np.float32)
    print(tagger(batch, 4, valid, state))


def accuracy(output, label, mask=None):
    """Count correct predictions for one time step.

    Args:
        output: (batch, n_tags) scores; argmax over axis 1 gives predictions.
        label: (batch,) gold tag ids (integer dtype; cast to float for compare).
        mask: optional (batch,) 0/1 validity mask; None counts every position.

    Returns:
        float: number of correct (and unmasked) predictions.
    """
    out = output.argmax(axis=1)
    correct = out == label.astype(np.float32)
    if mask is None:
        # Bug fix: the declared default previously crashed (None * NDArray).
        return nd.sum(correct).asscalar()
    return nd.sum(correct * mask).asscalar()


def evaluate_accuracy(net, data_iter, data_len):
    """Micro-averaged token accuracy of *net* over the batches in *data_iter*.

    *data_len* is accepted for interface compatibility but is not used.
    """
    correct = 0
    total = 0
    for data, label, lens, tag_lens in data_iter:
        longest = max(lens)
        scores = net(data, longest, nd.array(lens))
        # Zero out padded positions so they count neither as right nor wrong.
        mask = nd.array(get_mask(lens, longest))
        for step, step_scores in enumerate(scores):
            correct += accuracy(step_scores, label[:, step], mask[:, step])
            total += mask[:, step].sum().asscalar()
    return correct / total


# class MyLoss(gluon.loss.Loss):
#     def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
#         super(MyLoss, self).__init__(weight, batch_axis, **kwargs)
#         self._from_sigmoid = from_sigmoid
#
#     def hybrid_forward(self, F, preds, labels, mask=None, weight=1.0,  sample_weight=None):
#         if not self._from_sigmoid:
#             # We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
#             loss = F.relu(preds) - preds * labels + F.Activation(-F.abs(preds), act_type='softrelu')
#         else:
#             loss = -(F.log(preds+1e-12)*labels + F.log(1.-preds+1e-12)*(1.-labels))
#         return F.mean(loss)


def get_mask(lens, size):
    """Build a 0/1 validity matrix: row j holds lens[j] ones followed by zeros, width *size*."""
    return [[int(col < valid) for col in range(size)] for valid in lens]


def train(net, data_iter, dev_iter, out_model_file, ctx):
    """Train *net* for 100 epochs with SGD and per-step softmax cross-entropy.

    Padded positions are excluded from both the loss (mask passed as
    sample_weight) and the reported accuracy.  *out_model_file* is currently
    unused because checkpoint saving is disabled.
    """
    trainer = Trainer(net.collect_params(),
                      'sgd', {'learning_rate': 1e-2})
    loss_func = SoftmaxCrossEntropyLoss()

    for epoch in range(100):
        train_data = data_iter.get_iter('train')
        # dev_iter is built with split=1.0, so its full data lives in 'train'.
        dev_data = dev_iter.get_iter('train')
        train_loss = 0
        train_acc = 0
        n = 0
        for i, (data, label, lens, tag_lens) in enumerate(train_data):
            max_len = max(lens)
            mask = nd.array(get_mask(lens, max_len), ctx=ctx)
            with autograd.record():
                # Bug fix: lens must live on the same device as the model.
                output = net(data, max_len, nd.array(lens, ctx=ctx))
                # Bug fix: weight each position by the mask (sample_weight, shaped
                # (batch, 1)) so padded time steps no longer contribute gradient.
                loss = loss_func(output[0], label[:, 0], mask[:, 0].reshape((-1, 1)))
                for k in range(1, len(output)):
                    loss = loss + loss_func(output[k], label[:, k], mask[:, k].reshape((-1, 1)))
            loss.backward()

            trainer.step(BATCH_SIZE)
            train_loss += nd.mean(loss).asscalar()

            for k in range(len(output)):
                train_acc += accuracy(output[k], label[:, k], mask[:, k])
                n += mask[:, k].sum().asscalar()
            if (i + 1) % 50 == 0:
                print(train_acc, n, train_acc / n)

        test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
              (epoch, train_loss / data_iter.train_size, train_acc / n, test_acc))
        # net.save_params(net.collect_params().save(out_model_file + '_' + str(epoch)))


def train_lstm_tagger(text_file, dev_file, out_model_file):
    """Read the corpora, build the BiLSTM tagger, and train it on CPU."""
    data, tags, w2i, t2i = read_train_data(text_file)
    dev_data, dev_tags = read_dev_data(dev_file, w2i, t2i)
    unk = w2i['<unk>']
    unk_tag = t2i['<unk>']
    ctx = mx.cpu()
    print(len(data))
    # split=1.0 puts every example in the 'train' partition of each iterator;
    # "<unk>" ids double as the padding symbols.
    train_data_iter = DataIter(data, tags, BATCH_SIZE, unk, unk_tag, split=1.0, ctx=ctx)
    dev_data_iter = DataIter(dev_data, dev_tags, BATCH_SIZE, unk, unk_tag, split=1.0, ctx=ctx)

    net = BiLstmTagger(len(w2i), len(t2i), 128, 64)
    net.initialize(ctx=ctx)
    train(net, train_data_iter, dev_data_iter, out_model_file, ctx=ctx)


if __name__ == '__main__':
    # Paths to the "word|tag" formatted train/dev corpora.
    train_file = '../data/tags/train.txt'
    dev_file = '../data/tags/dev.txt'
    # Checkpoint prefix; empty because saving is disabled in train().
    out_file = ''
    train_lstm_tagger(train_file, dev_file, out_file)

