import mxnet as mx
import mxnet.ndarray as nd
from mxnet.ndarray import NDArray
import numpy as np
import mxnet.gluon.nn as nn
import mxnet.gluon as gluon
import mxnet.autograd as autograd
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from mxnet.gluon.trainer import Trainer
import tqdm
from collections import defaultdict
import random
from typing import Optional


# Number of sentences per minibatch (also passed to Trainer.step for loss scaling).
BATCH_SIZE = 16


def read_train_data(text_file):
    """Read word|tag training data, building word and tag vocabularies.

    Ids are assigned on first sight; "<s>" and "<unk>" are reserved first.
    The returned mappings default unseen words/tags to their <unk> ids.

    :param text_file: path to a file with space-separated word|tag pairs per line
    :return: (data, tags, w2i, t2i) where data/tags are per-sentence id lists
    """
    w2i = defaultdict(lambda: len(w2i))
    t2i = defaultdict(lambda: len(t2i))
    S = w2i["<s>"]  # reserve id 0 for the sentence-start symbol
    UNK = w2i["<unk>"]
    UNK_TAG = t2i["<unk>"]
    data = []
    tags = []
    # BUG FIX: use a context manager so the file handle is always closed
    # (the old code leaked it); the unused enumerate index was dropped.
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            word_tags = [wt.split('|') for wt in line.strip().split()]
            data.append([w2i[wt[0].strip()] for wt in word_tags])
            tags.append([t2i[wt[1].strip()] for wt in word_tags])
    # Freeze vocabularies: unseen keys now map to <unk> instead of growing.
    w2i = defaultdict(lambda: UNK, w2i)
    t2i = defaultdict(lambda: UNK_TAG, t2i)
    return data, tags, w2i, t2i


def read_dev_data(text_file, w2i, t2i):
    """Read a word|tag file and map tokens through existing vocabularies.

    Returns flat (not per-sentence) lists of word ids and tag ids.

    :param text_file: path to a file with space-separated word|tag pairs
    :param w2i: word -> id mapping built by read_train_data
    :param t2i: tag -> id mapping built by read_train_data
    """
    data = []
    tags = []
    # BUG FIX: use a context manager so the file handle is always closed
    # (the old code leaked it); the unused enumerate index was dropped.
    # NOTE(review): tokens are lowercased here but NOT in read_train_data,
    # so cased training words would map to <unk> — confirm which side is intended.
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            for wt in line.strip().split():
                d, tag = wt.lower().split('|')
                data.append(w2i[d.strip()])
                tags.append(t2i[tag.strip()])
    return data, tags


def padding(d, padding_size, pad):
    """Truncate *d* to *padding_size* items, then right-pad with *pad* to that length."""
    truncated = d[:padding_size]
    return truncated + [pad] * (padding_size - len(truncated))


class DataIter:
    """Yields padded minibatches of sentences (and optionally tag sequences),
    with a one-time random train/dev split of the data."""

    def __init__(self, data, tag, batch_size, pad, tag_pad, split=0.8, ctx=mx.cpu()):
        self.data = data
        self.tag = tag
        self.batch_size = batch_size
        self.pad = pad
        self.tag_pad = tag_pad
        self.ctx = ctx
        self.length = len(self.data)
        self.index = list(range(self.length))

        # Shuffle once, then carve out the train/dev partitions by position.
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[:self.train_size]
        self.dev_index = self.index[self.train_size:]

    def data_iter(self, dataset_type='train'):
        """Generate minibatches for 'train', 'dev', or the whole dataset."""
        if dataset_type == 'train':
            index, length = self.train_index, self.train_size
            random.shuffle(index)  # reshuffle the training order in place each pass
        elif dataset_type == 'dev':
            index, length = self.dev_index, self.dev_size
        else:
            index, length = self.index, self.length

        n_batch = length // self.batch_size
        # How many wrap-around fillers would complete one extra batch.
        rest = (n_batch + 1) * self.batch_size - length

        batches = [index[b * self.batch_size:(b + 1) * self.batch_size]
                   for b in range(n_batch)]
        if rest < self.batch_size:
            # Leftover examples plus a few recycled from the front form a final full batch.
            batches.append(index[self.batch_size * n_batch:length] + index[:rest])

        for batch in batches:
            lens = [len(self.data[i]) for i in batch]
            longest = max(lens)
            padded = [padding(self.data[i], longest, self.pad) for i in batch]

            if self.tag:
                tag_lens = [len(self.tag[i]) for i in batch]
                tag_longest = max(tag_lens)
                padded_tags = [padding(self.tag[i], tag_longest, self.tag_pad) for i in batch]
                yield (nd.array(padded, dtype=np.int32, ctx=self.ctx),
                       nd.array(padded_tags, dtype=np.uint8, ctx=self.ctx),
                       lens, tag_lens)
            else:
                yield nd.array(padded, dtype=np.int32, ctx=self.ctx), lens

    def get_iter(self, dataset_type):
        """Convenience wrapper around data_iter."""
        return self.data_iter(dataset_type)


class CRF(nn.Block):
    """Linear-chain CRF layer that scores tag sequences against emission scores.

    Learnable parameters: start/end transition vectors of shape (num_tags,)
    and a (num_tags, num_tags) tag-to-tag transition matrix.
    """

    def __init__(self, num_tags: int):
        super(CRF, self).__init__()
        self.n_states = num_tags
        self.start_transitions = self.params.get('start_transitions', shape=(num_tags,))
        self.end_transitions = self.params.get('end_transitions', shape=(num_tags,))
        self.transitions = self.params.get('transitions', shape=(num_tags, num_tags))

    def reset_parameters(self):
        """Initialize the transition parameters (not implemented)."""
        pass

    @staticmethod
    def log_sum_exp(x, axis=1):
        """Numerically stable log(sum(exp(x))) along *axis*."""
        k = nd.max(x, axis=axis)
        return nd.log(nd.sum(nd.exp(x - k.expand_dims(axis)), axis=axis)) + k

    def _compute_joint_llh(self, emissions: NDArray, tags: NDArray, mask: NDArray):
        """Unnormalized log-score of the given tag path.

        :param emissions: (seq_len, batch_size, num_tags)
        :param tags: (seq_len, batch_size)
        :param mask: (seq_len, batch_size) 0/1 validity mask, or None for all-valid
        :return: (batch_size,) path scores
        """
        # BUG FIX: the old code only converted a non-None mask but then indexed
        # `mask[i]` unconditionally; build an all-ones mask when none is given.
        if mask is None:
            mask = nd.ones_like(tags)
        mask = mask.astype(np.float32)
        seq_len = emissions.shape[0]

        llh = self.start_transitions.data()[tags[0]]  # (batch_size,)
        for i in range(seq_len - 1):
            cur_tag, next_tag = tags[i], tags[i + 1]
            # emission score of the gold tag at step i (masked)
            llh += emissions[i].pick(cur_tag) * mask[i]
            # transition score from step i's tag to step i+1's tag (masked)
            transition_score = self.transitions.data()[cur_tag, next_tag]
            llh += transition_score * mask[i + 1]
        # index of the last valid position in each sequence
        last_tag_indices = mask.astype(np.int32).sum(axis=0) - 1
        last_tags = tags.swapaxes(0, 1).pick(last_tag_indices)
        llh += self.end_transitions.data()[last_tags]
        llh += emissions[-1].pick(last_tags) * mask[-1]
        return llh

    def _compute_log_partition_function(self, emissions: NDArray, mask: NDArray):
        """Forward-algorithm scores over all tag paths.

        :param emissions: (seq_len, batch_size, num_tags)
        :param mask: (seq_len, batch_size)
        :return: (batch_size, num_tags) per-end-tag log scores; callers must
                 log-sum-exp over the tag axis to get the partition function.
        """
        seq_len = emissions.shape[0]
        mask = mask.astype(np.float32)

        # Start transition score and first emission.
        # BUG FIX: removed a leftover debug print of start_transitions.
        log_prob = self.start_transitions.data() + emissions[0]  # (batch_size, num_tags)
        for i in range(1, seq_len):
            # broadcast log_prob over all possible next tags
            broadcast_log_probs = log_prob.expand_dims(2)   # (batch_size, num_tags, 1)
            # broadcast transition score over all instances in the batch
            broadcast_transitions = self.transitions.data().expand_dims(0)  # (1, num_tags, num_tags)
            # broadcast emission score over all possible current tags
            broadcast_emissions = emissions[i].expand_dims(1)  # (batch_size, 1, num_tags)
            # Sum current log prob, transition, and emission scores
            score = broadcast_log_probs + broadcast_transitions + broadcast_emissions  # (batch_size, num_tags, num_tags)
            score = self.log_sum_exp(score, 1)  # (batch_size, num_tags)
            # Only advance positions that are still inside the sequence.
            log_prob = score * mask[i].expand_dims(1) + log_prob * (1 - mask[i]).expand_dims(1)

        # End transition score
        log_prob += self.end_transitions.data()

        return log_prob

    def viterbi_decode(self, x):
        """Best-path decoding (not implemented)."""
        pass

    def forward(self, emissions: NDArray, tags: NDArray, mask: Optional[NDArray]=None,
                reduce: bool = True):
        """Per-sequence log-likelihood of *tags* under the CRF.

        :param emissions: (seq_len, batch_size, num_tags)
        :param tags: (seq_len, batch_size)
        :param mask: optional (seq_len, batch_size) validity mask
        :param reduce: accepted for interface compatibility; currently unused
        :return: (batch_size,) log-likelihoods
        """
        if mask is None:
            mask = nd.ones_like(tags)
        numerator = self._compute_joint_llh(emissions, tags, mask)
        # BUG FIX: the partition helper returns per-end-tag scores of shape
        # (batch_size, num_tags); they must be log-sum-exp'ed over the tag axis
        # before subtraction, otherwise llh broadcasts to (batch_size, num_tags).
        denominator = self.log_sum_exp(
            self._compute_log_partition_function(emissions, mask), axis=1)
        llh = numerator - denominator
        return llh


class BiLstmCRFTagger(nn.Block):
    """Embedding + bidirectional LSTM tagger.

    A CRF layer and a Dense classifier are declared but not applied in
    forward(), which returns the raw per-step BiLSTM outputs.
    """

    def __init__(self, vocab_size, n_tags, emb_size, hidden_size):
        super(BiLstmCRFTagger, self).__init__()
        with self.name_scope():
            self.emb = nn.Embedding(vocab_size, emb_size)
            fwd_cell = gluon.rnn.LSTMCell(hidden_size=hidden_size)
            bwd_cell = gluon.rnn.LSTMCell(hidden_size=hidden_size)
            self.tagger = gluon.rnn.BidirectionalCell(fwd_cell, bwd_cell)
            self.classify = nn.Dense(n_tags)
            self.crf = CRF(n_tags)

    def init_state(self, batch_size, ctx):
        """Fresh zero states for both LSTM directions."""
        return self.tagger.begin_state(batch_size=batch_size, ctx=ctx)

    def forward(self, x, length, lens, begin_state=None):
        """Embed token ids and unroll the BiLSTM for *length* steps.

        Returns the list of per-timestep outputs (merge_outputs=False).
        """
        embedded = self.emb(x)
        step_outputs, _states = self.tagger.unroll(length, embedded,
                                                   valid_length=lens,
                                                   merge_outputs=False,
                                                   begin_state=begin_state)
        return step_outputs


def accuracy(output, label, mask=None):
    """Count correct argmax predictions, restricted to positions where mask == 1.

    :param output: (batch, n_classes) scores
    :param label: (batch,) gold class ids
    :param mask: (batch,) 0/1 validity mask; None counts every position
    :return: float count of correct, unmasked predictions
    """
    pred = output.argmax(axis=1)
    correct = (pred == label.astype(np.float32))
    # BUG FIX: the declared mask=None default used to crash (None * NDArray);
    # a missing mask now means "count everything".
    if mask is not None:
        correct = correct * mask
    return nd.sum(correct).asscalar()


def evaluate_accuracy(net, data_iter, data_len):
    """Masked token-level accuracy of *net* over *data_iter*.

    :param net: tagger returning a list of per-timestep (batch, n_tags) outputs
    :param data_iter: iterator of (data, label, lens, tag_lens) batches
    :param data_len: unused; kept for interface compatibility
    :return: correct / total over all unmasked token positions
    """
    acc = 0
    n = 0
    for data, label, lens, tag_lens in data_iter:
        max_len = max(lens)
        # BUG FIX: create lens/mask arrays on the same device as the batch;
        # the old code always built them on the default CPU context.
        ctx = data.context
        output = net(data, max_len, nd.array(lens, ctx=ctx))
        mask = nd.array(get_mask(lens, max_len), ctx=ctx)
        for t in range(len(output)):
            acc += accuracy(output[t], label[:, t], mask[:, t])
            n += mask[:, t].sum().asscalar()
    return acc / n


def get_mask(lens, size):
    """Build a 0/1 mask matrix: row r holds lens[r] ones followed by zeros up to *size*."""
    return [[1 if pos < valid else 0 for pos in range(size)] for valid in lens]


def train(net, data_iter, out_model_file, ctx):
    """Train *net* with per-timestep softmax cross-entropy for 50 epochs.

    :param net: tagger returning a list of per-timestep (batch, n_tags) outputs
    :param data_iter: DataIter providing 'train' and 'dev' iterators
    :param out_model_file: unused (checkpoint saving is disabled below)
    :param ctx: device on which masks and length arrays are created
    """
    trainer = Trainer(net.collect_params(), 'sgd', {'learning_rate': 1e-2})
    loss_func = SoftmaxCrossEntropyLoss()

    for epoch in range(50):
        train_data = data_iter.get_iter('train')
        dev_data = data_iter.get_iter('dev')
        train_loss = 0
        train_acc = 0
        n = 0
        for i, (data, label, lens, tag_lens) in enumerate(train_data):
            max_len = max(lens)
            mask = nd.array(get_mask(lens, max_len), ctx=ctx)
            with autograd.record():
                # BUG FIX: the lens array is now created on ctx like data/mask;
                # it used to land on the default CPU context.
                output = net(data, max_len, nd.array(lens, ctx=ctx))
                # Sum the cross-entropy over every timestep of the batch.
                loss = loss_func(output[0], label[:, 0])
                for k in range(1, len(output)):
                    loss = loss + loss_func(output[k], label[:, k])
            loss.backward()

            trainer.step(BATCH_SIZE)
            train_loss += nd.mean(loss).asscalar()

            # Running masked accuracy over the epoch.
            for k in range(len(output)):
                train_acc += accuracy(output[k], label[:, k], mask[:, k])
                n += mask[:, k].sum().asscalar()
            if (i + 1) % 50 == 0:
                print(train_acc, n, train_acc / n)

        test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
              (epoch, train_loss / data_iter.train_size, train_acc / n, test_acc))
        # Checkpoint saving to out_model_file is intentionally disabled.


def train_lstm_tagger(text_file, out_model_file):
    """Read word|tag training data, build a BiLSTM-CRF tagger, and train it on CPU."""
    data, tags, w2i, t2i = read_train_data(text_file)
    data_len = len(data)
    # The <unk> ids double as padding values for words and tags in DataIter.
    unk = w2i['<unk>']
    unk_tag = t2i['<unk>']
    ctx = mx.cpu()
    print(data_len)
    train_data_iter = DataIter(data, tags, BATCH_SIZE, unk, unk_tag, ctx=ctx)

    vocab_size = len(w2i)
    emb_size = 128
    # NOTE(review): n_tags excludes one entry of t2i, yet tag ids produced by
    # read_train_data can reach len(t2i) - 1 — confirm this off-by-one is intended.
    n_tags = len(t2i) - 1
    hidden_size = 64
    net = BiLstmCRFTagger(vocab_size, n_tags, emb_size, hidden_size)
    net.initialize(ctx=ctx)
    train(net, train_data_iter, out_model_file, ctx=ctx)


def test_net():
    """Smoke-test: build a tiny tagger and an initial state (forward call disabled)."""
    tagger = BiLstmCRFTagger(10, 4, 8, 16)
    tagger.initialize()
    sample = nd.array([[1, 2, 3, 4], [4, 5, 0, 0]])
    state = tagger.init_state(2, ctx=mx.cpu())
    # print(tagger(sample, 4, nd.array([4, 2], dtype=np.float32), state))


def test_crf():
    """Smoke-test the CRF layer on random emissions."""
    seq_len, batch_size, num_tags = 3, 2, 5
    emissions = nd.random.uniform(-0.1, 0.1, shape=(seq_len, batch_size, num_tags))
    tags = nd.array([[0, 1], [2, 4], [3, 1]], dtype=np.int32)

    net = CRF(num_tags)
    net.initialize()
    print(net(emissions, tags))
    # BUG FIX: _compute_log_partition_function's second argument is a
    # (seq_len, batch_size) validity mask, not the tag matrix; pass all-ones
    # for fully valid sequences.
    print(net._compute_log_partition_function(emissions, nd.ones((seq_len, batch_size))))




if __name__ == '__main__':
    train_file = '../data/tags/train.txt'
    dev_file = '../data/tags/dev.txt'
    out_file = ''
    # Full training is currently disabled; only the smoke test runs.
    # train_lstm_tagger(train_file, out_file)
    test_net()
