from collections import defaultdict
import math
import time
import mxnet as mx
import random
import numpy as np
from mxnet import nd
from mxnet import autograd
from mxnet import gluon
from mxnet.gluon import nn
import re

"""
CBOW
context words to predict one focus word
"""
# todo use Hierarchical Softmax to train word2vec


# length of the context window on each side of the focus word
N = 2

# the size of the word embedding vectors
EMB_SIZE = 128
# number of examples per training batch
BATCH_SIZE = 500


def accuracy(output, label):
    """Count correct predictions in a batch.

    :param output: NDArray of per-class scores, shape (batch, n_classes)
    :param label: NDArray of int labels, shape (batch,)
    :return: number of rows where argmax(output) == label, as a float
             (a sum over the batch, not an average)
    """
    predictions = output.argmax(axis=1).astype(np.int32)
    return nd.sum(predictions == label).asscalar()


def evaluate_accuracy(net, data_iter, data_len):
    """Run `net` over every batch of `data_iter` and return the fraction
    of correctly classified examples (correct count / data_len)."""
    correct = 0.0
    for batch, labels in data_iter:
        correct += accuracy(net(batch), labels)
    return correct / data_len


def train(net, data_iter, out_model_file, epochs=5, learning_rate=1e-2):
    """Train `net` with SGD and softmax cross-entropy loss.

    :param net: gluon Block mapping context-word id batches to vocab logits
    :param data_iter: DataIter providing 'train' and 'dev' splits
    :param out_model_file: checkpoint path prefix (checkpointing currently
                           disabled; kept for interface compatibility)
    :param epochs: number of passes over the training split
    :param learning_rate: SGD learning rate
    """
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': learning_rate})
    loss_func = gluon.loss.SoftmaxCrossEntropyLoss()

    for epoch in range(epochs):
        train_data = data_iter.get_iter('train')
        dev_data = data_iter.get_iter('dev')
        train_loss = 0.0
        train_acc = 0.0
        for data, label in train_data:
            with autograd.record():
                output = net(data)
                loss = loss_func(output, label)
            loss.backward()

            # DataIter always yields full batches (the last one is padded by
            # wrapping around), so BATCH_SIZE is the correct step normalizer.
            trainer.step(BATCH_SIZE)
            train_loss += nd.mean(loss).asscalar()
            train_acc += accuracy(output, label)

        test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
              (epoch, train_loss / data_iter.train_size,
               train_acc / data_iter.train_size, test_acc))


def get_cbow_data(words, window, pad):
    """Yield (focus_word, context_words) training pairs for CBOW.

    :param words: sentence as a list of integer word ids
    :param window: number of context positions on each side of the focus word
    :param pad: id used for positions falling outside the sentence
    :rtype: generator of (int, list[int]) tuples, one per word in `words`;
            each context list has exactly 2 * window entries
    """
    padding = [pad] * window
    buffer = padding + list(words) + padding
    for offset in range(len(words)):
        center = offset + window  # index of the focus word inside `buffer`
        context = (buffer[offset:center] +
                   buffer[center + 1:center + window + 1])
        yield words[offset], context


def read_train_data(text_file):
    """Read a whitespace-tokenized corpus and build CBOW training pairs.

    :param text_file: path to a UTF-8 text file, one sentence per line
    :return: (data, tag, w2i, t2i) where `data` is a list of context-word
             id lists (2 * N ids each), `tag` the matching focus-word ids,
             `w2i` a word->id map that falls back to <unk> for unseen
             words, and `t2i` an always-empty tag->id map kept for
             interface compatibility.
    """
    w2i = defaultdict(lambda: len(w2i))
    t2i = defaultdict(lambda: len(t2i))
    S = w2i["<s>"]      # sentence-boundary padding id
    UNK = w2i["<unk>"]  # id assigned to out-of-vocabulary words later on
    data = []
    tag = []
    # `with` closes the file deterministically (original leaked the handle)
    with open(text_file, encoding='utf-8') as f:
        for line in f:
            tokens = [w2i[t.strip()] for t in line.lower().split() if t.strip()]
            for w, context_words in get_cbow_data(tokens, N, S):
                data.append(context_words)
                tag.append(w)

    # freeze the vocabulary: from here on, unseen words map to <unk>
    # instead of growing the id space
    w2i = defaultdict(lambda: UNK, w2i)
    return data, tag, w2i, t2i


class EmbBOW(nn.Block):
    """CBOW classifier: embed the context-word ids, then predict the focus
    word with a two-layer MLP producing logits over the vocabulary."""

    def __init__(self, input_dim, emb_size, **kwargs):
        super(EmbBOW, self).__init__(**kwargs)
        # vocabulary-sized embedding table
        self.embedding = nn.Embedding(input_dim, emb_size)
        # hidden layer; gluon's Dense flattens non-batch dims by default,
        # so the (batch, 2N, emb) embeddings are concatenated per example
        self.dense = nn.Dense(256, activation='relu')
        # output logits over the whole vocabulary
        self.out = nn.Dense(input_dim)

    def forward(self, x, *args):
        embedded = self.embedding(x)
        hidden = self.dense(embedded)
        return self.out(hidden)


class DataIter:
    """Shuffles the examples once, splits them into train/dev index lists,
    and serves fixed-size NDArray batches from either split."""

    def __init__(self, data, tag, batch_size, split=0.8, ctx=mx.cpu()):
        self.data = data
        self.tag = tag
        self.batch_size = batch_size
        self.length = len(self.data)
        self.index = list(range(self.length))

        # one-time shuffle, then a fixed train/dev split of the indices
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[:self.train_size]
        self.dev_index = self.index[self.train_size:]
        self.ctx = ctx

    def data_iter(self, dataset_type='train'):
        """Yield (data, tag) NDArray batches — or data alone when no tags
        were given. Every batch has exactly `batch_size` rows; the leftover
        tail is topped up with indices wrapped from the start of the split."""
        if dataset_type == 'train':
            length, index = self.train_size, self.train_index
            # reshuffle the training order in place on every epoch
            random.shuffle(index)
        elif dataset_type == 'dev':
            length, index = self.dev_size, self.dev_index
        else:
            length, index = self.length, self.index

        n_batch = length // self.batch_size
        rest = (n_batch + 1) * self.batch_size - length

        batches = [index[b * self.batch_size:(b + 1) * self.batch_size]
                   for b in range(n_batch)]
        if rest < self.batch_size:
            # pad the final partial batch with the first `rest` indices;
            # when length divides evenly, rest == batch_size and we skip it
            batches.append(index[n_batch * self.batch_size:length] + index[:rest])

        for batch in batches:
            d = [self.data[i] for i in batch]
            if self.tag:
                t = [self.tag[i] for i in batch]
                yield (nd.array(d, dtype=np.int32, ctx=self.ctx),
                       nd.array(t, dtype=np.int32, ctx=self.ctx))
            else:
                yield nd.array(d, dtype=np.int32, ctx=self.ctx)

    def get_iter(self, dataset_type):
        """Alias for :meth:`data_iter`."""
        return self.data_iter(dataset_type)


def train_word2vec(text_file, out_model_file, w2i_file):
    """End-to-end CBOW training: read the corpus, build the net, train it.

    :param text_file: path to the training corpus (one sentence per line)
    :param out_model_file: checkpoint path prefix forwarded to train()
    :param w2i_file: path for persisting the vocabulary (currently unused;
                     kept for interface compatibility)
    """
    data, tag, w2i, t2i = read_train_data(text_file)
    print(len(data))

    # prefer the GPU but fall back to CPU instead of crashing when no
    # GPU is available
    try:
        ctx = mx.gpu()
        nd.zeros((1,), ctx=ctx)  # probe: raises if the GPU context is unusable
    except mx.base.MXNetError:
        ctx = mx.cpu()

    train_data_iter = DataIter(data, tag, BATCH_SIZE, ctx=ctx)

    emb_net = EmbBOW(len(w2i), EMB_SIZE)
    emb_net.initialize(ctx=ctx)
    train(emb_net, train_data_iter, out_model_file)


if __name__ == '__main__':
    # NOTE(review): out_model_file and w2i_file are passed as empty strings
    # because checkpointing / vocab persistence are currently disabled.
    train_word2vec('../data/ptb/train.txt', '', '')
