import mxnet as mx
import mxnet.ndarray as nd
from mxnet.gluon import nn
import mxnet.gluon as gluon
import mxnet.autograd as autograd
from collections import defaultdict
import random
# import pickle
import numpy as np


BATCH_SIZE = 20          # samples per training batch; also passed to trainer.step()
EMB_SIZE = 300           # word-embedding dimensionality
PADDING_SIZE = EMB_SIZE  # sequences padded/cut to this length, so the embedded input is square (300 x 300)


class TextClassifier(nn.Block):
    """CNN text classifier: embedding -> single-channel 2-D conv -> pool -> dense.

    Each input sample is a fixed-length sequence of token ids; the embedding
    turns it into a (seq_len, emb_size) matrix that is treated as a
    one-channel image for the convolution.

    Parameters:
        vocab_size: size of the embedding vocabulary.
        emb_size: embedding dimensionality.
        num_classes: number of output classes (logit width of the last Dense).
    """

    def __init__(self, vocab_size, emb_size, num_classes):
        super(TextClassifier, self).__init__()
        self.emb = nn.Embedding(vocab_size, emb_size)
        # 3 output channels, 3x3 kernel; padding=1 preserves spatial size.
        self.conv1 = nn.Conv2D(3, 3, padding=1)
        self.pool1 = nn.MaxPool2D(2)
        self.flatten = nn.Flatten()
        self.linear = nn.Dense(num_classes)

    def forward(self, words):
        # words: (batch, seq_len) int ids -> (batch, seq_len, emb_size).
        x = self.emb(words)
        # Insert a channel axis so Conv2D sees NCHW: (batch, 1, seq_len, emb_size).
        # expand_dims is the idiomatic, equivalent form of the original
        # single-tensor nd.stack(x, axis=1).
        x = x.expand_dims(axis=1)

        x = self.conv1(x)
        x = self.pool1(x)
        x = self.flatten(x)
        # Raw logits: SoftmaxCrossEntropyLoss applies softmax internally.
        x = self.linear(x)
        return x


def accuracy(output, label):
    """Count correct predictions in a batch.

    `output` holds per-class scores (batch, num_classes); the prediction is
    the argmax, cast to uint8 to match the label dtype produced by DataIter.
    Returns the number of matches as a Python scalar.
    """
    predicted = output.argmax(axis=1).astype(np.uint8)
    matches = predicted == label
    return nd.sum(matches).asscalar()


def evaluate_accuracy(net, data_iter, data_len):
    """Average accuracy of `net` over every (data, label) batch in `data_iter`."""
    correct = 0
    for batch, labels in data_iter:
        correct += accuracy(net(batch), labels)
    return correct / data_len


def train(net, data_iter, out_model_file):
    """Train `net` for 5 epochs with SGD + softmax cross-entropy.

    data_iter: a DataIter exposing get_iter('train'/'dev') plus
    train_size / dev_size attributes.
    out_model_file: currently unused — the checkpoint line is commented out.
    """
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1e-2})
    loss_func = gluon.loss.SoftmaxCrossEntropyLoss()

    for epoch in range(5):
        # Fresh generators each epoch; the train split is reshuffled inside DataIter.
        train_data = data_iter.get_iter('train')
        dev_data = data_iter.get_iter('dev')
        train_loss = 0
        train_acc = 0
        test_acc = 0
        for i, (data, label) in enumerate(train_data):
            with autograd.record():
                output = net(data)
                loss = loss_func(output, label)
            loss.backward()

            # step(BATCH_SIZE) normalizes the accumulated gradient by the batch size.
            trainer.step(BATCH_SIZE)
            train_loss += nd.mean(loss).asscalar()
            if i % 50 == 0:
                # Periodic debug dump of raw outputs / labels / loss.
                # print(data)
                print('output: ', output)
                print('label: ', label)
                print('out: ', output.argmax(axis=1))
                print(loss)
                # print(t2i)
            train_acc += accuracy(output, label)

        # NOTE(review): "Test acc" is actually measured on the dev split.
        test_acc = evaluate_accuracy(net, dev_data, data_iter.dev_size)
        print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
              (epoch, train_loss / data_iter.train_size, train_acc / data_iter.train_size, test_acc))
        # net.save_params(net.collect_params().save(out_model_file + '_' + str(epoch)))


def test():
    """Smoke-test TextClassifier on a batch of two identical padded sequences."""
    net = TextClassifier(500, 20, 10)
    net.initialize()
    sample = [1, 2, 3, 4, 5] + [0] * 15
    print(net(nd.array([sample, sample])))


# the input data is split by tab
def read_train_data(text_file):
    """Read tab-separated "tag<TAB>text" lines, building vocab/tag maps on the fly.

    Every non-whitespace character of the text becomes one token (no word
    segmentation).  Lines whose tag field contains 'http' and malformed
    lines are skipped.

    Returns:
        data: list of token-id lists, one per kept line.
        tag:  list of tag ids, parallel to data.
        w2i:  word->id map, frozen so unseen words map to <unk>.
        t2i:  tag->id map (still a growing defaultdict).
    """
    w2i = defaultdict(lambda: len(w2i))
    t2i = defaultdict(lambda: len(t2i))
    S = w2i["<s>"]      # reserve id 0 for the sentence marker
    UNK = w2i["<unk>"]  # reserve id 1 for unknown words
    data = []
    tag = []
    # Use `with` so the file handle is closed (the original leaked it).
    with open(text_file, encoding='utf-8') as f:
        for l in f:
            d = l.split('\t')
            if len(d) != 2:
                continue
            if 'http' in d[0]:
                continue
            # Character-level tokens; strip() drops whitespace chars like '\n'.
            data.append([w2i[c.strip()] for c in d[1] if c.strip()])
            tag.append(t2i[d[0].strip()])
    # Freeze the vocab: later lookups of unseen words return UNK instead of growing.
    w2i = defaultdict(lambda: UNK, w2i)
    return data, tag, w2i, t2i


def read_dev_data(text_file, w2i, t2i):
    """Read tab-separated "tag<TAB>text" dev lines using existing id maps.

    w2i/t2i come from read_train_data; unseen words map to <unk> via the
    frozen w2i defaultdict.  Returns (data, tag) parallel lists.
    """
    data = []
    tag = []
    # Explicit utf-8 to match read_train_data (the original relied on the
    # platform default encoding), and `with` so the handle is closed.
    with open(text_file, encoding='utf-8') as f:
        for l in f:
            d = l.split('\t')
            if len(d) != 2:
                continue
            if 'http' in d[0]:
                continue
            data.append([w2i[c.strip()] for c in d[1] if c.strip()])
            tag.append(t2i[d[0].strip()])

    return data, tag


def read_data(text_file, w2i):
    """Read unlabeled text: one token-id list per line, one id per
    non-whitespace character, looked up through w2i.

    Opens with explicit utf-8 (matching read_train_data) and closes the
    handle via `with` — the original leaked it and used the platform
    default encoding.
    """
    data = []
    with open(text_file, encoding='utf-8') as f:
        for l in f:
            data.append([w2i[c.strip()] for c in l if c.strip()])

    return data


def padding(d, padding_size, pad):
    """Return `d` truncated or right-padded with `pad` to exactly
    `padding_size` elements (always a new list)."""
    clipped = d[:padding_size]
    return clipped + [pad] * max(0, padding_size - len(clipped))


class DataIter:
    """Batched iterator over (data, tag) with a random train/dev split.

    Pads every sequence to `padding_size` with `pad`, yields NDArray
    batches of `batch_size`.  The split is made once at construction by
    shuffling index positions; `split` is the train fraction.
    """

    def __init__(self, data, tag, batch_size, padding_size, pad, split=0.8, ctx=mx.cpu()):
        self.data = data
        self.tag = tag
        self.batch_size = batch_size
        self.padding_size = padding_size
        self.pad = pad
        self.length = len(self.data)
        self.index = list(range(self.length))

        # random split train dev
        random.shuffle(self.index)
        self.train_size = int(self.length * split)
        self.dev_size = self.length - self.train_size
        self.train_index = self.index[0: self.train_size]
        self.dev_index = self.index[self.train_size:]
        self.ctx = ctx

    def data_iter(self, dataset_type='train'):
        """Generator of batches for 'train', 'dev', or anything else = all data.

        The train index list is reshuffled on every call (in place), so each
        epoch sees a different batch order; dev order is fixed.
        """
        if dataset_type == 'train':
            length = self.train_size
            index = self.train_index
            random.shuffle(index)
        elif dataset_type == 'dev':
            length = self.dev_size
            index = self.dev_index
        else:
            length = self.length
            index = self.index

        # print(index)
        n_batch = length // self.batch_size
        # `rest` = how many extra items are needed to fill one more full batch.
        rest = (n_batch + 1) * self.batch_size - length

        batch_index = [index[i*self.batch_size:(i+1)*self.batch_size] for i in range(n_batch)]
        # If there is a partial tail (rest < batch_size), top it up by
        # re-using the first `rest` indices, so every batch is full-sized.
        # NOTE(review): when length < batch_size this duplicates samples.
        if rest < self.batch_size:
            batch_index.append(index[self.batch_size * n_batch:length] + index[0:rest])
        for bi in batch_index:
            d = [padding(self.data[i], self.padding_size, self.pad) for i in bi]
            if self.tag:
                # Labels as uint8 to match accuracy()'s comparison dtype.
                t = [self.tag[i] for i in bi]
                yield nd.array(d, dtype=np.int32, ctx=self.ctx), nd.array(t, dtype=np.uint8, ctx=self.ctx)
            else:
                # Unlabeled mode: data batches only.
                yield nd.array(d, dtype=np.int32, ctx=self.ctx)

    def get_iter(self, dataset_type):
        # Thin alias kept for the train() call sites.
        return self.data_iter(dataset_type)


def train_text_classifier(text_file, out_model_file, w2i_file):
    """End-to-end entry point: read training data, build the net, train it.

    w2i_file is currently unused (the pickle dump is commented out);
    out_model_file is forwarded to train().
    """
    data, tag, w2i, t2i = read_train_data(text_file)
    # print(t2i)
    # pickle.dump(w2i, open(w2i_file, 'w+'))
    ctx = mx.cpu()
    unk = w2i['<unk>']
    print(len(data))
    train_data_iter = DataIter(data, tag, BATCH_SIZE, PADDING_SIZE, unk, ctx=ctx)

    text_net = TextClassifier(len(w2i), EMB_SIZE, len(t2i))
    text_net.initialize(ctx=ctx)
    train(text_net, train_data_iter, out_model_file)


if __name__ == '__main__':
    import sys
    # Need the script name plus 4 args (input, flag, w2i pickle, model path).
    # The original checked `len(sys.argv) < 2`, so supplying 2-4 arguments
    # crashed with an IndexError on sys.argv[2..4].
    if len(sys.argv) < 5:
        input_file = '/data/hime/alpha/data/smzdm/class1_text.txt'
        model_file = '/home/alpha/tmp/'
        w2i_pkl_file = '/home/alpha/tmp/t.pkl'
    else:
        input_file = sys.argv[1]
        flag = sys.argv[2]   # train or eval
        w2i_pkl_file = sys.argv[3]
        model_file = sys.argv[4]

    # Signature is (text_file, out_model_file, w2i_file); the original call
    # passed (input_file, w2i_pkl_file, model_file), swapping the last two.
    train_text_classifier(input_file, model_file, w2i_pkl_file)
    # for d in train_data_iter:
    #     print(d)
    #     break



