import mxnet as mx
import mxnet.gluon as gluon
from models.qa_net import QANet, mask_logits
# import models.qa_net as qa
from config import Config
from mxnet.gluon import Trainer
from mxnet import autograd
from mxnet.lr_scheduler import LRScheduler

from data import SimpleIter
from tqdm import tqdm
# from losses import SeqBinaryCrossEntropyLoss
import mxnet.ndarray as nd
import math
# from models.qa_net_hybrid import QANet
import argparse
import os


# Module-level configuration singleton (hyper-parameters, file paths, limits)
# shared by every routine below. Loaded once at import time.
CONFIG = Config()


class SimpleLRScheduler(LRScheduler):
    """Logarithmic warm-up learning-rate schedule.

    For the first 1000 updates the learning rate ramps up as
    ``lr * log2(num_update + 1) / log2(1000)`` (reaching ``lr`` at update
    999); afterwards it stays constant at ``lr``.
    """

    def __init__(self, learning_rate=0.1):
        super(SimpleLRScheduler, self).__init__()
        # Target (post-warmup) learning rate.
        self.learning_rate = learning_rate
        # Most recent warmup LR emitted; kept only for debugging/inspection.
        self.out = 0

    def __call__(self, num_update):
        """Return the learning rate to use at optimizer update `num_update`."""
        if num_update > 1000:
            return self.learning_rate
        # Compute the warmed-up LR once (the original evaluated this
        # expression twice, once for self.out and once for the return).
        self.out = self.learning_rate / math.log2(1000) * math.log2(num_update + 1)
        return self.out


def get_mask(lens, size):
    """Build a batch of 0/1 padding masks.

    Args:
        lens: iterable of per-example sequence lengths.
        size: padded width of each mask row.

    Returns:
        List of lists of ints; row j is 1 for the first ``lens[j]``
        positions and 0 for the remaining (padding) positions.
    """
    # Nested comprehension instead of the original append loop (same output).
    return [[1 if l > i else 0 for i in range(size)] for l in lens]


def get_mask2(lens, size):
    """Build one-hot rows marking each sequence's last valid position.

    Args:
        lens: iterable of per-example sequence lengths.
        size: padded width of each row.

    Returns:
        List of lists of ints; row j has a single 1 at index
        ``lens[j] - 1`` when that index fits in ``size``, else all zeros.
    """
    # Nested comprehension instead of the original append loop (same output).
    return [[1 if l == i + 1 else 0 for i in range(size)] for l in lens]


def train_epoch(net, epoch, train_iter, trainer, loss_func, ctx, lr_scheduler):
    """Run one training epoch over ``train_iter``.

    For each batch: truncates inputs to the configured paragraph/question
    limits, runs the forward pass, computes the start/end-position losses on
    masked logits, backprops, clips global gradient norm and steps the
    trainer. Every 50 batches prints diagnostic predictions and the running
    average loss.

    Args:
        net: QANet model; forward takes (p_chars, p_words, q_chars, q_words,
            p_lens, q_lens) and returns (start_logits, end_logits).
        epoch: zero-based epoch index (used only for the step offset).
        train_iter: batch iterator yielding 9-tuples (see unpacking below).
        trainer: gluon Trainer driving the parameter updates.
        loss_func: softmax cross-entropy with dense (one-hot) labels.
        ctx: mxnet context the mask arrays are created on.
        lr_scheduler: unused here; the trainer owns the schedule.
    """
    # NOTE(review): `step` is computed but never used below — presumably a
    # leftover from manual LR scheduling; kept for compatibility.
    step = train_iter.num_batches * epoch
    loss_iter = 0
    for i, data in tqdm(enumerate(train_iter), total=train_iter.num_batches):
        p_chars, q_chars, p_words, q_words, y1, y2, p_lens, q_lens, ids = data

        # Truncate every tensor to the configured max paragraph/question size.
        p_chars = p_chars[:, 0: CONFIG.para_limit, :]
        q_chars = q_chars[:, 0: CONFIG.ques_limit, :]
        p_words = p_words[:, 0: CONFIG.para_limit]
        q_words = q_words[:, 0: CONFIG.ques_limit]
        y1 = y1[:, 0: CONFIG.para_limit]
        y2 = y2[:, 0: CONFIG.para_limit]

        # Padding mask (1 inside the paragraph, 0 on padding); mask2 is
        # built but unused here.
        mask = nd.array(get_mask(p_lens, size=y1.shape[1]), ctx=ctx)
        mask2 = nd.array(get_mask2(p_lens, size=y1.shape[1]), ctx=ctx)

        with autograd.record():
            pred1, pred2 = net(p_chars,
                               p_words,
                               q_chars,
                               q_words,
                               p_lens,
                               q_lens)

            # Mask out padding positions before the cross-entropy so padded
            # tokens cannot win the softmax.
            loss = loss_func(mask_logits(pred1, mask), y1) + loss_func(mask_logits(pred2, mask), y2)
            loss.backward()

        # Global-norm gradient clipping over all trainable parameters.
        grads = [p.grad() for p in net.collect_params().values() if p.grad_req != 'null']
        gluon.utils.clip_global_norm(grads, CONFIG.grad_clip)
        loss_iter += nd.mean(loss).asscalar()

        trainer.step(CONFIG.batch_size, ignore_stale_grad=True)

        if (i + 1) % 50 == 0:
            print(loss)
            print(pred1)
            # BUG FIX: after batch index i, (i + 1) batches have been
            # accumulated into loss_iter, so average over (i + 1), not i.
            desc = '{} loss: {}'.format(i, loss_iter / (i + 1))
            prob1 = nd.softmax(pred1)
            prob2 = nd.softmax(pred2)
            print('prob1: ', nd.max(prob1, axis=1))
            print('y1 prob: ', nd.max(prob1 * y1, axis=1))
            print('prob2: ', nd.max(prob2, axis=1))
            print('y2 prob: ', nd.max(prob2 * y2, axis=1))
            pred1 = nd.argmax(pred1, axis=1)
            pred2 = nd.argmax(pred2, axis=1)
            print('pred1: ', pred1)
            print('truth1: ', nd.argmax(y1, axis=1))
            print('pred2: ', pred2)
            print('truth2: ', nd.argmax(y2, axis=1))
            print(desc)


def train():
    """Top-level training driver.

    Builds (or resumes) a QANet on GPU, freezes the embedding tables,
    sets up an Adam trainer with a warmup LR schedule, then trains for
    CONFIG.epochs epochs, checkpointing the parameters after each epoch.
    """
    ctx = mx.gpu()
    epochs = CONFIG.epochs
    net = QANet(CONFIG)
    model_file = 'qanet.params'
    # net.hybridize()

    # Resume from an existing checkpoint if present, otherwise Xavier-init.
    if os.path.exists(model_file):
        net.load_params(model_file, ctx=ctx)
    else:
        net.initialize(init=mx.init.Xavier(), ctx=ctx)

    # Freeze the pre-trained word/char embedding tables (no gradients).
    net.emb.emb_word.weight.grad_req = 'null'
    net.emb.emb_ch.weight.grad_req = 'null'

    # Warm the LR up to 1e-3 over the first 1000 updates.
    lr_scheduler = SimpleLRScheduler(1e-3)
    # NOTE(review): optimizer name is 'Adam' (capitalized) — presumably
    # mxnet resolves it case-insensitively; verify against the mxnet version.
    trainer = Trainer(net.collect_params(),
                      'Adam',
                      {
                          'lr_scheduler': lr_scheduler,
                          # 'learning_rate': 1e-3,
                          'wd': CONFIG.l2_norm,
                          'beta1': 0.8,
                          'beta2': 0.999,
                          'epsilon': 1e-7
                       }
                      )
    train_iter = SimpleIter(CONFIG.train_record_file + '.idx', CONFIG.train_record_file, ctx=ctx, batch_size=CONFIG.batch_size)

    dev_iter = SimpleIter(CONFIG.dev_record_file + '.idx', CONFIG.dev_record_file, ctx=ctx, batch_size=CONFIG.batch_size)
    # loss_func = SeqBinaryCrossEntropyLoss()

    # Dense (one-hot) labels, matching the y1/y2 arrays from the iterator.
    loss_func = mx.gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)
    for e in range(epochs):
        train_epoch(net, e, train_iter, trainer, loss_func, ctx, lr_scheduler)
        train_iter.reset()
        # lr_scheduler.learning_rate /= 2.0
        # eval_dev(net, dev_iter, loss_func, ctx=mx)
        dev_iter.reset()
        # Checkpoint after every epoch so a crash loses at most one epoch.
        net.save_params(model_file)


def eval_dev(net, data_iter, loss_func, ctx):
    """Compute and print the average loss over the dev set.

    Args:
        net: QANet model (same forward signature as used in train_epoch).
        data_iter: batch iterator yielding the same 9-tuples as training.
        loss_func: softmax cross-entropy with dense labels.
        ctx: mxnet context (currently unused here).
    """
    loss_iter = 0
    for i, data in tqdm(enumerate(data_iter), total=data_iter.num_batches):
        p_chars, q_chars, p_words, q_words, y1, y2, p_lens, q_lens, ids = data

        # Truncate to the configured limits, mirroring train_epoch so the
        # network sees identically shaped inputs at eval time.
        p_chars = p_chars[:, 0: CONFIG.para_limit, :]
        q_chars = q_chars[:, 0: CONFIG.ques_limit, :]
        p_words = p_words[:, 0: CONFIG.para_limit]
        q_words = q_words[:, 0: CONFIG.ques_limit]
        y1 = y1[:, 0: CONFIG.para_limit]
        y2 = y2[:, 0: CONFIG.para_limit]

        # BUG FIX: the forward pass takes six positional inputs (see
        # train_epoch); the original four-argument call would raise a
        # TypeError as soon as eval ran.
        pred1, pred2 = net(p_chars,
                           p_words,
                           q_chars,
                           q_words,
                           p_lens,
                           q_lens)
        loss = loss_func(pred1, y1) + loss_func(pred2, y2)
        loss_iter += nd.mean(loss).asscalar()
    print('dev loss: ', loss_iter / data_iter.num_batches)


if __name__ == '__main__':
    # TODO: add argparse-based CLI options (argparse is already imported).
    train()
    # BUG FIX: removed stray `nd.batch_dot()` — called with no arguments it
    # raised a TypeError immediately after training finished.
