#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-3-22
import os
from multiprocessing import Pool

import numpy as np
import tensorflow as tf
from keras.preprocessing.sequence import pad_sequences

import utils
from hparams import Hparams

hparams = Hparams()
hp = hparams.parser.parse_args()  # hyperparameter namespace (argparse)
utils.save_hparams(hp, hp.log_dir)
logger = utils.get_logger(__name__, path=hp.log_dir)
logger.critical(vars(hp))
logger.info('loading test data.')

# Split 80% of GPU memory evenly across the hp.num_thread worker processes
# so the concurrent trainers spawned in __main__ can share one GPU.
ratio = (1.0 - 0.2) / hp.num_thread
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=ratio)
config = tf.ConfigProto(gpu_options=gpu_options)
# config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
# NOTE: deliberately imported *after* enable_eager_execution so the model
# module is loaded under eager mode; do not move this to the top of the file.
from model.JointModel import JointModel

tfe = tf.contrib.eager

# Test split is loaded once at module level and shared by every fold/worker.
test_x = np.load(os.path.join(hp.data_dir, 'test_x.npy'))
test_y = np.load(os.path.join(hp.data_dir, 'test_y.npy')).astype(np.int32)
if hp.num_class == 2:
    # Binary setup: label 3 maps to the positive class -- TODO confirm scheme
    test_y = np.where(test_y == 3, 1, 0)

# Lengths are clipped at seq_maxlen to match the padded/truncated sequences.
test_lens = [min(len(x), hp.seq_maxlen) for x in test_x]
test_x = pad_sequences(test_x, hp.seq_maxlen, 'float32', padding='post', truncating='post')
if hp.fake_task == 'POS':
    test_POS = np.load(os.path.join(hp.data_dir, 'POStest_x.npy'))
    test_POS = pad_sequences(test_POS, hp.seq_maxlen, 'float32', padding='post', truncating='post')


# test_y = to_categorical(test_y, 2, 'int32')


def train_domain(fold):
    """Train the domain classifier alone on one cross-validation fold.

    :param fold: fold index selecting the train/dev split and naming the
        checkpoint directory under hp.log_dir.
    Saves the model weights whenever dev accuracy exceeds 0.85 and beats the
    previous best; also logs test accuracy for each saved checkpoint.
    """
    logger.info('start domain training')
    logger.info('training fold {}'.format(fold))
    logger.info('loading train and dev data.')
    train_x, train_y, dev_x, dev_y = utils.get_single_fold(hp.data_dir, fold)
    if hp.fake_task == 'POS':
        train_POS, dev_POS = utils.get_POS_single_fold(hp.data_dir, fold)
    train_seq_lens = [min(len(x), hp.seq_maxlen) for x in train_x]
    train_x = pad_sequences(train_x, hp.seq_maxlen, 'float32', padding='post', truncating='post')
    dev_seq_lens = [min(len(x), hp.seq_maxlen) for x in dev_x]
    dev_x = pad_sequences(dev_x, hp.seq_maxlen, 'float32', padding='post', truncating='post')
    # Pair each sample with its length so shuffling keeps them aligned:
    # [(x, seq_len), ...]
    train_x = list(zip(train_x, train_seq_lens))
    dev_x = list(zip(dev_x, dev_seq_lens))

    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    model = JointModel(hp.seq_maxlen, hp.emb_size, hp.rnn_size, hp.rnn_keep_prob, hp.domain_keep_prob,
                       hp.use_self_att, hp.soft_att_size,
                       hp.fake_task, hp.num_class, hp.use_crf, ntags=hp.ntags)
    optimizer = tf.train.AdamOptimizer(hp.lr)

    # Metrics (re-initialised at the start of every epoch).
    dev_acc = tfe.metrics.Accuracy()
    test_acc = tfe.metrics.Accuracy()

    best_dev_acc = 0
    for i in range(hp.epochs):
        logger.info('epoch: %d' % (i + 1))
        dev_acc.init_variables()
        test_acc.init_variables()
        total_loss = 0.0
        batch_num = 0
        for x, y in utils.generate_batch(train_x, train_y, 32, shuffle=True, undersampling=False):
            # Unpack [(x, seq_len), ...] into parallel tuples; passed as a
            # list to match how train_joint/predict_domain call the model.
            batch_x, seq_lens = zip(*x)
            with tf.GradientTape() as tape:
                if hp.fake_task == 'POS':
                    # Model returns (tagging_pred, domain_pred) for POS runs.
                    _, pred = model([batch_x, seq_lens], is_training=True)
                else:
                    pred = model([batch_x, seq_lens], is_training=True)
                loss = model.domain_loss_func(pred, y)
            # BUG FIX: the gradient computation and optimizer step used to run
            # inside the tape context, which makes the tape record the
            # backward pass as well -- wasted memory and compute.
            g = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(g, model.trainable_variables), global_step=model.global_step)
            total_loss += loss
            batch_num += 1
        logger.info('training loss: %f' % (total_loss / batch_num))

        dev_batch_x, dev_batch_lens = zip(*dev_x)
        dev_pred = model.predict_domain([dev_batch_x, dev_batch_lens])
        dev_acc(dev_y.flatten(), dev_pred)
        if dev_acc.result().numpy() > 0.85 and dev_acc.result().numpy() > best_dev_acc:
            best_dev_acc = dev_acc.result().numpy()
            path = os.path.join(hp.log_dir, str(fold), '%.5f.ckpt' % best_dev_acc)
            model.save_weights(path, save_format='tf')
            logger.info('saving weights to %s' % path)
            logger.info('dev_acc: {}'.format(dev_acc.result().numpy()))
            test_pred = model.predict_domain([test_x, test_lens])
            test_acc(test_y.flatten(), test_pred)
            logger.info('test_acc: {}'.format(test_acc.result().numpy()))
        else:
            logger.info('dev_acc: {}'.format(dev_acc.result().numpy()))

    logger.info('finished.')


def _unpack_batch(batch):
    """Split zipped samples back into parallel tuples.

    Each element of *batch* is (x, seq_len, tag) when hp.fake_task == 'POS',
    otherwise (x, seq_len); in the latter case tag comes back as None.
    Fixes a latent crash: the original unconditionally unpacked three values.
    """
    if hp.fake_task == 'POS':
        x, seq_lens, tag = zip(*batch)
        return x, seq_lens, tag
    x, seq_lens = zip(*batch)
    return x, seq_lens, None


def train_joint(fold, two_step=False):
    """Jointly train the domain classifier and the fake (POS tagging) task.

    :param fold: fold index selecting the train/dev split and naming the
        checkpoint directory under hp.log_dir.
    :param two_step: when True, the first half of the epochs trains on the
        domain loss alone and the second half adds the tagging loss.
        NOTE(review): the original docstring claimed the opposite order
        (joint first, domain second); the code trains domain-only first.
    """
    logger.info('start domain training')
    logger.info('training fold {}'.format(fold))
    logger.info('loading train and dev data.')
    train_x, train_y, dev_x, dev_y = utils.get_single_fold(hp.data_dir, fold)
    if hp.num_class == 2:
        # Binary setup: label 3 maps to the positive class -- TODO confirm
        train_y = np.where(train_y == 3, 1, 0)
        dev_y = np.where(dev_y == 3, 1, 0)
    train_seq_lens = [min(len(x), hp.seq_maxlen) for x in train_x]
    train_x = pad_sequences(train_x, hp.seq_maxlen, 'float32', padding='post', truncating='post')

    dev_seq_lens = [min(len(x), hp.seq_maxlen) for x in dev_x]
    dev_x = pad_sequences(dev_x, hp.seq_maxlen, 'float32', padding='post', truncating='post')

    # Pair each sample with its length (and POS tags, if any) so shuffling
    # keeps them aligned: [(x, seq_len[, tag]), ...]
    if hp.fake_task == 'POS':
        train_POS, dev_POS = utils.get_POS_single_fold(hp.data_dir, fold)
        train_POS = pad_sequences(train_POS, hp.seq_maxlen, 'int32', padding='post', truncating='post')
        dev_POS = pad_sequences(dev_POS, hp.seq_maxlen, 'int32', padding='post', truncating='post')
        train_x = list(zip(train_x, train_seq_lens, train_POS))
        dev_x = list(zip(dev_x, dev_seq_lens, dev_POS))
    else:
        train_x = list(zip(train_x, train_seq_lens))
        dev_x = list(zip(dev_x, dev_seq_lens))

    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    model = JointModel(hp.seq_maxlen, hp.emb_size, hp.rnn_size, hp.rnn_keep_prob, hp.domain_keep_prob,
                       hp.use_self_att, hp.soft_att_size,
                       hp.fake_task, hp.num_class, hp.use_crf, ntags=hp.ntags)
    optimizer = tf.train.AdamOptimizer(hp.lr)

    # Metrics (re-initialised at the start of every epoch).
    dev_acc = tfe.metrics.Accuracy()
    test_acc = tfe.metrics.Accuracy()
    best_dev_acc = 0
    for i in range(hp.epochs):
        logger.info('epoch: %d' % (i + 1))
        dev_acc.init_variables()
        test_acc.init_variables()
        total_loss = 0.0
        batch_num = 0
        # With two_step, the first half of the epochs (or a 'None' fake task)
        # uses the domain loss only; otherwise the tagging loss is added.
        # The three near-identical GradientTape branches of the original are
        # folded into this single flag.
        with_tagging = (not two_step) or (i >= hp.epochs // 2 and hp.fake_task != 'None')
        for x, y in utils.generate_batch(train_x, train_y, 32, shuffle=True, undersampling=False):
            x, seq_lens, tag = _unpack_batch(x)
            with tf.GradientTape() as tape:
                tagging_pred, domain_pred = model([x, seq_lens], is_training=True)
                loss = model.domain_loss_func(domain_pred, y)
                if with_tagging:
                    loss += model.tag_loss_func(tagging_pred, tf.convert_to_tensor(tag, dtype=tf.int32),
                                                tf.convert_to_tensor(seq_lens, dtype=tf.int32))
            # BUG FIX: gradients are now computed outside the tape context so
            # the tape does not record (and pay for) the backward pass itself.
            g = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(g, model.trainable_variables), global_step=model.global_step)
            total_loss += loss
            batch_num += 1
        logger.info('training loss: %f' % (total_loss / batch_num))

        x, seq_lens, tag = _unpack_batch(dev_x)
        dev_pred = model.predict_domain([x, seq_lens])
        dev_acc(dev_y.flatten(), dev_pred)
        if dev_acc.result().numpy() > 0.83 and dev_acc.result().numpy() > best_dev_acc:
            best_dev_acc = dev_acc.result().numpy()
            path = os.path.join(hp.log_dir, str(fold), '%.5f.ckpt' % best_dev_acc)
            model.save_weights(path, save_format='tf')
            logger.info('saving weights to %s' % path)

            logger.info('dev_acc: {}'.format(dev_acc.result().numpy()))

            test_pred = model.predict_domain([test_x, test_lens])
            test_acc(test_y.flatten(), test_pred)
            logger.info('test_acc: {}'.format(test_acc.result().numpy()))
        else:
            logger.info('dev_acc: {}'.format(dev_acc.result().numpy()))
    logger.info('finished.')


if __name__ == '__main__':
    # train_domain()
    two_step = True
    logger.warning("The number of thread pool is %d" % hp.num_thread)
    pool = Pool(hp.num_thread)
    results = []
    # One async task per cross-validation fold; POS runs use joint training.
    for fold in range(10):
        if hp.fake_task == 'POS':
            results.append(pool.apply_async(train_joint, args=(fold, two_step)))
        else:
            results.append(pool.apply_async(train_domain, args=(fold,)))
        logger.info('fold {} started.'.format(fold))
    pool.close()
    # BUG FIX: apply_async silently swallows worker exceptions unless the
    # AsyncResult is fetched; .get() re-raises them here so failed folds are
    # logged instead of vanishing.
    for fold, res in enumerate(results):
        try:
            res.get()
        except Exception:
            logger.exception('fold %d failed.' % fold)
    pool.join()
