import operator
import sys
import time
from functools import reduce
from itertools import accumulate

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

import config
import load_data
import var_cnn_rnn_model
from data_set import DataSet


class FlipGradientBuilder(object):
    """Callable that wraps a tensor in an identity op whose gradient is
    reversed (scaled by -l) on the backward pass.

    This is the gradient-reversal layer used for adversarial training:
    the forward value is unchanged, but gradients flowing back through it
    are negated. Each invocation registers its gradient function under a
    unique name so the builder can be applied multiple times in one graph.
    """

    def __init__(self):
        # Counter that makes every registered gradient name unique.
        self.num_calls = 0

    def __call__(self, x, l=1.0):
        # Unique registration name for this call's gradient function.
        grad_name = 'FlipGradient%d' % self.num_calls
        self.num_calls += 1

        @ops.RegisterGradient(grad_name)
        def _flip_gradient(unused_op, grad):
            # Backward pass: negate and scale the incoming gradient.
            return [tf.negative(grad) * l]

        graph = tf.get_default_graph()
        # Route the Identity op's gradient through our flipped version.
        with graph.gradient_override_map({"Identity": grad_name}):
            flipped = tf.identity(x)
        return flipped

class GenderDANNCRModel(var_cnn_rnn_model.CRModel):
    """Domain-adversarial (DANN-style) CNN+RNN emotion classifier trained
    to be invariant to speaker gender.

    The shared feature extractor (CNN + RNN, inherited from CRModel) feeds
    two heads: an emotion classifier and a gender classifier. The gender
    head receives the features through a gradient-reversal op
    (FlipGradientBuilder), so minimizing the gender loss pushes the
    extractor to *remove* gender information while the emotion head keeps
    it discriminative for emotions.

    Training proceeds in three phases (see `train`):
      1. emotion-only pretraining (`e_train_epoch` / `e_loss`),
      2. gender-head-only pretraining (`first_train_epoch` / `g_loss`),
      3. joint adversarial training (`train_epoch` / combined `loss`).
    """

    def __init__(self):
        # NOTE(review): the base-class __init__ is never called — presumably
        # CRModel only contributes methods (cnn/rnn/fc, get_session, get_lr,
        # weight_variable, ...); confirm it has no required constructor state.
        self.batch_size = config.batch_size
        # tf.Session handle; presumably created lazily by get_session().
        self.session = None
        # Dropout keep-probability for the fully connected layers.
        self.fc_dropout_kprob = tf.placeholder(tf.float32, shape=[], name='fc_keep_prob')
        # One learning-rate placeholder per training phase (joint / gender / emotion).
        self.learning_rate_ph = tf.placeholder(tf.float32, shape=[], name='learning_rate_ph')
        self.g_learning_rate_ph = tf.placeholder(tf.float32, shape=[], name='g_learning_rate_ph')
        self.e_learning_rate_ph = tf.placeholder(tf.float32, shape=[], name='e_learning_rate_ph')
        self.saver = None
        # Variable-length input features: [batch, time, feature_size].
        self.inputs_ph = tf.placeholder(tf.float32,
                                        shape=[None, None, config.feature_size],
                                        name='inputs_ph')
        self.seq_lens_ph = tf.placeholder(tf.int32, shape=[None], name='seq_lens_ph')
        # Emotion logits and (gradient-reversed) gender logits.
        self.logits, self.g_logits = self.model(self.inputs_ph, self.seq_lens_ph)
        self.n_classes = len(config.emos)
        self.label_ph = tf.placeholder(tf.int32, [None], name='label_ph')
        self.label_gender_ph = tf.placeholder(tf.int32, [None], name='label_gender_ph')
        # Per-example weights for the emotion loss (class balancing,
        # presumably — TODO confirm against load_data).
        self.loss_weight_ph = tf.placeholder(tf.float32, [None], name='loss_weight_ph')
        self.loss = self.get_loss()
        self.g_loss = self.get_g_loss()
        self.e_loss = self.get_e_loss()
        self.train_step = self.get_train_step()
        # NOTE(review): 'frist' is a typo for 'first'; kept as-is because
        # other code may reference this attribute name.
        self.frist_train_step = self.get_first_train_step()
        self.e_train_step = self.get_e_train_step()
        self.acc = self.get_acc()
        self.train_epoch_nums = config.train_epochs
        # self.train_batch_nums = config.train_batch_nums
        self.lrs = config.lrs
        self.log_file = sys.stdout
        self.start_time = time.time()
        # Global count of executed training batches (used for checkpoint steps).
        self.train_loop_num = 0
        loaded_data = load_data.load_data()
        self.d_set = DataSet(loaded_data=loaded_data, batch_size=self.batch_size)

    def gender_fc(self, inputs):
        """Two-layer gender head: [batch, 2*rnn_hidden_size] -> [batch, 2] logits.

        `inputs` is the gradient-reversed RNN feature vector; the head is a
        32-unit ReLU layer with dropout followed by a linear output layer.
        weight_variable / bias_variable are inherited from CRModel.
        """
        output_d = 2  # two gender classes
        # Bidirectional RNN output, hence hidden size * 2 — TODO confirm
        # the RNN in CRModel is bidirectional.
        inputs_d = config.rnn_hidden_size * 2
        # with tf.variable_scope('g_classifier'):
        with tf.name_scope('g_fc1'):
            w_fc1 = self.weight_variable([inputs_d, 32])
            b_fc1 = self.bias_variable([32])
            h_fc1 = tf.nn.relu(tf.matmul(inputs, w_fc1) + b_fc1)
        with tf.name_scope('g_dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.fc_dropout_kprob)
        with tf.name_scope('g_fc2'):
            w_fc2 = self.weight_variable([32, output_d])
            b_fc2 = self.bias_variable([output_d])
            g_logits = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        return g_logits

    def model(self, inputs, seq_lens):
        """Builds the shared extractor plus both heads.

        Returns:
            (logits, g_logits): emotion logits from the plain features, and
            gender logits computed on gradient-reversed features so that the
            gender loss adversarially trains the extractor.
        """
        with tf.name_scope('feature_extractor'):
            h_cnn, seq_lens = self.cnn(inputs, seq_lens)
            h_rnn = self.rnn(h_cnn, seq_lens)
        with tf.name_scope('emotion_classifier'):
            logits = self.fc(h_rnn)
        with tf.name_scope('flip_grad_op'):
            # Gradient-reversal: identity forward, negated gradient backward.
            flip_gradient = FlipGradientBuilder()
            feat = flip_gradient(h_rnn)
        with tf.name_scope('gender_classifier'):
            g_logits = self.gender_fc(feat)
        return logits, g_logits

    def get_loss(self, gender_alpha=0.5):
        """Combined adversarial loss: (1 - alpha) * emotion + alpha * gender.

        The emotion term is weighted per example via loss_weight_ph; the
        gender term is unweighted. Note tf.losses.sparse_softmax_cross_entropy
        already returns a reduced scalar, so the reduce_mean calls are
        presumably no-ops kept for clarity — TODO confirm.
        """
        with tf.name_scope('loss_emo'):
            losses_emo = tf.losses.sparse_softmax_cross_entropy(labels=self.label_ph,
                                                                logits=self.logits,
                                                                weights=self.loss_weight_ph)
            loss_emo = tf.reduce_mean(losses_emo)
        with tf.name_scope('loss_gender'):
            losses_gender = tf.losses.sparse_softmax_cross_entropy(labels=self.label_gender_ph,
                                                                   logits=self.g_logits)
            loss_gender = tf.reduce_mean(losses_gender)
        loss = (1 - gender_alpha) * loss_emo + gender_alpha * loss_gender
        return loss

    def get_g_loss(self):
        """Gender-only cross-entropy loss (used to pretrain the gender head)."""
        with tf.name_scope('g_loss'):
            losses_gender = tf.losses.sparse_softmax_cross_entropy(labels=self.label_gender_ph,
                                                                   logits=self.g_logits)
            loss_gender = tf.reduce_mean(losses_gender)
        return loss_gender

    def get_e_loss(self):
        """Emotion-only weighted cross-entropy loss (used for pretraining)."""
        with tf.name_scope('e_loss'):
            losses_e = tf.losses.sparse_softmax_cross_entropy(labels=self.label_ph,
                                                              logits=self.logits,
                                                              weights=self.loss_weight_ph)
            loss_e = tf.reduce_mean(losses_e)
        return loss_e

    def train_epoch(self, train_iter, lr, train_k_prob):
        """Runs one joint adversarial epoch using the combined loss step.

        Args:
            train_iter: initializable TF dataset iterator over training batches.
            lr: learning rate fed to learning_rate_ph.
            train_k_prob: dropout keep-probability for the FC layers.
        """
        count = 0
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(train_iter.initializer)
        next_item = train_iter.get_next()
        # Drain the iterator; OutOfRangeError marks the end of the epoch.
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.train_print_batch_interval == 0:
                print('train: batch num %d ; time %g' % (count, time.time() - self.start_time))
            count += 1
            # Batch tuple layout (presumably: features, emotion labels,
            # sequence lengths, loss weights, <t[4] unused>, gender labels)
            # — TODO confirm against DataSet.
            batch_x = t[0]
            batch_y = t[1]
            batch_ts = t[2]
            batch_ws = t[3]
            batch_genders = t[5]
            self.train_step.run(feed_dict={
                self.inputs_ph: batch_x,
                self.seq_lens_ph: batch_ts,
                self.label_ph: batch_y,
                self.label_gender_ph: batch_genders,
                self.fc_dropout_kprob: train_k_prob,
                self.learning_rate_ph: lr,
                self.loss_weight_ph: batch_ws
            })
            self.train_loop_num += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None

    def e_train_epoch(self, train_iter, lr, train_k_prob):
        """Runs one emotion-only pretraining epoch (e_train_step / e_loss).

        Trains all trainable variables on the emotion loss; gender labels
        are not fed in this phase.
        """
        count = 0
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(train_iter.initializer)
        next_item = train_iter.get_next()
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.train_print_batch_interval == 0:
                print('e_train_epoch: batch num %d ; time %g' % (count, time.time() - self.start_time))
            count += 1
            batch_x = t[0]
            batch_y = t[1]
            batch_ts = t[2]
            batch_ws = t[3]
            self.e_train_step.run(feed_dict={
                self.inputs_ph: batch_x,
                self.seq_lens_ph: batch_ts,
                self.label_ph: batch_y,
                self.loss_weight_ph: batch_ws,
                self.e_learning_rate_ph: lr,
                self.fc_dropout_kprob: train_k_prob
            })
            self.train_loop_num += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None

    def first_train_epoch(self, train_iter, lr, train_k_prob):
        """Runs one gender-head-only pretraining epoch (frist_train_step).

        Only the gender classifier's variables are updated (see
        get_first_train_step); emotion labels are fed but only used through
        graph dependencies that require them — TODO confirm label_ph is
        actually needed by frist_train_step.
        """
        count = 0
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(train_iter.initializer)
        next_item = train_iter.get_next()
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.train_print_batch_interval == 0:
                print('train: batch num %d ; time %g' % (count, time.time() - self.start_time))
            count += 1
            batch_x = t[0]
            batch_y = t[1]
            batch_ts = t[2]
            batch_ws = t[3]
            batch_genders = t[5]
            self.frist_train_step.run(feed_dict={
                self.inputs_ph: batch_x,
                self.seq_lens_ph: batch_ts,
                self.label_ph: batch_y,
                self.label_gender_ph: batch_genders,
                self.fc_dropout_kprob: train_k_prob,
                self.g_learning_rate_ph: lr,
                self.loss_weight_ph: batch_ws
            })
            self.train_loop_num += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None

    def eval_acc_loss(self, iterator, is_first_train=False):
        """Evaluates emotion accuracy and a loss over the whole iterator.

        Accuracy is always the emotion-classification accuracy (self.acc).
        When is_first_train is True the reported loss is the gender-only
        g_loss; otherwise it is the combined adversarial loss.

        Returns:
            (acc, loss): batch-size-weighted averages over all batches.
        """
        count = 0
        acces = list()
        losses = list()
        weights = list()
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(iterator.initializer)
        next_item = iterator.get_next()
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.eval_print_batch_interval == 0:
                print('eval: batch num %d ; time %g' % (count, time.time() - self.start_time))
            batch_x = t[0]
            batch_y = t[1]
            batch_seq_lens = t[2]
            batch_w = t[3]
            batch_genders = t[5]
            # Dropout disabled for evaluation (keep prob 1).
            batch_acc = self.acc.eval(feed_dict={
                self.inputs_ph: batch_x,
                self.label_ph: batch_y,
                self.seq_lens_ph: batch_seq_lens,
                self.fc_dropout_kprob: 1
            }, session=self.get_session())
            if is_first_train:
                batch_loss = self.g_loss.eval(feed_dict={
                    self.inputs_ph: batch_x,
                    self.label_ph: batch_y,
                    self.label_gender_ph: batch_genders,
                    self.seq_lens_ph: batch_seq_lens,
                    self.fc_dropout_kprob: 1,
                    self.loss_weight_ph: batch_w
                }, session=self.get_session())
            else:
                batch_loss = self.loss.eval(feed_dict={
                    self.inputs_ph: batch_x,
                    self.label_ph: batch_y,
                    self.label_gender_ph: batch_genders,
                    self.seq_lens_ph: batch_seq_lens,
                    self.fc_dropout_kprob: 1,
                    self.loss_weight_ph: batch_w
                }, session=self.get_session())
            acces.append(batch_acc)
            losses.append(batch_loss)
            # Weight each batch's metric by its batch size (last batch may
            # be smaller).
            weights.append(len(batch_x))
            count += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None
        # Weighted averages over the epoch.
        acc = float(np.dot(acces, weights) / np.sum(weights))
        loss = float(np.dot(losses, weights) / np.sum(weights))
        return acc, loss

    def get_first_train_step(self):
        """Optimizer step that trains ONLY the gender head on g_loss.

        Variable selection relies on the gender-head variables carrying the
        'gender_classifier' name-scope prefix — presumably weight_variable /
        bias_variable use tf.Variable, whose names do pick up name scopes;
        TODO confirm (tf.name_scope does not affect tf.get_variable names).
        """
        optimizer_type = config.optimizer_type
        first_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "gender_classifier")

        # first_train_op = optimizer.minimize(cost, var_list=first_train_vars)
        # Defensive guard; in __init__ g_loss is built before this is called.
        if self.g_loss is None:
            self.g_loss = self.get_g_loss()
        with tf.name_scope('optimizer'):
            if optimizer_type.lower() == 'adam':
                train_step = tf.train.AdamOptimizer(self.g_learning_rate_ph).minimize(self.g_loss,
                                                                                      var_list=first_train_vars)
            elif optimizer_type.lower() == 'adadelta':
                train_step = tf.train.AdadeltaOptimizer(self.g_learning_rate_ph).minimize(
                    self.g_loss, var_list=first_train_vars)
            else:
                # Fallback for any unrecognized optimizer_type.
                train_step = tf.train.GradientDescentOptimizer(self.g_learning_rate_ph).minimize(
                    self.g_loss, var_list=first_train_vars)
        return train_step

    def get_e_train_step(self):
        """Optimizer step for emotion-only pretraining on e_loss.

        No var_list is given, so ALL trainable variables (extractor, emotion
        head, and gender head via the reversed gradient path) are updated.
        """
        optimizer_type = config.optimizer_type
        # Defensive guard; in __init__ e_loss is built before this is called.
        if self.e_loss is None:
            self.e_loss = self.get_e_loss()
        with tf.name_scope('optimizer'):
            if optimizer_type.lower() == 'adam':
                train_step = tf.train.AdamOptimizer(self.e_learning_rate_ph).minimize(self.e_loss)
            elif optimizer_type.lower() == 'adadelta':
                train_step = tf.train.AdadeltaOptimizer(self.e_learning_rate_ph).minimize(
                    self.e_loss)
            else:
                train_step = tf.train.GradientDescentOptimizer(self.e_learning_rate_ph).minimize(
                    self.e_loss)
        return train_step

    def g_get_lr(self, current_epoch):
        """return learning rate according to current batch step number"""
        # accumulate(...) yields cumulative epoch boundaries; each lr in
        # config.first_lrs applies until its boundary is reached.
        acc_train_epoch_nums = accumulate(config.first_train_epochs, operator.add)
        for lr, acc_train_epoch_num in zip(config.first_lrs, acc_train_epoch_nums):
            if current_epoch < acc_train_epoch_num:
                return lr
        # Past the last boundary: keep using the final learning rate.
        return config.first_lrs[-1]

    def e_get_lr(self, current_epoch):
        """return learning rate according to current batch step number"""
        # Same cumulative-boundary scheme as g_get_lr, for the emotion phase.
        acc_train_epoch_nums = accumulate(config.e_train_epochs, operator.add)
        for lr, acc_train_epoch_num in zip(config.e_lrs, acc_train_epoch_nums):
            if current_epoch < acc_train_epoch_num:
                return lr
        return config.e_lrs[-1]

    def train(self, start_i, sess):
        """Full three-phase training schedule.

        Phase 1: emotion-only epochs (e_train_epoch).
        Phase 2: gender-head-only epochs (first_train_epoch).
        Phase 3: joint adversarial epochs (train_epoch) with periodic
        evaluation and checkpointing of best-accuracy / best-loss models.

        Args:
            start_i: epoch index to resume phase 3 from.
            sess: active tf.Session used for saver checkpoints.
        """
        # reduce(add, xs, 0) == sum(xs): total epoch count for each phase.
        end_i = reduce((lambda _a, _b: _a + _b), self.train_epoch_nums, 0)
        first_end_i = reduce((lambda _a, _b: _a + _b), config.first_train_epochs, 0)
        e_end_i = reduce((lambda _a, _b: _a + _b), config.e_train_epochs, 0)
        train_k_prob = config.train_k_prob
        eval_internal = config.eval_internal
        persist_interval = config.persistent_internal
        persist_checkpoint_file = config.persist_checkpoint_file
        persist_bestacc_file = config.persist_bestacc_file
        persist_bestloss_file = config.persist_bestloss_file
        train_iter = self.d_set.train_gen()
        vali_iter = self.d_set.vali_gen()
        best_acc = 0
        best_loss = 100

        # Phase 1: emotion pretraining.
        for k in range(config.e_train_restart_epoch_i, e_end_i):
            e_lr = self.e_get_lr(k)
            self.e_train_epoch(train_iter, e_lr, train_k_prob)
            # NOTE(review): True makes eval report the gender loss even
            # though this phase trains only the emotion objective — confirm
            # this is intended.
            train_acc, train_loss = self.eval_acc_loss(train_iter, True)
            vali_acc, vali_loss = self.eval_acc_loss(vali_iter, True)
            print(
                'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g , best_acc %g' % (
                    k, train_acc, train_loss, vali_acc, vali_loss, best_acc))
            print(
                'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g' % (
                    k, train_acc, train_loss, vali_acc, vali_loss), file=self.log_file)

        # Phase 2: gender-head pretraining.
        for j in range(config.first_train_restart_epoch_i, first_end_i):
            f_lr = self.g_get_lr(j)
            self.first_train_epoch(train_iter, f_lr, train_k_prob)
            train_acc, train_loss = self.eval_acc_loss(train_iter, True)
            vali_acc, vali_loss = self.eval_acc_loss(vali_iter, True)
            print(
                'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g , best_acc %g' % (
                    j, train_acc, train_loss, vali_acc, vali_loss, best_acc))
            print(
                'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g' % (
                    j, train_acc, train_loss, vali_acc, vali_loss), file=self.log_file)

        # persist_bestloss_file = config.persist_bestloss_file
        # Phase 3: joint adversarial training with periodic eval/checkpoints.
        for i in range(start_i, end_i):
            # get_lr is inherited from CRModel — TODO confirm schedule source.
            lr = self.get_lr(i)
            self.train_epoch(train_iter, lr, train_k_prob)
            if i % eval_internal == 0:
                train_acc, train_loss = self.eval_acc_loss(train_iter)
                vali_acc, vali_loss = self.eval_acc_loss(vali_iter)
                # Track and persist the best-validation-accuracy and
                # best-validation-loss models separately.
                if vali_acc > best_acc:
                    best_acc = vali_acc
                    self.saver.save(sess, persist_bestacc_file)
                if vali_loss < best_loss:
                    best_loss = vali_loss
                    self.saver.save(sess, persist_bestloss_file)
                print(
                    'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g , best_acc %g' % (
                        i, train_acc, train_loss, vali_acc, vali_loss, best_acc))
                print(
                    'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g' % (
                        i, train_acc, train_loss, vali_acc, vali_loss), file=self.log_file)
            # NOTE(review): `i > persist_interval` skips the first periodic
            # checkpoint; possibly `i > 0` was intended — confirm.
            if i % persist_interval == 0 and i > persist_interval:
                self.saver.save(sess, persist_checkpoint_file, global_step=self.train_loop_num)
        # Final checkpoint after the last epoch.
        self.saver.save(sess, persist_checkpoint_file, global_step=self.train_loop_num)