import operator
import sys
import time
from functools import reduce
from itertools import accumulate

import numpy as np
import tensorflow as tf

import config
import load_data
from data_set import DataSet


class MyError(Exception):
    """Project-local exception that wraps an arbitrary payload value.

    The payload is kept on ``self.value`` and rendered via ``repr`` when
    the exception is converted to a string.
    """

    def __init__(self, value):
        """Store *value* as the error payload."""
        self.value = value

    def __str__(self):
        """Return the ``repr`` of the stored payload."""
        return '%r' % (self.value,)


class BaseModel:
    """Base class for a TF1 graph-mode sequence classifier.

    The entire graph (placeholders, logits, loss, optimizer, accuracy) is
    built eagerly in ``__init__``. Subclasses are expected to override
    ``model()`` to produce real logits; the implementation here is a stub
    that returns zeros. Session lifecycle is handled in ``run()``.
    """

    def __init__(self):
        # All hyperparameters come from the module-level `config`.
        self.batch_size = config.batch_size
        self.session = None  # opened in run(); get_session() raises until then
        # Scalar feed placeholders: dropout keep-probability and learning rate.
        self.fc_dropout_kprob = tf.placeholder(tf.float32, shape=[], name='fc_keep_prob')
        self.learning_rate_ph = tf.placeholder(tf.float32, shape=[], name='learning_rate_ph')
        self.saver = None  # tf.train.Saver, created in run() after the graph exists
        # Inputs are [batch, time, feature]; batch and time sizes are dynamic.
        self.inputs_ph = tf.placeholder(tf.float32,
                                        shape=[None, None, config.feature_size],
                                        name='inputs_ph')
        self.seq_lens_ph = tf.placeholder(tf.int32, shape=[None], name='seq_lens_ph')
        # model() is invoked here, so every placeholder it consumes must
        # already be defined above this line.
        self.logits = self.model(self.inputs_ph, self.seq_lens_ph)
        self.n_classes = len(config.emos)
        self.label_ph = tf.placeholder(tf.int32, [None], name='label_ph')
        self.loss_weight_ph = tf.placeholder(tf.float32, [None], name='loss_weight_ph')
        self.loss = self.get_loss()
        self.train_step = self.get_train_step()
        self.acc = self.get_acc()
        # Per-stage epoch counts; paired element-wise with self.lrs in get_lr().
        self.train_epoch_nums = config.train_epochs
        # self.train_batch_nums = config.train_batch_nums
        self.lrs = config.lrs
        self.log_file = sys.stdout  # redirected to config.out_put_log inside run()
        self.start_time = time.time()
        # Global count of trained batches; used as the checkpoint global_step.
        self.train_loop_num = 0
        loaded_data = load_data.load_data()
        self.d_set = DataSet(loaded_data=loaded_data, batch_size=self.batch_size)

    def get_session(self):
        """Return the active session; raise MyError if none has been opened."""
        if self.session is None:
            # self.session = tf.Session()
            raise MyError('session should be initialized!')
        return self.session

    def close_session(self):
        """Close the current session (if any) and reset it to None."""
        if not (self.session is None):
            self.session.close()
        self.session = None

    @staticmethod
    def weight_variable(shape):
        """Create a weight Variable initialized from a truncated normal (stddev 0.1)."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    @staticmethod
    def bias_variable(shape):
        """Create a bias Variable initialized to the constant 0.1."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    @staticmethod
    def conv2d(x, W):
        """2-D convolution with stride 1 and SAME padding."""
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    @staticmethod
    def maxpool_1x2(x):
        """Max-pool with a 1x2 window and 1x2 stride (halves the width axis)."""
        return tf.nn.max_pool(x, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')

    @staticmethod
    def conv_mask(seq_lens, max_len):
        """Build a float mask of shape [batch, max_len, 1, 1] from sequence lengths.

        The trailing singleton dims let the mask broadcast over conv feature maps.
        """
        mask = tf.cast(tf.sequence_mask(seq_lens, max_len), dtype=tf.float32)
        mask = tf.expand_dims(tf.expand_dims(mask, 2), 3)
        return mask

    def model(self, inputs, seq_lens):
        """Stub model returning all-zero logits of shape [batch_size, n_classes].

        Subclasses override this to build the real network from `inputs`
        (a [batch, time, feature] tensor) and `seq_lens`.
        """
        logits = tf.zeros((config.batch_size, len(config.emos)))
        return logits

    def get_loss(self):
        """Build the per-example-weighted sparse softmax cross-entropy loss."""
        with tf.name_scope('loss'):
            # NOTE(review): tf.losses.sparse_softmax_cross_entropy already
            # returns a weight-reduced scalar by default, so the reduce_mean
            # below is likely a no-op — confirm against the TF1 docs.
            losses = tf.losses.sparse_softmax_cross_entropy(labels=self.label_ph,
                                                            logits=self.logits,
                                                            weights=self.loss_weight_ph)
            loss = tf.reduce_mean(losses)
        return loss

    def get_train_step(self):
        """Build the train op for the optimizer named in config.optimizer_type.

        Supports 'adam' and 'adadelta' (case-insensitive); anything else
        falls back to plain gradient descent. The learning rate is fed at
        run time through learning_rate_ph.
        """
        optimizer_type = config.optimizer_type
        # Defensive: rebuild the loss if it has not been created yet.
        if self.loss is None:
            self.loss = self.get_loss()
        with tf.name_scope('optimizer'):
            if optimizer_type.lower() == 'adam':
                train_step = tf.train.AdamOptimizer(self.learning_rate_ph).minimize(self.loss)
            elif optimizer_type.lower() == 'adadelta':
                train_step = tf.train.AdadeltaOptimizer(self.learning_rate_ph).minimize(self.loss)
            else:
                train_step = tf.train.GradientDescentOptimizer(self.learning_rate_ph).minimize(
                    self.loss)
        return train_step

    def get_acc(self):
        """Build the batch accuracy op: mean of argmax(logits) == label."""
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(self.logits, axis=1, output_type=tf.int32),
                                          self.label_ph)
            correct_prediction = tf.cast(correct_prediction, tf.float32)
            accuracy = tf.reduce_mean(correct_prediction)
        return accuracy

    def eval_acc_loss(self, iterator):
        """Drain `iterator` once and return (accuracy, loss) weighted by batch size.

        Each batch tuple is indexed as (inputs, labels, seq_lens, weights, ...)
        — assumed layout based on the indices used here and in save_result;
        confirm against DataSet. Dropout is disabled (keep_prob = 1).
        """
        count = 0
        acces = list()
        losses = list()
        weights = list()
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(iterator.initializer)
        next_item = iterator.get_next()
        # OutOfRangeError marks iterator exhaustion; t = None ends the loop.
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.eval_print_batch_interval == 0:
                print('eval: batch num %d ; time %g' % (count, time.time() - self.start_time))
            batch_x = t[0]
            batch_y = t[1]
            batch_seq_lens = t[2]
            batch_w = t[3]
            # NOTE(review): acc and loss are evaluated in two separate calls,
            # i.e. two forward passes per batch; a single sess.run of both
            # tensors would halve the work — confirm before changing.
            batch_acc = self.acc.eval(feed_dict={
                self.inputs_ph: batch_x,
                self.label_ph: batch_y,
                self.seq_lens_ph: batch_seq_lens,
                self.fc_dropout_kprob: 1
            }, session=self.get_session())
            batch_loss = self.loss.eval(feed_dict={
                self.inputs_ph: batch_x,
                self.label_ph: batch_y,
                self.seq_lens_ph: batch_seq_lens,
                self.fc_dropout_kprob: 1,
                self.loss_weight_ph: batch_w
            }, session=self.get_session())
            acces.append(batch_acc)
            losses.append(batch_loss)
            # Weight each batch metric by its actual batch size (last batch
            # may be smaller).
            weights.append(len(batch_x))
            count += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None
        # Size-weighted averages over all batches.
        acc = float(np.dot(acces, weights) / np.sum(weights))
        loss = float(np.dot(losses, weights) / np.sum(weights))
        return acc, loss

    def get_lr(self, current_epoch):
        """Return the learning rate for `current_epoch`.

        self.train_epoch_nums holds per-stage epoch counts; their running
        sum gives each stage's end epoch, and the matching entry of
        self.lrs is returned for the first stage whose end has not been
        reached. Past the last stage, the final learning rate is reused.
        """
        acc_train_epoch_nums = accumulate(self.train_epoch_nums, operator.add)
        for lr, acc_train_epoch_num in zip(self.lrs, acc_train_epoch_nums):
            if current_epoch < acc_train_epoch_num:
                return lr
        return self.lrs[-1]

    def train_epoch(self, train_iter, lr, train_k_prob):
        """Run one full pass over `train_iter`, stepping the optimizer per batch.

        `lr` and `train_k_prob` are fed into learning_rate_ph and
        fc_dropout_kprob respectively; train_loop_num is advanced per batch.
        """
        count = 0
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(train_iter.initializer)
        next_item = train_iter.get_next()
        # Same iterator-drain protocol as eval_acc_loss.
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            if count % config.train_print_batch_interval == 0:
                print('train: batch num %d ; time %g' % (count, time.time() - self.start_time))
            count += 1
            batch_x = t[0]
            batch_y = t[1]
            batch_ts = t[2]
            batch_ws = t[3]
            self.train_step.run(feed_dict={
                self.inputs_ph: batch_x,
                self.seq_lens_ph: batch_ts,
                self.label_ph: batch_y,
                self.fc_dropout_kprob: train_k_prob,
                self.learning_rate_ph: lr,
                self.loss_weight_ph: batch_ws
            })
            self.train_loop_num += 1
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None

    def train(self, start_i, sess):
        """Train from epoch `start_i` up to the total configured epoch count.

        Periodically evaluates on train/validation sets, keeps separate
        best-accuracy and best-loss checkpoints, and writes interval plus
        final checkpoints via self.saver.
        """
        # Total number of epochs across all learning-rate stages.
        end_i = reduce((lambda _a, _b: _a + _b), self.train_epoch_nums, 0)
        train_k_prob = config.train_k_prob
        eval_internal = config.eval_internal
        persist_interval = config.persistent_internal
        persist_checkpoint_file = config.persist_checkpoint_file
        persist_bestacc_file = config.persist_bestacc_file
        persist_bestloss_file = config.persist_bestloss_file
        train_iter = self.d_set.train_gen()
        vali_iter = self.d_set.vali_gen()
        best_acc = 0
        best_loss = 100  # sentinel "worst" starting loss
        # persist_bestloss_file = config.persist_bestloss_file
        for i in range(start_i, end_i):
            lr = self.get_lr(i)
            self.train_epoch(train_iter, lr, train_k_prob)
            if i % eval_internal == 0:
                train_acc, train_loss = self.eval_acc_loss(train_iter)
                vali_acc, vali_loss = self.eval_acc_loss(vali_iter)
                if vali_acc > best_acc:
                    best_acc = vali_acc
                    self.saver.save(sess, persist_bestacc_file)
                if vali_loss < best_loss:
                    best_loss = vali_loss
                    self.saver.save(sess, persist_bestloss_file)
                print(
                    'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g , best_acc %g' % (
                        i, train_acc, train_loss, vali_acc, vali_loss, best_acc))
                print(
                    'epoch %d ; train_acc %g , train_loss %g ; vali_acc %g , vali_loss %g' % (
                        i, train_acc, train_loss, vali_acc, vali_loss), file=self.log_file)
            # NOTE(review): `i > persist_interval` skips the checkpoint at
            # i == persist_interval; possibly `i > 0` was intended — confirm.
            if i % persist_interval == 0 and i > persist_interval:
                self.saver.save(sess, persist_checkpoint_file, global_step=self.train_loop_num)
        # Always persist the final state after the loop.
        self.saver.save(sess, persist_checkpoint_file, global_step=self.train_loop_num)

    def save_result(self, iterator):
        """Run inference over `iterator` and dump results as .npy files.

        Saves ground-truth labels, argmax predictions, sequence lengths and
        sample ids to the paths configured in config.{gt,pr,ts,sids}_npy.
        Batch index 4 is taken as sample ids (index 3, the weights, is
        unused here) — assumed layout, confirm against DataSet.
        """
        gt_npy = config.gt_npy
        pr_npy = config.pr_npy
        ts_npy = config.ts_npy
        sids_npy = config.sids_npy
        g_ts = list()
        p_rs = list()
        sids = list()
        ts = list()
        sess = self.get_session()
        sess.run(tf.local_variables_initializer())
        sess.run(iterator.initializer)
        next_item = iterator.get_next()
        try:
            t = sess.run(next_item)
        except tf.errors.OutOfRangeError:
            t = None
        while t:
            batch_x = t[0]
            batch_gt = t[1]
            batch_seq_lens = t[2]
            batch_sids = t[4]
            batch_logits = self.logits.eval(feed_dict={
                self.inputs_ph: batch_x,
                self.seq_lens_ph: batch_seq_lens,
                self.fc_dropout_kprob: 1
            }, session=sess)
            # Predicted class = argmax over the class axis.
            batch_pr = np.argmax(batch_logits, 1)
            g_ts += list(batch_gt)
            p_rs += list(batch_pr)
            sids += list(batch_sids)
            ts += list(batch_seq_lens)
            try:
                t = sess.run(next_item)
            except tf.errors.OutOfRangeError:
                t = None
        g_ts_np = np.array(g_ts)
        p_rs_np = np.array(p_rs)
        sids_np = np.array(sids)
        ts_np = np.array(ts)
        np.save(gt_npy, g_ts_np)
        np.save(pr_npy, p_rs_np)
        np.save(ts_npy, ts_np)
        np.save(sids_npy, sids_np)

    def run(self):
        """Entry point: open a session, train or restore, then evaluate on test.

        With config.is_train set, trains from scratch (or resumes from
        config.restore_file when config.is_restore is set); otherwise just
        restores. Always finishes with a test-set evaluation and
        save_result(). Log output is teed to config.out_put_log.
        """
        # tf.reset_default_graph()
        with open(config.out_put_log, 'w') as out_put_log_f:
            self.log_file = out_put_log_f
            tf_config = tf.ConfigProto()
            tf_config.gpu_options.allow_growth = config.gpu_allow_growth
            with tf.Session(config=tf_config) as sess:
                self.session = sess
                self.saver = tf.train.Saver()
                # Write the graph for TensorBoard inspection.
                train_writer = tf.summary.FileWriter(config.tf_log_dir)
                train_writer.add_graph(tf.get_default_graph())
                # sess = self.session
                if config.is_train:
                    start_i = 0
                    if config.is_restore:
                        start_i = config.restart_epoch_i
                        self.saver.restore(self.session, config.restore_file)
                    else:
                        init = tf.global_variables_initializer()
                        self.session.run(init)
                    self.train(start_i, self.session)
                else:
                    self.saver.restore(self.session, config.restore_file)
                test_iter = self.d_set.test_gen()
                test_acc, test_loss = self.eval_acc_loss(test_iter)
                print('test_acc %g , test_loss %g' % (test_acc, test_loss))
                print('test_acc %g , test_loss %g' % (test_acc, test_loss), file=self.log_file)
                self.save_result(test_iter)


# def print_config(cfg_file='./config.py'):
#     with open(cfg_file, 'r') as cfg_f:
#         for line in cfg_f:
#             if '#' in line and '#' == line[0]:
#                 continue
#             if '=' in line:
#                 print(line)
