# coding=utf-8
import os
import time
import tensorflow as tf
from tqdm import tqdm
from dataflow import dlogger
import horovod.tensorflow as hvd

class HTrainer:
    """Synchronous data-parallel trainer skeleton built on Horovod + TensorFlow 2.

    Subclasses implement ``build_model``, ``build_optimizer``, ``compute_loss``
    and ``compute_accuracy``.  Every rank runs the train/eval loops; global
    rank 0 additionally owns the coordinator duties: logging, TensorBoard
    summaries, checkpointing and the final SavedModel export.
    """

    def __init__(self, model_dir, max_to_keep=5, **kwargs):
        """Create the model/optimizer on every rank; set up rank-0 bookkeeping.

        Args:
            model_dir: root directory for logs, summaries, checkpoints, export.
            max_to_keep: how many checkpoints the CheckpointManager retains.
            **kwargs: forwarded to build_model() and build_optimizer().
        """
        self.model = self.build_model(**kwargs)
        self.optimizer = self.build_optimizer(**kwargs)
        self.first_batch = True

        # Coordinator duties belong to the single *global* rank 0.  Gating on
        # local_rank() (as the original did) is 0 once per node, so multi-node
        # jobs would write duplicate logs/checkpoints.
        if hvd.rank() == 0:
            self.log = dlogger(log_path=model_dir, app_name='trainer')

            if not tf.io.gfile.exists(model_dir):
                tf.io.gfile.makedirs(model_dir)

            train_dir = os.path.join(model_dir, 'summaries', 'train')
            test_dir = os.path.join(model_dir, 'summaries', 'test')

            self.train_summary_writer = tf.summary.create_file_writer(train_dir, flush_millis=10000, name="train")
            self.test_summary_writer = tf.summary.create_file_writer(test_dir, flush_millis=10000, name="test")

            checkpoint_dir = os.path.join(model_dir, 'checkpoints')
            self.checkpoint = tf.train.Checkpoint(model=self.model, optimizer=self.optimizer)
            self.manager = tf.train.CheckpointManager(self.checkpoint,
                                                      directory=checkpoint_dir,
                                                      checkpoint_name="model.ckpt",
                                                      max_to_keep=max_to_keep)
            # restore(None) is a documented no-op, so a fresh run is fine.
            self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

            self.export_path = os.path.join(model_dir, 'export')
            self.log.info("=====================> init trainer ====================>")

    def build_model(self, **kwargs):
        """Return the model to train.  Must be overridden."""
        raise NotImplementedError

    def build_optimizer(self, **kwargs):
        """Return the optimizer.  Must be overridden.

        NOTE(review): with Horovod the learning rate is conventionally scaled
        by hvd.size() here — confirm subclasses do this.
        """
        raise NotImplementedError

    def compute_loss(self, ytrue, ypred):
        """Return a scalar loss for one batch.  Must be overridden."""
        raise NotImplementedError

    def compute_accuracy(self, ytrue, ypred):
        """Return a scalar accuracy for one batch.  Must be overridden."""
        raise NotImplementedError

    def preproc(self, images, labels, training=None):
        """Optional per-batch preprocessing hook; identity by default."""
        return images, labels

    @tf.function
    def __train_step(self, images, labels, first_batch=False):
        """One forward/backward step with Horovod-averaged gradients.

        ``first_batch`` is a Python bool argument, so tf.function traces this
        twice (with and without the broadcast ops) — the documented Horovod
        pattern.  The original read/mutated a ``self.first_batch`` flag inside
        the traced function, which is evaluated at trace time only, so the
        broadcast was baked into every traced step.
        """
        with tf.GradientTape() as tape:
            ypred = self.model(images, training=True)
            loss_value = self.compute_loss(labels, ypred)
        # Wrap the tape so gradients are allreduced across workers.
        tape = hvd.DistributedGradientTape(tape)
        grads = tape.gradient(loss_value, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

        if first_batch:
            # Broadcast rank 0's initial state after the first step so all
            # workers start identical.  `model.variables` (not only the
            # trainables) also syncs non-trainable state such as BatchNorm
            # moving statistics.
            hvd.broadcast_variables(self.model.variables, root_rank=0)
            hvd.broadcast_variables(self.optimizer.variables(), root_rank=0)

        accuracy = self.compute_accuracy(labels, ypred)
        return loss_value, accuracy

    @tf.function
    def __test_step(self, images, labels):
        """Forward pass only; return (loss, accuracy) for one eval batch."""
        ypred = self.model(images, training=False)
        loss_value = self.compute_loss(labels, ypred)
        acc_value = self.compute_accuracy(labels, ypred)
        return loss_value, acc_value

    def fit(self, train_dataset, test_dataset, train_epoch):
        """Run ``train_epoch`` epochs of train + eval; export on rank 0 at the end.

        NOTE(review): the original iterated ``dataset.take(hvd.local_rank())``,
        which gives rank 0 *zero* batches and rank k only the first k batches.
        Each worker now consumes its whole dataset; callers are expected to
        shard per rank, e.g. ``dataset.shard(hvd.size(), hvd.rank())`` — confirm.
        """
        is_chief = hvd.rank() == 0
        self.first_batch = True

        for epoch in range(train_epoch):
            # ---------------- train ----------------
            # Accumulators are reset every epoch.  The original never reset the
            # train totals, so from epoch 2 onward they started from the
            # previous epoch's *test* totals.
            total_loss, total_acc, num_batches = 0.0, 0.0, 0.0
            for images, labels in train_dataset:
                images, labels = self.preproc(images, labels, True)
                loss, accuracy = self.__train_step(images, labels, self.first_batch)
                self.first_batch = False
                total_loss += loss
                total_acc += accuracy
                num_batches += 1.0

            # Every rank must take part in the allreduces, chief or not.
            sum_loss = hvd.allreduce(tf.constant(float(total_loss), tf.float32), average=False)
            sum_acc = hvd.allreduce(tf.constant(float(total_acc), tf.float32), average=False)
            sum_bs = hvd.allreduce(tf.constant(num_batches, tf.float32), average=False)

            if is_chief:
                # Metrics are reduced globally but reported only on rank 0.
                iloss = float(sum_loss / sum_bs)
                iacc = float(sum_acc / sum_bs)
                self.log.info("train epoch: %d loss: %0.6f acc: %0.6f" % (epoch + 1, iloss, iacc))
                print("train epoch: %d loss: %0.6f acc: %0.6f" % (epoch + 1, iloss, iacc))
                with self.train_summary_writer.as_default():
                    # tf.summary.scalar is the public API for scalar curves
                    # (the original used the low-level tf.summary.write).
                    tf.summary.scalar("loss", iloss, step=epoch)
                    tf.summary.scalar('accuracy', iacc, step=epoch)

                # Save a checkpoint once per epoch.
                self.manager.save(checkpoint_number=epoch)

            # ---------------- evaluate ----------------
            total_loss, total_acc, num_batches = 0.0, 0.0, 0.0
            for images, labels in test_dataset:
                images, labels = self.preproc(images, labels, False)
                loss, accuracy = self.__test_step(images, labels)
                total_loss += loss
                total_acc += accuracy
                num_batches += 1.0

            sum_loss = hvd.allreduce(tf.constant(float(total_loss), tf.float32), average=False)
            sum_acc = hvd.allreduce(tf.constant(float(total_acc), tf.float32), average=False)
            sum_bs = hvd.allreduce(tf.constant(num_batches, tf.float32), average=False)

            if is_chief:
                iloss = float(sum_loss / sum_bs)
                iacc = float(sum_acc / sum_bs)
                self.log.info("test epoch: %d loss: %0.6f acc: %0.6f" % (epoch + 1, iloss, iacc))
                print("test epoch: %d loss: %0.6f acc: %0.6f" % (epoch + 1, iloss, iacc))
                with self.test_summary_writer.as_default():
                    tf.summary.scalar("loss", iloss, step=epoch)
                    tf.summary.scalar('accuracy', iacc, step=epoch)

        # Export the trained model once.  The original called the undefined
        # name `hvd_local_rank()` here, raising NameError after training.
        if is_chief:
            tf.saved_model.save(self.model, self.export_path)
