# -*- coding: utf-8 -*-


def print_train_log():
    """Build a stateful per-batch training logger.

    Returns a callable that keeps running totals across invocations and
    prints both the current batch's metrics and the averages accumulated
    so far.  The callable expects these keyword arguments:
    ``epoch_index``, ``batch_index``, ``batch_time``, ``batch_loss``,
    ``batch_accuracy``, ``learning_rate``.
    """
    state = {"batches": 0, "loss_sum": 0, "accuracy_sum": 0}

    def wrapper(*args, **kwargs):
        # Read the metrics in a fixed order (KeyError on any missing key).
        epoch_index = kwargs["epoch_index"]
        batch_index = kwargs["batch_index"]
        batch_time = kwargs["batch_time"]
        batch_loss = kwargs["batch_loss"]
        batch_accuracy = kwargs["batch_accuracy"]
        learning_rate = kwargs["learning_rate"]

        # Fold this batch into the running totals used for the averages.
        state["batches"] += 1
        state["loss_sum"] += batch_loss
        state["accuracy_sum"] += batch_accuracy
        mean_loss = state["loss_sum"] / state["batches"]
        mean_accuracy = state["accuracy_sum"] / state["batches"]

        print(f"epoch: {epoch_index:04d}, batch: {batch_index:04d}, lr: {learning_rate} "
              f"batch consumed time: {batch_time:.2f}")
        print(f"\t batch loss: {batch_loss:.4f}, batch accuracy: {batch_accuracy:.2%}, "
              f"average loss: {mean_loss:.4f}, average accuracy: {mean_accuracy:.2%}")

    return wrapper
