# -*- coding: utf-8 -*-
# ===========================================
# @Time    : 2021/8/25 上午10:34
# @Author  : shutao
# @FileName: trainer.py
# @remark  : 
# 
# @Software: PyCharm
# GitHub   : https://github.com/NameLacker
# ===========================================

import traceback
import datetime
import time
import os

from loguru import logger
from visualdl import LogWriter

import paddle
import paddle.nn.functional as F

log_writer = LogWriter("log/{}".format(int(time.time())))


class Trainer:
    def __init__(self, exp, args):
        """
        Store the experiment description and CLI arguments.

        Only lightweight attributes are set here; heavyweight objects
        (model, optimizer, data loader, ...) are built in ``before_train``.

        :param exp: experiment object exposing model/data/optimizer factories
        :param args: parsed command-line arguments (fp16, batch_size, resume, ...)
        """
        self.exp = exp
        self.args = args

        # training related attr
        # NOTE(review): the flag is stored but AMP training is not wired up
        # anywhere in this class yet — confirm before relying on --fp16.
        self.amp_training = args.fp16

        self.max_epoch = exp.max_epoch

    def train(self):
        """
        Main training entry point: setup, epoch loop, teardown.

        :return: None
        """
        self.before_train()
        try:
            self.train_in_epoch()
        except Exception:
            # Log the full stack trace at error level so failures stand out
            # in the log; training still proceeds to ``after_train`` cleanup.
            logger.error("Training produces errors: \n{}".format(traceback.format_exc()))
        finally:
            self.after_train()

    def train_in_epoch(self):
        """
        Outer loop over epochs, starting from ``self.start_epoch`` so that
        resumed runs continue where they left off.

        :return: None
        """
        logger.info("Train in epoch...")
        for self.epoch in range(self.start_epoch, self.max_epoch):
            self.before_epoch()
            self.train_in_iter()
            self.after_epoch()

    def train_in_iter(self):
        """
        Run one full pass (one epoch) over the training data loader.

        :return: None
        """
        logger.info("Train in iter...")

        for self.iter, data in enumerate(self.train_loader):
            self.before_iter()
            self.train_one_iter(data)
            self.after_iter()

    def train_one_iter(self, data):
        """
        Process a single batch: forward pass, backward pass, parameter update.

        :param data: one batch yielded by the train loader
        :return: None
        """
        logger.info("Train one iter...")

        # ================================ experiment-specific section ================================
        # todo: adapt this part for different training programs
        self.loss = self.training_subject(data, self.loss_func)
        # ====================================================================================

        self.loss.backward()
        # BUGFIX: the optimizer step was missing (only present as a commented-out
        # ``minimize`` call), so gradients were computed and then immediately
        # discarded — parameters were never updated.
        self.optimizer.step()
        self.optimizer.clear_grad()

    def before_train(self):
        """
        Build everything needed for training: model, data loader, loss
        function, LR scheduler, optimizer and evaluator.

        :return: None
        """
        logger.info("exp value:\n{}".format(self.exp))

        model = self.exp.get_model()
        self.training_subject = self.exp.get_training_subject
        # paddle.summary(model, self.exp.test_size)  # print the network summary

        # data related init and max_iter means iters per epoch
        self.train_loader, self.max_iter = self.exp.get_data_loader(batch_size=self.args.batch_size)

        # loading loss function
        self.loss_func = self.exp.get_loss_function()

        # solver related init
        # logger.info(self.lr_scheduler.get_lr())
        self.lr_scheduler = self.exp.get_lr_scheduler(self.exp.base_lr, self.max_iter)
        self.optimizer = self.exp.get_optimizer(self.lr_scheduler, model.parameters())

        # BUGFIX: resume AFTER the optimizer exists — ``resume_train`` loads the
        # optimizer state dict, so calling it before ``self.optimizer`` was set
        # raised AttributeError on every resumed run.
        # value of self.start_epoch is set inside ``resume_train``.
        model = self.resume_train(model)

        self.model = model
        self.model.train()

        self.evaluator = self.exp.get_evaluator(batch_size=self.args.batch_size)

        logger.info("The preparation procedure has been completed, training start...")

    def after_train(self):
        """
        Hook executed once training finishes (normally or via exception).

        :return: None
        """
        logger.info("After train...")

    def before_epoch(self):
        """
        Hook executed before each epoch.

        :return: None
        """
        logger.info("Before epoch...")

    def after_epoch(self):
        """
        Hook executed after each epoch.

        :return: None
        """
        logger.info("After epoch...")

    def before_iter(self):
        # Hook executed before each iteration.
        logger.info("Before iter...")

    def after_iter(self):
        # Hook executed after each iteration.
        logger.info("After iter...")
        # logger.info("iter: {}, Loss: {:.5}".format(self.progress_in_iter, self.loss.numpy()[0]))
        # log_writer.add_scalar('train/loss', step=self.progress_in_iter, value=self.loss.numpy()[0])

    @property
    def progress_in_iter(self):
        """
        Return the global step count: completed epochs times iterations
        per epoch, plus the current iteration index.

        :return: int global step
        """
        return self.epoch * self.max_iter + self.iter

    def resume_train(self, model):
        """
        Optionally restore model/optimizer state from checkpoint files.

        Must be called after ``self.optimizer`` has been created (it loads
        the optimizer state dict). Sets ``self.start_epoch`` as a side
        effect: 0 for a fresh run, or the resumed epoch otherwise.

        :param model: freshly built model to load weights into
        :return: the (possibly restored) model
        """
        if self.args.resume:
            logger.info("Resume training....")

            # load model params and opt params
            model_params = paddle.load(self.args.model_params)
            opt_params = paddle.load(self.args.opt_params)

            # resume the model/optimizer state dict
            model.load_dict(model_params)
            self.optimizer.load_dict(opt_params)

            # resume the training state; an explicit --start_epoch wins over
            # the value stored in the checkpoint.
            # NOTE(review): assumes the optimizer checkpoint carries a
            # "start_epoch" entry — verify against the checkpoint-saving code.
            self.start_epoch = self.args.start_epoch - 1 if self.args.start_epoch is not None else opt_params[
                "start_epoch"]
            logger.info("loaded checkpoint '{}' (epoch {})".format(self.args.resume, self.start_epoch))
        else:
            self.start_epoch = 0
        return model

    def evaluate_and_save_model(self):
        # Placeholder hook: evaluation + checkpointing not implemented yet.
        logger.info("Evaluate and save model...")

    def save_ckpt(self):
        # Placeholder hook: checkpoint saving not implemented yet.
        logger.info("Save ckpt...")
