import os
import time
import logging
import numpy as np
# moxing (OBS file transfer) exists only inside Huawei ModelArts jobs;
# degrade gracefully when running elsewhere.
try:
    import moxing as mox
except ModuleNotFoundError:
    logging.info("Moxing is not installed; it is only required when running on ModelArts.")

from mindspore.common.tensor import Tensor
from mindspore.train.callback._callback import Callback


class StateMonitor(Callback):
    """Track per-epoch timing, loss and throughput; optionally run evaluation.

    Args:
        data_size (int): Number of steps per epoch; used to derive per-step time
            and throughput.
        tot_batch_size (int, optional): Global batch size. When given, fps is
            reported each epoch and a running mean fps is maintained.
        eval_interval (int, optional): Run evaluation every ``eval_interval``
            epochs. When None, evaluation is disabled.
        eval_offset (int, optional): Epoch offset (modulo ``eval_interval``) at
            which evaluation fires.
        eval_engine (object, optional): Object exposing ``eval()`` and
            ``get_result()``. ``get_result()`` may return a single accuracy or a
            4-element list [top1, top5, i2t_recall, t2i_recall].
        logger (callable, optional): Logging function; defaults to built-in print.
    """
    def __init__(self, data_size, tot_batch_size=None,
                 eval_interval=None, eval_offset=None,
                 eval_engine=None, logger=None):
        super(StateMonitor, self).__init__()
        self.data_size = data_size
        self.tot_batch_size = tot_batch_size
        self.epoch_num = 0
        self.loss = 0
        self.eval_interval = eval_interval
        self.eval_offset = eval_offset
        self.eval_engine = eval_engine
        # Best-so-far metrics; -1 means "no evaluation has run yet".
        self.best_acc = -1
        self.best_acc_top5 = -1
        self.best_i2t_recall = -1
        self.best_t2i_recall = -1
        self.mean_fps = 0.0
        self.print = print if logger is None else logger
        self.epoch_time = 0

    def step_end(self, run_context):
        """Cache the latest loss as a python scalar (or leave it as-is)."""
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs

        # TrainOneStepWithLossScaleCell returns a tuple; the first element
        # is the loss tensor.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]

        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())

        self.loss = loss

    def epoch_begin(self, run_context):
        """Record the wall-clock start of the epoch."""
        self.epoch_time = time.time()

    def epoch_end(self, run_context):
        """Report epoch time/loss/fps and, on schedule, evaluation metrics."""
        epoch_seconds = (time.time() - self.epoch_time)
        per_step_seconds = epoch_seconds / self.data_size

        print_str = "epoch[{}]".format(self.epoch_num)
        print_str += ', epoch time: {:.2f}s'.format(epoch_seconds)
        print_str += ', per step time: {:.4f}s'.format(per_step_seconds)
        print_str += ', loss={:.6f}'.format(self.loss)

        if self.tot_batch_size is not None:
            fps = self.tot_batch_size * self.data_size / epoch_seconds
            # Incremental running mean over all epochs seen so far.
            self.mean_fps = (self.mean_fps * self.epoch_num + fps) / (self.epoch_num + 1)
            print_str += ', fps={:.2f}'.format(fps)

        # Guard against the default eval_interval=None, which previously made
        # the modulo below raise TypeError at the end of the first epoch.
        run_eval = (self.eval_interval is not None
                    and (self.epoch_num + 1) % self.eval_interval == self.eval_offset)
        if run_eval:
            eval_start = time.time()
            self.eval_engine.eval()
            output = self.eval_engine.get_result()
            eval_seconds = time.time() - eval_start
            if output is not None:
                if isinstance(output, list):
                    # [top1, top5, i2t_recall, t2i_recall]
                    top1, top5, i2t, t2i = (float(output[i]) for i in range(4))
                    print_str += ', top1 accuracy={:.6f}'.format(top1)
                    print_str += ', top5 accuracy={:.6f}'.format(top5)
                    print_str += ', i2t_recall={:.6f}'.format(i2t)
                    print_str += ', t2i_recall={:.6f}'.format(t2i)
                    print_str += ', eval_cost={:.2f}'.format(eval_seconds)

                    self.best_acc = max(self.best_acc, top1)
                    self.best_acc_top5 = max(self.best_acc_top5, top5)
                    self.best_i2t_recall = max(self.best_i2t_recall, i2t)
                    self.best_t2i_recall = max(self.best_t2i_recall, t2i)
                else:
                    print_str += ', accuracy={:.6f}'.format(float(output))
                    print_str += ', eval_cost={:.2f}'.format(eval_seconds)

                    self.best_acc = max(self.best_acc, float(output))

        self.print(print_str)
        self.epoch_num += 1


class LossMonitor(Callback):
    """
    Print step time and loss at a fixed step interval during training.

    Args:
        steps_size (int): How many steps are the intervals between print
            information each time. Must be a positive int.
        logger (callable): Function used to emit the report.
            Default: logging.info.

    Raises:
        ValueError: If steps_size is not positive int (raised by callers'
            validation; not checked here).
    """

    def __init__(self, steps_size, logger=logging.info):
        super(LossMonitor, self).__init__()
        self.step_time = time.time()
        self.steps_size = steps_size
        self.print = logger

    def step_begin(self, run_context):
        """Record the wall-clock start of the step."""
        self.step_time = time.time()

    def step_end(self, run_context):
        """Every `steps_size` steps, print epoch, step index, step time and loss."""
        cb_params = run_context.original_args()
        # Fix: key the interval off the step counter. The original tested
        # cur_epoch_num % steps_size, which printed on every step of matching
        # epochs and never in between.
        if cb_params.cur_step_num % self.steps_size == 0:
            step_seconds = time.time() - self.step_time
            # TrainOneStepWithLossScaleCell returns a tuple while
            # TrainOneStepCell returns the loss tensor directly.
            outputs = cb_params.net_outputs
            loss = outputs[0] if isinstance(outputs, (tuple, list)) else outputs
            loss = loss.asnumpy()

            self.print("Epoch: {}, Step: {}, Step Time: {} sec, Total Loss: {}."
                       .format(cb_params.cur_epoch_num,
                               (cb_params.cur_step_num - 1) % cb_params.batch_num + 1,
                               str(step_seconds)[:5], str(loss)[:6]))


class UploadCheckpoint(Callback):
    """Periodically copy checkpoint files from a local dir to OBS via moxing.

    Args:
        src_dir (str): Local directory containing checkpoint files.
        target_dir (str): Destination OBS URL/directory.
        upload_frequence (int): Upload every this many steps. Default: 10.
    """

    def __init__(self, src_dir, target_dir, upload_frequence=10):
        # Fix: the base Callback initializer was never invoked (the sibling
        # callbacks in this file all call it).
        super(UploadCheckpoint, self).__init__()
        self.src_dir = src_dir
        self.target_dir = target_dir
        self.upload_frequence = upload_frequence

    def step_end(self, run_context):
        """On every `upload_frequence`-th step, upload src_dir if non-empty."""
        cb_params = run_context.original_args()
        # Only act when the interval elapses and there is something to copy.
        if cb_params.cur_step_num % self.upload_frequence == 0 and os.listdir(self.src_dir):
            print("Starting upload model to obs!")
            print("Find ckpt file:", os.listdir(self.src_dir))
            print("Target dir is:{}".format(self.target_dir))
            try:
                mox.file.copy_parallel(src_url=self.src_dir,
                                       dst_url=self.target_dir)
                print("Upload ckpt succeed!")
            except Exception as e:
                # Best-effort upload: log and keep training even if it fails
                # (also covers NameError when moxing is absent off-ModelArts).
                print("Upload ckpt failed!", e)