import logging
import functools
import os
import sys
import time
from mindspore import Tensor
# import matplotlib.pyplot as plt
import numpy as np
# import re
# import json

from mindspore.train.callback._callback import Callback

# Silence matplotlib's chatty font/debug logging; kept even though the
# matplotlib import above is commented out, so logs stay clean if plotting
# is re-enabled.
logging.getLogger('matplotlib').setLevel(logging.CRITICAL)


def create_logger(args):
    """Configure process-wide logging and return the project logger.

    Every rank appends its records to ``outputs_{rank}.log`` inside
    ``args.output_dir``; rank 0 additionally mirrors records to the console
    via a stream handler.

    Args:
        args: namespace providing ``output_dir`` (str) and ``rank`` (int).

    Returns:
        The ``'med_pretrain'`` logger.
    """
    # Quiet extremely verbose PIL loggers (51 is just above logging.CRITICAL).
    for noisy in ('PIL', 'PIL.TiffImagePlugin'):
        logging.getLogger(noisy).setLevel(51)

    log_path = os.path.join(args.output_dir, f'outputs_{args.rank}.log')
    handlers = [logging.FileHandler(log_path, mode='a+')]
    if args.rank == 0:
        handlers.append(logging.StreamHandler())

    logging.basicConfig(format='[%(asctime)s] - %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        level=logging.DEBUG,
                        handlers=handlers,
                        # force=True
                        )

    logger = logging.getLogger('med_pretrain')
    logger.info(f'logger created {args.rank}')
    return logger


class AverageMeter(object):
    """Computes and stores the average and current value.

    Attributes:
        val: most recent value passed to ``update``.
        avg: running weighted average ``sum / count`` (0 while empty).
        sum: weighted sum of all values since the last ``reset``.
        count: total accumulated weight (number of samples).
    """

    def __init__(self, name, fmt=':f'):
        self.name = name  # label used by __str__
        self.fmt = fmt    # format spec applied to val/avg in __str__
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1, sync=False):
        """Record ``val`` with weight ``n`` and refresh the running average.

        Args:
            val: new observation.
            n: weight (e.g. batch size) of this observation.
            sync: if True, call ``synchronize_between_processes`` before
                recomputing the average.
        """
        self.val = val
        self.sum += val * n
        self.count += n
        if sync:
            self.synchronize_between_processes()
        # Guard the division: updating with n=0 on an empty meter previously
        # raised ZeroDivisionError.
        self.avg = self.sum / self.count if self.count else 0

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # No-op placeholder: distributed reduction is not implemented here.
        pass

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    """Formats a ``[step/total]`` progress line followed by meter summaries."""

    def __init__(self, num_batches, meters, prefix=""):
        # Pre-build the '[{:Nd}/total]' template once; N is the digit width
        # of num_batches so step numbers stay right-aligned.
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Return a tab-joined line: prefix + [batch/total], then each meter."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        return '\t'.join(entries)

    def _get_batch_fmtstr(self, num_batches):
        # Digit width of the batch count; the original used the no-op
        # 'num_batches // 1' here.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'


def str_to_list(s):
    """Parse a comma-separated string of ints, e.g. '1,2,3' -> [1, 2, 3]."""
    return list(map(int, s.split(',')))


def compute_ema(losses, alpha=0.1):
    """Exponential moving average of a numeric sequence.

    Args:
        losses: sequence of numbers.
        alpha: smoothing factor in (0, 1]; larger weights recent values more.

    Returns:
        List of EMA values, same length as ``losses``. Returns ``[]`` for an
        empty input (previously raised IndexError on ``losses[0]``).
    """
    if not losses:
        return []

    ema_values = [losses[0]]  # seed the EMA with the first observation
    for loss in losses[1:]:
        ema_values.append((1 - alpha) * ema_values[-1] + alpha * loss)

    return ema_values


class LossMonitor(Callback):
    """Logs per-step loss/timing and per-epoch timing during training.

    Args:
        epochs (int): total number of training epochs, used in log lines.
        step_size (int): number of steps per epoch.
        logger (callable): sink for epoch-level messages. Default: logging.info.
        args: optional run-configuration namespace (e.g. rank, output_dir);
            stored for later use, not required for basic operation.
    """

    def __init__(self, epochs, step_size, logger=logging.info, args=None):
        super(LossMonitor, self).__init__()
        self.step_time = time.time()  # wall-clock start of current step/epoch
        self.dataset_size = step_size
        self.print = logger
        self.epoch_losses = []
        self.cur_step = 1             # 1-based step index within the epoch
        self.cur_epoch = 1
        self.args = args
        self.epochs = epochs

    def step_begin(self, run_context):
        """Record the wall-clock start time of the step."""
        self.step_time = time.time()

    def step_end(self, run_context):
        """Log the step loss and per-step time, then advance the counter."""
        cb_params = run_context.original_args()
        step_loss = cb_params.net_outputs
        # net_outputs may be (loss, ...) or a bare Tensor; reduce to a scalar.
        if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
            step_loss = step_loss[0]
        if isinstance(step_loss, Tensor):
            step_loss = np.mean(step_loss.asnumpy())
        step_mseconds = (time.time() - self.step_time) * 1000
        print("epoch: {:3d}/{:3d}, step:{:5d}/{:5d}, loss:{:5.3f}, per step time:{:5.3f} ms".format(
                self.cur_epoch, self.epochs, self.cur_step, self.dataset_size, step_loss, step_mseconds), flush=True)
        # Advance AFTER logging so steps display as 1..dataset_size. The
        # original incremented first and then applied `%`, so the first
        # printed step was 2 and step dataset_size displayed as 0.
        self.cur_step = self.cur_step % self.dataset_size + 1

    def epoch_begin(self, run_context):
        """Record the wall-clock start time of the epoch."""
        self.step_time = time.time()

    def epoch_end(self, run_context):
        """Log epoch duration and the cumulative step count."""
        self.cur_epoch += 1
        cb_params = run_context.original_args()

        epoch_seconds = time.time() - self.step_time
        num_steps = cb_params.cur_step_num
        current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Fixed format specs: ':2f' was a width-2 (no precision) spec, and
        # the step count is an integer, not a float.
        self.print(
            f"Epoch: {cb_params.cur_epoch_num}, Time: {epoch_seconds:.2f}, Steps: {num_steps}, TimeStamp: {current_time}")