import os
import sys
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
import numpy as np
import logging
from pathlib import Path
import datetime

def set_seeds(seed):
    """Seed every RNG used in training for reproducible runs.

    :param seed: integer seed applied to Python, NumPy and PyTorch RNGs
    """
    random.seed(seed)
    torch.manual_seed(seed)
    # Fix: also seed all CUDA devices; torch.manual_seed alone does not cover
    # multi-GPU generators. This is a safe no-op on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    cudnn.deterministic = True
    # Fix: benchmark mode lets cuDNN auto-select (possibly non-deterministic)
    # algorithms per input shape, defeating the deterministic flag above.
    cudnn.benchmark = False

def resume_from_ckpt(model, optimizer, ckpt_file, device):
    """Restore training state (weights, optimizer, counters) from a checkpoint.

    :param model: network to load weights into (plain module or DataParallel)
    :param optimizer: optimizer whose state is restored; pass None to skip it
    :param ckpt_file: path to a checkpoint produced by torch.save, expected to
        contain 'epoch', 'best_acc1', 'cur_iter', 'state_dict' and 'optimizer'
    :param device: map_location for torch.load; when None, falls back to the
        device of the model's first parameter
    :return: tuple (best_acc1, start_epoch, cur_iter), or None when the
        checkpoint file does not exist
    """
    if not os.path.isfile(ckpt_file):
        # Missing checkpoint is logged, not raised; callers get an explicit None.
        logging.error("=> no checkpoint found at '{}'".format(ckpt_file))
        return None

    logging.info("=> loading checkpoint '{}'".format(ckpt_file))
    # Bug fix: the caller-supplied device was unconditionally overwritten,
    # making the parameter dead. Honor it, and only infer from the model
    # when no device is given.
    if device is None:
        device = next(model.parameters()).device
    ckpt = torch.load(ckpt_file, map_location=device)
    start_epoch = ckpt['epoch']
    best_acc1 = ckpt['best_acc1']
    cur_iter = ckpt['cur_iter']

    # Unwrap DataParallel so the checkpoint keys match the bare module.
    target = model.module if isinstance(model, torch.nn.DataParallel) else model
    result = target.load_state_dict(ckpt['state_dict'])
    logging.info(str(result))

    if optimizer is not None:
        optimizer.load_state_dict(ckpt['optimizer'])
        logging.info(f"=> loaded checkpoint '{ckpt_file}' (epoch {start_epoch})")

    return best_acc1, start_epoch, cur_iter

def set_logger(log_file=None, rank=0):
    """Configure the root logger for console (and optionally file) output.

    :param log_file: path of a log file; when None, no file handler is added
    :param rank: process rank stamped into every record (for distributed runs)
    """
    fmt = logging.Formatter(
        f"%(asctime)s;%(levelname)s;Rank:{rank}; %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )
    root = logging.getLogger()

    # Optional file output.
    if log_file is not None:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)

    # Console output goes to stdout rather than the default stderr.
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(fmt)
    root.addHandler(console)

    # Emit INFO and above.
    root.setLevel(logging.INFO)

class AverageMeter(object):
    """Tracks the most recent value and a running sum/count average."""

    def __init__(self, name, fmt=':f'):
        self.name = name    # label used when rendering the meter
        self.fmt = fmt      # format spec applied to val/avg in __str__
        self.reset()

    def reset(self):
        # Zero out all statistics.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Record `val` observed `n` times and refresh the running mean.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = ''.join(
            ['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**self.__dict__)


class ProgressMeter(object):
    """Logs a "[batch/total]" progress line followed by a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters   # iterable of objects rendered via str()
        self.prefix = prefix   # text prepended to every progress line

    def display(self, batch):
        # One tab-separated line: prefix + counter + each meter.
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        logging.info('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running batch index to the width of the total.
        # NOTE(review): `// 1` only matters for non-int num_batches (it
        # truncates) — presumably always an int; confirm with callers.
        width = len(str(num_batches // 1))
        slot = '{:' + str(width) + 'd}'
        return '[' + slot + '/' + slot.format(num_batches) + ']'

class Saver:
    """Creates a timestamped output directory under `root` for one experiment."""

    def __init__(self, exp_name, root='./runs', exist_ok=False):
        self.save_dir = self._get_save_dir(exp_name, root)
        # Fails if the directory already exists unless exist_ok is set.
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)

    def _get_save_dir(self, exp_name, root='./runs'):
        # Suffix with the current wall-clock time so repeated runs of the same
        # experiment land in distinct folders.
        # NOTE(review): ':' is not a legal path character on Windows — confirm
        # this only runs on POSIX systems.
        stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        return Path(root) / '{}_{}'.format(exp_name, stamp)

class LabelSmoothing(nn.Module):
    """NLL loss with uniform label smoothing.

    The true class keeps weight (1 - smoothing); the remaining probability
    mass is spread uniformly over all classes via the mean log-probability.
    """

    def __init__(self, smoothing=0.0):
        """:param smoothing: label smoothing factor (0.0 disables smoothing)."""
        super(LabelSmoothing, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x, target):
        """Return the scalar smoothed loss for logits `x` and class indices `target`."""
        log_probs = torch.nn.functional.log_softmax(x, dim=-1)

        # Standard NLL term: negative log-probability of the true class.
        nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Smoothing term: negative mean log-probability over all classes.
        uniform = -log_probs.mean(dim=-1)
        per_sample = self.confidence * nll + self.smoothing * uniform
        return per_sample.mean()

class CosAnnealWarmup(LambdaLR):
    """Linear warmup followed by cosine annealing, as an epoch-wise LambdaLR.

    The LR multiplier ramps linearly from 1/warmup up to 1 over the first
    `warmup` epochs, then follows a half-cosine decay for the remaining
    (epoch_max - warmup) epochs.

    :param optimizer: wrapped optimizer
    :param warmup: number of linear warmup epochs (must be >= 1)
    :param epoch_max: total number of epochs covered by the schedule
    :param min_ratio: floor term mixed into the cosine phase.
        NOTE(review): with min_ratio > 0 the multiplier bottoms out at
        min_ratio / (2 + min_ratio), not min_ratio — confirm this is intended.
    :param last_epoch: index of the last completed epoch when resuming
        (-1 starts the schedule fresh)
    :param verbose: forwarded to LambdaLR
    """

    def __init__(self, optimizer, warmup, epoch_max, min_ratio=0, last_epoch=-1, verbose=False):
        def lr_lambda(epoch):
            if epoch < warmup:
                # Warmup: 1/warmup, 2/warmup, ..., 1.
                return (epoch + 1) / warmup
            # Cosine phase over the post-warmup span.
            e = epoch - warmup
            es = epoch_max - warmup
            return (1 / (2 + min_ratio)) * (1 + min_ratio + np.cos(np.pi * e / es))

        # Bug fix: last_epoch/verbose were hard-coded to -1/False, silently
        # discarding the constructor arguments and breaking resumption from a
        # checkpointed epoch.
        super().__init__(optimizer, lr_lambda, last_epoch=last_epoch, verbose=verbose)