import datetime
import os
import os.path as osp
import torch
from torch.nn import Parameter
from torch.autograd import Variable
import random
import numpy as np
import pickle
import time
from contextlib import contextmanager

# Great idea from https://github.com/amdegroot/ssd.pytorch
def str2bool(v):
  """Interpret a string as a boolean: 'yes'/'true'/'t'/'1' (any case) -> True.
  Great idea from https://github.com/amdegroot/ssd.pytorch"""
  truthy = ("yes", "true", "t", "1")
  return v.lower() in truthy

def tight_float_str(x, fmt='{:.4f}'):
  """Format `x` with `fmt`, then drop trailing zeros and a dangling dot
  (e.g. 2.0 -> '2', 1.2500 -> '1.25')."""
  formatted = fmt.format(x)
  formatted = formatted.rstrip('0')
  return formatted.rstrip('.')

def time_str(fmt=None):
  """Return the current local time formatted with `fmt`
  (default: '%Y-%m-%d_%H:%M:%S')."""
  chosen = '%Y-%m-%d_%H:%M:%S' if fmt is None else fmt
  return datetime.datetime.today().strftime(chosen)

class ReDirectSTD(object):
  """Modified from Tong Xiao's `Logger` in open-reid.
  This class overwrites sys.stdout or sys.stderr, so that console logs can
  also be written to file.
  Args:
    fpath: file path; `None` disables file logging.
    console: one of ['stdout', 'stderr']
    immediately_visible: If `False`, the file is opened only once and closed
      after exiting, so messages written to file may not be immediately
      visible. If `True`, each write opens, appends to, and closes the file;
      messages are immediately visible at the cost of repeated open/close.
  Usage example:
    `ReDirectSTD('stdout.txt', 'stdout', False)`
    `ReDirectSTD('stderr.txt', 'stderr', False)`
  NOTE: File will be deleted if already existing. Log dir and file is created
    lazily -- if no message is written, the dir and file will not be created.
  """
  def __init__(self, fpath=None, console='stdout', immediately_visible=False):
    import sys

    assert console in ['stdout', 'stderr']
    self.console = sys.stdout if console == 'stdout' else sys.stderr
    self.file = fpath
    self.f = None
    self.immediately_visible = immediately_visible
    # Start from a fresh log file.
    if fpath is not None and osp.exists(fpath):
      os.remove(fpath)
    if console == 'stdout':
      sys.stdout = self
    else:
      sys.stderr = self

  def __del__(self):
    self.close()

  def __enter__(self):
    # Fixed: used to return None, which breaks `with ReDirectSTD(...) as r:`.
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    # Fixed: __exit__ must accept the three exception arguments; the old
    # zero-argument signature raised TypeError whenever the object was used
    # as a context manager.
    self.close()

  def write(self, msg):
    self.console.write(msg)
    if self.file is not None:
      # Lazily create the log dir on first write (see class NOTE).
      os.makedirs(osp.dirname(osp.abspath(self.file)), exist_ok=True)
      if self.immediately_visible:
        with open(self.file, 'a') as f:
          f.write(msg)
      else:
        if self.f is None:
          self.f = open(self.file, 'w')
        self.f.write(msg)

  def flush(self):
    self.console.flush()
    if self.f is not None:
      self.f.flush()
      # Force the OS to write the file to disk as well.
      os.fsync(self.f.fileno())

  def close(self):
    # Fixed: no longer closes the console stream. __del__ calls close(), and
    # closing the real sys.stdout/sys.stderr would break all console output
    # for the rest of the process.
    if getattr(self, 'f', None) is not None:
      self.f.close()
      self.f = None
def set_devices_for_ml(sys_device_ids):
  """This version is for mutual learning.

  It sets some GPUs to be visible and returns some wrappers to transferring
  Variables/Tensors and Modules/Optimizers.

  Args:
    sys_device_ids: a tuple of tuples; which devices to use for each model,
      len(sys_device_ids) should be equal to number of models. Examples:

      sys_device_ids = ((-1,), (-1,))
        the two models both on CPU
      sys_device_ids = ((-1,), (2,))
        the 1st model on CPU, the 2nd model on GPU 2
      sys_device_ids = ((3,),)
        the only one model on the 4th gpu
      sys_device_ids = ((0, 1), (2, 3))
        the 1st model on GPU 0 and 1, the 2nd model on GPU 2 and 3
      sys_device_ids = ((0,), (0,))
        the two models both on GPU 0
      sys_device_ids = ((0,), (0,), (1,), (1,))
        the 1st and 2nd model on GPU 0, the 3rd and 4th model on GPU 1

  Returns:
    TVTs: a list of `TransferVarTensor` callables, one for one model.
    TMOs: a list of `TransferModulesOptims` callables, one for one model.
    relative_device_ids: a list of lists; `sys_device_ids` transformed to
      relative ids; to be used in `DataParallel`
  """
  # Flatten and de-duplicate, preserving first-appearance order so the
  # CUDA_VISIBLE_DEVICES mapping is deterministic. (The old code used
  # `list(set(...))`, whose ordering is arbitrary, so the same config could
  # map models to different physical GPUs between runs.)
  unique_sys_device_ids = []
  for ids in sys_device_ids:
    for i in ids:
      if i not in unique_sys_device_ids:
        unique_sys_device_ids.append(i)
  # Set the CUDA_VISIBLE_DEVICES environment variable.
  os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
    str(i) for i in unique_sys_device_ids)
  # Return wrappers; models and user defined Variables/Tensors would be
  # transferred to the first device of each model's id tuple.
  relative_device_ids = []
  TVTs, TMOs = [], []
  for ids in sys_device_ids:
    # -1 (CPU) stays -1; GPU ids are remapped to their position among the
    # visible devices.
    relative_ids = [i if i == -1 else unique_sys_device_ids.index(i)
                    for i in ids]
    relative_device_ids.append(relative_ids)
    TVTs.append(TransferVarTensor(relative_ids[0]))
    TMOs.append(TransferModulesOptims(relative_ids[0]))
  return TVTs, TMOs, relative_device_ids

def may_make_dir(path):
  """Create `path` (and missing parents) if it does not exist yet.

  Args:
    path: a dir, or result of `osp.dirname(osp.abspath(file_path))`
  Note:
    `osp.exists('')` returns `False`, while `osp.exists('.')` returns `True`!
    `None` and `''` are silently ignored.
  """
  if path in [None, '']:
    return
  # exist_ok avoids the race between an exists() check and makedirs() when
  # several processes create the same log dir concurrently.
  os.makedirs(path, exist_ok=True)
def find_index(seq, item):
  """Return the index of the first occurrence of `item` in `seq`,
  or -1 if it is absent."""
  try:
    return list(seq).index(item)
  except ValueError:
    return -1

def to_scalar(vt):
  """Convert a length-1 pytorch Variable or Tensor to a python scalar.

  Note: for a torch Tensor tx with tx.size() == torch.Size([1]),
  tx.cpu().numpy() has shape (1,), not a scalar -- hence the flatten()[0].

  Raises:
    TypeError: if `vt` is neither a Variable nor a Tensor.
  """
  if isinstance(vt, Variable):
    vt = vt.data
  elif not torch.is_tensor(vt):
    raise TypeError('Input should be a variable or tensor')
  return vt.cpu().numpy().flatten()[0]
class TransferVarTensor(object):
  """Callable returning a copy of the input Variable or Tensor on the
  configured device (-1 means CPU, otherwise a GPU id)."""

  def __init__(self, device_id=-1):
    self.device_id = device_id

  def __call__(self, var_or_tensor):
    if self.device_id == -1:
      return var_or_tensor.cpu()
    return var_or_tensor.cuda(self.device_id)
class TransferModulesOptims(object):
  """Callable that moves modules/optimizers to CPU (device_id == -1) or to
  the specified GPU, by delegating to `may_transfer_modules_optims`."""

  def __init__(self, device_id=-1):
    self.device_id = device_id

  def __call__(self, modules_and_or_optims):
    target = self.device_id
    may_transfer_modules_optims(modules_and_or_optims, target)
def may_transfer_modules_optims(modules_and_or_optims, device_id=-1):
  """Move optimizers/modules to CPU or a specified GPU.

  Args:
    modules_and_or_optims: a list whose members are torch.optim.Optimizer,
      torch.nn.Module, or None (silently skipped).
    device_id: gpu id, or -1 which means transferring to cpu.
  """
  for item in modules_and_or_optims:
    if item is None:
      # None placeholders are allowed and ignored.
      continue
    if isinstance(item, torch.optim.Optimizer):
      # Optimizer state tensors (momentum buffers etc.) must move too.
      transfer_optim_state(item.state, device_id=device_id)
    elif isinstance(item, torch.nn.Module):
      if device_id == -1:
        item.cpu()
      else:
        item.cuda(device=device_id)
    else:
      print('[Warning] Invalid type {}'.format(item.__class__.__name__))
def transfer_optim_state(state, device_id=-1):
  """Transfer an optimizer.state to cpu or a specified gpu, i.e. move the
  tensors inside the state to the specified device. The modification is
  done in place on `state`.

  Args:
    state: a torch.optim.Optimizer.state -- possibly nested dicts whose
      leaves are tensors or plain python values (e.g. step counters).
    device_id: gpu id, or -1 which means transferring to cpu.
  Raises:
    RuntimeError: if a Variable or Parameter is found in the state; only
      raw tensors are expected there.
  """
  for key, val in state.items():
    if isinstance(val, dict):
      # Optimizer state is nested: {param: {name: tensor_or_scalar}}.
      transfer_optim_state(val, device_id=device_id)
    elif isinstance(val, Variable):
      raise RuntimeError("Oops, state[{}] is a Variable!".format(key))
    elif isinstance(val, torch.nn.Parameter):
      raise RuntimeError("Oops, state[{}] is a Parameter!".format(key))
    else:
      # Non-tensor entries (e.g. int step counters) have no .cpu()/.cuda();
      # they are left untouched. Fixed: the old bare `except: pass` also
      # swallowed KeyboardInterrupt, SystemExit and real CUDA errors.
      try:
        if device_id == -1:
          state[key] = val.cpu()
        else:
          state[key] = val.cuda(device=device_id)
      except AttributeError:
        pass

def set_seed(seed):
  """Make runs reproducible: seed the python, numpy and torch (CPU) RNGs
  and disable cudnn (whose algorithms are non-deterministic)."""
  random.seed(seed)
  print('setting random-seed to {}'.format(seed))

  np.random.seed(seed)
  print('setting np-random-seed to {}'.format(seed))

  torch.backends.cudnn.enabled = False
  print('cudnn.enabled set to {}'.format(torch.backends.cudnn.enabled))

  # Seed for CPU; GPU seeding is not handled here.
  torch.manual_seed(seed)
  print('setting torch-seed to {}'.format(seed))
def save_pickle(obj, path):
  """Pickle `obj` to `path`, creating the parent directory if needed.

  Protocol 2 is used for backward compatibility of the produced files.
  """
  parent = osp.dirname(osp.abspath(path))
  may_make_dir(parent)
  with open(path, 'wb') as f:
    pickle.dump(obj, f, protocol=2)

def load_pickle(path):
  """Load and return a pickled object from `path`.

  According to this post: https://stackoverflow.com/a/41733927, cPickle and
  disabling garbage collector helps with loading speed.

  Raises:
    FileNotFoundError: if `path` does not exist. (Previously this was an
      `assert`, which is silently stripped under `python -O`.)
  """
  if not osp.exists(path):
    raise FileNotFoundError(path)
  # gc.disable()
  with open(path, 'rb') as f:
    ret = pickle.load(f)
  # gc.enable()
  return ret

def load_state_dict(model, src_state_dict):
  """Copy parameters and buffers from `src_state_dict` into `model` and its
  descendants.

  Unlike torch.nn.modules.module.load_state_dict(), the keys of
  `src_state_dict` need not exactly match `model.state_dict().keys()`: a
  key mismatch is simply skipped, and a copying error (e.g. shape mismatch)
  only prints a warning and proceeds.

  Args:
    model: A torch.nn.Module object.
    src_state_dict: A dict containing parameters and persistent buffers.
  """
  dest_state_dict = model.state_dict()
  # Fixed: iterating a dict directly yields only keys; `.items()` is needed
  # to get (name, param) pairs. The old `for name, param in src_state_dict:`
  # raised ValueError when unpacking the key strings.
  for name, param in src_state_dict.items():
    if name not in dest_state_dict:
      continue
    if isinstance(param, Parameter):
      # backwards compatibility for serialized parameters
      param = param.data
    try:
      dest_state_dict[name].copy_(param)
    except Exception as e:
      print('Warning: Error occurs when copying {}:{}'.format(name, str(e)))

def load_ckpt(modules_optims, ckpt_file, load_to_cpu=True, verbose=True):
  """Load the state_dicts saved by `save_ckpt` back into modules/optimizers.

  Args:
    modules_optims: A list, whose members are either torch.nn.optimizer or
      torch.nn.Module; order must match the one used when saving.
    ckpt_file: The file path.
    load_to_cpu: boolean, whether to map tensors in the checkpoint to cpu.
    verbose: whether to print a resume message.
  Returns:
    (epoch, scores) stored in the checkpoint.
  """
  map_location = (lambda storage, loc: storage) if load_to_cpu else None
  ckpt = torch.load(ckpt_file, map_location=map_location)
  # Fixed: the print/return used to sit INSIDE this loop, so only the FIRST
  # module's state was restored before returning.
  for m, sd in zip(modules_optims, ckpt['state_dicts']):
    m.load_state_dict(sd)
  if verbose:
    print('Resume from ckpt {}, \nepoch {}, \n scores {}'.format(
      ckpt_file, ckpt['ep'], ckpt['scores']))
  return ckpt['ep'], ckpt['scores']

def save_ckpt(modules_optims, ep, scores, ckpt_file):
  """Save the state_dicts of all modules/optimizers into one checkpoint file.

  Args:
    modules_optims: a list whose members are either torch.nn.optimizer or
      torch.nn.Module.
    ep: the current epoch number.
    scores: the performance of the current model.
    ckpt_file: the file path; its directory is created if missing.
  Note:
    torch.save() preserves the device type and id of saved tensors, so keep
    the load-time device mapping in mind when resuming on a different
    cpu/gpu configuration.
  """
  ckpt = dict(
    state_dicts=[m.state_dict() for m in modules_optims],
    ep=ep,
    scores=scores)
  may_make_dir(osp.dirname(osp.abspath(ckpt_file)))
  torch.save(ckpt, ckpt_file)

def adjust_lr_exp(optimizer, base_lr, ep, total_ep, start_decay_at_ep):
  """Exponentially decay the learning rate from `start_decay_at_ep` onwards.

  All parameter groups share the same learning rate.

  Args:
    optimizer: a pytorch optimizer object.
    base_lr: starting learning rate.
    ep: current epoch, ep >= 1.
    total_ep: total number of epochs to train.
    start_decay_at_ep: start decaying at the beginning of this epoch.
  """
  assert ep >= 1, "Current epoch number should be >=1"
  if ep < start_decay_at_ep:
    return
  # lr decays from base_lr down to base_lr * 0.001 at the final epoch.
  new_lr = base_lr * (0.001 ** (float(ep + 1 - start_decay_at_ep)
                                / (total_ep + 1 - start_decay_at_ep)))
  # Fixed: the attribute is `param_groups`; `optimizer.parm_group` raised
  # AttributeError, so the lr was never actually adjusted.
  for g in optimizer.param_groups:
    g['lr'] = new_lr
  print('====> lr adjusted to {:.10f}'.format(new_lr).rstrip('0'))

def adjust_lr_staircase(optimizer, base_lr, ep, decay_at_epochs, factor):
  """Multiply the lr by `factor` at the beginning of specified epochs.
  All parameters in the optimizer share the same learning rate.

  Args:
    optimizer: a pytorch `optimizer` object.
    base_lr: starting learning rate.
    ep: current epoch, ep >= 1.
    decay_at_epochs: a list/tuple of epoch numbers at which lr is decayed.
    factor: a number in range (0, 1).
  Example:
    base_lr = 1e-3
    decay_at_epochs = [51, 101]
    factor = 0.1
    It means the learning rate starts at 1e-3 and is multiplied by 0.1 at the
    BEGINNING of the 51'st epoch, and then further multiplied by 0.1 at the
    BEGINNING of the 101'st epoch, then stays unchanged till the end of
    training.
  """
  assert ep >= 1, "Current epoch number should be >=1"
  if ep not in decay_at_epochs:
    return
  # `ep in decay_at_epochs` above guarantees index() succeeds.
  ind = decay_at_epochs.index(ep)
  new_lr = base_lr * factor ** (ind + 1)
  # Fixed: the attribute is `param_groups`; `optimizer.parm_group` raised
  # AttributeError, so the lr was never actually adjusted.
  for g in optimizer.param_groups:
    g['lr'] = new_lr
  print('=====> lr adjusted to {:.10f}'.format(new_lr).rstrip('0'))

def may_set_mode(modules_optims, mode):
  """Set train/eval mode on a module, or on every module in a list.

  Args:
    modules_optims: a torch.nn.Module, or a list whose members are
      torch.nn.Module or torch.optim.Optimizer (non-modules are ignored).
    mode: 'train' or 'eval'.
  """
  assert mode in ['train', 'eval']
  # Anything without __len__ is treated as a single object and wrapped
  # (same check as the module-level is_iterable helper).
  if not hasattr(modules_optims, '__len__'):
    modules_optims = [modules_optims]
  for m in modules_optims:
    if not isinstance(m, torch.nn.Module):
      continue
    if mode == 'train':
      m.train()
    else:
      m.eval()
def is_iterable(obj):
  """Return True if `obj` is a sized container (defines `__len__`).

  NOTE: this checks sized-ness rather than true iterability; generators and
  other un-sized iterators return False.
  """
  return hasattr(obj, '__len__')

class AverageMeter(object):
  """Tracks the most recent value and a running (weighted) average."""

  def __init__(self):
    self.reset()

  def reset(self):
    """Clear all statistics."""
    self.val = 0    # most recent value
    self.avg = 0    # running average
    self.sum = 0    # weighted sum of values
    self.count = 0  # total weight seen so far

  def update(self, val, n=1):
    """Record `val` with weight `n` (e.g. an average over n samples)."""
    self.val = val
    self.sum += val * n
    self.count += n
    # The tiny epsilon keeps the division safe if count is still 0.
    self.avg = float(self.sum) / (self.count + 1e-20)

@contextmanager
def measure_time(enter_msg, verbose=True):
  """Context manager that prints `enter_msg` on entry and the elapsed
  wall-clock seconds on exit; completely silent when verbose=False."""
  if verbose:
    start = time.time()
    print(enter_msg)
  yield
  if verbose:
    print('Done, {:.2f}s'.format(time.time() - start))
