import argparse
import omegaconf.dictconfig
import os
import subprocess
import torch
import random
import numpy as np


def dict2namespace(config):
    """Recursively convert a (possibly nested) mapping into an argparse.Namespace.

    Nested plain dicts and OmegaConf DictConfig values become nested
    Namespaces; all other values are attached as-is.
    """
    ns = argparse.Namespace()
    for key, value in config.items():
        # Short-circuit: only touch omegaconf for non-dict values.
        if isinstance(value, dict) or isinstance(value, omegaconf.dictconfig.DictConfig):
            setattr(ns, key, dict2namespace(value))
        else:
            setattr(ns, key, value)
    return ns


def namespace2dict(config):
    """Inverse of dict2namespace: recursively convert a Namespace tree to dicts."""
    return {
        key: namespace2dict(value) if isinstance(value, argparse.Namespace) else value
        for key, value in vars(config).items()
    }

def select_max_memory_gpus(num_gpus=1):
    """
    Select the `num_gpus` GPUs with the most free memory and restrict
    CUDA_VISIBLE_DEVICES to them.

    Args:
        num_gpus: how many GPUs to expose (default 1).

    Returns:
        (selected_gpus, visible_device_count): the chosen physical GPU
        indices and the number of GPUs torch reports as visible.

    Raises:
        RuntimeError: if nvidia-smi exits with a non-zero status.
    """
    # Query per-GPU free memory (MiB). Pass an argument list with the
    # default shell=False: no shell is needed for a fixed command line,
    # and avoiding one removes any shell-interpretation surface.
    result = subprocess.run(
        ['nvidia-smi', '--query-gpu=memory.free', '--format=csv,noheader,nounits'],
        capture_output=True, text=True
    )

    if result.returncode != 0:
        raise RuntimeError(f"nvidia-smi command failed: {result.stderr}")

    # One integer per line; skip trailing empty lines.
    free_memory = [int(x) for x in result.stdout.split('\n') if x]

    # Physical GPU indices sorted by descending free memory.
    sorted_indices = sorted(range(len(free_memory)), key=lambda i: free_memory[i], reverse=True)
    selected_gpus = sorted_indices[:num_gpus]
    selected_gpus_memory = [free_memory[i] for i in selected_gpus]

    print(f"Selecting GPUs {selected_gpus} with free memory {selected_gpus_memory}")

    # Restrict this process (and children) to the selected devices.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, selected_gpus))

    # NOTE(review): torch caches the device list on first CUDA use, so this
    # count only reflects CUDA_VISIBLE_DEVICES if CUDA was not yet
    # initialized in this process — confirm call order at the call site.
    visible_device_count = torch.cuda.device_count()
    print(f"Number of visible GPUs: {visible_device_count}")

    return selected_gpus, visible_device_count


def set_seed(seed=0):
    """Seed every RNG in use (hash, random, NumPy, PyTorch) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)

    # Seed all framework RNGs with the same value.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    # Force deterministic cuDNN kernels (disables autotuning).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

def save_checkpoint(save_dir, state, is_best, filename=None):
    """
    Save the latest training checkpoint and, if best so far, a 'best' copy.

    Args:
        save_dir: directory to write checkpoints into (created if missing).
        state: serializable object (e.g. dict of model/optimizer state).
        is_best: if True, also write the state as 'checkpoint_best.tar'.
        filename: basename for the latest checkpoint; defaults to
            'checkpoint_latest.tar'.
    """
    # exist_ok avoids the check-then-create race when multiple workers
    # save into the same directory.
    os.makedirs(save_dir, exist_ok=True)
    filename = 'checkpoint_latest.tar' if filename is None else filename
    torch.save(state, os.path.join(save_dir, filename))
    if is_best:
        torch.save(state, os.path.join(save_dir, 'checkpoint_best.tar'))



def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (as a percentage) for each k in `topk`.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: ks to evaluate; each must be <= num_classes.

    Returns:
        List of scalar tensors, one per k in `topk`, each the percentage of
        samples whose target appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # pred: (batch, maxk) class indices, then transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape instead of view: view raises on non-contiguous slices
        # in recent PyTorch versions; reshape copies only when needed.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.count = 0
        self.sum = 0
        self.avg = 0
        self.val = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count