"""This module provides functions for checking GPU resource information
   and acting depends on the resources availability.

- get_gpu_usage
- wait_for_gpu
"""

import time
import random
import subprocess
from typing import List, NamedTuple, Optional, Callable

from common.logger import logger


class GPUInfo(NamedTuple):
    """Immutable snapshot of a single GPU's identity, memory, and load.

    Fields
    ------
    uuid : str
        Unique identifier reported for the GPU.
    ind : int
        Zero-based index of the GPU.
    total_memory : int
        Total GPU memory, in MiB.
    free_memory : int
        Currently free GPU memory, in MiB.
    gpu_utilization : float
        Current GPU utilization as a fraction in [0.0, 1.0].
    """

    uuid: str
    ind: int
    total_memory: int
    free_memory: int
    gpu_utilization: float


def get_gpu_usage(raise_on_error: bool = False) -> List[GPUInfo]:
    """Query ``nvidia-smi`` for the list of GPUs and their usage.

    Parameters
    ----------
    raise_on_error : bool, optional
        True to raise Exception when error, False to suppress error and return an empty list.
        Default is False.

    Returns
    -------
    List[GPUInfo]
        List of GPUInfo, see `GPUInfo` for fields. Empty when the query or
        the parse fails and `raise_on_error` is False.

    Raises
    ------
    Exception
        raise Exception when error happen in fetching the GPU information, and the `raise_on_error` argument is True.
    """
    commands = [
        "nvidia-smi",
        "--format=csv,noheader,nounits",
        "--query-gpu=gpu_uuid,memory.total,memory.free,utilization.gpu",
    ]
    try:
        # check=True surfaces a non-zero exit status (e.g. driver not loaded)
        # as CalledProcessError here, instead of silently handing an empty
        # stdout to the parser below and logging a misleading parse error.
        ret = subprocess.run(commands, capture_output=True, check=True)
    except Exception as err:
        if raise_on_error:
            raise
        logger.warning(
            "Fail to extract gpu information because of {}".format(str(err))
        )
        return []

    res = []
    try:
        str_res = ret.stdout.decode("utf-8").strip()
        # splitlines() plus a blank-line guard: empty output yields [] rather
        # than a bogus single empty "line" that the int() parse would reject.
        for ind, line in enumerate(str_res.splitlines()):
            if not line.strip():
                continue
            lst_vars = line.split(",")
            gpu_info = GPUInfo(
                uuid=lst_vars[0].strip(),
                ind=ind,
                # nvidia-smi reports MiB with --nounits; assumes output rows
                # follow GPU index order -- TODO confirm against driver docs
                total_memory=int(lst_vars[1]),
                free_memory=int(lst_vars[2]),
                # utilization.gpu is a percentage; normalize to 0.0 - 1.0
                gpu_utilization=float(lst_vars[3]) / 100,
            )
            res.append(gpu_info)
    except Exception as err:
        if raise_on_error:
            raise
        logger.warning(
            "Fail to extract gpu information because of {}".format(str(err))
        )
        return []
    return res


def check_one_gpu(
    gpu_id: int = 0,
) -> Optional[GPUInfo]:
    """Check the usage of a specific GPU given the `gpu_id`.

    Parameters
    ----------
    gpu_id: int
        The index of GPU. Default: 0.

    Returns
    -------
    Optional[GPUInfo]
        Utilization information of the GPU with index `gpu_id`, or None
        if no GPU information is available or no GPU has that index.
    """
    lst_gpus = get_gpu_usage()
    if not lst_gpus:
        # fixed grammar of the original "None GPU available." message
        logger.error("No GPU available.")
        return None
    # first (and only) entry whose reported index matches, else None
    return next((gpu_info for gpu_info in lst_gpus if gpu_info.ind == gpu_id), None)


def wait_for_gpu(
    n_gpu: int = 1,
    memory: Optional[int] = None,
    utilization: Optional[float] = None,
    filter_func: Optional[Callable[[GPUInfo], bool]] = None,
    prefer: Optional[str] = None,
    timeout: int = 120,
    wait_per_try: int = 5,
) -> Optional[List[GPUInfo]]:
    """Request and wait for available GPU meeting specified requirements

    Parameters
    ----------
    n_gpu : int, optional
        Number of GPU requested, by default 1. Clamped to the number of
        GPUs actually present.
    memory : Optional[int], optional
        Minimum free memory expected for each GPU in MiB, by default None.
    utilization : Optional[float], optional
        Maximum GPU utilization expected for each GPU, as a float in 0.0 to 1.0, by default None.
    filter_func : Optional[Callable[[GPUInfo], bool]], optional
        Additional filter function, as a function taking a GPUInfo object and return a bool,
        by default None.
    prefer : Optional[str], optional
        Strategy to choose GPU, one of the following:
          - "random": randomly choose
          - "memory": prefer larger free memory
          - "utilization": prefer less gpu utilization
          - None, just use the default ordering, which is the default strategy to use
    timeout : int, optional
        Time in second to wait for the GPU, by default 120 sec.
    wait_per_try : int, optional
        Base time in second to wait between each check of GPU information,
        by default 5 sec; the actual sleep grows linearly with the number
        of failed tries.

    Returns
    -------
    Optional[List[GPUInfo]]
        - None, if there is no matching GPU (no GPU info at all, or timeout).
        - List[GPUInfo], if there are enough GPUs matching request requirement.
    """
    total_time_cost = 0
    n_tries = 0

    while total_time_cost <= timeout:
        # linear back-off: no sleep on the first pass, then wait_per_try,
        # 2 * wait_per_try, ... between subsequent checks
        wait_time = n_tries * wait_per_try
        time.sleep(wait_time)
        total_time_cost += wait_time

        lst_gpus = get_gpu_usage()
        if not lst_gpus:
            # GPU info is unavailable entirely (e.g. nvidia-smi failed);
            # retrying will not help, so give up immediately
            return None

        total_gpu = len(lst_gpus)
        if n_gpu > total_gpu:
            logger.warning(
                f"requesting more than available gpus, at most {total_gpu} can be provided"
            )
            n_gpu = total_gpu

        # apply each requested constraint in turn
        if memory is not None:
            lst_gpus = [
                gpu_info for gpu_info in lst_gpus if (gpu_info.free_memory >= memory)
            ]
        if utilization is not None:
            lst_gpus = [
                gpu_info
                for gpu_info in lst_gpus
                if (gpu_info.gpu_utilization <= utilization)
            ]
        if filter_func is not None:
            lst_gpus = [gpu_info for gpu_info in lst_gpus if filter_func(gpu_info)]

        if n_gpu > len(lst_gpus):
            logger.warning("not enough gpus meeting the requirements")
            n_tries += 1
            continue
        elif n_gpu == len(lst_gpus):
            # every matching GPU is needed, so ordering preference is moot
            return lst_gpus

        if prefer is None:
            pass
        elif prefer == "random":
            random.shuffle(lst_gpus)
        elif prefer == "memory":
            lst_gpus = sorted(
                lst_gpus, key=lambda gpu_info: gpu_info.free_memory, reverse=True
            )
        elif prefer == "utilization":
            lst_gpus = sorted(lst_gpus, key=lambda gpu_info: gpu_info.gpu_utilization)
        else:
            logger.warning(f"unknown `prefer` {prefer}, treat as no preference")

        # BUGFIX: a former `len(lst_gpus) == 1` shortcut returned one GPU even
        # when fewer were requested (n_gpu == 0); the slice handles all cases.
        return lst_gpus[:n_gpu]
    return None
