# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import functools
import os
import sys
import time
import warnings
from importlib.util import find_spec
from typing import Any, Callable, Dict, Optional, Tuple

import psutil
import pynvml
import torch
from omegaconf import DictConfig

from fast3r.utils import pylogger, rich_utils

# Module-level logger; rank_zero_only=True so only the rank-0 process emits messages.
log = pylogger.RankedLogger(__name__, rank_zero_only=True)

def get_gpu_processes(pname='python'):
    """Return info about compute processes using NVIDIA GPU 0, excluding this process.

    Only compute processes (the typical ML/DL case) whose name contains
    ``pname`` are reported; the current process is always skipped.

    Args:
        pname (str): Substring matched case-insensitively against each process
            name, e.g. 'python' matches both 'python' and 'python3'.

    Returns:
        A list of dictionaries, one per matching process. Returns an empty
        list if none are found or NVML is unavailable.
        Example:
        [
            {
                'pid': 1768748,
                'name': 'python',
                'used_gpu_memory_GB': 21
            }
        ]
    """
    python_processes_on_gpu = []
    current_pid = os.getpid()

    try:
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        # We check for compute processes, as this is the typical use case for ML/DL.
        procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)

        for p in procs:
            # IMPORTANT: Exclude the current script from the check (do this
            # before spending a psutil lookup on our own PID).
            if p.pid == current_pid:
                continue

            try:
                # We need psutil to get the process name from its PID.
                proc_info = psutil.Process(p.pid)

                # Check if the process name matches (e.g. 'python' or 'python3').
                if pname in proc_info.name().lower():
                    # usedGpuMemory can be None when the driver does not expose
                    # per-process memory (e.g. insufficient privileges).
                    used_bytes = p.usedGpuMemory or 0
                    python_processes_on_gpu.append({
                        'pid': p.pid,
                        'name': proc_info.name(),
                        'used_gpu_memory_GB': used_bytes // (1024**3)  # bytes -> GiB
                    })

            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Process might have ended, or we don't have permission to inspect it.
                continue

    except pynvml.NVMLError as e:
        print(f"NVML Error: {e}. Is an NVIDIA driver installed and running?")
    finally:
        # Ensure NVML is shut down properly.
        try:
            pynvml.nvmlShutdown()
        except pynvml.NVMLError:
            pass

    return python_processes_on_gpu

def wait_for_python_gpu_processes(poll_interval_minutes: float = 3):
    """
    Waits until no other Python processes are using any NVIDIA GPU.

    This function periodically checks for Python processes running on any GPU
    and waits until all of them have completed. It ignores the current
    process running this script.

    Args:
        poll_interval_minutes (float): The interval in minutes to wait between checks.
    """
    while True:
        # Get a list of other python processes on the GPU.
        other_py_procs = get_gpu_processes(pname='python')

        if not other_py_procs:
            print("-> No other Python processes found on the GPU. It is safe to proceed.")
            break

        # Report only the PIDs so the message actually matches its wording.
        pids = [p['pid'] for p in other_py_procs]
        print(
            f"-> Waiting for other Python GPU process(es) to finish. "
            f"Found PIDs: {pids}. "
            f"Checking again in {poll_interval_minutes} minutes."
        )
        time.sleep(poll_interval_minutes * 60)

def wait_for_gpu_memory(target_usage_gb: int = 20, poll_interval_minutes: float = 1):
    """Block until GPU memory usage on device 0 drops below a threshold.

    Designed for a single-host, single-GPU setup: polls NVML periodically and
    returns once used memory is under the target. If CUDA is unavailable or an
    NVML error occurs, the check is skipped and the function returns.

    Args:
        target_usage_gb (int): Memory threshold in Gigabytes (GB); waiting
            stops once usage falls below this value.
        poll_interval_minutes (float): Minutes to sleep between polls.
    """
    if not torch.cuda.is_available():
        log.warning("CUDA not available. Skipping GPU memory check.")
        return

    gib = 1024 ** 3
    try:
        pynvml.nvmlInit()
        # Single-GPU assumption: we only ever monitor device 0.
        device = pynvml.nvmlDeviceGetHandleByIndex(0)
        threshold_bytes = target_usage_gb * gib

        while True:
            used_bytes = pynvml.nvmlDeviceGetMemoryInfo(device).used
            used_gb = used_bytes / gib

            if used_bytes < threshold_bytes:
                log.warning(
                    f"GPU memory usage is {used_gb:.2f} GB, which is below the "
                    f"{target_usage_gb} GB threshold. Proceeding with training."
                )
                break

            log.warning(
                f"Waiting for GPU memory to become available. "
                f"Current usage: {used_gb:.2f} GB. "
                f"Target: < {target_usage_gb} GB. "
                f"Checking again in {poll_interval_minutes} minutes."
            )
            time.sleep(poll_interval_minutes * 60)
    except pynvml.NVMLError as e:
        log.error(f"An NVML error occurred: {e}. Could not monitor GPU memory. "
                  "Proceeding without memory check.")
    finally:
        # nvmlShutdown can fail if init itself failed; that is safe to ignore.
        try:
            pynvml.nvmlShutdown()
        except pynvml.NVMLError:
            pass

def extras(cfg: DictConfig) -> None:
    """Applies optional utilities before the task is started.

    Utilities:
        - Ignoring python warnings
        - Setting tags from command line
        - Rich config printing

    :param cfg: A DictConfig object containing the config tree.
    """
    extras_cfg = cfg.get("extras")

    # Nothing to do when the config carries no `extras` section.
    if not extras_cfg:
        log.warning("Extras config not found! <cfg.extras=null>")
        return

    # Silence all python warnings when requested.
    if extras_cfg.get("ignore_warnings"):
        log.info("Disabling python warnings! <cfg.extras.ignore_warnings=True>")
        warnings.filterwarnings("ignore")

    # Ask for tags on the command line when none were provided in the config.
    if extras_cfg.get("enforce_tags"):
        log.info("Enforcing tags! <cfg.extras.enforce_tags=True>")
        rich_utils.enforce_tags(cfg, save_to_file=True)

    # Render the resolved config tree with the Rich library.
    if extras_cfg.get("print_config"):
        log.info("Printing config tree with Rich! <cfg.extras.print_config=True>")
        rich_utils.print_config_tree(cfg, resolve=True, save_to_file=True)


def task_wrapper(task_func: Callable) -> Callable:
    """Optional decorator that controls the failure behavior when executing the task function.

    This wrapper can be used to:
        - make sure loggers are closed even if the task function raises an exception (prevents multirun failure)
        - save the exception to a `.log` file
        - mark the run as failed with a dedicated file in the `logs/` folder (so we can find and rerun it later)
        - etc. (adjust depending on your needs)

    Example:
    ```
    @utils.task_wrapper
    def train(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        ...
        return metric_dict, object_dict
    ```

    :param task_func: The task function to be wrapped.

    :return: The wrapped task function.
    """

    @functools.wraps(task_func)  # keep task_func's name/docstring on the wrapper
    def wrap(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        # execute the task
        try:
            metric_dict, object_dict = task_func(cfg=cfg)

        # things to do if exception occurs
        except Exception:
            # save exception to `.log` file
            log.exception("")

            # some hyperparameter combinations might be invalid or cause out-of-memory errors
            # so when using hparam search plugins like Optuna, you might want to disable
            # raising the below exception to avoid multirun failure
            raise  # bare raise re-raises with the original traceback intact

        # things to always do after either success or exception
        finally:
            # display output dir path in terminal
            log.info(f"Output dir: {cfg.paths.output_dir}")

            # always close wandb run (even if exception occurs so multirun won't fail)
            if find_spec("wandb"):  # check if wandb is installed
                import wandb

                if wandb.run:
                    log.info("Closing wandb!")
                    wandb.finish()

        return metric_dict, object_dict

    return wrap


def get_metric_value(metric_dict: Dict[str, Any], metric_name: Optional[str]) -> Optional[float]:
    """Safely retrieves value of the metric logged in LightningModule.

    :param metric_dict: A dict containing metric values.
    :param metric_name: If provided, the name of the metric to retrieve.
    :return: If a metric name was provided, the value of the metric; otherwise None.
    :raises Exception: If ``metric_name`` is not a key of ``metric_dict``.
    """
    if not metric_name:
        log.info("Metric name is None! Skipping metric value retrieval...")
        return None

    if metric_name not in metric_dict:
        # Put the available keys into the exception itself so they show up
        # wherever the error is reported, not just on stdout.
        raise Exception(
            f"Metric value not found! <metric_name={metric_name}>\n"
            f"Available metric names: {list(metric_dict.keys())}\n"
            "Make sure metric name logged in LightningModule is correct!\n"
            "Make sure `optimized_metric` name in `hparams_search` config is correct!"
        )

    # `.item()` converts the 0-dim tensor logged by Lightning into a plain float.
    metric_value = metric_dict[metric_name].item()
    log.info(f"Retrieved metric value! <{metric_name}={metric_value}>")

    return metric_value
