
import logging
import os
from typing import Any, Dict, List, Optional, Union

import torch
import torch_npu

import pytorch_lightning as pl
from pytorch_lightning.accelerators.accelerator import Accelerator
from lightning_npu.utilities import device_parser
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import _DEVICE

_log = logging.getLogger(__name__)


class NPUAccelerator(Accelerator):
    """Accelerator for Ascend NPU devices (via ``torch_npu``)."""

    def setup_environment(self, root_device: torch.device) -> None:
        """Bind the current process to ``root_device``.

        Args:
            root_device: The device this process should use; must be of type ``"npu"``.

        Raises:
            MisconfigurationException:
                If the selected device is not NPU.
        """
        super().setup_environment(root_device)
        if root_device.type != "npu":
            raise MisconfigurationException(f"Device should be NPU, got {root_device} instead")
        # A bare ``torch.device("npu")`` has no index; default to card 0 so
        # ``set_device`` receives a concrete device.
        if root_device.index is None:
            root_device = torch.device("npu:0")
        torch.npu.set_device(root_device)

    def setup(self, trainer: "pl.Trainer") -> None:
        """Log device visibility for this rank and free cached NPU memory."""
        # TODO refactor input from trainer to local_rank @four4fish
        self.set_npu_flags(trainer.local_rank)
        # clear cache before training
        torch.npu.empty_cache()

    @staticmethod
    def set_npu_flags(local_rank: int) -> None:
        """Log which NPU devices are visible to this local rank."""
        # Ascend's runtime visibility variable is ASCEND_RT_VISIBLE_DEVICES;
        # the previous CUDA_VISIBLE_DEVICES lookup was a copy-paste leftover
        # from the CUDA accelerator and is never set on NPU systems.
        all_npu_ids = ",".join(str(x) for x in range(torch.npu.device_count()))
        devices = os.getenv("ASCEND_RT_VISIBLE_DEVICES", all_npu_ids)
        _log.info(f"LOCAL_RANK: {local_rank} - NPU_VISIBLE_DEVICES: [{devices}]")

    def get_device_stats(self, device: _DEVICE) -> Dict[str, Any]:
        """Gets stats for the given NPU device.

        Args:
            device: NPU device for which to get stats

        Returns:
            A dictionary mapping the metrics to their values.

        """
        return torch.npu.memory_stats(device)

    @staticmethod
    def parse_devices(devices: Union[int, str, List[int]]) -> Optional[List[int]]:
        """Accelerator device parsing logic."""
        return device_parser.parse_npu_ids(devices)

    @staticmethod
    def get_parallel_devices(devices: List[int]) -> List[torch.device]:
        """Gets parallel devices for the Accelerator."""
        return [torch.device("npu", i) for i in devices]

    @staticmethod
    def auto_device_count() -> int:
        """Get the devices when set to auto."""
        return torch.npu.device_count()

    @staticmethod
    def is_available() -> bool:
        """Return whether at least one NPU device is present."""
        return torch.npu.device_count() > 0

    @classmethod
    def register_accelerators(cls, accelerator_registry: Dict) -> None:
        """Register this accelerator under the ``"npu"`` name."""
        accelerator_registry.register(
            "npu",
            cls,
            # ``cls.__name__`` (not ``cls.__class__.__name__``, which is the
            # metaclass name, e.g. ``ABCMeta``) gives ``"NPUAccelerator"``.
            description=cls.__name__,
        )
