import logging
import os
from typing import Dict, List, Optional, Any, Union

import torch
import torch_npu
import torch.distributed

import pytorch_lightning as pl
from pytorch_lightning.overrides.torch_distributed import _object_to_tensor, _rank_not_in_group, _tensor_to_object
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.ddp import DDPStrategy
from pytorch_lightning.utilities.distributed import group as _group, sync_ddp_if_available, ReduceOp
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.distributed import get_rank, broadcast

from lightning_npu.plugins.io.npu_plugin import NPUCheckpointIO


log = logging.getLogger(__name__)


class NPUParallelStrategy(DDPStrategy):
    """Strategy for distributed data-parallel training on multiple NPU devices.

    Mirrors :class:`~pytorch_lightning.strategies.ddp.DDPStrategy`, defaulting to
    the HCCL process-group backend and working around NPU-specific limitations
    visible below: object broadcast via int32/int tensors, and an int64 -> int32
    downcast in :meth:`reduce`.
    """

    strategy_name = "npu_parallel"

    def __init__(
        self,
        accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None,
        parallel_devices: Optional[List[torch.device]] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision_plugin: Optional[PrecisionPlugin] = None,
        process_group_backend: Optional[str] = "hccl",
    ) -> None:
        """Create the strategy, defaulting ``checkpoint_io`` to :class:`NPUCheckpointIO`.

        Raises:
            MisconfigurationException: If no NPU device is available.
        """
        if not torch_npu.npu.is_available():
            raise MisconfigurationException("`NPUParallelStrategy` requires NPU devices to run")

        # Emit the int64 downcast warning in ``reduce`` at most once per process.
        self.int64_warning_flag: bool = True
        super().__init__(
            accelerator=accelerator,
            parallel_devices=parallel_devices,
            checkpoint_io=checkpoint_io or NPUCheckpointIO(),
            precision_plugin=precision_plugin,
            process_group_backend=process_group_backend,
        )

    def setup_environment(self) -> None:
        """Export NPU/HCCL environment variables, then run the base-class setup."""
        # "ID" carries the local rank — presumably consumed by torch_npu/HCCL for
        # device selection; TODO confirm against the torch_npu docs.
        os.environ["ID"] = str(self.local_rank)
        if self._process_group_backend == "hccl":
            # this env is used in overrides to check the backend initiated
            os.environ["HCCL_DISTRIBUTED_BACKEND"] = str(1)
        super().setup_environment()

    def broadcast_object_list(self, object_list, src=0, group=_group.WORLD, device=None):
        """Broadcast the picklable objects in ``object_list`` from rank ``src``.

        NPU re-implementation of ``torch.distributed.broadcast_object_list``:
        tensors are moved to the current NPU device, and sizes/payload are cast to
        int32/int before the collective (int64 and uint8 broadcasts appear not to
        be supported by the HCCL backend — TODO confirm).

        Args:
            object_list: Objects to broadcast; on non-``src`` ranks the list
                contents are overwritten in place with the received objects.
            src: Source rank of the broadcast.
            group: Process group to work on; ranks outside it return immediately.
            device: Unused; kept for signature compatibility with
                ``torch.distributed.broadcast_object_list``.
        """
        if _rank_not_in_group(group):
            return

        my_rank = get_rank()
        # Serialize object_list elements to tensors on src rank.
        if my_rank == src:
            tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list])
            object_sizes_tensor = torch.cat(size_list).to(torch.int32)
        else:
            object_sizes_tensor = torch.empty(len(object_list), dtype=torch.int32)

        current_device = torch.device("npu", torch.npu.current_device())
        object_sizes_tensor = object_sizes_tensor.to(current_device)

        # Broadcast object sizes first so receivers can allocate the payload buffer.
        broadcast(object_sizes_tensor, src=src, group=group)

        # Concatenate and broadcast serialized object tensors.
        if my_rank == src:
            object_tensor = torch.cat(tensor_list)
        else:
            object_tensor = torch.empty(
                torch.sum(object_sizes_tensor).int().item(),
                dtype=torch.int,
            )

        # The uint8 pickle payload is widened to int for the collective and
        # narrowed back to uint8 before unpickling below.
        object_tensor = object_tensor.to(current_device).to(torch.int)

        broadcast(object_tensor, src=src, group=group)
        # Deserialize objects using their stored sizes.
        offset = 0
        if my_rank != src:
            for i, size_tensor in enumerate(object_sizes_tensor):
                # Materialize the size as a Python int once, so slicing and the
                # running offset do not trigger a device sync per access.
                obj_size = int(size_tensor)
                obj_view = object_tensor[offset: offset + obj_size].to(torch.uint8)
                if obj_view.device != torch.device("cpu"):
                    obj_view = obj_view.cpu()
                offset += obj_size
                object_list[i] = _tensor_to_object(obj_view, obj_size)

    def broadcast(self, obj: object, src: int = 0) -> object:  # type: ignore
        """Broadcast a single object from rank ``src`` to all ranks and return it."""
        obj = [obj]
        if self.global_rank != src:
            # Placeholder; overwritten in place by broadcast_object_list.
            obj = [None]

        self.broadcast_object_list(obj, src, group=_group.WORLD)
        return obj[0]

    def teardown(self) -> None:
        """Tear down the strategy, move the model back to CPU, and clean env vars."""
        # NOTE(review): ``log.detail`` is a custom level added by PyTorch Lightning's
        # logging setup — confirm it is patched in before relying on it here.
        log.detail(f"{self.__class__.__name__}: tearing down strategy.")
        super().teardown()

        log.detail(f"{self.__class__.__name__}: moving model to CPU")
        self.lightning_module.cpu()  # type: ignore
        # Was set to local rank
        os.environ.pop("ID", None)
        os.environ.pop("HCCL_DISTRIBUTED_BACKEND", None)

    def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = "mean") -> torch.Tensor:
        """Reduces a tensor from several distributed processes to one aggregated tensor.

        Args:
            tensor: the tensor to sync and reduce
            group: the process group to gather results from. Defaults to all processes (world)
            reduce_op: the reduction operation. Defaults to 'mean'/'avg'.
                Can also be a string 'sum' to calculate the sum during reduction.

        Return:
            reduced value, except when the input was not a tensor the output remains is unchanged
        """
        if isinstance(tensor, torch.Tensor):
            if tensor.dtype == torch.int64:
                # all_reduce on NPU does not support torch.int64; downcast to torch.int.
                if self.int64_warning_flag and self.local_rank == 0:
                    log.warning("int64 was not supported by npu in all_reduce ops, change tensor to torch.int")
                    self.int64_warning_flag = False
                tensor = tensor.to(torch.int)
            tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
        return tensor

    @classmethod
    def register_strategies(cls, strategy_registry: Dict) -> None:
        """Register this strategy under :attr:`strategy_name`."""
        strategy_registry.register(
            cls.strategy_name,
            cls,
            # Fix: ``cls`` is already the class, so ``cls.__class__.__name__`` would
            # be the metaclass name ("ABCMeta"), not "NPUParallelStrategy".
            description=cls.__name__,
        )

    def load_checkpoint(self, checkpoint_path) -> Dict[str, Any]:
        """Load a checkpoint directly onto the current NPU device.

        Args:
            checkpoint_path: Path to the checkpoint file.

        Returns:
            The loaded checkpoint dictionary.
        """
        # Free cached NPU memory first so the checkpoint tensors have room to load.
        torch.npu.empty_cache()
        current_device = torch.device("npu", torch.npu.current_device())
        return self.checkpoint_io.load_checkpoint(checkpoint_path, current_device)
