
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union,Any
from geesibling.core.lib._graph import DataType
import numpy as np
from enum import Enum
@dataclass()
class AbstractTensor:
    """Lightweight tensor metadata: shape and element dtype, carrying no data."""
    # Tensor dimensions; None when unknown.
    shape: Optional[List] = None
    # Element type from the core graph library; None when unknown.
    dtype: Optional[DataType] = None

@dataclass()
class DeviceMesh:
    """A logical n-dimensional mesh of devices.

    Hashing and equality consider only the mesh *shape* and the device
    type, not the individual device ids stored in ``mesh``.
    """
    device_type: str
    # n-d array of device ids. Annotated with the actual array type
    # (np.array is a factory function, not a type).
    mesh: np.ndarray
    mesh_dim_names: Optional[Tuple[str, ...]] = None

    # Make instances hashable (dataclass-generated __eq__ is overridden below).
    def __hash__(self):
        # Lazily compute and cache the hash on first access. Compare the
        # cache with `is None` so a legitimate hash value of 0 is not
        # recomputed on every call.
        if getattr(self, "_hash", None) is None:
            self._hash = hash(
                (
                    self.mesh.shape,
                    self.device_type,
                )
            )
        return self._hash

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, DeviceMesh):
            return False
        if self is other:
            return True
        # Consistent with __hash__: only shape and device_type participate;
        # the concrete device ids are deliberately ignored.
        return (
            self.mesh.shape == other.mesh.shape
            and self.device_type == other.device_type
        )

@dataclass()
class Placement:
    """Base class describing how a tensor relates to one mesh dimension.

    Concrete kinds are Replicate, Shard and _Partial (defined below);
    the ``is_*`` predicates dispatch on the instance type.
    """
    name: str
    # Tensor dim being sharded; only meaningful for Shard instances.
    dim: Optional[int] = None
    # Pending reduction op; only meaningful for _Partial instances.
    reduce_op: Optional[str] = None

    def is_replicate(self) -> bool:
        return isinstance(self, Replicate)

    def is_shard(self, dim: Optional[int] = None) -> bool:
        """True if this is a Shard placement (optionally on a specific dim)."""
        is_shard_instance = isinstance(self, Shard)
        if dim is not None and is_shard_instance:
            # The original wrapped this in typing.cast, which was never
            # imported and raised NameError at runtime; the plain
            # attribute access needs no cast.
            return self.dim == dim
        else:
            return is_shard_instance

    def is_partial(self) -> bool:
        return isinstance(self, _Partial)

class Replicate(Placement):
    """Placement that fully replicates the tensor on a mesh dimension."""

    def __init__(self):
        self.name = 'Replicate()'

    def __str__(self) -> str:
        # Compact human-readable form used by spec pretty-printing.
        return "R"

class Shard(Placement):
    """Placement that shards the tensor along tensor dimension ``dim``."""

    def __init__(self, dim: int):
        self.dim = dim
        self.name = 'Shard({})'.format(dim)

    def __str__(self) -> str:
        # Compact human-readable form, e.g. "S(0)".
        return f"S({self.dim})"

class _Partial(Placement):
    """Placement holding partial values pending a ``reduce_op`` reduction."""

    def __init__(self, reduce_op='SUM'):
        self.reduce_op = reduce_op
        self.name = '_Partial({})'.format(reduce_op)

    def __str__(self) -> str:
        # Compact human-readable form used by spec pretty-printing.
        return "P"
        

@dataclass()
class DTensorSpec:
    """Sharding spec of a distributed tensor: a device mesh, one placement
    per mesh dimension, and optional tensor metadata."""
    mesh: DeviceMesh
    # Iterated as a sequence below — one Placement per mesh dimension.
    placements: Sequence[Placement]
    tensor_meta: Optional[AbstractTensor] = None

    def is_replicated(self):
        """
        return True if the current DTensorSpec replicates on all mesh dims (devices)
        """
        return all(placement.is_replicate() for placement in self.placements)
    @property
    def num_shards(self) -> int:
        """Total number of shards: product of mesh extents on sharded dims."""
        num_shards = 1
        for i, placement in enumerate(self.placements):
            if placement.is_shard():
                # NOTE(review): maps placement i to mesh axis -(i+1), i.e.
                # counts mesh dims from the last axis backwards — confirm
                # this ordering convention against callers.
                num_shards *= self.mesh.mesh.shape[-(i+1)]
        return num_shards
    @property
    def shape(self):
        # Delegates to tensor_meta, which must have been set beforehand.
        if self.tensor_meta is None:
            raise ValueError("tensor_meta is not set")
        return self.tensor_meta.shape

def _pretty_print_spec(spec: object) -> str:
    if spec is None:
        return "None"
    elif isinstance(spec, DTensorSpec):
        return "".join([str(p) for p in spec.placements])
    elif isinstance(spec, Sequence):
        return "(" + ", ".join([_pretty_print_spec(s) for s in spec]) + ")"
    else:
        raise RuntimeError(f"Unknown spec type to print: spec={spec}")

@dataclass()
class PlacementStrategy:
    """One candidate placement for a node: its output spec, optional
    per-input specs, and an optional redistribution cost table."""
    output_specs: DTensorSpec
    input_specs: Optional[Sequence[DTensorSpec]] = None
    redistribute_cost: Optional[List[List[float]]] = None

    def __str__(self) -> str:
        # Render as "inputs -> output" when input specs exist, else just
        # the output spec.
        prefix = ""
        if self.input_specs is not None:
            prefix = f"{_pretty_print_spec(self.input_specs)} -> "
        return prefix + _pretty_print_spec(self.output_specs)


class OpStrategy:
    """
    OpStrategy that consists of a list of placement strategies associated with the op
    """

    def __init__(self, strategies: List[PlacementStrategy]) -> None:
        super().__init__()
        self.strategies: List[PlacementStrategy] = strategies

    def __str__(self) -> str:
        strategy_list_str = ", ".join([str(strategy) for strategy in self.strategies])
        mesh_shape = self.output_mesh_shape
        return f"[{strategy_list_str}] @ mesh: {mesh_shape}"

    @property
    def output_mesh_shape(self):
        """Mesh shape of the first strategy's output spec.

        DeviceMesh in this module has no ``shape`` attribute; its numpy
        array is stored in ``.mesh`` (cf. DTensorSpec.num_shards), so the
        shape is reached via ``spec.mesh.mesh.shape``. The original
        ``spec.mesh.shape`` raised AttributeError.
        """
        output_spec = self.strategies[0].output_specs
        if isinstance(output_spec, DTensorSpec):
            return output_spec.mesh.mesh.shape
        # Fallback: a tuple of specs (multi-output op); use the first one.
        assert isinstance(
            output_spec, tuple
        ), "found no DTensorSpec in the OpStrategy!"
        assert output_spec[0] is not None
        return output_spec[0].mesh.mesh.shape
class OperationDataType(Enum):
    """
    An operation can come from the argument list of an operator or the parameter list of a module.
    """
    # Values are explicit (not auto()) so they stay stable if members are
    # reordered; presumably they are serialized/compared elsewhere — confirm.
    INPUT = 0
    ARG = 1
    PARAM = 2
    BUFFER = 3
    OUTPUT = 4


@dataclass
class OperationData:
    """
    OperationData is the data related to an operator, the data can be the operand or the output.

    Args:
        name (str): the name of the operation-related data
        type (OperationDataType): the type of the operation data
        data (Any): the value for this data, usually it is a meta tensor.
    """
    name: str
    type: OperationDataType
    data: Any   # typically an AbstractTensor(dtype, shape)

    def __repr__(self) -> str:
        # The original defined __repr__ twice; the later, name-only version
        # silently shadowed the first. Keep the effective behavior and drop
        # the dead duplicate.
        return f'OperationData(name={self.name})'

    def __hash__(self) -> int:
        # Hash by name only. Still consistent with the dataclass-generated
        # __eq__ (which compares all fields): equal instances necessarily
        # share the same name and therefore the same hash.
        return hash(f'{self.name}')
@dataclass
class ShardingStrategy:
    """
    ShardingStrategy is a dataclass to store the meta information on tensor sharding for a node.

    Args:
        name (str): express the sharding strategies in string, such as '(R, R) -> R'.
        sharding_specs (PlacementStrategy): placement strategy (input/output specs) for the node. (default to None)
        compute_cost (int): computation cost to complete this strategy. (default to None)
        memory_cost (int): memory cost of the output node using this strategy. (default to None)
        communication_cost (int): communication cost to complete this strategy. (default to None)
        redistribute_costs (Dict[str, List]): redistribution cost lists — presumably keyed
            by input name; confirm against callers. (default to None)
    """
    name: str
    sharding_specs: Optional[PlacementStrategy] = None
    compute_cost: Optional[int] = None
    memory_cost: Optional[int] = None
    communication_cost: Optional[int] = None
    # communication_actions: Dict[OperationData, CommAction] = None
    # redistribute_costs: Dict[Node, List] = None
    redistribute_costs: Optional[Dict[str, List]] = None