from ..shard.placement_types import DTensorSpec,DeviceMesh
import math
from dataclasses import dataclass
from functools import lru_cache
from typing import List, Optional
from geesibling.core.lib._graph import DataType
@dataclass
class MeshTopoInfo:
    """
    Mesh information for collective cost estimation.

    One entry per mesh dimension: device count, link bandwidth (GB/s)
    and link latency (us) along that dimension.
    """

    # the device mesh this topology describes
    mesh: "DeviceMesh"
    # number of devices along each mesh dimension
    mesh_dim_devices: List[int]
    # per-dimension link bandwidth, GB/s
    mesh_dim_bandwidth: List[float]
    # per-dimension link latency, us
    mesh_dim_latency: List[float]

    @staticmethod
    @lru_cache(None)
    def build_from_mesh(device_mesh: "DeviceMesh") -> "MeshTopoInfo":
        """Build (and memoize per DeviceMesh) topology info for cost estimation.

        Assumptions made for simplicity:
        1. the mesh is homogeneous, and it's a gpu/nccl model
        2. gpu arch is Ampere or Hopper
        3. collectives are all ring-based algorithms for now

        The upstream inter-host bandwidth/latency adjustment (checking whether
        the mesh spans more than one host's GPUs) was removed in this fork, so
        every mesh dimension is treated as intra-host (NVLink-class numbers).

        NOTE: lru_cache keeps every DeviceMesh passed in alive for the process
        lifetime; acceptable here since meshes are few and long-lived.
        """
        mesh = device_mesh.mesh
        # the base bw number (intra-node), GB/s
        base_bw = 87.7
        mesh_dim_bandwidth = [base_bw] * mesh.ndim
        # the latency in terms of us (intra-node, nv-link)
        mesh_dim_latency = [0.6] * mesh.ndim
        # The original reversed loop also accumulated a total device count for
        # the removed inter-host penalty; a plain copy of the shape suffices.
        mesh_dim_devices = list(mesh.shape)

        return MeshTopoInfo(
            mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency
        )

# Byte width of each supported DataType member. Module-level so the table is
# built once instead of on every spec_to_bytes() call.
# TODO: extend with more data types as they are needed.
_DTYPE_TO_ITEMSIZE = {
    DataType.I8: 1,    # 8-bit integer
    DataType.I16: 2,   # 16-bit integer
    DataType.I32: 4,   # 32-bit integer
    DataType.I64: 8,   # 64-bit integer
    DataType.F16: 2,   # 16-bit float
    DataType.F32: 4,   # 32-bit float
    DataType.F64: 8,   # 64-bit float
}

def spec_to_bytes(spec: "DTensorSpec") -> int:
    """Return the total (unsharded) tensor size in bytes described by ``spec``.

    Computed as itemsize(dtype) * prod(shape) from ``spec.tensor_meta``.

    Raises:
        AssertionError: if ``spec`` has no tensor meta attached.
        KeyError: if the dtype is not in the supported itemsize table.
    """
    assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
    return _DTYPE_TO_ITEMSIZE[spec.tensor_meta.dtype] * math.prod(spec.tensor_meta.shape)

def allgather_cost(bytes_gb: float, mesh_topo: "MeshTopoInfo", mesh_dim: int) -> float:
    """Estimate the cost of a ring all-gather of ``bytes_gb`` along ``mesh_dim``.

    Model: fixed base latency (6.6 us) plus one per-hop latency for each of the
    (num_devices - 1) ring hops, plus a bandwidth term for the data each rank
    forwards.

    NOTE(review): the latency term is in us while the bandwidth term is
    data / (GB/s); the upstream ``* 1e6`` rescale was deliberately dropped in
    this fork, so the units are mixed on purpose — confirm before re-tuning.
    """
    num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
    num_hops = num_devices_on_mesh_dim - 1
    # base latency + per-hop comm latency (us)
    latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
    # data volume each rank forwards, divided by the link bandwidth
    bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_topo.mesh_dim_bandwidth[mesh_dim]
    return latency + bw


def allreduce_cost(bytes_gb: float, mesh_topo: "MeshTopoInfo", mesh_dim: int) -> float:
    """Estimate the cost of a ring all-reduce of ``bytes_gb`` along ``mesh_dim``.

    All-reduce moves almost 2x the bytes of an all-gather/reduce-scatter
    (reduce-scatter phase + all-gather phase), hence 2n - 1 ring hops.

    NOTE(review): latency is in us while the bandwidth term is data / (GB/s);
    the upstream ``* 1e6`` rescale was deliberately dropped in this fork, so
    the units are mixed on purpose — confirm before re-tuning.
    """
    num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
    # allreduce has almost 2x comm bytes compared to allgather/reduce_scatter
    num_hops = 2 * num_devices_on_mesh_dim - 1
    # base latency + per-hop comm latency (us)
    latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
    # data volume each rank forwards, divided by the link bandwidth
    bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_topo.mesh_dim_bandwidth[mesh_dim]
    return latency + bw

def reduce_scatter_cost(
    bytes_gb: float,
    mesh_topo: "MeshTopoInfo",
    mesh_dim: int,
) -> float:
    """Estimate the cost of a ring reduce-scatter of ``bytes_gb`` along ``mesh_dim``.

    Same formula as ``allgather_cost`` (num_devices - 1 ring hops): kept as a
    separate function to mirror the upstream API and leave room for the two
    models to diverge.

    NOTE(review): latency is in us while the bandwidth term is data / (GB/s);
    the upstream ``* 1e6`` rescale was deliberately dropped in this fork, so
    the units are mixed on purpose — confirm before re-tuning.
    """
    num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
    num_hops = num_devices_on_mesh_dim - 1
    # base latency + per-hop comm latency (us)
    latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
    # data volume each rank forwards, divided by the link bandwidth
    bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_topo.mesh_dim_bandwidth[mesh_dim]
    return latency + bw

def redistribute_cost(
    current_spec: "DTensorSpec",
    target_spec: "DTensorSpec",
) -> float:
    """
    This function returns the cost of redistribute from current to target DTensorSpec.

    NOTE:
    1. Only consider communication cost here, since computation costs for redistribute
       are quite trivial (i.e. we only need to narrow or simple division)
    2. Only consider redistribute cost on same mesh, cross mesh communication cost is
       not quite needed for operator strategy estimation/selection.
    """
    if not (current_spec.mesh.mesh == target_spec.mesh.mesh).all():
        # make infinite cost if meshes are not same
        # TODO: see if we want to support this once there's cross mesh communication
        return 9999.99

    # NOTE(review): the upstream short-cut returning 0.0 when current_spec is
    # fully replicated was removed on purpose in this fork ("is replicate ->
    # any sharding really free?"); keep it disabled until that is confirmed.

    mesh_topo = MeshTopoInfo.build_from_mesh(current_spec.mesh)
    cost = 0.0
    # Per-shard payload. NOTE(review): divides bytes by 1024*1024*8 — the
    # trailing /8 makes the unit unclear (MB/8?); confirm against the GB/s
    # bandwidth used by the cost functions before re-tuning.
    comm_bytes_gb = (
        spec_to_bytes(current_spec) / current_spec.num_shards / 1024 / 1024 / 8
    )
    # Transformation that considered for redistribute cost:
    # 1. allgather 2. alltoall
    # 3. allreduce 4. reduce_scatter
    for i, (current, target) in enumerate(
        zip(current_spec.placements, target_spec.placements)
    ):
        if current == target:
            continue
        num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[i]
        if current.is_shard() and target.is_replicate():
            # allgather gives larger comm bytes
            comm_bytes_gb *= num_devices_on_mesh_dim
            # add up allgather comm cost
            cost += allgather_cost(comm_bytes_gb, mesh_topo, i)
        elif current.is_shard() and target.is_shard():
            # should be alltoall comm, since we haven't implemented it yet, add
            # a penalty to favor allgather instead
            cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + 1.0
        elif current.is_partial() and target.is_replicate():
            # allreduce is modeled as free in this fork, but the previous
            # `cost = 0.0` wrongly *reset* the cost accumulated on earlier
            # mesh dims — simply add nothing instead.
            # cost += allreduce_cost(comm_bytes_gb, mesh_topo, i)
            pass
        elif current.is_partial() and target.is_shard():
            # add up reduce_scatter comm cost
            cost += reduce_scatter_cost(comm_bytes_gb, mesh_topo, i)
            # after reduce_scatter the comm bytes for further collectives halved.
            comm_bytes_gb /= num_devices_on_mesh_dim
        elif current.is_shard() and target.is_partial():
            # ban shard -> partial as it does not make sense to perform
            # this redistribute
            return 9999.99
        else:
            # fixed fallback penalty for any unhandled placement transition
            return 10.0
    return cost