"""Cluster Environment"""
import numpy as np


class ClusterEnvironment:
    """Communication-cost model for a cluster of devices arranged in a mesh.

    Collective costs follow an alpha-beta model: ``mesh_alpha[d]`` is the
    per-message latency term and ``mesh_beta[d]`` the per-byte transfer term
    along mesh dimension ``d``. The small fractional constants added to each
    cost (0.1, 0.01, 0.001) act as tie-breakers so the solver prefers cheaper
    collective types when the integer parts are equal.
    """

    def __init__(self, device_mesh, mesh_alpha, mesh_beta, memory_per_device, solver_option=None):
        """
        Args:
            device_mesh: nested sequence of device ids; its numpy array shape
                defines the mesh dimensions.
            mesh_alpha: per-mesh-dimension latency terms (len == mesh rank).
            mesh_beta: per-mesh-dimension inverse-bandwidth terms
                (len == mesh rank).
            memory_per_device: memory budget of one device (units defined by
                the caller).
            solver_option: optional object with ``force_all_gather_cost``,
                ``force_all_reduce_cost`` and ``force_reduce_scatter_cost``
                attributes; when such an override is not None, the matching
                cost method returns it unconditionally.
        """
        self.device_mesh = np.array(device_mesh)
        self.mesh_alpha = mesh_alpha  # e.g. [1, 1]
        self.mesh_beta = mesh_beta  # e.g. [1, 1]
        assert len(self.mesh_alpha) == len(self.device_mesh.shape)
        assert len(self.mesh_beta) == len(self.device_mesh.shape)
        self.memory_per_device = memory_per_device
        # Additive penalties per collective type (0 by default).
        self.all_gather_penalty = 0
        self.all_reduce_penalty = 0
        self.reduce_scatter_penalty = 0
        # Not referenced inside this class; presumably consumed by the solver.
        self.partial_reduction_penalty = 10
        self.num_devices = np.prod(self.device_mesh.shape)

        # None means "no override": compute the cost from the alpha-beta model.
        self.force_all_gather_cost = None
        self.force_all_reduce_cost = None
        self.force_reduce_scatter_cost = None

        if solver_option:
            self.force_all_gather_cost = solver_option.force_all_gather_cost
            self.force_all_reduce_cost = solver_option.force_all_reduce_cost
            self.force_reduce_scatter_cost = solver_option.force_reduce_scatter_cost

    def all_gather_cost(self, num_bytes, mesh_dim=0):
        """Cost of all-gathering ``num_bytes`` along mesh dimension ``mesh_dim``."""
        # Explicit None check: a forced cost of 0 is a valid override and must
        # be honored (a plain truthiness test would skip it).
        if self.force_all_gather_cost is not None:
            return self.force_all_gather_cost

        num_devices = self.device_mesh.shape[mesh_dim]
        # Ring all-gather transfers (n-1)/n of the data per device.
        return ((int(
            self.mesh_alpha[mesh_dim] +
            self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices * num_bytes) + 0.1) +
                self.all_gather_penalty)

    def all_reduce_cost(self, num_bytes, mesh_dim=0):
        """Cost of an all-reduce of ``num_bytes`` along mesh dimension ``mesh_dim``."""
        if self.force_all_reduce_cost is not None:
            return self.force_all_reduce_cost

        num_devices = self.device_mesh.shape[mesh_dim]
        # All-reduce = reduce-scatter + all-gather, hence the factor of 2.
        return (int(self.mesh_alpha[mesh_dim] +
                    self.mesh_beta[mesh_dim] * 2 * (num_devices - 1) / num_devices * num_bytes) +
                0.01) + self.all_reduce_penalty

    def reduce_scatter_cost(self, num_bytes, mesh_dim=0):
        """Cost of a reduce-scatter of ``num_bytes`` along mesh dimension ``mesh_dim``."""
        if self.force_reduce_scatter_cost is not None:
            return self.force_reduce_scatter_cost

        num_devices = self.device_mesh.shape[mesh_dim]
        # Add the penalty for consistency with the other collectives
        # (it defaults to 0, so the default cost is unchanged).
        return (int(self.mesh_alpha[mesh_dim] +
                    self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices * num_bytes) +
                0.001) + self.reduce_scatter_penalty

    def all_to_all_cost(self, num_bytes, mesh_dim=0):
        """Cost of an all-to-all of ``num_bytes`` along mesh dimension ``mesh_dim``.

        Each device keeps 1/n of the data, so the per-byte term is divided by
        ``num_devices`` again; ``penalty_factor`` discourages this collective
        relative to the cheaper ones.
        """
        num_devices = self.device_mesh.shape[mesh_dim]
        penalty_factor = 1.5
        return (int(self.mesh_alpha[mesh_dim] +
                    self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices /
                    num_devices * num_bytes * penalty_factor) +
                0.001)

    def resharding_cost(self, tensor_shape, src_spec, dst_spec):
        """Cost of converting a tensor from ``src_spec`` to ``dst_spec`` layout.

        Args:
            tensor_shape: shape of the tensor being resharded.
            src_spec / dst_spec: sharding specs exposing
                ``data_parallel_degree``, ``data_parallel_mesh_dim``,
                ``tensor_parallel_degree`` and ``tensor_parallel_mesh_dim``.

        Returns:
            0 when the specs are equal; otherwise the summed collective costs.
        """
        if src_spec == dst_spec:
            return 0

        tensor_size = compute_bytes(tensor_shape)
        cost = 0

        # === Data-parallel (DP) part ===
        if src_spec.data_parallel_degree != dst_spec.data_parallel_degree:
            if src_spec.data_parallel_mesh_dim == dst_spec.data_parallel_mesh_dim:
                # Degree changes on the same mesh dim: resolve with all-reduce.
                cost += self.all_reduce_cost(tensor_size, src_spec.data_parallel_mesh_dim)
            else:
                # Degree and mesh dim both change: data must move cross-dim.
                cost += self.all_to_all_cost(tensor_size)

        elif src_spec.data_parallel_mesh_dim != dst_spec.data_parallel_mesh_dim:
            # Same degree but a different mesh dim still requires movement.
            cost += self.all_to_all_cost(tensor_size)

        # === Tensor-parallel (TP) part ===
        if src_spec.tensor_parallel_degree != dst_spec.tensor_parallel_degree:
            if src_spec.tensor_parallel_mesh_dim == dst_spec.tensor_parallel_mesh_dim:
                # Degree changes on the same mesh dim: gather the shards.
                cost += self.all_gather_cost(tensor_size, src_spec.tensor_parallel_mesh_dim)
            else:
                cost += self.all_to_all_cost(tensor_size)

        elif src_spec.tensor_parallel_mesh_dim != dst_spec.tensor_parallel_mesh_dim:
            cost += self.all_to_all_cost(tensor_size)

        return cost


def compute_bytes(shape, bytes_per_element=4):
    """Return the size in bytes of a tensor with the given shape.

    Args:
        shape: iterable of dimension sizes, e.g. ``(3, 4, 5)``.
        bytes_per_element: size of one element in bytes; defaults to 4
            (the original float32 assumption), but may be overridden for
            other dtypes (e.g. 2 for fp16, 8 for fp64).

    Returns:
        Total byte count, e.g. ``(3, 4, 5)`` -> 3 * 4 * 5 * 4 = 240.
    """
    return np.prod(shape) * bytes_per_element


