from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Union
from ..shard.placement_types import DeviceMesh,ShardingStrategy
import ast
from ..generator import StrategyGenerator
from ..StrategiesVector import StrategiesVector
from geesibling.core.types import Graph, Node
from ..shard.placement_types import DTensorSpec,Placement,Replicate,PlacementStrategy,Shard,_Partial
from .redistribute_cost import redistribute_cost,MeshTopoInfo,allreduce_cost,spec_to_bytes,allgather_cost,allreduce_cost
class NodeHandler(ABC):
    '''
    The NodeHandler is an abstract class used to generate every possible
    sharding strategy for an operator node.

    Args:
        node (Node): the input node in node argument list.
        device_mesh (DeviceMesh): A logical view of a physical mesh.
        strategies_vector (StrategiesVector): all the strategies generated in
            this handler will be recorded into the strategies_vector.
    '''

    def __init__(self,
                 node: Node,
                 device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector) -> None:
        self.node = node
        self.predecessor_node = list(node.inputs)
        self.successor_node = list(node.outputs)
        self.device_mesh = device_mesh
        self.strategies_vector = strategies_vector

    @abstractmethod
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """
        Define which generators should be used by this NodeHandler object.
        """
        pass

    def update_resharding_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        """
        Compute the redistribution (resharding) costs from every candidate
        strategy of each predecessor node to ``strategy`` and record them in
        ``strategy.redistribute_costs``.

        NOTE(review): this method reads ``self.pre_strategies_vectors``, which
        is only defined by subclasses such as ``FollowHandler`` — confirm that
        every concrete handler calling this method provides that attribute.

        Args:
            strategy (ShardingStrategy): the strategy whose costs are updated.

        Returns:
            ShardingStrategy: the same strategy object, updated in place.
        """
        # TODO: test this function when other handlers are ready.
        redistribute_costs = {}
        if self.node.op == 'output':
            for index, node in enumerate(self.predecessor_node):
                node_name = str(node)
                if index == 0:
                    # BUGFIX: look the predecessor's strategies up by name.
                    # Previously the whole dict was assigned here, so the loop
                    # below iterated over key strings instead of strategies.
                    prev_strategy_vector = self.pre_strategies_vectors[node_name]
                    prev_sharding_specs = [
                        prev_strategy.sharding_specs for prev_strategy in prev_strategy_vector
                    ]
                    current_sharding_spec = strategy.sharding_specs
                    # Create the data structure used to store the costs.
                    if node not in redistribute_costs:
                        redistribute_costs[node] = []
                    for prev_sharding_spec in prev_sharding_specs:
                        if isinstance(current_sharding_spec.input_specs, list):
                            costs = redistribute_cost(prev_sharding_spec.output_specs,
                                                      current_sharding_spec.input_specs[0])
                        else:
                            # Only a single input strategy is present.
                            costs = redistribute_cost(prev_sharding_spec.output_specs,
                                                      current_sharding_spec.input_specs)
                        redistribute_costs[node].append(costs)
                else:
                    # Remaining inputs of the output node are charged nothing.
                    redistribute_costs[node] = [0.0]
        else:
            for index, node in enumerate(self.predecessor_node):
                # Only inputs that carry a tensor shape contribute a cost; the
                # node itself is not counted in the strategy otherwise.
                if self.node.input_shape(index):
                    node_name = str(node)
                    prev_strategy_vector = self.pre_strategies_vectors[node_name]
                    prev_sharding_specs = [
                        prev_strategy.sharding_specs for prev_strategy in prev_strategy_vector
                    ]
                    # The sharding spec generated by this node handler.
                    current_sharding_spec = strategy.sharding_specs
                    # Create the data structure used to store the costs.
                    if node not in redistribute_costs:
                        redistribute_costs[node] = []
                    for prev_sharding_spec in prev_sharding_specs:
                        if isinstance(current_sharding_spec.input_specs, list):
                            costs = redistribute_cost(prev_sharding_spec.output_specs,
                                                      current_sharding_spec.input_specs[index])
                        else:
                            # Only a single input strategy is present.
                            costs = redistribute_cost(prev_sharding_spec.output_specs,
                                                      current_sharding_spec.input_specs)
                        redistribute_costs[node].append(costs)
        strategy.redistribute_costs = redistribute_costs
        return strategy

    def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesVector:
        """
        Register every sharding strategy produced by this node's generators
        and annotate each one with its resharding, compute/memory and
        communication costs.

        Args:
            compute_resharding_cost (bool): kept for interface compatibility;
                resharding costs are currently always computed.

        Returns:
            StrategiesVector: the vector holding all registered strategies.
        """
        # Pick the strategy generators that match this node's type.
        strategy_generators = self.get_strategy_generator()
        for generator in strategy_generators:
            strategies = generator.generate()
            updated_strategies = map(self.update_resharding_cost, strategies)
            self.strategies_vector.extend(list(updated_strategies))
        # Annotate every strategy with its compute/memory and communication cost.
        for strategy in self.strategies_vector:
            self.update_compute_and_memory_cost(strategy)
            self.update_communication_cost(strategy)
        return self.strategies_vector

    def update_communication_cost(self, strategy: ShardingStrategy = None):
        """
        Estimate the communication cost of ``strategy`` and store it in
        ``strategy.communication_cost``.

        Only the shard -> replicate transition is charged: when any input is
        sharded on its first placement while the output is replicated, an
        all-reduce over every mesh dimension of the output spec is assumed.
        """
        cost = 0.0
        input_specs = strategy.sharding_specs.input_specs
        output_spec = strategy.sharding_specs.output_specs
        is_shard = False
        if input_specs:
            for input_spec in input_specs:
                if isinstance(input_spec.placements[0], Shard):
                    is_shard = True
            if is_shard and isinstance(output_spec.placements[0], Replicate):
                mesh_topo = MeshTopoInfo.build_from_mesh(output_spec.mesh)
                # Bytes held by a single shard of the output tensor.
                comm_bytes_gb = spec_to_bytes(output_spec) / output_spec.num_shards
                for i, current in enumerate(output_spec.placements):
                    cost += allreduce_cost(comm_bytes_gb, mesh_topo, -(i + 1))
        strategy.communication_cost = cost

    def update_compute_and_memory_cost(self, strategy: ShardingStrategy = None):
        """
        Scale the node's compute cost and temporary memory by the shard
        degree and store the results on ``strategy``.

        If any input or the output is sharded (first placement), both costs
        are divided by the device count of mesh dimension 1.
        """
        output_specs = strategy.sharding_specs.output_specs
        input_specs = strategy.sharding_specs.input_specs
        mesh_topo = MeshTopoInfo.build_from_mesh(output_specs.mesh)
        device_num = mesh_topo.mesh_dim_devices[1]
        cost = self.node.compute_cost
        memory = self.node.temporary_memory
        shard_info = False
        if input_specs:
            if isinstance(input_specs, list):
                for input_spec in input_specs:
                    if isinstance(input_spec.placements[0], Shard):
                        shard_info = True
            else:
                # BUGFIX: was `input_spec` (an unbound name in this branch);
                # check the single spec held in `input_specs`.
                if isinstance(input_specs.placements[0], Shard):
                    shard_info = True
        if isinstance(output_specs.placements[0], Shard):
            shard_info = True
        if shard_info:
            cost = cost / device_num
            memory = memory / device_num
        strategy.compute_cost = cost
        strategy.memory_cost = memory

    def get_target_function(self) -> callable:
        """
        This function is used to get the target function for the node handler.
        The target function is used to analyze the costs of strategies.

        Returns:
            callable: the callable target, or None for non-call ops.

        Raises:
            ValueError: if the node op is not a recognized call type.
        """
        if self.node.op in ('placeholder', 'get_attr', 'output'):
            return None

        if self.node.op == 'call_module':
            target = self.node.graph.owning_module.get_submodule(self.node.target)
        elif self.node.op == 'call_function':
            target = self.node.target
        elif self.node.op == 'call_method':
            target = getattr(self.node.args[0]._meta_data.__class__, self.node.target)
        else:
            raise ValueError(f'Unsupported node type: {self.node.op}')

        return target

class FollowHandler(NodeHandler):
    """
    A NodeHandler that additionally carries the strategy vectors of its
    predecessor nodes, so that resharding costs can be computed against them.

    Args:
        node (Node): the input node in node argument list.
        device_mesh (DeviceMesh): A logical view of a physical mesh.
        strategies_vector (StrategiesVector): records all strategies generated
            by this handler.
        pre_strategies_vectors (Dict[str, List[StrategiesVector]]): mapping
            from predecessor node name to that node's candidate strategies.
    """

    def __init__(self,
                 node: Node,
                 device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector,
                 pre_strategies_vectors: Dict[str, List[StrategiesVector]]):
        # Delegate the shared bookkeeping to the parent handler.
        super().__init__(node, device_mesh, strategies_vector)
        self.pre_strategies_vectors = pre_strategies_vectors
