from geesibling.core.types import Graph, Node
# from torch.distributed._tensor import DeviceMesh
from .shard.placement_types import DeviceMesh
from .StrategiesVector import StrategiesVector
from .handler import (
    # GetattrHandler,
    OutputHandler,
    PlaceholderHandler,
    operator_registry,
)
from .shard.placement_types import DTensorSpec,Placement,Replicate,PlacementStrategy
class StrategiesConstructor:
    """
    StrategiesConstructor is used to construct the parallelization plan for the model execution.

    Args:
        graph (Graph): a Graph object used for analysis and strategy generation.
        device_mesh (DeviceMesh): a DeviceMesh object which contains the meta information about the cluster.
    """

    def __init__(self, graph: Graph, device_mesh: DeviceMesh):
        self.graph = graph
        self.nodes = list(graph.nodes)
        self.device_mesh = device_mesh
        # One StrategiesVector per node that received strategies, in node order.
        self.leaf_strategies = []
        # node name -> StrategiesVector, to record each node's strategy vector
        # and look up predecessor strategies later.
        self.strategy_map = {}
        # Names of nodes for which no strategy could be generated.
        self.no_strategy_nodes = []
        self.alias_set = None

    def remove_duplicated_strategy(self, strategies_vector):
        '''
        In build_strategies_and_cost method, we may produce some duplicated strategies.
        In this method, we will remove the duplicated strategies depending on the strategies name.
        The first occurrence of each name is kept.
        Note that this operation is in-place.
        '''
        # A set gives O(1) membership tests; the original list scan was O(n)
        # per strategy, making the whole pass O(n^2).
        seen_names = set()
        duplicates = []
        for strategy in strategies_vector:
            if strategy.name in seen_names:
                duplicates.append(strategy)
            else:
                seen_names.add(strategy.name)
        for strategy in duplicates:
            strategies_vector.remove(strategy)

    def build_strategies_and_cost(self):
        """
        Build the strategy vector for each node in the computation graph.

        Nodes are abstracted into three op kinds: 'placeholder', 'call_function'
        and 'output'; each kind is dispatched to its handler, which populates the
        node's StrategiesVector.  Results are recorded in ``self.leaf_strategies``
        and ``self.strategy_map`` (keyed by node name); nodes without strategies
        are collected in ``self.no_strategy_nodes``.

        Raises:
            AssertionError: when a 'call_function' target has no registered handler.
            NotImplementedError: when a node has an unsupported ``op`` kind.
        """
        def _check_no_strategy_for_node(node):
            # A node can receive strategies when it produces an output shape,
            # or when it is the graph output.  TODO: how to handle operators
            # such as `full` that fall through here.
            if node.output_shape(0):
                return False
            elif node.op == 'output':
                return False
            else:
                return True

        for node in self.nodes:
            strategies_vector = StrategiesVector(node)
            # Strategy vectors of all predecessors (None when a predecessor has
            # no strategies yet), e.g. needed when handling reshape.
            pre_strategies_vectors = {}
            for node_input in node.inputs:
                pre_strategies_vectors[node_input] = self.strategy_map.get(node_input)

            if _check_no_strategy_for_node(node):
                self.no_strategy_nodes.append(node.name)
            elif node.op == 'placeholder':
                placeholder_handler = PlaceholderHandler(node,
                                                         self.device_mesh,
                                                         strategies_vector)
                placeholder_handler.register_strategy()
            elif node.op == 'call_function':
                target = node.attrs['target']
                # Guard first: unregistered targets fail loudly, exactly like
                # the original else-branch assert.
                assert operator_registry.has(target), f"{target} is need TODO"
                handler = operator_registry.get(target)(node,
                                                        self.device_mesh,
                                                        strategies_vector, pre_strategies_vectors)
                handler.register_strategy()
                # Attach strategies_info to the node so later passes can use it.
                if hasattr(handler, 'strategies_info'):
                    setattr(node, 'strategies_info', handler.strategies_info)
            elif node.op == 'output':
                output_handler = OutputHandler(node, self.device_mesh, strategies_vector, pre_strategies_vectors)
                output_handler.register_strategy()
            else:
                # BUG FIX: the original `assert f"..."` asserted a non-empty
                # (always-truthy) string and then silently `break`-ed out of the
                # loop; fail loudly instead so unsupported ops are not skipped.
                raise NotImplementedError(f"{node.op} is need TODO")

            if node.name not in self.no_strategy_nodes:
                self.leaf_strategies.append(strategies_vector)
                self.strategy_map[node.name] = strategies_vector

            # Debug output; TODO(review): replace with the logging module.
            print(node.name)
            if node.name in self.strategy_map:
                for strategy in self.strategy_map[node.name]:
                    print(strategy.sharding_specs)
            else:
                print("no strategy_map")
