import torch
from typing import Dict, List
from ..shard.placement_types import DeviceMesh,AbstractTensor,OperationData, OperationDataType
from .node_handler import NodeHandler
from ..generator import ReplicateGenerator,StrategyGenerator
from ..StrategiesVector import StrategiesVector
from geesibling.core.types import Graph, Node
from .registry import operator_registry
__all__ = ['TensorConstructorHandler']


# arange is commonly used to build positional encodings or sequence indices.
@operator_registry.register('aten.arange.start_step')
@operator_registry.register('aten.full.default')
@operator_registry.register('aten.scalar_tensor.default')
# 2025.5.27 new add — candidate op, not registered yet:
# @operator_registry.register('aten.full_like.default')
class TensorConstructorHandler(NodeHandler):
    """
    A TensorConstructorHandler which deals with the sharding strategies for
    tensor constructor operations, such as torch.arange.

    Constructor ops take no tensor inputs, so the operand mapping exposes
    only the output, and the sole generator emitted is a ReplicateGenerator.
    """

    def __init__(self,
                 node: Node,
                 device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector,
                 pre_strategies_vectors: List[StrategiesVector]):
        # Common handler state (node, mesh, vector) is set up by the base class.
        super().__init__(node, device_mesh, strategies_vector)
        self.pre_strategies_vectors = pre_strategies_vectors

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this node.

        With no inputs to shard, a single ReplicateGenerator over the
        output is the only generator needed.
        """
        op_data_mapping = self.get_operation_data_mapping()
        return [ReplicateGenerator(op_data_mapping, self.device_mesh)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Build the operand mapping; constructors only expose an output.

        TODO: migrate all ``data`` payloads to TensorMeta.
        """
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        physical_output = OperationData(name=str(self.node),
                                        data=output_abs_data,
                                        type=OperationDataType.OUTPUT)
        return {"output": physical_output}
