from typing import Dict, List, Union
from ..shard.placement_types import DeviceMesh
from geesibling.core.types import Graph, Node
from ..StrategiesVector import StrategiesVector
from .node_handler import NodeHandler
from .registry import operator_registry
from ..generator import MatMulStrategyGenerator,StrategyGenerator,ElementwiseGenerator
from ..shard.placement_types import (
    AbstractTensor,OperationData,OperationDataType
)

# All three handlers defined below are public (each is registered with
# operator_registry), so export them all.
__all__ = ['ADDMMFunctionHandler', 'BMMFunctionHandler', 'MMFunctionHandler']


@operator_registry.register('aten.addmm.default')
class ADDMMFunctionHandler(NodeHandler):
    """
    NodeHandler for the fused multiply-add operation ``aten.addmm.default``
    (``torch.addmm``), which computes ``bias + input @ weight``.

    The node's operands are, in positional order: bias (arg 0), input
    (arg 1) and weight (arg 2) — see :meth:`get_operation_data_mapping`.
    """

    def __init__(self, node: Node, device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector,
                 pre_strategies_vectors: List[StrategiesVector]):
        # Delegate common wiring to NodeHandler; additionally keep the
        # strategy vectors of predecessor nodes for later use.
        super().__init__(node, device_mesh, strategies_vector)
        self.pre_strategies_vectors = pre_strategies_vectors

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this node: a single
        MatMulStrategyGenerator configured in 'addmm' mode."""
        op_data_mapping = self.get_operation_data_mapping()
        return [MatMulStrategyGenerator(op_data_mapping, self.device_mesh, 'addmm')]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Build the operand-name -> OperationData mapping.

        Returns a dict with keys ``"bias"``, ``"input"``, ``"weight"`` and
        ``"output"``. Bias and weight are treated as parameters
        (OperationDataType.PARAM), input as a regular INPUT.
        """
        # addmm operand layout: arg0 = bias, arg1 = input, arg2 = weight.
        bias_abs_data = AbstractTensor(self.node.input_shape(0), self.node.input_type(0))
        input_abs_data = AbstractTensor(self.node.input_shape(1), self.node.input_type(1))
        weight_abs_data = AbstractTensor(self.node.input_shape(2), self.node.input_type(2))
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        return {
            "bias": OperationData(name=str(self.node.input_name(0)), data=bias_abs_data, type=OperationDataType.PARAM),
            "input": OperationData(name=str(self.node.input_name(1)), data=input_abs_data, type=OperationDataType.INPUT),
            "weight": OperationData(name=str(self.node.input_name(2)), data=weight_abs_data, type=OperationDataType.PARAM),
            "output": OperationData(name=str(self.node.name), data=output_abs_data, type=OperationDataType.OUTPUT),
        }


@operator_registry.register('aten.bmm.default')
class BMMFunctionHandler(NodeHandler):
    """
    NodeHandler for the batched matrix multiplication operation
    ``aten.bmm.default`` (``torch.bmm`` / ``torch.Tensor.bmm``). Such
    operations require 3D tensors, so there is no logical-physical shape
    conversion in this handler.
    """

    def __init__(self, node: Node, device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector,
                 pre_strategies_vectors: List[StrategiesVector]):
        # Base class wires node/device_mesh/strategies_vector; we only add
        # the predecessor strategy vectors on top.
        super().__init__(node, device_mesh, strategies_vector)
        self.pre_strategies_vectors = pre_strategies_vectors

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators: one matmul generator in 'bmm' mode."""
        # NOTE: additional bmm strategies (e.g. elementwise) were removed by
        # hand; only the matmul generator is emitted.
        mapping = self.get_operation_data_mapping()
        return [MatMulStrategyGenerator(mapping, self.device_mesh, 'bmm')]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map operand names ('input_0', 'input_1', 'output') to OperationData."""
        operands = {
            f"input_{idx}": OperationData(
                name=str(self.node.inputs[idx]),
                data=AbstractTensor(self.node.input_shape(idx), self.node.input_type(idx)),
                type=OperationDataType.INPUT)
            for idx in (0, 1)
        }
        operands["output"] = OperationData(
            name=str(self.node.name),
            data=AbstractTensor(self.node.output_shape(0), self.node.output_type(0)),
            type=OperationDataType.OUTPUT)
        return operands


@operator_registry.register('aten.mm.default')
class MMFunctionHandler(NodeHandler):
    """
    NodeHandler for the plain 2D matrix multiplication operation
    ``aten.mm.default`` (``torch.mm``): two input operands, one output,
    no bias term.
    """

    def __init__(self, node: Node, device_mesh: DeviceMesh,
                 strategies_vector: StrategiesVector,
                 pre_strategies_vectors: List[StrategiesVector]):
        # Delegate common wiring to NodeHandler; additionally keep the
        # strategy vectors of predecessor nodes for later use.
        super().__init__(node, device_mesh, strategies_vector)
        self.pre_strategies_vectors = pre_strategies_vectors

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators: a single matmul generator in 'mm' mode."""
        # NOTE: extra strategies (e.g. ElementwiseGenerator) were intentionally
        # removed; only MatMulStrategyGenerator remains.
        op_data_mapping = self.get_operation_data_mapping()
        return [MatMulStrategyGenerator(op_data_mapping, self.device_mesh, 'mm')]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map operand names ('input_0', 'input_1', 'output') to OperationData."""
        lhs_abs_data = AbstractTensor(self.node.input_shape(0), self.node.input_type(0))
        rhs_abs_data = AbstractTensor(self.node.input_shape(1), self.node.input_type(1))
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        return {
            "input_0": OperationData(name=str(self.node.inputs[0]), data=lhs_abs_data, type=OperationDataType.INPUT),
            "input_1": OperationData(name=str(self.node.inputs[1]), data=rhs_abs_data, type=OperationDataType.INPUT),
            "output": OperationData(name=str(self.node.name), data=output_abs_data, type=OperationDataType.OUTPUT),
        }