from typing import Dict, List
from .registry import operator_registry
import ast
from .node_handler import NodeHandler,FollowHandler
from ..generator import PermuteGenerator, StrategyGenerator,ViewGenerator,UnsqueezeGenerator,ExpandGenerator
from ..StrategiesVector import StrategiesVector
from ..shard.placement_types import DeviceMesh,AbstractTensor,OperationData, OperationDataType
from geesibling.core.types import Graph, Node

__all__ = ['ViewHandler','PermuteHandler']

@operator_registry.register('aten.permute.default')
class PermuteHandler(FollowHandler):
    """
    A PermuteHandler which deals with the sharding strategies for the
    permute op (``aten.permute.default``), such as torch.permute.
    """
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this permute node.

        The permutation axes are serialized as a string in the node's
        ``immutable_list`` attribute; parse them with ``ast.literal_eval``
        (safe for untrusted attribute strings) and normalize a bare int
        into a one-element list so the generator always receives a list.
        """
        op_data_mapping = self.get_operation_data_mapping()
        dims = ast.literal_eval(self.node.attrs['immutable_list'])
        if isinstance(dims, int):
            dims = [dims]
        return [PermuteGenerator(op_data_mapping, self.device_mesh, self.pre_strategies_vectors, dims)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map this node's inputs and single output to OperationData.

        Produces one entry per input keyed ``input_0``, ``input_1``, ...
        plus a single ``output`` entry built from output slot 0.
        """
        mapping = {}
        for i, inp in enumerate(self.node.inputs):
            input_abs_data = AbstractTensor(self.node.input_shape(i), self.node.input_type(i))
            mapping[f"input_{i}"] = OperationData(name=str(inp), type=OperationDataType.ARG, data=input_abs_data)
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        mapping['output'] = OperationData(name=str(self.node), data=output_abs_data, type=OperationDataType.OUTPUT)
        return mapping


@operator_registry.register('aten.view.default')
class ViewHandler(FollowHandler):
    """
    A ViewHandler which deals with the sharding strategies for the
    reshape-style op ``aten.view.default``, such as torch.reshape.
    """
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this view/reshape node."""
        op_data_mapping = self.get_operation_data_mapping()
        return [ViewGenerator(op_data_mapping, self.device_mesh, self.pre_strategies_vectors)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map this node's inputs and single output to OperationData.

        Produces one entry per input keyed ``input_0``, ``input_1``, ...
        plus a single ``output`` entry built from output slot 0.
        """
        mapping = {}
        for i, inp in enumerate(self.node.inputs):
            input_abs_data = AbstractTensor(self.node.input_shape(i), self.node.input_type(i))
            mapping[f"input_{i}"] = OperationData(name=str(inp), type=OperationDataType.ARG, data=input_abs_data)
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        mapping['output'] = OperationData(name=str(self.node), data=output_abs_data, type=OperationDataType.OUTPUT)
        return mapping

@operator_registry.register('aten.expand.default')
@operator_registry.register('aten.constant_pad_nd.default')
class ExpandHandler(FollowHandler):
    """
    An ExpandHandler which deals with the sharding strategies for the
    expand-style ops ``aten.expand.default`` and
    ``aten.constant_pad_nd.default``, such as torch.Tensor.expand.
    """
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this expand/pad node."""
        op_data_mapping = self.get_operation_data_mapping()
        return [ExpandGenerator(op_data_mapping, self.device_mesh, self.pre_strategies_vectors)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map this node's inputs and single output to OperationData.

        Produces one entry per input keyed ``input_0``, ``input_1``, ...
        plus a single ``output`` entry built from output slot 0.
        """
        mapping = {}
        for i, inp in enumerate(self.node.inputs):
            input_abs_data = AbstractTensor(self.node.input_shape(i), self.node.input_type(i))
            mapping[f"input_{i}"] = OperationData(name=str(inp), type=OperationDataType.ARG, data=input_abs_data)
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        mapping['output'] = OperationData(name=str(self.node), data=output_abs_data, type=OperationDataType.OUTPUT)
        return mapping

@operator_registry.register('aten.unsqueeze.default')
class UnsqueezeHandler(FollowHandler):
    """
    An UnsqueezeHandler which deals with the sharding strategies for the
    unsqueeze op (``aten.unsqueeze.default``), such as torch.unsqueeze.
    """
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """Return the strategy generators for this unsqueeze node.

        The inserted-dimension argument is serialized as a string in the
        node's ``immutable_list`` attribute; parse it with
        ``ast.literal_eval`` and normalize a bare int into a one-element
        list so the generator always receives a list.
        """
        op_data_mapping = self.get_operation_data_mapping()
        dims = ast.literal_eval(self.node.attrs['immutable_list'])
        if isinstance(dims, int):
            dims = [dims]
        return [UnsqueezeGenerator(op_data_mapping, self.device_mesh, self.pre_strategies_vectors, dims)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map this node's inputs and single output to OperationData.

        Produces one entry per input keyed ``input_0``, ``input_1``, ...
        plus a single ``output`` entry built from output slot 0.
        """
        mapping = {}
        for i, inp in enumerate(self.node.inputs):
            input_abs_data = AbstractTensor(self.node.input_shape(i), self.node.input_type(i))
            mapping[f"input_{i}"] = OperationData(name=str(inp), type=OperationDataType.ARG, data=input_abs_data)
        output_abs_data = AbstractTensor(self.node.output_shape(0), self.node.output_type(0))
        mapping['output'] = OperationData(name=str(self.node), data=output_abs_data, type=OperationDataType.OUTPUT)
        return mapping
