import copy
from typing import List
from .strategy_generator import StrategyGenerator,FollowingStrategyGenerator
from ..shard.placement_types import DTensorSpec,Placement,Replicate,PlacementStrategy,Shard,ShardingStrategy

# Public API of this module.
__all__ = ['ScaleDotProductGenerator']


class ScaleDotProductGenerator(FollowingStrategyGenerator):
    """
    Strategy generator for ``torch.ops.aten._scaled_dot_product_efficient_attention.default``.

    Example traced call::

        _scaled_dot_product_efficient_attention(clone, clone_1, clone_2, expand_4, False)

    The first three tensor inputs (query/key/value) follow the sharding of the
    preceding node's strategies; the optional fourth tensor input
    (``expand_4`` above, the attention bias/mask) is never sharded and is kept
    fully replicated.  NOTE(review): the op may arrive with either three or
    four tensor inputs — both cases are handled below.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Build one ShardingStrategy per predecessor strategy.

        The output keeps the same sharding spec as the input, so distinct
        predecessor strategies that share an output spec produce identical
        strategies for this (element-wise-following) op.

        Returns:
            List[ShardingStrategy]: one strategy per predecessor strategy,
            each with ``compute_cost`` 0.
        """
        strategy_list = []
        # Number of predecessor nodes feeding this op.
        # NOTE(review): `pre_strategies_vectors` (plural) here vs
        # `pre_strategies_vector` (singular) iterated below appear to be
        # distinct base-class attributes — confirm both exist as intended.
        pre_node_nums = len(self.pre_strategies_vectors)

        if pre_node_nums == len(self.op_data) - 1:
            # Normal case: every op_data input entry maps to a predecessor node.
            for strategy in self.pre_strategies_vector:
                base_spec = strategy.sharding_specs.output_specs

                if pre_node_nums == 4:
                    # q/k/v inherit the predecessor's sharding; the 4th input
                    # (attention bias/mask) must stay replicated.
                    replicate_spec = DTensorSpec(mesh=self.device_mesh, placements=(Replicate(),))
                    input_specs = [copy.deepcopy(base_spec) for _ in range(pre_node_nums - 1)]
                    input_specs.append(replicate_spec)
                else:
                    input_specs = [copy.deepcopy(base_spec) for _ in range(pre_node_nums)]

                # The output inherits the predecessor's sharding but carries
                # the OUTPUT tensor's metadata.  Use an independent copy:
                # the original code reused a single spec object for every
                # input and the output, so the per-input tensor_meta
                # assignments below silently clobbered the output's metadata.
                output_specs = copy.deepcopy(base_spec)
                output_specs.tensor_meta = self.op_data['output'].data

                if not isinstance(input_specs, list):
                    # Defensive: normalize a bare spec into a one-element list.
                    input_specs.tensor_meta = self.op_data['input_0'].data
                    input_specs = [input_specs]
                else:
                    for i, input_spec in enumerate(input_specs):
                        input_spec.tensor_meta = self.op_data['input_{}'.format(i)].data

                sharding_specs = PlacementStrategy(input_specs=input_specs, output_specs=output_specs)
                strategy_list.append(
                    ShardingStrategy(name=str(sharding_specs), sharding_specs=sharding_specs, compute_cost=0))
        else:
            # Mismatch between predecessor count and op_data inputs (e.g. some
            # arguments are plain tensors, not graph nodes): fall back to a
            # single input spec that follows the predecessor's output sharding.
            for strategy in self.pre_strategies_vector:
                input_spec = copy.deepcopy(strategy.sharding_specs.output_specs)
                input_spec.tensor_meta = self.op_data['input_0'].data
                output_specs = copy.deepcopy(strategy.sharding_specs.output_specs)
                output_specs.tensor_meta = self.op_data['output'].data
                sharding_specs = PlacementStrategy(input_specs=[input_spec], output_specs=output_specs)
                strategy_list.append(
                    ShardingStrategy(name=str(sharding_specs), sharding_specs=sharding_specs, compute_cost=0))
        return strategy_list
