import copy
from typing import List,Dict
from .strategy_generator import StrategyGenerator,FollowingStrategyGenerator
from ..shard.placement_types import DTensorSpec,Placement,Replicate,PlacementStrategy,Shard,ShardingStrategy

# Public API of this module.
# NOTE(review): 'TransposeGenerator' and 'SplitGenerator' are listed but not
# defined in this file — confirm they exist elsewhere or remove them, since
# `from <module> import *` raises AttributeError on missing __all__ names.
__all__ = ['ViewGenerator', 'PermuteGenerator', 'TransposeGenerator',
           'SplitGenerator', 'UnsqueezeGenerator', 'ExpandGenerator']


class PermuteGenerator(FollowingStrategyGenerator):
    """
    PermuteGenerator deals with the sharding strategies of the permute op.

    Each predecessor strategy (one input, one output) is followed: a Shard
    placement has its dimension re-mapped through ``immutable_list``; any
    other placement (e.g. Replicate) is carried over unchanged.
    """

    def __init__(self, operation_data_mapping, device_mesh,
                 pre_strategies_vectors, immutable_list):
        super().__init__(operation_data_mapping, device_mesh, pre_strategies_vectors)
        # Dim mapping for the permutation.
        # NOTE(review): assumes immutable_list[input_dim] == output_dim —
        # confirm against the caller (the inverse permutation would also
        # type-check here but give wrong results).
        self.immutable_list = immutable_list

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Build one ShardingStrategy per predecessor strategy."""
        strategy_list = []
        for strategy in self.pre_strategies_vector:
            # Follow the predecessor's output spec; only the shard dim can change.
            input_specs = copy.deepcopy(strategy.sharding_specs.output_specs)
            input_specs.tensor_meta = self.op_data['input_0'].data
            placement = input_specs.placements[0]
            if isinstance(placement, Shard):
                # Only Shard carries a .dim attribute, so it must be read
                # AFTER the isinstance check (the original read it
                # unconditionally and crashed on Replicate placements).
                output_shard_dim = self.immutable_list[placement.dim]
                output_placements = (Shard(output_shard_dim),)
            else:
                # Non-shard placements (e.g. Replicate) carry over unchanged.
                output_placements = input_specs.placements
            output_specs = DTensorSpec(mesh=self.device_mesh,
                                       placements=output_placements,
                                       tensor_meta=self.op_data['output'].data)
            sharding_specs = PlacementStrategy(input_specs=[input_specs],
                                               output_specs=output_specs)
            strategy_list.append(ShardingStrategy(name=str(sharding_specs),
                                                  sharding_specs=sharding_specs,
                                                  compute_cost=0))
        return strategy_list

# TODO(review): the view sharding logic below may be incorrect — verify.
class ViewGenerator(FollowingStrategyGenerator):
    """
    ViewGenerator deals with the sharding strategies of the view op.

    For each predecessor strategy (one input, one output) it re-maps the
    shard dimension onto the reshaped output by matching the number of
    elements to the right of the shard dim in the old and new shapes.
    """
    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        # Follow each predecessor strategy; view has exactly one input and one output.
        for index, strategy in enumerate(self.pre_strategies_vector):
            # Copy the predecessor's output spec; only the shard dim may change.
            input_specs = copy.deepcopy(strategy.sharding_specs.output_specs)
            if isinstance(input_specs.placements[0],Shard):
                # Cases of view the original author intended to handle
                # (translation of the note below; case 3 is marked "wrong"):
                #   1. [10,768]        -> [1,10,768]
                #   2. [1,11,4096]     -> [1,11,32,128]
                #   3. [1,12,64,10]    -> [12,64,10]   (noted as incorrect)
                #   4. [1,11,32,128]   -> [1,11,4096]
                '''
                view 的几种情况：
                1、[10,768]->[1,10,768]
                2、[1, 11, 4096]->[1, 11, 32, 128]
                3、[1, 12, 64, 10]->[12, 64, 10] 不对
                4、[1, 11, 32, 128]->[1, 11, 4096]
                '''
                shard_dim = input_specs.placements[0].dim
                origin_shape = input_specs.tensor_meta.shape
                new_shape = self.op_data['output'].data.shape
                # Element count strictly to the right of the shard dim in the
                # original shape (the shard dim itself is excluded).
                data_byte = 1
                # Walk the original shape from the last dim back to shard_dim (exclusive).
                for i in range(len(origin_shape)-1,shard_dim,-1):
                    data_byte*=origin_shape[i]
                
                # Scan the new shape from the right until the accumulated element
                # count covers data_byte; that index becomes the new shard dim.
                # NOTE(review): the >= / > asymmetry keyed on whether the rank
                # grows or shrinks looks heuristic — confirm it is correct for
                # all of the view cases listed above.
                new_data_byte=1
                new_shard_dim=len(new_shape)-1
                for i in range(len(new_shape)-1,-1, -1):
                    new_data_byte *= new_shape[i]
                    if (new_data_byte>=data_byte and len(new_shape)>len(origin_shape)) or (new_data_byte>data_byte and len(new_shape)<=len(origin_shape)):
                        new_shard_dim = i
                        break
                output_specs = DTensorSpec(mesh = self.device_mesh, placements=(Shard(new_shard_dim),),tensor_meta=self.op_data['output'].data)

            else:
                # Non-shard placements (e.g. Replicate) carry over unchanged
                # onto the output tensor meta.
                output_specs = DTensorSpec(mesh = self.device_mesh, placements=input_specs.placements,tensor_meta=self.op_data['output'].data)
            # Wrap input/output specs into a PlacementStrategy for this candidate.
            sharding_specs = PlacementStrategy(input_specs=[input_specs],output_specs=output_specs)
            name = str(sharding_specs)
            strategy_list.append(ShardingStrategy(name=name,sharding_specs=sharding_specs,compute_cost =0))

        return strategy_list

# unsqueeze inserts a new dimension of size 1 at the specified position.
class UnsqueezeGenerator(FollowingStrategyGenerator):
    """
    UnsqueezeGenerator deals with the sharding strategies of the unsqueeze op.

    unsqueeze inserts a size-1 axis, so a shard dim at or after the insertion
    point shifts right by one; all other placements carry over unchanged.
    """

    def __init__(self, operation_data_mapping, device_mesh,
                 pre_strategies_vectors, immutable_list):
        super().__init__(operation_data_mapping, device_mesh, pre_strategies_vectors)
        # immutable_list[0] is the index at which the new size-1 axis is inserted.
        self.immutable_list = immutable_list

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Build one ShardingStrategy per predecessor strategy."""
        strategy_list = []
        for strategy in self.pre_strategies_vector:
            # Follow the predecessor's output spec; only the shard dim can change.
            input_specs = copy.deepcopy(strategy.sharding_specs.output_specs)
            input_specs.tensor_meta = self.op_data['input_0'].data
            placement = input_specs.placements[0]
            if isinstance(placement, Shard):
                # Only Shard carries a .dim attribute, so it must be read
                # AFTER the isinstance check (the original read it
                # unconditionally and crashed on Replicate placements).
                shard_dim = placement.dim
                # Inserting an axis at or before the shard dim pushes it right by one.
                if self.immutable_list[0] <= shard_dim:
                    new_shard_dim = shard_dim + 1
                else:
                    new_shard_dim = shard_dim
                output_placements = (Shard(new_shard_dim),)
            else:
                # Non-shard placements (e.g. Replicate) carry over unchanged.
                output_placements = input_specs.placements
            output_specs = DTensorSpec(mesh=self.device_mesh,
                                       placements=output_placements,
                                       tensor_meta=self.op_data['output'].data)
            sharding_specs = PlacementStrategy(input_specs=[input_specs],
                                               output_specs=output_specs)
            strategy_list.append(ShardingStrategy(name=str(sharding_specs),
                                                  sharding_specs=sharding_specs,
                                                  compute_cost=0))
        return strategy_list

# e.g. y = x.expand(4, -1, -1): broadcasts dim 0 to size 4, keeps the other
# dims unchanged, and does not add or remove dimensions.
class ExpandGenerator(FollowingStrategyGenerator):
    """
    ExpandGenerator deals with the sharding strategies of the expand op.

    expand only broadcasts size-1 dims and does not change the rank, so each
    predecessor strategy's placements are reused as-is; only the output
    tensor meta differs.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Build one ShardingStrategy per predecessor strategy."""
        strategy_list = []
        for strategy in self.pre_strategies_vector:
            # Follow the predecessor's output spec verbatim (one input, one output).
            input_specs = copy.deepcopy(strategy.sharding_specs.output_specs)
            output_specs = DTensorSpec(mesh=self.device_mesh,
                                       placements=input_specs.placements,
                                       tensor_meta=self.op_data['output'].data)
            sharding_specs = PlacementStrategy(input_specs=[input_specs],
                                               output_specs=output_specs)
            strategy_list.append(ShardingStrategy(name=str(sharding_specs),
                                                  sharding_specs=sharding_specs,
                                                  compute_cost=0))
        return strategy_list