from typing import Any, Dict, List, Union

from ..shard.placement_types import (
    DeviceMesh,Replicate,PlacementStrategy,Shard,DTensorSpec,Shard,_Partial,ShardingStrategy
)
from ..shard.basic_stategy import gen_einsum_strategies
import copy
from .strategy_generator import StrategyGenerator

class MatMulStrategyGenerator(StrategyGenerator):
    """Generate candidate sharding strategies for matrix-multiplication ops.

    Wraps the raw einsum sharding candidates produced by
    ``gen_einsum_strategies`` into ``ShardingStrategy`` objects, attaching
    tensor metadata from ``self.op_data`` and, for ``addmm``, deriving a
    compatible placement for the bias operand from the output placement.
    """

    def __init__(self,
                 operation_data_mapping,
                 device_mesh,
                 mat_type='matmul'):
        """
        Args:
            operation_data_mapping: mapping of operand role -> operand data;
                forwarded to ``StrategyGenerator`` (read back via ``self.op_data``).
            device_mesh: the device mesh the strategies are generated for.
            mat_type: which matmul variant this op is; ``collate_strategies``
                understands ``'mm'``, ``'addmm'`` and ``'bmm'``.
                NOTE(review): the default ``'matmul'`` is not handled there —
                confirm the intended set of values with the callers.
        """
        super().__init__(operation_data_mapping, device_mesh)
        self.mat_type = mat_type

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Build the list of ``ShardingStrategy`` candidates for this op.

        Returns:
            One ``ShardingStrategy`` per einsum candidate for the op's
            contraction pattern, each with tensor metadata filled in and
            ``compute_cost`` initialised to 0.

        Raises:
            ValueError: if ``self.mat_type`` has no known einsum pattern.
        """
        strategy_list = []
        if self.mat_type in ('addmm', 'mm'):
            mm_strategy = gen_einsum_strategies("mk,kn->mn", self.device_mesh)
        elif self.mat_type == 'bmm':
            mm_strategy = gen_einsum_strategies("bmk,bkn->bmn", self.device_mesh)
        else:
            # The original code fell through here and crashed later with an
            # UnboundLocalError on `mm_strategy`; fail fast with a clear error.
            raise ValueError(f"unsupported mat_type: {self.mat_type!r}")

        # Wrap each raw candidate into a ShardingStrategy
        # (name / sharding_specs / compute_cost).
        for strategy in mm_strategy.strategies:
            input_specs = []
            # Downgrade a partial-reduction output to Replicate.
            # NOTE(review): only placements[0] is inspected, i.e. this assumes
            # a single-dim placement tuple — confirm for multi-dim meshes.
            if isinstance(strategy.output_specs.placements[0], _Partial):
                strategy.output_specs.placements = (Replicate(),)
            if self.mat_type == 'addmm':
                # Derive the bias placement from the output placement.
                if isinstance(strategy.output_specs.placements[0], Shard):
                    if strategy.output_specs.placements[0].dim == 1:
                        # Output sharded along its column dim -> shard the
                        # (presumably 1-D) bias along its only dim.
                        bias_spec = DTensorSpec(mesh=self.device_mesh,
                                                placements=(Shard(0),),
                                                tensor_meta=self.op_data['bias'].data)
                    else:
                        # Output sharded along rows: every rank needs the full bias.
                        bias_spec = DTensorSpec(mesh=self.device_mesh,
                                                placements=(Replicate(),),
                                                tensor_meta=self.op_data['bias'].data)
                else:
                    # BUG FIX: the original aliased the output spec here
                    # (`bias_spec = strategy.output_specs`), so the later
                    # assignment of the output tensor_meta below clobbered the
                    # bias metadata through the shared object. Copy instead.
                    bias_spec = copy.deepcopy(strategy.output_specs)
                    bias_spec.tensor_meta = self.op_data['bias'].data
                input_specs.append(bias_spec)
                for input_spec in strategy.input_specs:
                    input_specs.append(copy.deepcopy(input_spec))
                # addmm operand order: (bias, input, weight).
                input_specs[1].tensor_meta = self.op_data['input'].data
                input_specs[2].tensor_meta = self.op_data['weight'].data
                strategy.output_specs.tensor_meta = self.op_data['output'].data
                sharding_specs = PlacementStrategy(input_specs=input_specs,
                                                   output_specs=strategy.output_specs)
            else:
                # mm / bmm: two operands, no bias or weight. Placement mapping
                # produced by gen_einsum_strategies:
                #   (R, R)       -> R
                #   (S(0), S(0)) -> S(0)
                #   (S(2), S(1)) -> P
                #   (S(1), R)    -> S(1)
                #   (R, S(2))    -> S(2)
                strategy.input_specs[0].tensor_meta = self.op_data['input_0'].data
                strategy.input_specs[1].tensor_meta = self.op_data['input_1'].data
                strategy.output_specs.tensor_meta = self.op_data['output'].data
                sharding_specs = PlacementStrategy(input_specs=strategy.input_specs,
                                                   output_specs=strategy.output_specs)
            strategy_list.append(ShardingStrategy(name=str(sharding_specs),
                                                  sharding_specs=sharding_specs,
                                                  compute_cost=0))
        return strategy_list
    

