from .placement_types import (
    OpStrategy,
    PlacementStrategy,
    _Partial,
    DTensorSpec,
    Placement,
    Replicate,
    Shard,
    DeviceMesh,
)
import itertools
from dataclasses import dataclass
from typing import List, Set, Tuple
import numpy as np
def embedding_strategy(mesh: DeviceMesh, weight_shape, indices_shape, grad_out_shape):
    """
    Enumerate all sharding strategies for the embedding op on ``mesh``.

    The embedding lookup maps ``weight[num_embeddings, emb_dim]`` and integer
    ``indices`` of shape ``indices_shape`` to an output of shape
    ``indices_shape + (emb_dim,)`` (e.g. weight [50257, 768], indices [1, 10],
    output [1, 10, 768]).  Per mesh dimension, the candidate placements
    (listed as [output, weight, indices]) are:

      * all replicate:      (R,       R,    R)
      * colwise sharding:   (S(last), S(1), R)    -- shard the embedding dim
      * batch-dim sharding: (S(i),    R,    S(i)) -- for every indices dim i

    Rowwise (dim-0) weight sharding is not implemented here: it requires a
    masked-partial placement (upstream ``_MaskPartial``) so the partial
    output can be reduced correctly using the mask built from the indices.

    Args:
        mesh: device mesh to enumerate strategies over.
        weight_shape: shape of the embedding weight; currently unused here
            (only needed by the rowwise strategy).
        indices_shape: shape of the integer index tensor.
        grad_out_shape: shape of the output gradient; unused, kept for
            interface compatibility.

    Returns:
        An OpStrategy containing one PlacementStrategy per combination of
        the per-mesh-dim placements above.
    """
    # Output has one more dim than the indices (the trailing embedding dim),
    # so the index of that last output dim is exactly len(indices_shape).
    output_emb_dim = len(indices_shape)

    all_mesh_dim_strategies = []

    for mesh_dim in range(mesh.mesh.ndim):
        if mesh.mesh.shape[mesh_dim] <= 1:
            # Only the all-replicate strategy is valid for a size-1 mesh dim.
            # Append it (rather than skipping the dim entirely) so every mesh
            # dim contributes exactly one placement slot per DTensorSpec.
            # TODO: see if this is valid for the submesh case
            all_mesh_dim_strategies.append([[Replicate()] * 3])
            continue

        # Each placement list stores placements of [output, weight, indices].
        single_mesh_dim_strategies = []

        # First we always have replicate-all for inputs and output.
        all_replicate: List[Placement] = [Replicate()] * 3
        single_mesh_dim_strategies.append(all_replicate)

        # Colwise sharding: weight shard on dim 1 (embedding dim), indices
        # replicated, output shard on its last dim.  NOTE: this was
        # Shard(output_emb_dim - 1), which incorrectly sharded the last
        # *indices* dim of the output instead of the embedding dim.
        colwise_sharding = [Shard(output_emb_dim), Shard(1), Replicate()]
        single_mesh_dim_strategies.append(colwise_sharding)

        # TODO: rowwise weight sharding (Megatron-style sequence parallel)
        # needs a masked-partial output placement; not supported yet.

        # Batch-dim sharding: weight replicated, indices shard on any dim,
        # output follows the indices' sharded dim.
        for input_dim in range(len(indices_shape)):
            batch_sharding = [Shard(input_dim), Replicate(), Shard(input_dim)]
            single_mesh_dim_strategies.append(batch_sharding)

        all_mesh_dim_strategies.append(single_mesh_dim_strategies)

    # Cartesian product picks one placement list per mesh dim; zipping then
    # regroups the per-tensor placements across mesh dims, yielding one
    # DTensorSpec per tensor (output first, then the inputs).
    all_strategies = []
    for strategy_comb in itertools.product(*all_mesh_dim_strategies):
        spec_list = [DTensorSpec(mesh, tuple(specs)) for specs in zip(*strategy_comb)]
        all_strategies.append(
            PlacementStrategy(output_specs=spec_list[0], input_specs=spec_list[1:])
        )
    return OpStrategy(all_strategies)
