from torch.distributed._tensor.placement_types import (
    Placement,
    Replicate,
    Shard,
    TensorMeta,
    _Partial
)
from torch.distributed._tensor.op_schema import (
    DTensorSpec,
    OpSchema,
    OutputSharding,
    OutputSpecType,
    PlacementStrategy,
)
from torch.fx.node import Node
from typing import Dict
from .shard.placement_types import PlacementStrategy as GeeStrategy
# 1. Placement conversion: map a Gee placement onto the equivalent torch placement tuple.
def transform_placement(gee_placement):
    """Translate a Gee placement object into a 1-tuple of torch DTensor placements.

    Args:
        gee_placement: a placement-like object exposing ``name`` (and ``dim``
            for sharded placements). NOTE(review): the original annotation was
            ``Dict[str, GeeStrategy]``, which is wrong — a dict has no ``name``
            attribute; this function is called with ``*.placements`` objects.

    Returns:
        A single-element tuple containing the matching torch placement
        (``Replicate``, ``Shard(dim)`` or ``_Partial``).

    Raises:
        ValueError: if the placement name matches none of the known kinds.
            (The original silently returned ``None`` here, which only failed
            later inside ``DTensorSpec`` with an unrelated error.)
    """
    name = gee_placement.name
    if 'Replicate' in name:
        return (Replicate(),)
    if 'Shard' in name:
        return (Shard(gee_placement.dim),)
    if '_Partial' in name:
        # Defaults to all-reduce reduction. TODO: support other reduce ops.
        return (_Partial(),)
    raise ValueError(f"Unsupported Gee placement name: {name!r}")

# 2. Convert Gee strategies into PyTorch placement strategies.
def transform_strategy(gm, parallel_strategies, mesh) -> Dict[Node, PlacementStrategy]:
    """Build a per-node mapping from fx graph nodes to torch ``PlacementStrategy``.

    For every node of ``gm.graph`` whose name appears in ``parallel_strategies``,
    the Gee strategy's input/output specs are converted (via
    ``transform_placement``) into ``DTensorSpec`` objects on ``mesh``.

    Args:
        gm: an fx ``GraphModule``-like object (only ``gm.graph.nodes`` is used).
        parallel_strategies: mapping from node name to a Gee strategy exposing
            ``input_specs`` and ``output_spec`` (each spec carries ``placements``).
        mesh: the device mesh every produced ``DTensorSpec`` is bound to.

    Returns:
        Dict keyed by fx ``Node`` with the converted ``PlacementStrategy``.
        Nodes without an entry in ``parallel_strategies`` are skipped.
    """
    node_to_strategy: Dict[Node, PlacementStrategy] = {}
    for node in gm.graph.nodes:
        # Guard clause: only nodes with a registered Gee strategy are converted.
        if node.name not in parallel_strategies:
            continue
        gee_strategy = parallel_strategies[node.name]
        # A falsy input_specs (None / empty) yields an empty list, as before.
        in_specs = [
            DTensorSpec(mesh=mesh, placements=transform_placement(spec.placements))
            for spec in (gee_strategy.input_specs or [])
        ]
        out_spec = DTensorSpec(
            mesh=mesh,
            placements=transform_placement(gee_strategy.output_spec.placements),
        )
        node_to_strategy[node] = PlacementStrategy(
            input_specs=in_specs, output_specs=out_spec
        )
    return node_to_strategy
