# Parts that need wrapping: convert a strategy_map into a form usable by PyTorch.
# 1. Convert to a device_mesh.
from geesibling.core.types import Graph, Node
from geesibling.core.lib._graph import DataType,DeviceType,Device,search_policy
from geesibling.tools import log
from torch.distributed.device_mesh import init_device_mesh
import numpy as np
from collections import defaultdict
from typing import Optional, Union, Tuple, Dict
from torch.fx.node import Node
# 2. Placement conversion function.
from torch.distributed._tensor.placement_types import (
    Placement,
    Replicate,
    Shard,
    TensorMeta,
    _Partial
)
import re
import os
from .StrategiesConstructor import StrategiesConstructor
from .solver.SolveSolution import solve_solution,solution_annotation_pass
from torch.distributed.device_mesh import init_device_mesh
from .graphToGeesiGraph import graphToGeeGraph
from torch.export import ExportedProgram
from torch.export.exported_program import ExportGraphSignature
from torch.fx import GraphModule
from functools import partial
import torch
import torch.nn as nn
from .shard.placement_types import PlacementStrategy as GeeStrategy
from .shard.placement_types import DeviceMesh as GeeMesh
def transform_placement(gee_placement):
    """Convert geesibling placement(s) into torch DTensor placements.

    Parameters:
    gee_placement: a single geesibling placement or a tuple of them. Each
        placement exposes ``name`` (a string containing 'Replicate',
        'Shard' or '_Partial') and, for shards, ``dim``.

    Returns:
    tuple: the equivalent torch placements. ``_Partial`` is mapped to
        ``Replicate()`` because the output side is assumed to have been
        all-reduced already (TODO: revisit). Unrecognized names are
        skipped, matching the original behavior.
    """
    def _convert(placement):
        # Order matters: match on the distinctive substring of the name.
        if 'Replicate' in placement.name:
            return (Replicate(),)
        if 'Shard' in placement.name:
            return (Shard(placement.dim),)
        if '_Partial' in placement.name:
            # Assumes an allreduce has happened; replaced by Replicate
            # because the output never stays partial. TODO
            return (Replicate(),)
        return ()

    placements = ()
    if isinstance(gee_placement, tuple):
        for placement in gee_placement:
            placements += _convert(placement)
    else:
        placements += _convert(gee_placement)
    return placements



# 3. Convert the strategy into a form PyTorch can recognize.
from torch.distributed._tensor.op_schema import (
    DTensorSpec,
    OpSchema,
    OutputSharding,
    OutputSpecType,
    PlacementStrategy,
)

def transform_strategy(gm,parallel_strategies,mesh)-> Dict[Node, PlacementStrategy]:
    """Map fx graph nodes to torch PlacementStrategy objects.

    For every node of ``gm`` whose name appears in ``parallel_strategies``,
    convert the geesibling strategy's input/output placement specs into a
    torch ``PlacementStrategy`` built on ``mesh``.

    Parameters:
    gm: GraphModule whose graph nodes are scanned.
    parallel_strategies: dict mapping node name -> geesibling strategy.
    mesh: torch DeviceMesh used for every DTensorSpec.

    Returns:
    Dict[Node, PlacementStrategy]
    """
    node_str_map = {}
    for node in gm.graph.nodes:
        if node.name not in parallel_strategies:
            continue
        gee_strategy = parallel_strategies[node.name]
        # Convert each input spec (if any) into a DTensorSpec on the mesh.
        converted_inputs = []
        if gee_strategy.input_specs:
            converted_inputs = [
                DTensorSpec(mesh=mesh, placements=transform_placement(spec.placements))
                for spec in gee_strategy.input_specs
            ]
        converted_output = DTensorSpec(
            mesh=mesh,
            placements=transform_placement(gee_strategy.output_specs.placements),
        )
        node_str_map[node] = PlacementStrategy(
            input_specs=converted_inputs,
            output_specs=converted_output,
        )
    return node_str_map

from torch.distributed.tensor.parallel.style import (
    ParallelStyle,PrepareModuleInput
)
from torch.distributed._tensor import DeviceMesh, DTensor, Placement, Replicate, Shard, distribute_tensor, distribute_module
#TODO handle the case where placements is given as a list,
#TODO and convert it to a list where needed
class ShardParallel(ParallelStyle):
    """
    Modified from ColwiseParallel(ParallelStyle).

    A ParallelStyle driven by explicit per-tensor layouts: one placement
    per input, one for the output, and one per named parameter, instead of
    a fixed column-wise scheme.

    Parameters:
    input_layouts: Optional[Placement]
    output_layouts: Optional[Placement]
    param_layouts:Dict[str,Optional[Placement]]
    """
    def __init__(
        self,
        *,
        input_layouts: Optional[Placement] = None,
        output_layouts: Optional[Placement] = None,
        use_local_output: bool = True,
        param_layouts:Dict[str,Optional[Placement]] = None,
    ):
        super().__init__()
        #self.input_layouts = input_layouts
        # Default to replicated input/output when no layout is supplied.
        self.input_layouts = input_layouts or (Replicate(),)
        self.output_layouts = output_layouts or Replicate()
        #self.output_layouts = output_layouts
        self.use_local_output = use_local_output
        # May be None; _partition_fn requires it to be populated before use.
        self.param_layouts = param_layouts


    @staticmethod
    def _prepare_input_fn(input_layouts, inputs, device_mesh):
        # Wrap each local input tensor into a DTensor with its declared layout.
        prepared_inputs = []
        if not isinstance(inputs, tuple):
            inputs = (inputs,)
        for inp, input_layout in zip(inputs, input_layouts):
            if input_layout is not None:
                if not isinstance(inp, DTensor):
                    input_tensor = DTensor.from_local(inp, device_mesh, [input_layout], run_check=False)
                    prepared_inputs.append(input_tensor)
        # NOTE(review): inputs that are already DTensors, or whose layout is
        # None, are silently dropped from the returned tuple, which changes
        # the arity of the wrapped forward call — confirm this is intended.
        return tuple(prepared_inputs)

    def _partition_fn(self, name, module, device_mesh):
        # Replace every parameter with a DTensor distributed according to the
        # layout recorded under the same name in self.param_layouts.
        # Raises KeyError if a parameter name is missing from param_layouts.
        for name, param in module.named_parameters():
            dist_param = nn.Parameter(
                distribute_tensor(param, device_mesh, [self.param_layouts[name]])
            )
            module.register_parameter(name, dist_param)


    @staticmethod
    def _prepare_output_fn(output_layouts, use_local_output, outputs, device_mesh):
        # outputs is a shard on last dimension DTensor, i.e. Shard(-1)
        # There can be more than one output, so handle tuples as well.
        if isinstance(outputs,tuple):
            outputs_return=[]
            for output in outputs:
                # NOTE(review): output.placements is a tuple while
                # output_layouts is a single Placement, so this comparison is
                # always True and every output gets redistributed; also the
                # tuple branch ignores use_local_output — confirm intended.
                if output.placements != output_layouts:
                    output = output.redistribute(placements=[output_layouts],async_op=True)
                    outputs_return.append(output.to_local())
                else:
                    outputs_return.append(output.to_local())
            return tuple(outputs_return) 
        else:
            if outputs.placements != output_layouts:
                outputs = outputs.redistribute(placements=[output_layouts],async_op=True)
            return outputs.to_local() if use_local_output else outputs

    def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
        # Wire the partition/input/output hooks into distribute_module.
        return distribute_module(
            module,
            device_mesh,
            self._partition_fn,
            partial(self._prepare_input_fn, self.input_layouts),
            partial(self._prepare_output_fn, self.output_layouts, self.use_local_output),
        )

    def __str__(self):
        """Return a concise string representation of this object."""
        param_str = ""
        if self.param_layouts:
            param_items = [f"'{k}': {v}" for k, v in self.param_layouts.items()]
            param_str = "{" + ", ".join(param_items) + "}"

        return (f"ShardParallel("
                f"input={self.input_layouts}, "
                f"output={self.output_layouts}, "
                f"params={param_str})")


def TransformPlan(exported_program,strategy_map,mesh)-> Dict[str, ShardParallel]:
    """ 
    Convert the operator strategy to a module sharding strategy for pytorch.

    Walks the exported fx graph, groups per-operator placement strategies by
    the nn.Module each call_function node belongs to (via nn_module_stack
    metadata), and builds one ShardParallel per module holding its input,
    output and parameter layouts. Normalization/embedding-like modules are
    skipped by name-substring heuristics.

    Parameters: 
    exported_program:ExportedProgram
    strategy_map:Dict[str, PlacementStrategy]
    mesh:DeviceMesh
    Returns: 
    Dict[str, ShardParallel]
    """
    # parallelize_plan={module_name:ShardParallel()}
    node_str_map = transform_strategy(exported_program.graph_module,strategy_map,mesh)
    # param_node = {}
    parallelize_plan = {}
    module_param_node ={}
    module_input={}
    module_param = {}
    input_node = []
    from torch.export.graph_signature import InputKind
    # Split the graph signature into parameter/buffer placeholders and
    # genuine user inputs.
    for input_spec in exported_program.graph_signature.input_specs:
        if input_spec.kind==InputKind.PARAMETER or input_spec.kind==InputKind.BUFFER:    # these are parameters/buffers
            module_param_node[input_spec.arg.name] = input_spec.target
        elif input_spec.kind==InputKind.USER_INPUT and hasattr(input_spec.arg, 'name'):
            input_node.append(input_spec.arg.name)
    # print(module_param_node)
    for node in exported_program.graph.nodes:
        if node not in node_str_map:
            continue
        if node.op == 'call_function':
            # Identify which module this function call belongs to.
            nn_module_stack = node.meta['nn_module_stack']
            nn_module_key = next(reversed(nn_module_stack))
            nn_module = nn_module_stack[nn_module_key][0]   #c_fc
            # Skip normalization / embedding-like modules by name heuristic.
            if 'ln_' in nn_module or 'embed' in nn_module or 'layernorm' in nn_module or 'norm' in nn_module or 'wpe' in nn_module:
                continue
            # parameter bookkeeping for this module
            if nn_module not in module_param:
                module_param[nn_module] = {}
            # print(node.name,nn_module)
            stratgy = node_str_map[node]
            i=0
            # Find predecessors that load parameters or are user inputs, to
            # record their input specs. i indexes stratgy.input_specs and is
            # only advanced for fx-Node arguments.
            for pre_node in node.args:
                if isinstance(pre_node,torch.fx.node.Node):
                    if pre_node.name in module_param_node:  # node that loads a parameter
                        module_param[nn_module][module_param_node[pre_node.name]] = stratgy.input_specs[i].placements[0]
                    else:# a (user) input
                        if pre_node.name in input_node:
                            # Keep at most two input layouts: the first seen
                            # plus the latest one.
                            if nn_module in module_input:
                                module_input[nn_module] = (module_input[nn_module][0],stratgy.input_specs[i].placements[0])
                            else:
                                module_input[nn_module] = (stratgy.input_specs[i].placements[0],)
                            if nn_module in parallelize_plan:
                                parallelize_plan[nn_module].input_layouts = module_input[nn_module]
                                # print("change input ",module_input[nn_module])
                        if nn_module not in module_input:
                            module_input[nn_module] = (stratgy.input_specs[i].placements[0],)
                            # update the recorded input layout on an existing plan
                            if nn_module in parallelize_plan:
                                parallelize_plan[nn_module].input_layouts = module_input[nn_module]
                                # print("change input ",module_input[nn_module])
                    # arange outputs behave like graph inputs downstream.
                    if str(pre_node.target)== 'aten.arange.start_step':
                        input_node.append(node.name)
                        # print("input_node",input_node)
                    i+=1
            # print(module_input)
            if nn_module in parallelize_plan:
                # Update the output and parameter layouts of an existing plan.
                if isinstance(parallelize_plan[nn_module],ShardParallel):
                    parallelize_plan[nn_module].output_layouts = stratgy.output_spec.placements[0]
                    parallelize_plan[nn_module].param_layouts = module_param[nn_module]
                    # print(nn_module," change ")
                    # print('change param ',module_param[nn_module])
                    # print('change output ',stratgy.output_spec.placements[0])
            elif nn_module in module_param and nn_module in module_input and nn_module not in parallelize_plan:
                if module_param[nn_module] and module_input[nn_module]:
                    module_output = stratgy.output_spec.placements[0]
                    parallelize_plan[nn_module]=ShardParallel(input_layouts=module_input[nn_module],output_layouts=module_output,param_layouts=module_param[nn_module])
                    # print(nn_module,' add plan')
                    # print('input ',module_input[nn_module])
                    # print('param ',module_param[nn_module])
                    # print('output ',module_output)
            elif module_param[nn_module]:
                # Module has parameters but no recorded input: plan with
                # param/output layouts only.
                module_output = stratgy.output_spec.placements[0]
                parallelize_plan[nn_module]=ShardParallel(output_layouts=module_output,param_layouts=module_param[nn_module])
                # print(nn_module,' add plan only param')
                # print('param ',module_param[nn_module])
                # print('output ',module_output)
        else:
            # Parameter node: handles the case of sharding embedding weights.
            if node.name in module_param_node:
                module_name = module_param_node[node.name].replace('.weight', '').replace('.bias', '')
                if module_name not in parallelize_plan and 'ln_' not in module_name and 'wpe' not in module_name:
                    stratgy = node_str_map[node]
                    # print(module_name,stratgy.output_spec.placements[0],"--------------")
                    parallelize_plan[module_name] = ShardParallel(param_layouts={module_param_node[node.name].split(".")[-1]:stratgy.output_spec.placements[0]})
 
    # Normalize param_layouts keys to their leaf names ('weight', 'bias')
    # and drop plans that do not shard a 'weight'.
    for key in list(parallelize_plan.keys()):
        if isinstance(parallelize_plan[key],ShardParallel):
            param_layouts = parallelize_plan[key].param_layouts
            new_param_layouts={}
            for param_key in param_layouts:
                new_param_layouts[param_key.split('.')[-1]] = param_layouts[param_key]
            # print(key,new_param_layouts)
            parallelize_plan[key].param_layouts = new_param_layouts
            #if 'weight' not in parallelize_plan[key].param_layouts and 'inv_freq' not in parallelize_plan[key].param_layouts:
            if 'weight' not in parallelize_plan[key].param_layouts:
                del parallelize_plan[key]
    return parallelize_plan

def has_module_param(node, module_param_node):
    """Return True if any fx-Node argument of ``node`` loads a module parameter.

    Parameters:
    node: torch.fx node whose ``args`` are inspected.
    module_param_node: dict keyed by the names of parameter-loading nodes.

    Returns:
    bool: True if at least one argument is a parameter to be sharded.
    """
    # any() with a generator replaces the manual loop; membership is tested
    # directly on the dict (no .keys() needed).
    return any(
        isinstance(arg, torch.fx.node.Node) and arg.name in module_param_node
        for arg in node.args
    )

def GetProfile(model,inputs,geesiblingGraph):
    """Profile one forward pass and attach per-op cost/memory to graph nodes.

    Runs ``model(**inputs)`` under the autograd profiler (CUDA timing and
    memory enabled), aggregates events per op name, then writes
    ``compute_cost`` (CUDA time * 100) and ``temporary_memory`` onto each
    matching node of ``geesiblingGraph``.

    Parameters:
    model: torch module to profile.
    inputs: dict of keyword inputs for the forward pass.
    geesiblingGraph: geesibling Graph whose nodes receive the costs.
    """
    with torch.autograd.profiler.profile(enabled=True, use_cuda=True, record_shapes=False, profile_memory=True) as prof:
        model(**inputs)
    op_stats = {}
    # Aggregate profiler events keyed by the op name without 'aten::'.
    for event in prof.key_averages():
        new_key = re.sub(r'^aten::', '', event.key)
        op_stats[new_key] = {
            'CPUtime': event.cpu_time,
            'CUDAtime': event.cuda_time,
            'CUDAmemory': event.cuda_memory_usage,
        }
    # dropout and native_layer_norm need special handling: their graph node
    # names do not start with the profiler op name.
    for node in geesiblingGraph.nodes:
        node_name = node.name.split('_')[0]
        if node_name in op_stats:
            node.compute_cost = int(op_stats[node_name]['CUDAtime'])*100
            node.temporary_memory = op_stats[node_name]['CUDAmemory']
        elif node.name == 'native_layer_norm':
            node.compute_cost = int(op_stats['native_layer_norm']['CUDAtime'])*100
            node.temporary_memory = op_stats['native_layer_norm']['CUDAmemory']
        elif 'dropout' in node.name:
            # BUGFIX: the original checked for 'droupout' (typo), so this
            # branch never fired; also guard the lookup so a missing stat
            # cannot raise KeyError/IndexError.
            parts = node.name.split('_')
            if len(parts) > 1 and parts[1] in op_stats:
                node.compute_cost = int(op_stats[parts[1]]['CUDAtime'])*100
                node.temporary_memory = op_stats[parts[1]]['CUDAmemory']

def SearchTensorParallel(geesiblingGraph:Graph,mesh)->Dict[str, PlacementStrategy]:
    """
    Search to get the best tensor parallel strategy.

    Parameters: 
    geesiblingGraph: geesibling Graph 
    mesh: torch DeviceMesh; only its size is used here.

    Returns: 
    Dict[str, PlacementStrategy]
    """
    world_size = mesh.size()
    # BUGFIX: the mesh was hard-coded to [[0,1,2,3]], which only worked for
    # a world size of 4; derive the 1-D device mesh from world_size instead
    # (this is what the previously commented-out line intended).
    device_mesh = GeeMesh(device_type ='cuda',mesh = np.array([np.arange(world_size),]))
    strategies_constructor = StrategiesConstructor(geesiblingGraph,device_mesh)
    # Enumerate every node's candidate sharding strategies and their costs.
    # dict{node_name : PlacementStrategy}
    strategy_map = strategies_constructor.build_strategies_and_cost()

    # Solve for the globally optimal strategy combination and annotate it.
    solution = solve_solution(geesiblingGraph, strategies_constructor)
    best_strategy_map = solution_annotation_pass(solution,strategies_constructor)
    return best_strategy_map

def getExportedProgram(model:torch.nn.Module,inputs:Tuple)->ExportedProgram:
    """ 
    use torch.export.export to get the pytorch graph of model  
    Parameters: 
    model:torch.nn.Module 
    inputs:Tuple
    Returns: 
    ExportedProgram
    """
    with torch.no_grad():
        exported_program = torch.export.export(
            model,
            args=(),
            kwargs=inputs
        ).run_decompositions()
    return exported_program

def getGeeGraph(exported_program:ExportedProgram)->Graph:
    """ 
    Convert an ExportedProgram into a geesibling Graph.

    Parameters: 
    exported_program:ExportedProgram 

    Returns: 
    Geesibling Graph
    """
    # Thin wrapper: delegate the conversion to graphToGeeGraph.
    return graphToGeeGraph(exported_program)

def TPPolicy(model,tokenizer,mesh,pre = True,add_dim = None):
    """Derive a tensor-parallel plan for ``model`` end to end.

    Builds a dummy input with ``tokenizer``, exports the model, converts it
    to a geesibling graph, profiles per-op costs, searches for the best
    sharding strategy, and converts the result into a module-level
    parallelize plan.

    Parameters:
    model: torch module to parallelize.
    tokenizer: tokenizer used to build the dummy input.
    mesh: torch DeviceMesh the plan targets.
    pre: when True, feed token ids and attention mask; otherwise feed
        float ``inputs_embeds`` expanded to ``add_dim`` features.
    add_dim: embedding width used when ``pre`` is False.

    Returns:
    Dict[str, ShardParallel]: parallelize plan keyed by module name.
    """
    text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors='pt')
    input_size =encoded_input['input_ids']
    if not pre:
        # Build float 'inputs_embeds' of shape (*ids.shape, add_dim) by
        # broadcasting the token ids along a new trailing dimension.
        input_size = input_size.unsqueeze(-1)
        new_shape = list(input_size.shape)
        new_shape[-1] = add_dim
        input_size = input_size.expand(new_shape)
        input_size = input_size.to(torch.float32)
        inputs ={'inputs_embeds': input_size}
    else:
        inputs ={'input_ids':input_size,'attention_mask':encoded_input['attention_mask']}

    # Pipeline: export -> geesibling graph -> profile -> search -> plan.
    # (Large blocks of commented-out debug logging were removed here.)
    exported_program = getExportedProgram(model,inputs)
    geesiblingGraph = getGeeGraph(exported_program)
    GetProfile(model,inputs,geesiblingGraph)
    strategy_map = SearchTensorParallel(geesiblingGraph,mesh)  #TODO
    parallelize_plan = TransformPlan(exported_program,strategy_map,mesh)
    return parallelize_plan
