import torch

# Total number of attention heads assumed by gather_gqa when splitting the
# fused qkv weight.
# NOTE(review): the reference config quoted in merge_dst_data's docstring says
# --num-attention-heads 72, and a [1408, 9216] qkv shard only splits evenly
# with 72 heads / 8 groups (1408 = (9 + 2) * 128); with 64 the integer
# division in gather_gqa truncates — confirm this value matches the
# checkpoint being converted.
head_num = 64
# Number of KV groups for grouped-query attention; query heads per KV group
# = head_num // group_query_attention.
group_query_attention = 8


def gather_from_tp(src_data: dict, layer_num: int, tp_rank_list: list, ep_rank: int, param_name: str, dim):
    """Concatenate one parameter's tensor-parallel shards along ``dim``.

    Shards are read from ``src_data[(layer_num, tp_rank, ep_rank)][param_name]``
    for every ``tp_rank`` in ``tp_rank_list`` and joined in list order.
    """
    shards = []
    for tp_rank in tp_rank_list:
        shards.append(src_data[(layer_num, tp_rank, ep_rank)][param_name])
    return torch.cat(shards, dim=dim)


def gather_gqa(src_data: dict, layer_num: int, tp_rank_list: list, ep_rank: int, param_name: str,
               heads=None, query_groups=None):
    """Merge tensor-parallel shards of a fused GQA linear_qkv weight.

    Each TP shard is laid out as [q | k | v] along dim 0.  The shards are
    split, the q/k/v pieces are concatenated across TP ranks separately,
    and the three merged pieces are re-fused in [q | k | v] order.

    Args:
        src_data: maps (layer_num, tp_rank, ep_rank) -> {param_name: tensor}.
        layer_num: layer whose qkv weight is gathered.
        tp_rank_list: tensor-parallel ranks to merge, in order.
        ep_rank: expert-parallel rank to read the shards from.
        param_name: key of the fused qkv weight inside each shard dict.
        heads: total attention head count; defaults to module-level head_num.
        query_groups: number of KV groups; defaults to module-level
            group_query_attention.

    Returns:
        The merged [q | k | v] weight tensor (dim 0 is the fused head dim).

    Raises:
        ValueError: if the shard's dim-0 size is not divisible by
            (query heads per group + 2) — previously this truncated silently
            and produced misaligned q/k/v slices.
    """
    heads = head_num if heads is None else heads
    query_groups = group_query_attention if query_groups is None else query_groups
    query_num = heads // query_groups  # query heads per KV group
    rows = src_data[(layer_num, tp_rank_list[0], ep_rank)][param_name].shape[0]
    if rows % (query_num + 2) != 0:
        raise ValueError(
            f"qkv shard dim 0 ({rows}) is not divisible by query_num + 2 "
            f"({query_num + 2}); check head_num / group_query_attention "
            "against the checkpoint")
    dim_ph = rows // (query_num + 2)  # rows per head slot within one shard
    q_parts, k_parts, v_parts = [], [], []
    for tp_rank in tp_rank_list:
        # e.g. 72 heads / 8 groups / TP=8:
        # [1408, 9216] -> [1152, 9216] (q), [128, 9216] (k), [128, 9216] (v)
        tp_param = src_data[(layer_num, tp_rank, ep_rank)][param_name]
        q_parts.append(tp_param[:dim_ph * query_num, :])
        k_parts.append(tp_param[dim_ph * query_num: dim_ph * (query_num + 1), :])
        v_parts.append(tp_param[dim_ph * (query_num + 1):, :])
    # Concatenate each piece across TP ranks:
    # [1152, 9216], [128, 9216], [128, 9216] -> [9216, 9216], [1024, 9216], [1024, 9216]
    q = torch.cat(q_parts, dim=0)
    k = torch.cat(k_parts, dim=0)
    v = torch.cat(v_parts, dim=0)
    # Re-fuse as [q | k | v]: [9216, 9216], [1024, 9216], [1024, 9216] -> [11264, 9216]
    return torch.cat([q, k, v], dim=0)


def gather_gmm(src_data: dict, layer_num: int, tp_rank_list: list, ep_rank_list: list, param_name: str, dim: int):
    """Merge a grouped-expert (GMM) weight across TP and EP ranks.

    For each expert-parallel rank the tensor-parallel shards are concatenated
    along ``dim`` first; the per-EP results are then concatenated along the
    same ``dim`` in ``ep_rank_list`` order.

    dim = 0: [36864, 9216] -> [8 * 36864, 9216] -> [2 * 8 * 36864, 9216]
    dim = 1: [9216, 73728] -> [9216, 8 * 73728] -> [9216, 2 * 8 * 73728]
    """
    merged_per_ep = [
        torch.cat(
            [src_data[(layer_num, tp_rank, ep_rank)][param_name] for tp_rank in tp_rank_list],
            dim=dim,
        )
        for ep_rank in ep_rank_list
    ]
    return torch.cat(merged_per_ep, dim=dim)


def merge_dst_data(src_data: dict,
                   layer_nums: list,
                   tp_rank_list: list,
                   ep_rank_list: list) -> dict:
    """Merge per-rank checkpoint shards into full per-layer parameter dicts.

    Args:
        src_data: maps (layer_num, tp_rank, ep_rank) -> {param_name: tensor}.
        layer_nums: layers to merge; sorted internally.
        tp_rank_list: tensor-parallel ranks to gather, in list order.
        ep_rank_list: expert-parallel ranks to gather, in list order.

    Returns:
        {layer_num: {param_name: merged tensor}} with unsharded weights.

    Non-expert parameters are read from ep_rank_list[0] only, and layernorm /
    router weights from tp_rank_list[0] only — i.e. they are assumed to be
    replicated across EP (and, for the latter, across TP).  TODO(review):
    confirm replication against the checkpoint writer.

    Reference config the per-shard shapes below come from (TP = 8):

    --hidden-size 9216 \
    --ffn-hidden-size 4608 \
    --num-attention-heads 72 \
    --num-query-groups 8 \

    embedding.word_embeddings.weight torch.Size([15872, 9216])
    decoder.layers.0.input_layernorm.weight torch.Size([9216])
    decoder.layers.0.self_attention.linear_proj.weight torch.Size([9216, 1152])
    decoder.layers.0.self_attention.linear_qkv.weight torch.Size([1408, 9216])
    decoder.layers.0.pre_mlp_layernorm.weight torch.Size([9216])
    decoder.layers.0.mlp.router.weight torch.Size([128, 9216])
    decoder.layers.0.mlp.experts.weight1 torch.Size([9216, 73728])
    decoder.layers.0.mlp.experts.weight2 torch.Size([36864, 9216])
    decoder.layers.0.mlp.shared_experts.linear_fc1.weight torch.Size([1152, 9216])
    decoder.layers.0.mlp.shared_experts.linear_fc2.weight torch.Size([9216, 576])
    decoder.final_layernorm.weight torch.Size([9216])
    output_layer.weight torch.Size([15872, 9216])
    """
    layer_nums = sorted(layer_nums)
    dst_dict = {i: dict() for i in layer_nums}

    # The word embedding lives only in the shards that hold layer 0.
    if 0 in layer_nums:
        # vocab-parallel embedding: [15872, 9216] -> [126976, 9216]
        dst_dict[0]["embedding.word_embeddings.weight"] = gather_from_tp(
            src_data, 0, tp_rank_list, ep_rank_list[0], "embedding.word_embeddings.weight", 0)

    # The final layernorm / output layer only exist in the shards holding the
    # last (pipeline-final) layer; probe one shard for their presence.
    last_layer_num = layer_nums[-1]
    if "decoder.final_layernorm.weight" in src_data[(last_layer_num, tp_rank_list[0], ep_rank_list[0])]:
        # [9216] — taken from the first TP rank (assumed replicated)
        dst_dict[last_layer_num]["decoder.final_layernorm.weight"] = (
            src_data[(last_layer_num, tp_rank_list[0], ep_rank_list[0])]["decoder.final_layernorm.weight"])
        # vocab-parallel output projection: [15872, 9216] -> [126976, 9216]
        dst_dict[last_layer_num]["output_layer.weight"] = gather_from_tp(
            src_data, last_layer_num, tp_rank_list, ep_rank_list[0], "output_layer.weight", 0)

    for i in layer_nums:
        # [9216] — taken from the first TP rank (assumed replicated)
        dst_dict[i][f"decoder.layers.{i}.input_layernorm.weight"] = (
            src_data[(i, tp_rank_list[0], ep_rank_list[0])][f"decoder.layers.{i}.input_layernorm.weight"])

        # fused GQA qkv, split/re-fused per head: [1408, 9216] -> [11264, 9216]
        dst_dict[i][f"decoder.layers.{i}.self_attention.linear_qkv.weight"] = gather_gqa(
            src_data, i, tp_rank_list, ep_rank_list[0], f"decoder.layers.{i}.self_attention.linear_qkv.weight")

        # row-parallel proj (concat dim 1): [9216, 1152] -> [9216, 9216]
        dst_dict[i][f"decoder.layers.{i}.self_attention.linear_proj.weight"] = gather_from_tp(
            src_data, i, tp_rank_list, ep_rank_list[0], f"decoder.layers.{i}.self_attention.linear_proj.weight", 1)

        # [9216] — taken from the first TP rank (assumed replicated)
        dst_dict[i][f"decoder.layers.{i}.pre_mlp_layernorm.weight"] = (
            src_data[(i, tp_rank_list[0], ep_rank_list[0])][f"decoder.layers.{i}.pre_mlp_layernorm.weight"])

        # [128, 9216] — router is taken from the first TP rank (assumed replicated)
        dst_dict[i][f"decoder.layers.{i}.mlp.router.weight"] = (
            src_data[(i, tp_rank_list[0], ep_rank_list[0])][f"decoder.layers.{i}.mlp.router.weight"])

        # column-parallel fc1 (concat dim 0): [1152, 9216] -> [9216, 9216]
        dst_dict[i][f"decoder.layers.{i}.mlp.shared_experts.linear_fc1.weight"] = gather_from_tp(
            src_data, i, tp_rank_list, ep_rank_list[0], f"decoder.layers.{i}.mlp.shared_experts.linear_fc1.weight", 0)

        # row-parallel fc2 (concat dim 1): [9216, 576] -> [9216, 4608] with TP=8
        dst_dict[i][f"decoder.layers.{i}.mlp.shared_experts.linear_fc2.weight"] = gather_from_tp(
            src_data, i, tp_rank_list, ep_rank_list[0], f"decoder.layers.{i}.mlp.shared_experts.linear_fc2.weight", 1)

        # grouped expert fc1 across TP then EP: [9216, 73728] -> [9216, 2 * 8 * 73728]
        dst_dict[i][f"decoder.layers.{i}.mlp.experts.weight1"] = gather_gmm(
            src_data, i, tp_rank_list, ep_rank_list, f"decoder.layers.{i}.mlp.experts.weight1", 1)

        # grouped expert fc2 across TP then EP: [36864, 9216] -> [2 * 8 * 36864, 9216]
        dst_dict[i][f"decoder.layers.{i}.mlp.experts.weight2"] = gather_gmm(
            src_data, i, tp_rank_list, ep_rank_list, f"decoder.layers.{i}.mlp.experts.weight2", 0)

    return dst_dict
