# Copyright (c) 2021-2023, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import datetime
import os
import torch
import einops

# Megatron-LM parameter name -> destination (flash-attn style) parameter name.
# The per-layer keys are used with a 'layers.<n>.' source prefix and a
# 'transformer.layers.<n>.' destination prefix (see convert_checkpoint);
# 'embedding' and 'output_layer' map whole-model tensors.
name_mapping = {
    'embedding': 'embeddings.word_embeddings.weight',
    'input_layernorm.weight': 'norm1.weight',
    'self_attention.query_key_value.weight': 'mixer.Wqkv.weight',
    'self_attention.dense.weight': 'mixer.out_proj.weight',
    'post_attention_layernorm.weight': 'norm2.weight',
    'mlp.dense_h_to_4h.weight': 'mlp.fc1.weight',
    'mlp.dense_4h_to_h.weight': 'mlp.fc2.weight',
    'final_layernorm.weight': 'ln_f.weight',
    'output_layer': 'lm_head.weight',
}


def convert_qkv(Wqkv, args, bqkv=None):
    """Convert a packed QKV projection from Megatron layout to the destination layout.

    Megatron packs the QKV rows as (nheads, 3, headdim) flattened, while the
    destination format expects (3, nheads, headdim) flattened.

    Args:
        Wqkv: packed QKV weight of shape ((nheads * 3 * headdim), hidden_dim)
            for plain MHA, or ((num_query_groups * (q_per_group + 2) * headdim),
            hidden_dim) when args.group_query_attention is set.
        args: parsed CLI namespace; reads hidden_size, num_attention_heads,
            group_query_attention, num_query_groups, rotary_interleaved_patch.
        bqkv: optional packed QKV bias of length nheads * 3 * headdim.

    Returns:
        (Wqkv_new, bqkv_new) in (3, nheads, headdim)-flattened order;
        bqkv_new is None when bqkv is None.
    """
    hidden_dim = args.hidden_size
    headdim = args.hidden_size // args.num_attention_heads
    nheads = args.num_attention_heads
    bqkv_new = None

    # GQA: expand the grouped K/V heads so the tensor looks like full MHA
    # (every query head gets its own K/V copy) before the layout permute.
    if args.group_query_attention:
        num_query_groups = args.num_query_groups
        q_per_group = nheads // num_query_groups
        grouped = Wqkv.view(num_query_groups,
                            q_per_group + 2,   # q heads + 1 key + 1 value
                            headdim,
                            Wqkv.size()[-1])
        wq = grouped[:, 0:q_per_group, :, :]
        wk = grouped[:, q_per_group:q_per_group + 1, :, :]
        wv = grouped[:, q_per_group + 1:q_per_group + 2, :, :]

        # Replicate each group's single K/V head across its query heads.
        wk = wk.repeat_interleave(q_per_group, dim=1)
        wv = wv.repeat_interleave(q_per_group, dim=1)

        wq = wq.contiguous().view([nheads, 1, headdim, hidden_dim])
        wk = wk.contiguous().view([nheads, 1, headdim, hidden_dim])
        wv = wv.contiguous().view([nheads, 1, headdim, hidden_dim])
        # Fix: flatten back to 2-D Megatron layout (nheads, 3, headdim) — the
        # old code left this 4-D, which broke the rearrange step below.
        Wqkv = torch.cat((wq, wk, wv), dim=1).reshape(
            nheads * 3 * headdim, hidden_dim)

    # Megatron does not support rotary_interleaved=True, so pre-permute each
    # head's rows from interleaved (pairwise) to half-split rotary layout.
    if args.rotary_interleaved_patch:
        def permute(w):
            # NOTE(review): assumes nheads * headdim == hidden_dim — confirm
            # for models where that does not hold.
            return w.view(args.num_attention_heads, 3, 2, headdim // 2,
                          hidden_dim).transpose(3, 2).reshape(
                              hidden_dim * 3, hidden_dim)
        Wqkv = permute(Wqkv)

    # (nheads three headdim) ... -> (three nheads headdim) ...
    # Fix: previously Wqkv_new stayed None when rotary_interleaved_patch was
    # off, so the non-patched path crashed here.
    tail = Wqkv.shape[1:]
    Wqkv_new = (Wqkv.view(nheads, 3, headdim, *tail)
                    .transpose(0, 1)
                    .reshape(3 * nheads * headdim, *tail))

    if bqkv is not None:
        # Fix: the bias now uses the same Megatron -> destination direction
        # as the weight; the old einops pattern was inverted
        # ('(three nheads headdim) -> (nheads three headdim)').
        bqkv_new = (bqkv.view(nheads, 3, headdim)
                        .transpose(0, 1)
                        .reshape(3 * nheads * headdim))
    return Wqkv_new, bqkv_new


def convert_fc1(Wfc1, args):
    """Swap the two stacked halves of the fused MLP fc1 weight.

    The source checkpoint stacks the two fc1 projections in the opposite
    order from the destination format, so rows [A; B] become [B; A].
    `args` is unused but kept for a uniform converter signature.
    """
    half = Wfc1.size(0) // 2
    first_half, second_half = torch.split(Wfc1, half)
    return torch.cat((second_half, first_half))


def convert_checkpoint(src_state_dict, dst_state_dict_path, args):
    """Convert a Megatron-LM checkpoint state dict to the destination layout.

    Walks the Megatron 'model' -> 'language_model' hierarchy, renames and
    permutes every tensor via name_mapping / convert_qkv / convert_fc1, and
    saves the result with torch.save. Consumed entries are deleted from the
    source dicts so the leftovers can be reported at the end.

    Args:
        src_state_dict: loaded Megatron checkpoint dict ('model_optim_rng.pt').
        dst_state_dict_path: output path passed to torch.save.
        args: parsed CLI namespace (num_layers, data_type, true_vocab_size, ...).
    """
    # Target dtype for the converted weights ('fp32' is the default branch).
    params_dtype = torch.float
    if args.data_type == 'bf16':
        params_dtype = torch.bfloat16
    elif args.data_type == 'fp16':
        params_dtype = torch.half

    model_state_dict = src_state_dict['model']
    language_model_state_dict = model_state_dict['language_model']
    embedding_state_dict = language_model_state_dict['embedding']
    encoder_state_dict = language_model_state_dict['encoder']

    dst_state_dict = dict()

    # Trim vocab padding rows; args.true_vocab_size may be None, in which
    # case the slice keeps the whole table.
    embedding_weight_state_dict = \
        embedding_state_dict['word_embeddings']['weight'][:args.true_vocab_size, :]
    dst_state_dict['transformer.' + name_mapping['embedding']] = \
        embedding_weight_state_dict.to(params_dtype)
    del embedding_state_dict['word_embeddings']['weight']

    # Hoisted out of the layer loop: the rotary inv_freq table is the same
    # tensor for every layer, so load it from disk once instead of per layer.
    inv_freq = torch.load('examples/aquila/files/aquila_rotary_emb_inv_freq.pt')

    for layer_num in range(args.num_layers):
        src_prefix = 'layers.' + str(layer_num) + "."
        src_norm1_name = "input_layernorm.weight"
        src_qkv_name = "self_attention.query_key_value.weight"
        src_proj_name = "self_attention.dense.weight"
        src_norm2_name = "post_attention_layernorm.weight"
        src_fc1_name = "mlp.dense_h_to_4h.weight"
        src_fc2_name = "mlp.dense_4h_to_h.weight"

        dst_prefix = 'transformer.layers.' + str(layer_num) + "."

        # NOTE(review): layernorm weights are copied without a dtype cast,
        # matching the original behavior — presumably kept in source dtype.
        dst_state_dict[dst_prefix + name_mapping[src_norm1_name]] = \
            encoder_state_dict[src_prefix + src_norm1_name]
        del encoder_state_dict[src_prefix + src_norm1_name]

        # Fix: the old code called .to(params_dtype) on the stored tensor but
        # discarded the result, leaving the QKV weight in its source dtype.
        qkv_weight, _ = convert_qkv(
            encoder_state_dict[src_prefix + src_qkv_name], args)
        dst_state_dict[dst_prefix + name_mapping[src_qkv_name]] = \
            qkv_weight.to(params_dtype)
        del encoder_state_dict[src_prefix + src_qkv_name]

        dst_state_dict[dst_prefix + name_mapping[src_proj_name]] = \
            encoder_state_dict[src_prefix + src_proj_name].to(params_dtype)
        del encoder_state_dict[src_prefix + src_proj_name]

        dst_state_dict[dst_prefix + name_mapping[src_norm2_name]] = \
            encoder_state_dict[src_prefix + src_norm2_name]
        del encoder_state_dict[src_prefix + src_norm2_name]

        # The two fc1 halves are stacked in the opposite order downstream.
        dst_state_dict[dst_prefix + name_mapping[src_fc1_name]] = \
            convert_fc1(encoder_state_dict[src_prefix + src_fc1_name], args).to(params_dtype)
        del encoder_state_dict[src_prefix + src_fc1_name]

        dst_state_dict[dst_prefix + name_mapping[src_fc2_name]] = \
            encoder_state_dict[src_prefix + src_fc2_name].to(params_dtype)
        del encoder_state_dict[src_prefix + src_fc2_name]

        dst_state_dict[dst_prefix + 'mixer.rotary_emb.inv_freq'] = inv_freq

        print(f"[INFO] Layer {layer_num} is converted.")

    dst_state_dict['transformer.' + name_mapping['final_layernorm.weight']] = \
        encoder_state_dict['final_layernorm.weight']
    del encoder_state_dict['final_layernorm.weight']

    dst_state_dict[name_mapping['output_layer']] = \
        language_model_state_dict['output_layer']['weight'][:args.true_vocab_size,:].to(params_dtype)
    del language_model_state_dict['output_layer']

    # Whatever is left in the source dicts was deliberately not converted;
    # surface it so the user can verify nothing important was dropped.
    print(f"[WARNING] The following parameters are not converted: {embedding_state_dict}")
    print(f"[WARNING] The following parameters are not converted: {encoder_state_dict}")

    torch.save(dst_state_dict, dst_state_dict_path)


def main():
    """Parse CLI arguments, load the Megatron checkpoint, and convert it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-dir", "-input_dir", "-i",
                        help="folder name of input files", required=True)
    parser.add_argument("--output-dir", "-output_dir", "-o",
                        help="folder name of output files", required=True)
    parser.add_argument("--num-layers", type=int,
                        help="The number of transformer layers")
    parser.add_argument("--hidden-size", type=int,
                        help="The number of hidden size")
    parser.add_argument("--num-attention-heads", type=int,
                        help="The number of attention heads")
    parser.add_argument('--group-query-attention', action='store_true',
                        help='Use group-query attention.')
    parser.add_argument('--num-query-groups', type=int, default=1)
    parser.add_argument("--data-type", "-data_type", "-d",
                        choices=["bf16", "fp32", "fp16"],
                        default="fp32", help=" data type of the parameters")
    parser.add_argument('--rotary-interleaved-patch', action='store_true',
                        help='Patch for loading models using interleaved rotary position embeddings.')
    parser.add_argument('--true-vocab-size', type=int, default=None,
                        help='original size of vocab, if specified will trim padding from embedding table.')

    args = parser.parse_args()

    # Echo the parsed configuration for the run log.
    print("\n=============== Argument ===============")
    for name, value in vars(args).items():
        print(f"{name}: {value}")
    print("========================================")

    megatron_ckpt_path = os.path.join(args.input_dir, "model_optim_rng.pt")
    megatron_state_dict = torch.load(megatron_ckpt_path, map_location="cpu")
    converted_ckpt_path = os.path.join(args.output_dir, "pytorch_model.bin")

    begin = datetime.datetime.now()
    convert_checkpoint(megatron_state_dict, converted_ckpt_path, args)
    elapsed = datetime.datetime.now() - begin
    print(f"[INFO] Spent {elapsed} (h:m:s) to convert the model")


# Script entry point: run the conversion only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
