# Copyright (c) 2021-2023, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import datetime
import os
import torch
import einops

# Parameter-name mapping used by convert_checkpoint.
# Keys are the destination (Megatron-side) parameter names; values are the
# source-checkpoint name suffixes (looked up as
# src_state_dict[src_prefix + name_mapping[dst_name]]).
name_mapping = {
    'embedding': 'embeddings.word_embeddings.weight',
    'input_layernorm.weight': 'norm1.weight',
    'self_attention.query_key_value.weight': 'mixer.Wqkv.weight',
    'self_attention.dense.weight': 'mixer.out_proj.weight',
    'post_attention_layernorm.weight': 'norm2.weight',
    'mlp.dense_h_to_4h.weight': 'mlp.fc1.weight',
    'mlp.dense_4h_to_h.weight': 'mlp.fc2.weight',
    'final_layernorm.weight': 'ln_f.weight',
    'output_layer': 'lm_head.weight',
}


def convert_qkv(Wqkv, args, bqkv=None):
    """Reorder a fused QKV weight (and optional bias) for Megatron.

    The source checkpoint stores the fused QKV rows grouped as
    (3, nheads, headdim); Megatron expects (nheads, 3, headdim).

    Args:
        Wqkv: fused QKV weight of shape (3 * nheads * headdim, ...).
        bqkv: optional fused QKV bias of shape (3 * nheads * headdim,).
        args: namespace providing ``hidden_size``, ``num_attention_heads``
            and ``rotary_interleaved_patch``.

    Returns:
        Tuple ``(Wqkv_new, bqkv_new)``; ``bqkv_new`` is ``None`` when no
        bias is supplied.
    """
    nheads = args.num_attention_heads
    headdim = args.hidden_size // nheads

    def _regroup(t):
        # (three, nheads, headdim, ...) -> (nheads, three, headdim, ...),
        # done with plain torch ops (drops the unnecessary einops dependency).
        return t.reshape(3, nheads, headdim, *t.shape[1:]).transpose(0, 1).reshape(t.shape)

    Wqkv_new = _regroup(Wqkv)

    # Megatron does not support rotary_interleaved=True, so pre-permute each
    # head's rows: interleaved (even, odd) pairs are regrouped into the
    # half-split layout rotary expects. Generalized to any trailing dims
    # (the old code hard-coded a (hidden*3, hidden) reshape, inconsistent
    # with the '...' generality of the regrouping above).
    if args.rotary_interleaved_patch:
        Wqkv_new = (Wqkv_new
                    .reshape(nheads, 3, headdim // 2, 2, *Wqkv_new.shape[1:])
                    .transpose(3, 2)
                    .reshape(Wqkv_new.shape))

    bqkv_new = None
    if bqkv is not None:
        # NOTE(review): the rotary_interleaved_patch permute is not applied
        # to the bias (matches existing behavior; current callers convert
        # weights only) — confirm if a bias path with rotary is ever added.
        bqkv_new = _regroup(bqkv)
    return Wqkv_new, bqkv_new


def convert_fc1(Wfc1, args):
    """Swap the two halves of the fused fc1 weight along dim 0.

    Args:
        Wfc1: fused fc1 weight; its first dimension must be even.
        args: unused, kept for a uniform converter signature.

    Returns:
        Tensor with the bottom half stacked above the top half.
    """
    half = Wfc1.size(0) // 2
    top_half = Wfc1[:half]
    bottom_half = Wfc1[half:]
    return torch.cat((bottom_half, top_half))


def convert_checkpoint(src_state_dict, dst_state_dict_path, args):
    """Convert a HuggingFace-style 'transformer.*' state dict into a
    Megatron checkpoint and write it to ``dst_state_dict_path``.

    Args:
        src_state_dict: flat source state dict keyed by source parameter names.
        dst_state_dict_path: output path passed to ``torch.save``.
        args: namespace providing ``num_layers``, ``hidden_size``,
            ``num_attention_heads`` and ``rotary_interleaved_patch``.
    """
    # Vocab padding: source embedding/output matrices have SRC_VOCAB_SIZE
    # rows; Megatron wants them zero-padded up to PADDED_VOCAB_SIZE.
    # NOTE(review): these are model-specific constants (previously inlined
    # magic numbers) — confirm when converting a different model.
    SRC_VOCAB_SIZE = 100008
    PADDED_VOCAB_SIZE = 100096

    remaining = list(src_state_dict.keys())

    def _take(name):
        # Fetch a source tensor and mark its name as consumed.
        remaining.remove(name)
        return src_state_dict[name]

    def _pad_vocab(weight):
        # Zero-pad the vocab dimension. NOTE(review): torch.zeros defaults
        # to fp32, so the padded tensor is fp32 even for fp16 sources —
        # this matches the original behavior; confirm it is intended.
        padded = torch.zeros(PADDED_VOCAB_SIZE, args.hidden_size)
        padded[:SRC_VOCAB_SIZE, :] = weight
        return padded

    embedding_state_dict = {
        'word_embeddings': {
            'weight': _pad_vocab(_take('transformer.' + name_mapping['embedding']))}}

    encoder_state_dict = {}
    for layer_num in range(args.num_layers):
        dst_prefix = 'layers.' + str(layer_num) + "."
        src_prefix = 'transformer.layers.' + str(layer_num) + "."

        # LayerNorms and projections copy through unchanged; QKV and fc1
        # need their row layouts converted for Megatron. Insertion order
        # deliberately matches the original converter.
        norm1 = 'input_layernorm.weight'
        encoder_state_dict[dst_prefix + norm1] = _take(src_prefix + name_mapping[norm1])

        qkv = 'self_attention.query_key_value.weight'
        encoder_state_dict[dst_prefix + qkv], _ = convert_qkv(
            _take(src_prefix + name_mapping[qkv]), args)

        proj = 'self_attention.dense.weight'
        encoder_state_dict[dst_prefix + proj] = _take(src_prefix + name_mapping[proj])

        norm2 = 'post_attention_layernorm.weight'
        encoder_state_dict[dst_prefix + norm2] = _take(src_prefix + name_mapping[norm2])

        fc1 = 'mlp.dense_h_to_4h.weight'
        encoder_state_dict[dst_prefix + fc1] = convert_fc1(
            _take(src_prefix + name_mapping[fc1]), args)

        fc2 = 'mlp.dense_4h_to_h.weight'
        encoder_state_dict[dst_prefix + fc2] = _take(src_prefix + name_mapping[fc2])

        print(f"[INFO] Layer {layer_num} is converted.")

    encoder_state_dict['final_layernorm.weight'] = _take(
        'transformer.' + name_mapping['final_layernorm.weight'])

    output_layer_weight_state_dict = {
        'weight': _pad_vocab(_take(name_mapping['output_layer']))}

    if remaining:
        print(f"[WARNING] The following parameters are not converted: {remaining}")

    dst_state_dict = {
        'args': None,
        'checkpoint_version': 3.0,
        'iteration': 0,
        'model': {
            'language_model': {
                'embedding': embedding_state_dict,
                'encoder': encoder_state_dict,
                'output_layer': output_layer_weight_state_dict}},
        'optimizer': None,
        'opt_param_scheduler': None,
        'rng_state': None,
    }
    torch.save(dst_state_dict, dst_state_dict_path)


def main():
    """CLI entry point: parse arguments and run the checkpoint conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-dir", "-input_dir", "-i",
                        help="folder name of input files", required=True)
    parser.add_argument("--output-dir", "-output_dir", "-o",
                        help="folder name of output files", required=True)
    # The three model-shape arguments are now required: conversion cannot
    # proceed without them, and previously a missing value crashed later
    # inside convert_checkpoint with a cryptic TypeError (range(None)).
    parser.add_argument(
        "--num-layers",
        type=int,
        required=True,
        help="The number of transformer layers"
    )
    parser.add_argument(
        "--hidden-size",
        type=int,
        required=True,
        help="The number of hidden size"
    )
    parser.add_argument(
        "--num-attention-heads",
        type=int,
        required=True,
        help="The number of attention heads"
    )
    parser.add_argument(
        "--data-type", "-data_type", "-d", choices=["fp32", "fp16"], default="fp32", help=" data type of the parameters"
    )
    parser.add_argument(
        '--rotary-interleaved-patch', action='store_true',
        help='Patch for loading models using interleaved rotary position embeddings.')

    args = parser.parse_args()
    print("\n=============== Argument ===============")
    for key, value in vars(args).items():
        print(f"{key}: {value}")
    print("========================================")

    # Fixed file names: HF-style input, Megatron-style output.
    src_state_dict_path = os.path.join(args.input_dir, "pytorch_model.bin")
    src_state_dict = torch.load(src_state_dict_path, map_location="cpu")
    dst_state_dict_path = os.path.join(args.output_dir, "model_optim_rng.pt")
    start_time = datetime.datetime.now()
    convert_checkpoint(src_state_dict, dst_state_dict_path, args)
    run_time = datetime.datetime.now() - start_time
    print(f"[INFO] Spent {run_time} (h:m:s) to convert the model")


# Run the conversion only when executed as a script, so the module can be
# imported (e.g. for its converter functions) without side effects.
if __name__ == "__main__":
    main()
