"""
This script is based on Megatron's checkpoint_loader_megatron.py, but it only loads the model checkpoint without building the model.
"""
import json
import os
import sys
import types

import torch
from checkpoint_util_lite import (
    get_model_ckpt_paths,
    get_num_layers_from_ckpt,
)


def add_arguments(parser):
    """Register the Megatron-loader command-line options on *parser*.

    Adds --true-vocab-size, --vocab-file and --megatron-path to a dedicated
    argument group; all options default to None (i.e. optional).
    """
    group = parser.add_argument_group(title="Megatron loader")

    group.add_argument(
        "--true-vocab-size",
        type=int,
        default=None,
        help="original size of vocab, if specified will trim padding from embedding table.",
    )
    group.add_argument(
        "--vocab-file",
        type=str,
        default=None,
        help="Path to the vocab file. If specified will use this to get vocab size and "
        "trim padding from the embedding table.",
    )
    group.add_argument(
        "--megatron-path",
        type=str,
        default=None,
        # Previous help text said "deepspeed repository" — a copy-paste
        # artifact; this flag points at the Megatron repository (see the
        # import-failure message in _load_checkpoint).
        help="Base directory of the Megatron repository",
    )


def _load_checkpoint(queue, args):
    """Load a Megatron checkpoint directly from its per-rank files (without
    building the model) and stream its contents to the consumer via ``queue``.

    Messages are sent in a fixed order: a metadata namespace, "optimizer
    info", "embeddings", one message per transformer layer, "final
    layernorm", optionally "output layer", then the sentinel string "done".
    On any fatal error the sentinel "exit" is put on the queue before the
    process exits so the consumer does not hang.
    """
    # Search in directory above this
    sys.path.append(
        os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    )
    if args.megatron_path is not None:
        sys.path.insert(0, args.megatron_path)

    try:
        from megatron.arguments import parse_args, validate_args, _print_args
        from megatron.global_vars import set_args, set_global_variables
        from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint
        from megatron.model import module
        from megatron.core import mpu
        from megatron.core.enums import ModelType
        from megatron import fused_kernels
    except ModuleNotFoundError:
        print(
            "Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting."
        )
        queue.put("exit")
        exit(1)

    # We want all arguments to come from us, not the real command line.
    sys.argv = [
        "script.py",
        "--no-masked-softmax-fusion",
        "--no-bias-gelu-fusion",
        "--no-bias-dropout-fusion",
        "--no-async-tensor-model-parallel-allreduce",
        "--use-cpu-initialization",
        "--micro-batch-size",
        "1",
        "--no-load-optim",
        "--no-load-rng",
        "--no-save-optim",
        "--no-save-rng",
        "--no-initialization",
        "--load",
        args.load_dir,
    ]

    margs = parse_args()
    margs, checkpoint_args = load_args_from_checkpoint(margs)

    # Arguments do sanity checks on the world size, but we don't care,
    # so trick it into thinking we are plenty of processes
    margs.world_size = (
        margs.tensor_model_parallel_size * margs.pipeline_model_parallel_size
    )

    margs = validate_args(margs)

    def check_for_arg(arg_name, default=None):
        # Abort the whole conversion unless the checkpoint specified
        # `arg_name` (or a fallback `default` is available to install).
        if getattr(margs, arg_name, None) is None:
            if default is not None:
                setattr(margs, arg_name, default)
            else:
                print(f"Checkpoint does not specify the argument {arg_name}. Exiting.")
                print(f"Arguments: {margs}")
                queue.put("exit")
                exit(1)

    check_for_arg("tensor_model_parallel_size")
    check_for_arg("pipeline_model_parallel_size")
    check_for_arg("num_layers")
    check_for_arg("hidden_size")
    check_for_arg("seq_length")
    check_for_arg("num_attention_heads")
    check_for_arg("max_position_embeddings")
    check_for_arg("position_embedding_type")
    check_for_arg("tokenizer_type")
    check_for_arg("iteration")
    check_for_arg("bert_binary_head")
    check_for_arg("disable_bias_linear", False)
    check_for_arg("params_dtype")
    check_for_arg("swiglu", False)

    # Determine how to make our models
    if args.model_type == "GPT":
        # from pretrain_gpt import model_provider
        margs.model_type = ModelType.encoder_or_decoder
    else:
        raise Exception(f"unrecognized model type: {args.model_type}")

    set_global_variables(margs, build_tokenizer=False)

    # Get true (non-padded) vocab size.  When both --true-vocab-size and
    # --vocab-file are given, cross-check that they agree.  (The original
    # code had this consistency check in an unreachable elif branch.)
    true_vocab_size = args.true_vocab_size
    if args.vocab_file is not None:
        with open(args.vocab_file) as vocab_file:
            vocab = json.load(vocab_file)
        if true_vocab_size is None:
            true_vocab_size = len(vocab)
        elif true_vocab_size != len(vocab):
            print(
                "Both --true-vocab-size and --vocab-file specified and the vocab size does not match, aborting."
            )
            queue.put("exit")
            exit(1)

    # short aliases
    tp_size = margs.tensor_model_parallel_size
    pp_size = margs.pipeline_model_parallel_size
    vp_size = margs.virtual_pipeline_model_parallel_size
    if vp_size is None:
        vp_size = 1

    # metadata describing the checkpoint; sent to the consumer first
    md = types.SimpleNamespace()
    md.load = margs.load
    md.model_type = args.model_type
    md.num_layers = margs.num_layers
    md.hidden_size = margs.hidden_size
    md.seq_length = margs.seq_length
    md.num_attention_heads = margs.num_attention_heads
    md.max_position_embeddings = margs.max_position_embeddings
    md.tokenizer_type = margs.tokenizer_type
    md.iteration = margs.iteration
    md.params_dtype = margs.params_dtype
    md.bert_binary_head = margs.bert_binary_head
    md.output_layer = margs.untie_embeddings_and_output_weights
    md.position_embedding_type = margs.position_embedding_type
    md.linear_bias = margs.add_bias_linear
    md.swiglu = margs.swiglu
    md.previous_tensor_parallel_size = margs.tensor_model_parallel_size
    md.previous_pipeline_parallel_size = margs.pipeline_model_parallel_size
    md.true_vocab_size = true_vocab_size
    md.make_vocab_size_divisible_by = margs.make_vocab_size_divisible_by
    md.checkpoint_args = checkpoint_args
    md.apply_layernorm_rms = margs.apply_layernorm_rms

    consumed_train_samples = None
    consumed_valid_samples = None
    model_ckpt_paths = get_model_ckpt_paths(md.load, tp_size, pp_size, md.iteration)
    model_ckpts = [[None for _ in range(pp_size)] for _ in range(tp_size)]

    def get_model_ckpt(model_ckpts, model_ckpt_paths, tp_rank, pp_rank):
        # Lazily load (and cache) the rank file for (tp_rank, pp_rank) while
        # checking that every rank agrees on the consumed sample counters.
        nonlocal consumed_train_samples
        nonlocal consumed_valid_samples
        if model_ckpts[tp_rank][pp_rank] is None:
            ckpt = torch.load(model_ckpt_paths[tp_rank][pp_rank], map_location="cpu")
            model_ckpts[tp_rank][pp_rank] = ckpt
            if consumed_train_samples is not None:
                assert ckpt["args"].consumed_train_samples == consumed_train_samples
            else:
                consumed_train_samples = ckpt["args"].consumed_train_samples
            if consumed_valid_samples is not None:
                assert ckpt["args"].consumed_valid_samples == consumed_valid_samples
            else:
                consumed_valid_samples = ckpt["args"].consumed_valid_samples
        return model_ckpts[tp_rank][pp_rank]

    def get_weight_or_bias(model_ckpt, layer_num, vp_size, vp_rank, key, bias=False):
        # Fetch one tensor out of a loaded rank checkpoint.  `layer_num` only
        # matters for per-layer keys; `bias=True` selects the bias tensor
        # where one exists.  Key strings are identical to the originals.
        model_key = "model" + str(vp_rank) if vp_size > 1 else "model"
        lm = model_ckpt[model_key]["language_model"]
        suffix = "bias" if bias else "weight"
        if key == "word_embeddings":
            return lm["embedding"]["word_embeddings"]["weight"]
        if key in ("input_layernorm", "post_attention_layernorm"):
            return lm["encoder"][f"layers.{layer_num}.{key}.{suffix}"]
        if key in ("query_key_value", "dense"):
            return lm["encoder"][f"layers.{layer_num}.self_attention.{key}.{suffix}"]
        if key in ("dense_h_to_4h", "dense_4h_to_h"):
            return lm["encoder"][f"layers.{layer_num}.mlp.{key}.{suffix}"]
        if key == "final_layernorm":
            return lm["encoder"][f"final_layernorm.{suffix}"]
        if key == "output_layer":
            return lm["output_layer"]["weight"]
        print("[WARNING]: unrecognized key: " + key)

    # Loading rank (0, 0) populates the consumed-sample counters used in md.
    model_ckpt = get_model_ckpt(model_ckpts, model_ckpt_paths, 0, 0)
    md.consumed_train_samples = consumed_train_samples
    md.consumed_valid_samples = consumed_valid_samples

    queue.put(md)

    def queue_put(name, msg):
        # Tag the message dict with its name and forward it to the consumer.
        print(f"sending {name}")
        msg["name"] = name
        queue.put(msg)

    message = {
        "optimizer": model_ckpt['optimizer'],
        "opt_param_scheduler": model_ckpt['opt_param_scheduler'],
    }
    queue_put("optimizer info", message)

    # Send embeddings, concatenated across tensor-parallel ranks
    assert md.position_embedding_type != "learned_absolute"
    message = {
        "word embeddings": torch.cat(
            [
                get_weight_or_bias(
                    get_model_ckpt(model_ckpts, model_ckpt_paths, tp_rank, 0),
                    None,
                    vp_size,
                    0,
                    "word_embeddings",
                )
                for tp_rank in range(tp_size)
            ],
            dim=0,
        ),
    }
    queue_put("embeddings", message)

    total_layer_num = 0
    for vp_rank in range(vp_size):
        for pp_rank in range(pp_size):
            model_ckpt = get_model_ckpt(model_ckpts, model_ckpt_paths, 0, pp_rank)
            num_layers = get_num_layers_from_ckpt(model_ckpt, vp_size, vp_rank)
            for layer_num in range(num_layers):
                message = {}

                # Get non-parallel tensors from tp_rank 0
                model_ckpt = get_model_ckpt(model_ckpts, model_ckpt_paths, 0, pp_rank)
                message["input layernorm weight"] = get_weight_or_bias(
                    model_ckpt, layer_num, vp_size, vp_rank, "input_layernorm"
                )
                if not md.apply_layernorm_rms:
                    # RMSNorm layers carry no bias term
                    message["input layernorm bias"] = get_weight_or_bias(
                        model_ckpt, layer_num, vp_size, vp_rank, "input_layernorm", True
                    )
                message["post layernorm weight"] = get_weight_or_bias(
                    model_ckpt, layer_num, vp_size, vp_rank, "post_attention_layernorm"
                )
                if not md.apply_layernorm_rms:
                    message["post layernorm bias"] = get_weight_or_bias(
                        model_ckpt,
                        layer_num,
                        vp_size,
                        vp_rank,
                        "post_attention_layernorm",
                        True,
                    )
                if md.linear_bias:
                    message["dense bias"] = get_weight_or_bias(
                        model_ckpt, layer_num, vp_size, vp_rank, "dense", True
                    )
                    message["mlp l1 bias"] = get_weight_or_bias(
                        model_ckpt, layer_num, vp_size, vp_rank, "dense_4h_to_h", True
                    )

                # Grab all parallel tensors for this layer
                qkv_weight = []
                qkv_bias = []
                dense_weight = []
                mlp_l0_weight = []
                mlp_l0_bias = []
                mlp_l1_weight = []

                for tp_rank in range(tp_size):
                    model_ckpt = get_model_ckpt(
                        model_ckpts, model_ckpt_paths, tp_rank, pp_rank
                    )
                    qkv_weight.append(
                        get_weight_or_bias(
                            model_ckpt, layer_num, vp_size, vp_rank, "query_key_value"
                        )
                    )
                    dense_weight.append(
                        get_weight_or_bias(
                            model_ckpt, layer_num, vp_size, vp_rank, "dense"
                        )
                    )
                    mlp_l0_weight.append(
                        get_weight_or_bias(
                            model_ckpt, layer_num, vp_size, vp_rank, "dense_h_to_4h"
                        )
                    )
                    mlp_l1_weight.append(
                        get_weight_or_bias(
                            model_ckpt, layer_num, vp_size, vp_rank, "dense_4h_to_h"
                        )
                    )
                    if md.linear_bias:
                        qkv_bias.append(
                            get_weight_or_bias(
                                model_ckpt,
                                layer_num,
                                vp_size,
                                vp_rank,
                                "query_key_value",
                                True,
                            )
                        )
                        # BUG FIX: this bias used to be appended to
                        # mlp_l0_weight, corrupting the weight list and
                        # leaving mlp_l0_bias empty for the cat below.
                        mlp_l0_bias.append(
                            get_weight_or_bias(
                                model_ckpt,
                                layer_num,
                                vp_size,
                                vp_rank,
                                "dense_h_to_4h",
                                True,
                            )
                        )

                # Handle gated linear units
                if md.swiglu:
                    # concat all the first halves ('W's) and all the second halves ('V's)
                    for tp_rank in range(tp_size):
                        mlp_l0_weight[tp_rank] = torch.chunk(
                            mlp_l0_weight[tp_rank], 2, dim=0
                        )
                    message["mlp l0 weight W"] = torch.cat(
                        [w[0] for w in mlp_l0_weight], dim=0
                    )
                    message["mlp l0 weight V"] = torch.cat(
                        [w[1] for w in mlp_l0_weight], dim=0
                    )
                else:
                    message["mlp l0 weight"] = torch.cat(mlp_l0_weight, dim=0)

                # simple concat of the rest (dim 0 = column-parallel,
                # dim 1 = row-parallel tensors)
                message["qkv weight"] = torch.cat(qkv_weight, dim=0)
                message["dense weight"] = torch.cat(dense_weight, dim=1)
                message["mlp l1 weight"] = torch.cat(mlp_l1_weight, dim=1)
                if md.linear_bias:
                    message["qkv bias"] = torch.cat(qkv_bias, dim=0)
                    if md.swiglu:
                        for tp_rank in range(tp_size):
                            mlp_l0_bias[tp_rank] = torch.chunk(
                                mlp_l0_bias[tp_rank], 2, dim=0
                            )
                        message["mlp l0 bias W"] = torch.cat(
                            [b[0] for b in mlp_l0_bias], dim=0
                        )
                        message["mlp l0 bias V"] = torch.cat(
                            [b[1] for b in mlp_l0_bias], dim=0
                        )
                    else:
                        message["mlp l0 bias"] = torch.cat(mlp_l0_bias, dim=0)

                queue_put(f"transformer layer {total_layer_num}", message)

                total_layer_num = total_layer_num + 1

    # After the loops, pp_rank/vp_rank hold their final values, i.e. the
    # last pipeline stage — where final layernorm and output layer live.
    model_ckpt = get_model_ckpt(model_ckpts, model_ckpt_paths, 0, pp_rank)
    # Send final layernorm from tp_rank 0
    if not md.apply_layernorm_rms:
        message = {
            "weight": get_weight_or_bias(
                model_ckpt, None, vp_size, vp_rank, "final_layernorm"
            ),
            "bias": get_weight_or_bias(
                model_ckpt, None, vp_size, vp_rank, "final_layernorm", True
            ),
        }
    else:
        message = {
            "weight": get_weight_or_bias(
                model_ckpt, None, vp_size, vp_rank, "final_layernorm"
            ),
        }
    queue_put("final layernorm", message)

    if md.output_layer:
        # Send output_layer weight, concatenated across tensor-parallel ranks
        message = {
            "weight": torch.cat(
                [
                    get_weight_or_bias(
                        get_model_ckpt(model_ckpts, model_ckpt_paths, tp_rank, pp_rank),
                        None,
                        vp_size,
                        vp_rank,
                        "output_layer",
                    )
                    for tp_rank in range(tp_size)
                ],
                dim=0,
            )
        }
        queue_put("output layer", message)

    queue.put("done")


def load_checkpoint(queue, args):
    """Public entry point: run ``_load_checkpoint`` and, on any failure,
    put the sentinel "exit" on *queue* before re-raising so the consumer
    on the other end of the queue does not hang waiting for messages.
    """
    try:
        _load_checkpoint(queue, args)
    except BaseException:
        # Explicit BaseException (instead of a bare `except:`) keeps the
        # identical semantics — even KeyboardInterrupt/SystemExit must
        # notify the consumer — while making the intent lint-clean.
        queue.put("exit")
        raise
