# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Convert checkpoint from torch/facebook"""
import argparse
import torch
import mindspore as ms
import mindspore.ops as P


def split_param(torch_pth_path):
    """Load a torch checkpoint and partition its parameters into four groups.

    Args:
        torch_pth_path (str): path to the torch ``.pth`` checkpoint.

    Returns:
        tuple[dict, dict, dict, dict]: (vit, qformer, llm, other) parameter
        dicts, selected by substring match on the parameter name.
    """
    state_dict = torch.load(torch_pth_path, map_location=torch.device("cpu"))

    vit_params_dict = {}
    qformer_params_dict = {}
    llm_params_dict = {}
    other_params_dict = {}

    for name, tensor in state_dict.items():
        # Route each parameter to its sub-model bucket by name substring.
        if "vision_encoder" in name or "visual_encoder" in name:
            bucket = vit_params_dict
        elif "qformer" in name or "bert" in name:
            bucket = qformer_params_dict
        elif "llama_model" in name:
            bucket = llm_params_dict
        else:
            bucket = other_params_dict
        bucket[name] = tensor

    print(f"all param num is {len(state_dict)}, vit param num is {len(vit_params_dict)}, "
          f"qformer param num is {len(qformer_params_dict)}, llm param num is {len(llm_params_dict)}, "
          f"other param num is {len(other_params_dict)}")
    return vit_params_dict, qformer_params_dict, llm_params_dict, other_params_dict


def vit_convert_weight(param_dict, ms_ckpt_path):
    """
    convert mae_vit_base_p16 weights from pytorch to mindspore
    pytorch and GPU required.

    Args:
        param_dict (dict): torch name -> torch.Tensor for the vision encoder.
        ms_ckpt_path (str): output path of the mindspore checkpoint.

    Returns:
        list[dict]: converted parameters as {"name": str, "data": ms.Tensor}.
    """
    new_dict = []
    # Zero-valued k-bias, fabricated once the first q_bias is seen (the torch
    # checkpoint stores q/v biases but no k bias).
    dense2_bias = None
    for k, v in param_dict.items():
        # BUGFIX: str.replace returns a new string; the original call's result
        # was discarded, so the rename never took effect.
        if "vision_encoder" in k:
            k = k.replace("vision_encoder", "visual_encoder")
        if k in ("cls_token", "mask_token"):
            k += "s"
        if "head" not in k:
            k = "vit." + k
        if "norm" in k:
            if "fc_norm" not in k:
                k = k.replace("norm", "layernorm")
            # LayerNorm weight/bias are named gamma/beta in mindspore.
            if "weight" in k:
                k = k.replace("weight", "gamma")
            elif "bias" in k:
                k = k.replace("bias", "beta")
        if "mlp" in k:
            k = k.replace("mlp", "output")
            if "fc1" in k:
                k = k.replace("fc1", "mapping")
                if "weight" in k:
                    # linear weights are stored transposed on the ms side
                    v = v.transpose(-1, 0)
            elif "fc2" in k:
                k = k.replace("fc2", "projection")
                if "weight" in k:
                    v = v.transpose(-1, 0)
        if "attn" in k:
            k = k.replace("attn", "attention")
            if "proj" in k:
                k = k.replace("proj", "projection")
                if "weight" in k:
                    v = v.transpose(-1, 0)
        if "q_bias" in k:
            k = k.replace("q_bias", "dense1.bias")
            if dense2_bias is None:
                dense2_bias = P.zeros_like(ms.Tensor(v.numpy()))
        if "v_bias" in k:
            k = k.replace("v_bias", "dense3.bias")

        if "patch_embed.proj.weight" in k:
            # fold axes 1 and 2 together: (a, b, c, d, e) -> (a, b*c, d, e)
            a, b, c, d, e = v.shape
            v = P.Reshape()(v, (a, b * c, d, e))

        if "qkv" not in k:
            new_dict.append({"name": k, "data": ms.Tensor(v.numpy())})
        else:
            # fused qkv weight: split the first axis into q/k/v thirds
            data = ms.Tensor(v.numpy())
            length = data.shape[0] // 3
            new_dict.append({"name": k.replace(".qkv", ".dense1"), "data": data[:length]})
            new_dict.append({"name": k.replace(".qkv", ".dense2"), "data": data[length:length*2]})
            new_dict.append({"name": k.replace(".qkv", ".dense3"), "data": data[length*2:]})

    # BUGFIX: only append the fabricated k-bias when a q_bias was actually
    # present; previously a checkpoint without q_bias appended data=None and
    # produced an invalid checkpoint entry.
    if dense2_bias is not None:
        new_dict.append({"name": "dense2.bias", "data": dense2_bias})
    ms.save_checkpoint(new_dict, ms_ckpt_path)

    print(f"vit converted weight param num is {len(new_dict)}")
    return new_dict


def llm_convert_ckpt(param_dict, ms_ckpt_path, dtype=ms.float16):
    """convert hf weight to ms.

    Args:
        param_dict (dict): torch name -> torch.Tensor of the llama model.
        ms_ckpt_path (str): output path of the mindspore checkpoint.
        dtype: mindspore dtype for the saved tensors. Default: ms.float16.

    Returns:
        list[dict]: converted parameters as {"name": str, "data": ms.Tensor}.
    """

    def name_replace(name: str):
        """replace hf param name to ms."""
        name = name.replace('embed_tokens.weight', 'tok_embeddings.embedding_weight')
        name = name.replace('.self_attn.q_proj.', '.attention.wq.')
        name = name.replace('.self_attn.k_proj.', '.attention.wk.')
        name = name.replace('.self_attn.v_proj.', '.attention.wv.')
        name = name.replace('.self_attn.o_proj.', '.attention.wo.')
        name = name.replace('.mlp.gate_proj.', '.feed_forward.w1.')
        name = name.replace('.mlp.down_proj.', '.feed_forward.w2.')
        name = name.replace('.mlp.up_proj.', '.feed_forward.w3.')
        name = name.replace('.input_layernorm.', '.attention_norm.')
        name = name.replace('.post_attention_layernorm.', '.ffn_norm.')
        name = name.replace('.norm.', '.norm_out.')
        return name

    ckpt_list = []
    for name, value in param_dict.items():
        name = name_replace(name)
        # top-level final norm has no surrounding dots, handle it explicitly
        if name == 'norm.weight':
            name = 'norm_out.weight'
        # drop the leading 'layers.' module prefix
        if name[:7] == 'layers.':
            name = name[7:]
        value = value.numpy()
        print(f'\rprocessing parameter: {name} {value.shape}     ', end='', flush=True)
        ckpt_list.append({'name': name, 'data': ms.Tensor(value, dtype=dtype)})

    ms.save_checkpoint(ckpt_list, ms_ckpt_path)
    # BUGFIX: report the output file path; the original printed the whole
    # param_dict (thousands of tensors) instead of ms_ckpt_path.
    print(f"\rConvert huggingface checkpoint finished, the mindspore checkpoint is saved in '{ms_ckpt_path}'.",
          flush=True)
    print(f"llm converted weight param num is {len(ckpt_list)}")
    return ckpt_list


def qformer_convert_weight(param_dict, ms_ckpt_path):
    r"""Convert Weight
    Convert blip2_qformer weights from pytorch to mindspore,

    Args:
        param_dict (dict): torch name -> torch.Tensor for the qformer/bert part.
        ms_ckpt_path (str): output path of the mindspore checkpoint.

    Returns:
        list[dict]: converted parameters as {"name": str, "data": ms.Tensor}.
    """
    name_pt2ms = {
        "cls_token": "cls_tokens",
        "attn.proj": "attention.projection",
        "attn.q_bias": "attention.dense1.bias",
        "attn.v_bias": "attention.dense3.bias",
        "norm1.weight": "layernorm1.gamma",
        "norm1.bias": "layernorm1.beta",
        "norm2.weight": "layernorm2.gamma",
        "norm2.bias": "layernorm2.beta",
        "fc_norm.weight": "fc_norm.gamma",
        "fc_norm.bias": "fc_norm.beta",
        "ln_vision.weight": "ln_vision.gamma",
        "ln_vision.bias": "ln_vision.beta",
        # BUGFIX: weight and bias of the same torch linear were previously
        # mapped to different ms layers (fc2.weight -> mapping.weight but
        # fc1.bias -> mapping.bias). Keep fc1 -> mapping and fc2 -> projection,
        # consistent with vit_convert_weight.
        "mlp.fc1.weight": "output.mapping.weight",
        "mlp.fc1.bias": "output.mapping.bias",
        "mlp.fc2.weight": "output.projection.weight",
        "mlp.fc2.bias": "output.projection.bias",
        "LayerNorm.": "layernorm.",
        "layernorm.weight": "layernorm.gamma",
        "layernorm.bias": "layernorm.beta",
        "embeddings.weight": "embeddings.embedding_table",
        "self": "self_att",
    }
    ms_param_dict = []
    for pt_name, pt_tensor in param_dict.items():
        # initial name assign
        ms_name = pt_name
        # extract data as float32
        numpy_value = pt_tensor.to(dtype=torch.float32).detach().numpy()
        data = ms.Tensor.from_numpy(numpy_value)
        # split qkv weights
        if "qkv.weight" in pt_name:
            # fused qkv weight: split the first axis into q/k/v thirds
            length = pt_tensor.shape[0] // 3
            ms_name1 = pt_name.replace("attn.qkv", "attention.dense1")
            ms_name2 = pt_name.replace("attn.qkv", "attention.dense2")
            ms_name3 = pt_name.replace("attn.qkv", "attention.dense3")
            ms_param_dict.append({"name": ms_name1, "data": data[:length]})
            ms_param_dict.append({"name": ms_name2, "data": data[length:length*2]})
            ms_param_dict.append({"name": ms_name3, "data": data[length*2:length*3]})
            print("rename {} to {}, {} and {}".format(pt_name, ms_name1, ms_name2, ms_name3))
        else:
            #  Rename
            for replace_from, replace_to in name_pt2ms.items():
                ms_name = ms_name.replace(replace_from, replace_to)
            # hoisted out of the rename loop: one replace is sufficient
            ms_name = ms_name.replace("Qformer.", "qformer.")
            # linear weights are stored transposed on the ms side
            if ms_name.endswith("output.mapping.weight") or \
                    ms_name.endswith("output.projection.weight") or \
                    ms_name.endswith("attention.projection.weight"):
                data = data.T
            ms_param_dict.append({"name": ms_name, "data": data})
            # when loading each query-bias, append a zero value key-bias.
            if ms_name.endswith("attention.dense1.bias"):
                ms_param_dict.append({
                    "name": ms_name.replace("dense1", "dense2"),
                    "data": P.zeros_like(data)
                })
            if ms_name != pt_name:
                print(f"rename {pt_name} to {ms_name}")

    ms.save_checkpoint(ms_param_dict, ms_ckpt_path)
    print(f"qformer converted weight param num is {len(ms_param_dict)}")
    return ms_param_dict


def other_convert_ckpt(param_dict, ms_ckpt_path):
    """Convert the leftover (non vit/qformer/llm) torch params to mindspore.

    Only the parameters listed in the rename table are kept; anything else in
    ``param_dict`` is silently dropped.

    Args:
        param_dict (dict): torch name -> torch.Tensor of the remaining params.
        ms_ckpt_path (str): output path of the mindspore checkpoint.

    Returns:
        list[dict]: converted parameters as {"name": str, "data": ms.Tensor}.
    """
    name_map = {
        "query_tokens": "query_token",
        "extra_query_tokens": "extra_query_tokens",
        "vision_layernorm.weight": "ln_vision.gamma",
        "vision_layernorm.bias": "ln_vision.beta",
        "llama_proj.weight": "llm_proj.weight",
        "llama_proj.bias": "llm_proj.bias"
    }

    new_dict = [
        {"name": name_map[pt_name], "data": ms.Tensor(tensor.numpy())}
        for pt_name, tensor in param_dict.items()
        if pt_name in name_map
    ]

    ms.save_checkpoint(new_dict, ms_ckpt_path)
    print(f"other converted weight param num is {len(new_dict)}")
    return new_dict


def main(opt):
    """Split the torch checkpoint, convert each part, then save a merged ckpt.

    Writes one mindspore checkpoint per sub-model (vit/qformer/llm/other) plus
    a combined checkpoint at ``opt.ms_ckpt_path``.
    """
    vit_pt, qformer_pt, llm_pt, other_pt = split_param(opt.torch_pth_path)

    all_param_dict = []
    # Conversion order matches the original: vit, qformer, llm, other.
    all_param_dict.extend(vit_convert_weight(vit_pt, opt.vit_ms_ckpt_path))
    all_param_dict.extend(qformer_convert_weight(qformer_pt, opt.qformer_ms_ckpt_path))
    all_param_dict.extend(llm_convert_ckpt(llm_pt, opt.llm_ms_ckpt_path))
    all_param_dict.extend(other_convert_ckpt(other_pt, opt.other_ms_ckpt_path))

    ms.save_checkpoint(all_param_dict, opt.ms_ckpt_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="mae vit weight convert script")
    parser.add_argument("--torch_pth_path",
                        type=str,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/videochat2_7b_stage2.pth",
                        # required=True,
                        help="The torch checkpoint path.")
    parser.add_argument("--vit_ms_ckpt_path",
                        type=str,
                        # required=True,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/vit.ckpt",
                        help="The output mindspore checkpoint path.")
    parser.add_argument("--qformer_ms_ckpt_path",
                        type=str,
                        # required=True,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/qformer.ckpt",
                        help="The output mindspore checkpoint path.")
    parser.add_argument("--llm_ms_ckpt_path",
                        type=str,
                        # required=True,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/llm.ckpt",
                        help="The output mindspore checkpoint path.")
    parser.add_argument("--other_ms_ckpt_path",
                        type=str,
                        # required=True,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/other.ckpt",
                        help="The output mindspore checkpoint path.")
    parser.add_argument("--ms_ckpt_path",
                        type=str,
                        # required=True,
                        default="/home/zhangyouwen/suite/mobile_commucation/video_chat2_mf/weights_dir/all.ckpt",
                        help="The output mindspore checkpoint path.")
    opt = parser.parse_args()

    main(opt)
