# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Convert llama weight.
Support mindspore format and Meta format.
"""

import time
import json
import argparse
import os
import shutil
import numpy as np
import safetensors.torch
import torch
import mindspore as ms


# Run the conversion on CPU so no accelerator device is required.
ms.set_device("CPU")


# Bytes per element for each supported MindSpore dtype; used below to
# compute the `total_size` entry of the safetensors index file.
DTYPE_SIZE = {
    ms.int8: 1,
    ms.uint8: 1,
    ms.float16: 2,
    ms.bfloat16: 2,
    ms.float32: 4,
}


# Maps user-facing dtype strings (long and short spellings) to MindSpore
# dtypes for the optional cast applied before saving.
DTYPE_MS = {
    "int8":ms.int8,
    "uint8": ms.uint8,
    "float16": ms.float16,
    "bfloat16": ms.bfloat16,
    "float32": ms.float32,
    'fp32': ms.float32,
    'bf16': ms.bfloat16,
    'fp16': ms.float16,
}


def read_json(path):
    """Load and return the JSON content stored at ``path``.

    Args:
        path (str): Path to a JSON file.

    Returns:
        The deserialized JSON object (typically a dict).
    """
    # Explicit encoding avoids the platform-dependent default (e.g. cp1252
    # on Windows) silently corrupting non-ASCII config content.
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def name_replace(name: str):
    """replace ms param name to hf."""
    # Ordered (old, new) substring substitutions; applied sequentially so
    # earlier prefix-stripping happens before the per-layer renames.
    substitutions = (
        ('grpo_model_train.policy_model.model.', ''),
        ('grpo_model.policy_model.', ''),
        ('tok_embeddings.embedding_weight', 'embed_tokens.weight'),
        ('.attention.wq.', '.self_attn.q_proj.'),
        ('.attention.wk.', '.self_attn.k_proj.'),
        ('.attention.wv.', '.self_attn.v_proj.'),
        ('.attention.wo.', '.self_attn.o_proj.'),
        ('.attention.q_norm.', '.self_attn.q_norm.'),
        ('.attention.k_norm.', '.self_attn.k_norm.'),
        ('.feed_forward.w1.', '.mlp.gate_proj.'),
        ('.feed_forward.w2.', '.mlp.down_proj.'),
        ('.feed_forward.w3.', '.mlp.up_proj.'),
        ('.attention_norm.', '.input_layernorm.'),
        ('.ffn_norm.', '.post_attention_layernorm.'),
        ('.norm_out.', '.norm.'),
    )
    for old, new in substitutions:
        name = name.replace(old, new)
    if 'lora' in name:
        # PEFT-style LoRA naming: lora_a/lora_b -> lora_A.weight/lora_B.weight,
        # drop the mindpet marker, and prefix with 'base_model.model.'.
        for old, new in (
                ('lora_a', 'lora_A.weight'),
                ('lora_b', 'lora_B.weight'),
                ('mindpet_delta_', ''),
        ):
            name = name.replace(old, new)
        if not name.startswith('base_model.model.'):
            name = 'base_model.model.' + name
    return name


def get_strategy(strategy_path, rank_id=None):
    """Merge strategy if strategy path is dir

    Args:
        strategy_path (str): The path of strategy.
        rank_id (int): The rank id of device.

    Returns:
        None or strategy path
    """
    # Treat empty/None and the literal string "None" (from CLI) as "no strategy".
    if not strategy_path or strategy_path == "None":
        return None

    if not os.path.exists(strategy_path):
        raise ValueError(f'{strategy_path} not found!')

    if os.path.isfile(strategy_path):
        return strategy_path

    if os.path.isdir(strategy_path):
        # Use `is not None` so rank 0 also gets a rank-suffixed file; the
        # previous `if rank_id:` treated rank 0 the same as "no rank given".
        if rank_id is not None:
            merge_path = os.path.join(strategy_path, f'merged_ckpt_strategy_{rank_id}.ckpt')
        else:
            merge_path = os.path.join(strategy_path, 'merged_ckpt_strategy.ckpt')

        # Remove any stale merged file so the merge below writes fresh output.
        if os.path.exists(merge_path):
            os.remove(merge_path)

        ms.merge_pipeline_strategys(strategy_path, merge_path)
        return merge_path

    return None


def load_distributed_checkpoint(src_dir, src_strategy, output_path, ckpt_format='ckpt'):
    """Unify a sharded safetensors checkpoint and return it with HF-style names."""
    if ckpt_format == 'ckpt':
        raise ValueError('`ckpt` need to be merged by `mindformers/tools/ckpt_transform/transform_checkpoint.py`')
    # Stage the unified shards in a `tmp` dir next to the final output.
    dst_dir = output_path if os.path.isdir(output_path) else os.path.dirname(output_path)
    tmp_dir = os.path.join(dst_dir, 'tmp')
    os.makedirs(tmp_dir, exist_ok=True)
    strategy_file = get_strategy(src_strategy)
    # Prefer a strict merge; fall back to merging with redundancy when the
    # strict variant rejects the shards.
    try:
        ms.unified_safetensors(src_dir, strategy_file, tmp_dir, max_process_num=64, merge_with_redundancy=False)
    except ValueError:
        ms.unified_safetensors(src_dir, strategy_file, tmp_dir, max_process_num=64, merge_with_redundancy=True)
    merged_params = {}
    for entry in os.listdir(tmp_dir):
        if not entry.endswith('.safetensors'):
            continue
        shard = ms.load_checkpoint(os.path.join(tmp_dir, entry), format='safetensors')
        for ms_name, value in shard.items():
            merged_params[name_replace(ms_name)] = value
    shutil.rmtree(tmp_dir)
    return merged_params


def load_lora_ckpt(ckpt_strategy, ori_ckpt_path, lora_ckpt_dir, dst_dir, lora_scaling):
    """Merge LoRA deltas into a base safetensors checkpoint.

    Args:
        ckpt_strategy (str or None): Parallel strategy path for the LoRA
            checkpoint; None means the LoRA weights are already unified.
        ori_ckpt_path (str): Path of the base (original) safetensors checkpoint.
        lora_ckpt_dir (str): Directory or file holding the LoRA safetensors.
        dst_dir (str): Directory used for the temporary merge workspace.
        lora_scaling (float): Scale applied to (B @ A) before adding to W.

    Returns:
        dict: The base checkpoint dict with LoRA deltas merged into it.
    """
    from mindspore import Parameter, Tensor, ops
    src_ckpt_strategy = get_strategy(ckpt_strategy)
    tmp_dir = os.path.join(dst_dir, 'tmp')

    if ckpt_strategy is None:
        src_lora_ckpt_path = lora_ckpt_dir
    else:
        # Unify the sharded LoRA checkpoint into tmp_dir; retry with
        # merge_with_redundancy=True when the strict merge raises ValueError.
        try:
            ms.unified_safetensors(lora_ckpt_dir, src_ckpt_strategy, tmp_dir, max_process_num=64, merge_with_redundancy=False)
        except ValueError:
            ms.unified_safetensors(lora_ckpt_dir, src_ckpt_strategy, tmp_dir, max_process_num=64, merge_with_redundancy=True)
        # NOTE(review): assumes the unified output is a single shard named
        # part0.safetensors — confirm for larger LoRA checkpoints.
        src_lora_ckpt_path = os.path.join(tmp_dir, "part0.safetensors")

    lora_dict = ms.load_checkpoint(src_lora_ckpt_path, format='safetensors')
    ori_dict = ms.load_checkpoint(ori_ckpt_path, format='safetensors')

    lora_keys = [k for k in lora_dict if 'lora_a' in k]
    for k in lora_keys:
        # Skip optimizer-state entries (Adam moments) that mirror param names.
        if k.split('.')[0] in ['adam_m', 'adam_v']:
            continue
        # e.g. '...wq.mindpet_delta_lora_a' -> '...wq.weight' (base parameter).
        original_key = k.replace('_lora_a', '').replace('mindpet_delta', 'weight')
        lora_a_key = k
        lora_b_key = k.replace('lora_a', 'lora_b')
        original_value = ori_dict[original_key]
        # W' = W + lora_scaling * (B @ A), cast back to the base dtype.
        ori_dict[original_key] = Parameter(
            Tensor(ops.add(original_value, ops.mm(lora_dict[lora_b_key], lora_dict[lora_a_key]) * lora_scaling),
                   original_value.dtype),
            name=original_key)
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    return ori_dict


def save_checkpoint(param_dict, output_path, dtype=None, ckpt_format='ckpt'):
    """Save a parameter dict as a MindSpore ckpt or HF-style safetensors file.

    Args:
        param_dict (dict): Mapping from parameter name to MindSpore tensor.
        output_path (str): Output directory (file is named 'transform'), or a
            file path whose basename is used as the output file name.
        dtype (str, optional): DTYPE_MS key to cast parameters to before saving.
        ckpt_format (str): 'ckpt' saves via MindSpore; anything else writes a
            torch safetensors file plus a `model.safetensors.index.json`.
    """
    if os.path.isdir(output_path):
        dst_dir = output_path
        file_name = 'transform'
    else:
        dst_dir = os.path.dirname(output_path)
        file_name = os.path.basename(output_path)
    if dtype is not None:
        param_dict = {name: ms.Parameter(value.astype(DTYPE_MS[dtype]))
                      for name, value in param_dict.items()}
    if ckpt_format == 'ckpt':
        ms.save_checkpoint(param_dict, os.path.join(dst_dir, file_name), format=ckpt_format)
        return
    state_dict = {}
    for key, value in param_dict.items():
        if dtype in ('bf16', 'bfloat16'):
            # numpy has no bfloat16: round-trip through float32 and let torch
            # downcast back to bfloat16.
            state_dict[key] = torch.tensor(value.astype(ms.float32).numpy()).to(torch.bfloat16)
        else:
            state_dict[key] = torch.tensor(value.numpy())
    safetensors.torch.save_file(
        state_dict, os.path.join(dst_dir, file_name + '.safetensors'), metadata={"format": "pt"}
    )
    # Build the HF-style index. `total_size` is an integer byte count per the
    # HF sharded-index schema (it was previously emitted as a string; int() on
    # np.prod also avoids a float for scalar shapes).
    total_size = 0
    for value in param_dict.values():
        total_size += DTYPE_SIZE[value.dtype] * int(np.prod(value.shape))
    index_dict = {
        'metadata': {"total_size": total_size},
        'weight_map': {key: file_name + '.safetensors' for key in param_dict},
    }
    index_file = os.path.join(dst_dir, 'model.safetensors.index.json')
    with open(index_file, 'w', encoding='utf-8') as f:
        json.dump(index_dict, f, indent=2)

def convert_ms_to_hf(input_path, output_path, ckpt_strategy=None, dtype=None, ckpt_format='ckpt',
                     lora_safetensors_path=None, lora_scaling=None):
    """convert ms weight to hf."""
    print(f"Trying to convert mindspore checkpoint in '{input_path}'.", flush=True)
    # Pick the loader: LoRA merge, single-file load, or distributed merge.
    if lora_safetensors_path is not None:
        model_ms = load_lora_ckpt(ckpt_strategy, input_path, lora_safetensors_path, output_path, lora_scaling)
    elif ckpt_strategy is None:
        model_ms = ms.load_checkpoint(input_path, format=ckpt_format)
    else:
        model_ms = load_distributed_checkpoint(input_path, ckpt_strategy, output_path, ckpt_format=ckpt_format)

    # Rename every parameter to its HuggingFace equivalent, logging each mapping.
    state_dict = {}
    for ms_name, value in model_ms.items():
        hf_name = name_replace(ms_name)
        print(f'{ms_name} -> {hf_name} shape:{value.shape}', flush=True)
        state_dict[hf_name] = value

    save_checkpoint(state_dict, output_path, dtype=dtype, ckpt_format='safetensors')
    print(f"\rConvert mindspore checkpoint finished, the huggingface checkpoint is saved in '{output_path}'.",
          flush=True)
    return True


if __name__ == "__main__":
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('--ms_safetensors_path', required=True, default='mindspore.safetensors')
    parser.add_argument('--hf_safetensors_path', default='transform/')
    parser.add_argument('--ckpt_format', default='safetensors')
    parser.add_argument('--ckpt_strategy', default=None)
    parser.add_argument('--dtype', default='bf16')
    parser.add_argument('--lora_safetensors_path', default=None)
    parser.add_argument('--lora_scaling',
                        default=1,
                        type=float,
                        help='scale of lora when merge model weight, default is lora_alpha/lora_rank')
    args = parser.parse_args()
    convert_ms_to_hf(
        input_path=args.ms_safetensors_path,
        output_path=args.hf_safetensors_path,
        ckpt_strategy=args.ckpt_strategy,
        dtype=args.dtype,
        ckpt_format=args.ckpt_format,
        lora_safetensors_path=args.lora_safetensors_path,
        lora_scaling=args.lora_scaling,
    )
    end = time.time()
    print('time:', end - start, flush=True)
