# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

""" run vllm """
import os
import argparse
import shutil
import json

import numpy as np
import yaml
import mindspore as ms
from mindformers.tools.utils import str2bool


def find_yaml_path(config_dir):
    """Locate the model YAML configuration file.

    The ``MINDFORMERS_MODEL_CONFIG`` environment variable takes precedence
    when set; otherwise the lexicographically first ``*yaml`` file found in
    *config_dir* is used.

    Args:
        config_dir (str): directory to search for a YAML config file.

    Returns:
        str: path to the YAML file, or '' when none is found.
    """
    env_config = os.environ.get('MINDFORMERS_MODEL_CONFIG')
    if env_config is not None:
        return env_config
    # Sort so the chosen file does not depend on filesystem listing order.
    for item in sorted(os.listdir(config_dir)):
        if item.endswith('yaml'):
            return os.path.join(config_dir, item)
    return ''


def clear_folder(folder_path):
    """Remove every file, symlink, and subdirectory inside *folder_path*.

    The directory itself is kept; only its contents are deleted.
    """
    print(f'delete all files in `{folder_path}`')
    for entry in os.listdir(folder_path):
        entry_path = os.path.join(folder_path, entry)
        # Files and symlinks (including symlinks to directories) are unlinked.
        if os.path.isfile(entry_path) or os.path.islink(entry_path):
            os.unlink(entry_path)
            continue
        # Real directories are removed recursively.
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)


def get_output_dir(output_dir):
    """Return an empty ``configs/`` working directory under *output_dir*.

    The directory is created if missing and any previous contents are
    removed, so callers always start from a clean directory.
    """
    configs_dir = os.path.join(output_dir, 'configs/')
    os.makedirs(configs_dir, exist_ok=True)
    clear_folder(configs_dir)
    return configs_dir


def get_init_config(config_dir, model_parallel_size, seed, max_seq_len, output_dir):
    """Build the prediction YAML config and the patched HF ``config.json``.

    Reads the mindformers YAML from *config_dir* plus the ``config.json`` and
    ``generation_config.json`` previously copied into *output_dir*, and
    derives a minimal predict-mode YAML configuration.

    Args:
        config_dir (str): directory holding the source mindformers YAML.
        model_parallel_size (int): tensor-model-parallel degree.
        seed (int): random seed written into the YAML config.
        max_seq_len (int): maximum sequence length (YAML ``seq_length``).
        output_dir (str): directory containing ``config.json`` and
            ``generation_config.json``.

    Returns:
        tuple(dict, dict): (new YAML config dict, patched HF config dict).
    """
    yaml_path = find_yaml_path(config_dir)
    with open(yaml_path, 'r') as f:
        yaml_dict = yaml.safe_load(f)
    with open(os.path.join(output_dir, 'config.json'), 'r') as f:
        json_dict = json.load(f)
    with open(os.path.join(output_dir, 'generation_config.json'), 'r') as f:
        generation_config_dict = json.load(f)

    # Keep only the sections relevant for prediction.
    new_yaml_dict = {key: yaml_dict[key]
                     for key in ('output_dir', 'model', 'processor', 'moe_config')
                     if key in yaml_dict}
    new_yaml_dict['load_checkpoint'] = ''
    new_yaml_dict['run_mode'] = 'predict'
    new_yaml_dict['parallel_config'] = {
        'data_parallel': 1,
        'model_parallel': model_parallel_size,
        'pipeline_stage': 1
    }
    new_yaml_dict['seed'] = seed
    new_yaml_dict['checkpoint_format'] = 'safetensors'
    new_yaml_dict['auto_trans_ckpt'] = True

    model_config = new_yaml_dict['model']['model_config']
    model_config['checkpoint_name_or_path'] = ''
    model_config['seq_length'] = max_seq_len
    model_config['qkv_concat'] = False
    model_config['use_past'] = True
    model_config['do_sample'] = generation_config_dict.get('do_sample', False)
    # Forward optional sampling parameters only when present in the
    # generation config so the YAML defaults stay in effect otherwise.
    for gen_key in ('eos_token_id', 'bos_token_id', 'pad_token_id',
                    'repetition_penalty', 'temperature', 'top_k', 'top_p'):
        if gen_key in generation_config_dict:
            model_config[gen_key] = generation_config_dict[gen_key]

    json_dict['torch_dtype'] = model_config.get('compute_dtype', 'bfloat16')
    json_dict['architectures'] = ['Qwen2ForCausalLM']
    json_dict['model_type'] = 'qwen2'

    return new_yaml_dict, json_dict


def copy_files(config_dir, output_dir):
    """Copy every ``*json`` and ``*txt`` file from *config_dir* into *output_dir*."""
    for entry in os.listdir(config_dir):
        if not entry.endswith(('json', 'txt')):
            continue
        shutil.copy(os.path.join(config_dir, entry),
                    os.path.join(output_dir, entry))


def name_replace_qwen(name: str):
    """Translate a MindSpore/GRPO parameter name to its HuggingFace Qwen2 equivalent."""
    # Ordered (old, new) substring substitutions; prefixes are stripped first.
    replacements = (
        ('grpo_model_train.policy_model.model.', ''),
        ('grpo_model.policy_model.', ''),
        ('tok_embeddings.embedding_weight', 'embed_tokens.weight'),
        ('.attention.wq.', '.self_attn.q_proj.'),
        ('.attention.wk.', '.self_attn.k_proj.'),
        ('.attention.wv.', '.self_attn.v_proj.'),
        ('.attention.wo.', '.self_attn.o_proj.'),
        ('.feed_forward.w1.', '.mlp.gate_proj.'),
        ('.feed_forward.w2.', '.mlp.down_proj.'),
        ('.feed_forward.w3.', '.mlp.up_proj.'),
        ('.attention_norm.', '.input_layernorm.'),
        ('.ffn_norm.', '.post_attention_layernorm.'),
        ('.norm_out.', '.norm.'),
    )
    for old, new in replacements:
        name = name.replace(old, new)
    return name


def load_safetensors(safetensors_dir):
    """Load and merge every ``*.safetensors`` checkpoint found in *safetensors_dir*.

    Returns a single dict of parameters; when the same key appears in
    multiple files, the one from the last file processed wins.
    """
    merged = {}
    for file_name in os.listdir(safetensors_dir):
        if not file_name.endswith('.safetensors'):
            continue
        checkpoint = ms.load_checkpoint(
            os.path.join(safetensors_dir, file_name), format='safetensors')
        merged.update(checkpoint)
    return merged


def get_strategy(strategy_path, rank_id=None):
    """Resolve a strategy file path, merging pipeline strategies for a directory.

    Args:
        strategy_path (str): path of a strategy file or a directory of
            per-pipeline-stage strategy files. Falsy values and the literal
            string "None" are treated as "no strategy".
        rank_id (int): rank id of the device; when given, the merged file
            name is suffixed with the rank.

    Returns:
        str or None: path to a (possibly merged) strategy file, or None.

    Raises:
        ValueError: if *strategy_path* is non-empty but does not exist.
    """
    if not strategy_path or strategy_path == "None":
        return None

    if not os.path.exists(strategy_path):
        raise ValueError(f'{strategy_path} not found!')

    if os.path.isfile(strategy_path):
        return strategy_path

    if os.path.isdir(strategy_path):
        # Compare against None explicitly: rank 0 is a valid rank id and
        # must get its own merged file, not the shared one.
        if rank_id is not None:
            merge_path = os.path.join(strategy_path, f'merged_ckpt_strategy_{rank_id}.ckpt')
        else:
            merge_path = os.path.join(strategy_path, 'merged_ckpt_strategy.ckpt')

        # Remove a stale merged file so the merge below starts fresh.
        if os.path.exists(merge_path):
            os.remove(merge_path)

        ms.merge_pipeline_strategys(strategy_path, merge_path)
        return merge_path

    return None


def merge_safetensors(src_dir, src_strategy, dst_dir):
    """Merge sharded MindSpore safetensors into one HF-style checkpoint.

    Unifies the distributed shards in *src_dir* (using *src_strategy* when
    given), renames parameters to HuggingFace convention, and writes a single
    ``checkpoint.safetensors`` plus a ``model.safetensors.index.json`` into
    *dst_dir*.

    Args:
        src_dir (str): directory with the sharded safetensors checkpoint.
        src_strategy (str): strategy file/dir used to unify the shards.
        dst_dir (str): destination directory for the merged checkpoint.
    """
    from mindspore import unified_safetensors
    tmp_dir = os.path.join(dst_dir, 'tmp')
    # exist_ok: tolerate a leftover tmp directory from a previous run.
    os.makedirs(tmp_dir, exist_ok=True)
    src_strategy_file = get_strategy(src_strategy)
    unified_safetensors(src_dir, src_strategy_file, tmp_dir)
    param_dict = load_safetensors(tmp_dir)

    # Bytes per element for the dtypes we account for; other dtypes are
    # skipped, matching the original accounting.
    dtype_nbytes = {ms.float16: 2, ms.bfloat16: 2, ms.float32: 4,
                    ms.int8: 1, ms.uint8: 1}
    new_param_dict = {}
    total_size = 0
    for key, param in param_dict.items():
        new_param_dict[name_replace_qwen(key)] = param
        nbytes = dtype_nbytes.get(param.dtype)
        if nbytes is not None:
            total_size += nbytes * np.prod(param.shape)

    safetensors_file_name = 'checkpoint'
    ms.save_checkpoint(new_param_dict, os.path.join(dst_dir, safetensors_file_name),
                       format="safetensors")
    # Every parameter lives in the single merged file.
    index_dict = {
        'metadata': {'total_size': str(total_size)},
        'weight_map': {key: safetensors_file_name + '.safetensors'
                       for key in new_param_dict},
    }
    index_file = os.path.join(dst_dir, 'model.safetensors.index.json')
    with open(index_file, 'w') as f:
        json.dump(index_dict, f, indent=2)


def soft_link_safetensors(checkpoint_dir, output_dir):
    """Create soft links in *output_dir* for each safetensors file in *checkpoint_dir*."""
    from mindformers.tools.ckpt_transform.utils import make_soft_link
    for file_name in os.listdir(checkpoint_dir):
        if not file_name.endswith('safetensors'):
            continue
        source_path = os.path.join(checkpoint_dir, file_name)
        link_path = os.path.join(output_dir, file_name)
        make_soft_link(link_path, source_path)


def generate_index_file(safetensors_dir):
    """Generate ``model.safetensors.index.json`` for a safetensors directory.

    Scans every ``*.safetensors`` file in *safetensors_dir*, maps each
    parameter name to the file containing it (last file wins on duplicates),
    and records the summed parameter byte size in the index metadata.

    Args:
        safetensors_dir (str): directory containing the safetensors files.
    """
    # Note: json / numpy / mindspore are already imported at module level;
    # keep only lightweight (dtype, shape) info instead of whole tensors.
    param_info = {}
    weight_map = {}
    for file in os.listdir(safetensors_dir):
        if not file.endswith('.safetensors'):
            continue
        file_name = os.path.join(safetensors_dir, file)
        local_param_dict = ms.load_checkpoint(file_name, format='safetensors')
        for key, param in local_param_dict.items():
            weight_map[key] = file
            param_info[key] = (param.dtype, param.shape)

    total_size = 0
    for dtype, shape in param_info.values():
        if dtype in (ms.float16, ms.bfloat16):
            total_size += 2 * np.prod(shape)
        elif dtype in (ms.float32,):
            total_size += 4 * np.prod(shape)
        elif dtype in (ms.int8, ms.uint8):
            total_size += 1 * np.prod(shape)

    index_file = os.path.join(safetensors_dir, 'model.safetensors.index.json')
    index_dict = {'metadata': {'total_size': str(total_size)}, 'weight_map': weight_map}
    with open(index_file, 'w') as f:
        json.dump(index_dict, f, indent=2)


def get_args():
    """Parse and return the command-line options for the vLLM launcher."""
    # (flag, add_argument keyword options) in definition order.
    arg_specs = (
        ("--config_dir", dict(type=str, required=True)),
        ("--load_checkpoint", dict(type=str, default=None)),
        ("--model_parallel_size", dict(type=int, default=1)),
        ("--ckpt_strategy", dict(type=str, default=None)),
        ("--max_seq_len", dict(type=int, default=32768)),
        ("--output_dir", dict(type=str, default='/tmp/')),
        ("--seed", dict(type=int, default=0)),
        ("--trans_ms_to_hf", dict(type=str2bool, default=False)),
        ("--block_size", dict(type=int, default=32)),
        ("--max_num_seqs", dict(type=int, default=None)),
        ("--max_num_batched_tokens", dict(type=int, default=None)),
    )
    parser = argparse.ArgumentParser()
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()


def main():
    """Prepare the model config directory and launch the vLLM OpenAI API server.

    Steps: resolve paths, build a clean ``configs/`` directory, copy/patch
    the HF configs, write the predict YAML, merge or soft-link the
    safetensors checkpoint, make sure an index file exists, then exec the
    server through the shell.
    """
    args = get_args()
    args.config_dir = os.path.abspath(args.config_dir)
    if args.load_checkpoint is None:
        # Default to the config directory when no checkpoint dir is given.
        args.load_checkpoint = args.config_dir
    args.load_checkpoint = os.path.abspath(args.load_checkpoint)
    output_dir = get_output_dir(args.output_dir)
    copy_files(args.config_dir, output_dir)
    copy_files(args.load_checkpoint, output_dir)
    yaml_dict, json_dict = get_init_config(
        args.config_dir,
        args.model_parallel_size,
        args.seed,
        args.max_seq_len,
        output_dir,
    )
    with open(os.path.join(output_dir, 'config.json'), 'w') as f:
        json.dump(json_dict, f, indent=4)
    with open(os.path.join(output_dir, 'predict.yaml'), 'w') as f:
        yaml.dump(yaml_dict, f)
    if args.trans_ms_to_hf:
        merge_safetensors(args.load_checkpoint, args.ckpt_strategy, output_dir)
    else:
        soft_link_safetensors(args.load_checkpoint, output_dir)
    # vLLM expects an index file; generate one when the checkpoint lacks it.
    index_file = os.path.join(output_dir, 'model.safetensors.index.json')
    if not os.path.exists(index_file):
        generate_index_file(output_dir)

    # Build the launch command from parts (clearer than += concatenation,
    # and avoids f-prefixes on strings without placeholders).
    cmd_parts = [
        f'export MINDFORMERS_MODEL_CONFIG={os.path.join(output_dir, "predict.yaml")} &&',
        'export PYTHONPATH=/home/ma-user/cmb/msadapter/mindtorch/:$PYTHONPATH &&',
        'python3 -m vllm_mindspore.entrypoints vllm.entrypoints.openai.api_server',
        f'--model {output_dir}',
        f'--tensor_parallel_size={args.model_parallel_size}',
    ]
    if args.max_num_seqs is not None:
        cmd_parts.append(f'--max_num_seqs={args.max_num_seqs}')
    cmd_parts.append(f'--block_size={args.block_size}')
    cmd_parts.append(f'--max_model_len={args.max_seq_len}')
    if args.max_num_batched_tokens is not None:
        cmd_parts.append(f'--max_num_batched_tokens={args.max_num_batched_tokens}')
    cmd_parts.append(f'--seed={args.seed}')
    cmd = ' '.join(cmd_parts)
    print('run model', output_dir)
    print('run cmd', cmd)
    # NOTE(review): os.system runs an interpolated shell string; inputs are
    # local CLI paths here, but subprocess.run would be more robust if
    # untrusted paths are ever possible.
    os.system(cmd)


# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
