"""
reference: https://hf-mirror.com/docs/transformers/main/en/deepspeed
"""
import argparse

from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
from transformers.integrations import HfDeepSpeedConfig
import deepspeed
import os
import time
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch_npu
from torch_npu.contrib import transfer_to_npu

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To avoid warnings about parallelism in tokenizers


def get_args():
    """Build the CLI parser for the inference run and return parsed args."""
    parser = argparse.ArgumentParser(
        "Mixtral 8X7B Instruct Inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # (flag, keyword arguments) table keeps the option list compact.
    option_table = [
        ("--model_name",
         dict(type=str, default="mistralai/Mixtral-8x7B-Instruct-v0.1",
              help="The path of the weight")),
        ("--seed",
         dict(type=int, default=12306, help="Set the random seed")),
        ("--local_rank",
         dict(type=int, help="local rank for distributed training")),
    ]
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


def setup_seeds(seed=12306):
    """Seed every RNG source (python, numpy, torch CPU, NPU) and force
    deterministic algorithm selection for reproducible inference."""
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.npu.manual_seed_all):
        seed_fn(seed)
    torch.use_deterministic_algorithms(True)

    # Disable autotuning so kernel selection does not vary between runs.
    cudnn.deterministic = True
    cudnn.benchmark = False


def main(args):
    """Run distributed Mixtral-8x7B chat inference with DeepSpeed ZeRO-3.

    Loads the tokenizer/model from ``args.model_name``, shards parameters
    across ranks via ZeRO stage 3, generates a reply for a fixed chat
    prompt, and prints the decoded output on every rank.
    """
    # Distributed setup: bind this process to its local NPU device.
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    torch.npu.set_device(local_rank)
    deepspeed.init_distributed()

    setup_seeds(args.seed)
    model_name = args.model_name

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    config = AutoConfig.from_pretrained(model_name)
    model_hidden_size = config.hidden_size

    # DeepSpeed engine configuration.
    # NOTE: some training-only fields (batch sizes, steps_per_print) are set
    # merely to satisfy engine initialization; they have no effect on inference.
    train_batch_size = 1 * world_size
    dtype = torch.float16
    max_new_tokens = 20
    ds_config = {
        "fp16": {
            "enabled": dtype == torch.float16,
        },
        "bf16": {
            "enabled": dtype == torch.bfloat16,
        },
        "zero_optimization": {
            "stage": 3,
            # "offload_param": {
            #     "device": "cpu",
            #     "pin_memory": True
            # },
            "overlap_comm": True,
            "contiguous_gradients": True,
            "reduce_bucket_size": model_hidden_size * model_hidden_size,
            "stage3_prefetch_bucket_size": 0.9 * model_hidden_size * model_hidden_size,
            "stage3_param_persistence_threshold": 10 * model_hidden_size
        },
        "steps_per_print": 2000,
        "train_batch_size": train_batch_size,
        "train_micro_batch_size_per_gpu": 1,
        "wall_clock_breakdown": False
    }
    # The HfDeepSpeedConfig object MUST exist before from_pretrained() so that
    # ZeRO-3 shards the weights while loading; keep the reference alive.
    dschf = HfDeepSpeedConfig(ds_config)  # noqa: F841 -- side effect matters

    # Load the model, then hand it to the DeepSpeed engine for inference.
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=dtype)
    ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0]
    ds_engine.module.eval()  # inference

    rank = torch.distributed.get_rank()
    messages = [
        {"role": "user", "content": "What is your favourite condiment?"},
        {"role": "assistant",
         "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. "
                    "It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
        {"role": "user", "content": "Do you have mayonnaise recipes?"}
    ]

    # micro_batch = 1 per rank here (global_batch_size = world_size * 1).
    # FIX: without return_dict=True, apply_chat_template(return_tensors="pt")
    # returns a bare tensor, which cannot be **-unpacked into generate();
    # requesting a dict yields input_ids (and attention_mask) as a mapping.
    inputs = tokenizer.apply_chat_template(
        messages, return_dict=True, return_tensors="pt"
    ).to(device=local_rank)
    with torch.no_grad():
        outputs = ds_engine.module.generate(**inputs, max_new_tokens=max_new_tokens)
    print(f"rank{rank}:\n in={messages[0]}\n out={tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]}")


if __name__ == '__main__':
    # Parse CLI options and launch inference.
    main(get_args())
