"""
reference: https://hf-mirror.com/docs/transformers/main/en/deepspeed
"""
import argparse

from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
from transformers.integrations import HfDeepSpeedConfig
import deepspeed
import os
import time
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch_npu
from torch_npu.contrib import transfer_to_npu

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To avoid warnings about parallelism in tokenizers


def get_args():
    """Parse command-line arguments for the Mixtral 8x7B inference script.

    Returns:
        argparse.Namespace with:
            model_name (str): HF model id or local path of the weights.
            test_speed (bool): run the latency benchmark instead of a demo prompt.
            seed (int): global random seed.
            local_rank (int | None): local rank injected by the distributed launcher.
    """
    parser = argparse.ArgumentParser(
        "Mixtral 8X7B Inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--model_name", type=str, default="mistralai/Mixtral-8x7B-v0.1",
        help="The path of the weight"
    )
    parser.add_argument(
        "--test_speed", action="store_true",
        # Fixed typo in help text: "running peed" -> "running speed".
        help="Whether to test running speed"
    )
    parser.add_argument(
        "--seed", type=int, default=12306,
        help="Set the random seed"
    )
    parser.add_argument(
        "--local_rank", type=int,
        help="local rank for distributed training"
    )
    return parser.parse_args()


def setup_seeds(seed=12306):
    """Seed every RNG in use and force deterministic execution.

    Seeds Python's ``random``, NumPy, PyTorch CPU, and all NPU devices,
    then enables PyTorch's deterministic-algorithms mode and disables
    non-deterministic cuDNN autotuning.
    """
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.npu.manual_seed_all):
        seed_fn(seed)
    torch.use_deterministic_algorithms(True)

    cudnn.deterministic = True
    cudnn.benchmark = False


def performance_test(model, tokenizer, local_rank):
    """Benchmark greedy generation over a grid of input/output lengths.

    For each (batch_size, input_len, output_len) case, prints the pure
    model generation time, the end-to-end time (generation + decode),
    and the resulting output-token throughput. ``model`` and ``tokenizer``
    are Hugging Face objects; ``local_rank`` is the target NPU device.
    """
    prompt = "Hello my name is"
    batch_sizes = [1]
    in_lens = [10, 100, 500, 1000, 2000]
    out_lens = [10, 100, 500, 1000, 2000]
    cases = [(bs, n_in, n_out)
             for bs in batch_sizes for n_in in in_lens for n_out in out_lens]

    # Single warm-up pass so the timed runs exclude one-time lazy setup.
    warm = tokenizer(prompt, return_tensors="pt", max_length=in_lens[0],
                     padding="max_length", truncation=True)
    with torch.no_grad():
        warm_ids = model.generate(warm["input_ids"].to(device=local_rank),
                                  max_new_tokens=out_lens[0], do_sample=False)
        tokenizer.decode(warm_ids[0], skip_special_tokens=True)

    # Timed runs over every configured case.
    for batch_size, input_seq_len, output_seq_len in cases:
        print(f"batch_size is {batch_size}, input_seq_len is {input_seq_len}, output_seq_len is {output_seq_len}")
        encoded = tokenizer(prompt, return_tensors="pt",
                            max_length=input_seq_len, padding="max_length",
                            truncation=True)

        torch.npu.empty_cache()
        with torch.no_grad():
            t_start = time.time()
            generated = model.generate(encoded["input_ids"].to(device=local_rank),
                                       max_new_tokens=output_seq_len,
                                       do_sample=False)
            t_model = time.time()
            tokenizer.decode(generated[0], skip_special_tokens=True)
            t_e2e = time.time()
            print("model_time:", t_model - t_start)
            print("end2end_time:", t_e2e - t_start)
            print("output token delay:", output_seq_len / (t_model - t_start), " token/s")


def main(args):
    # distributed setup
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    torch.npu.set_device(local_rank)
    deepspeed.init_distributed()

    setup_seeds(args.seed)
    model_name = args.model_name

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    config = AutoConfig.from_pretrained(model_name)
    model_hidden_size = config.hidden_size

    ''' 相关参数配置 '''
    # note: 部分参数设置只为启动deepspeed，在推理过程中并无作用
    train_batch_size = 1 * world_size
    dtype = torch.float16
    max_new_tokens = 20
    ds_config = {
        "fp16": {
            "enabled": dtype == torch.float16,
        },
        "bf16": {
            "enabled": dtype == torch.bfloat16,
        },
        "zero_optimization": {
            "stage": 3,
            # "offload_param": {
            #     "device": "cpu",
            #     "pin_memory": True
            # },
            "overlap_comm": True,
            "contiguous_gradients": True,
            "reduce_bucket_size": model_hidden_size * model_hidden_size,
            "stage3_prefetch_bucket_size": 0.9 * model_hidden_size * model_hidden_size,
            "stage3_param_persistence_threshold": 10 * model_hidden_size
        },
        "steps_per_print": 2000,
        "train_batch_size": train_batch_size,
        "train_micro_batch_size_per_gpu": 1,
        "wall_clock_breakdown": False
    }
    # 必须优先确保HfDeepSpeedConfig对象的存在(确定zero-3边切分边加载过程有效)
    dschf = HfDeepSpeedConfig(ds_config)

    # 加载模型
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=dtype)
    # 初始化deepspeed引擎
    ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0]
    ds_engine.module.eval()  # inference

    if args.test_speed:
        performance_test(ds_engine.module, tokenizer, local_rank)
    else:
        rank = torch.distributed.get_rank()
        text_in = "Hello my name is"

        # 此处以micro_batch = 1为例(global_batch_size=world_size * 1)
        inputs = tokenizer([text_in], return_tensors="pt").to(device=local_rank)
        with torch.no_grad():
            outputs = ds_engine.module.generate(**inputs, max_new_tokens=max_new_tokens)
        print(f"rank{rank}:\n in={text_in}\n out={tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]}")


if __name__ == '__main__':
    # Parse CLI arguments and hand them straight to the entry point.
    main(get_args())
