# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import argparse
import re
import logging
import transformers  # noqa: F401
import os
import json
from transformers import pipeline, set_seed
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM


def parse_args():
    """Parse command-line options for the interactive chat script."""
    parser = argparse.ArgumentParser()
    # Path to the trained actor model directory (or a HuggingFace hub id).
    parser.add_argument("--path",
                        type=str,
                        help="Directory containing trained actor model")
    # Upper bound on tokens generated for each response.
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=128,
        help="Maximum new tokens to generate per response",
    )
    return parser.parse_args()


def get_generator(path):
    """Build a text-generation pipeline from a trained actor model.

    Args:
        path: Local directory containing the trained model, or a
            HuggingFace hub model id.

    Returns:
        A transformers ``pipeline("text-generation", ...)`` running the
        fp16 model on ``cuda:0``.
    """
    if os.path.exists(path):
        # Loading the tokenizer straight from the local path has issues, so
        # resolve the original model name from config.json and download it.
        model_json = os.path.join(path, "config.json")
        if os.path.exists(model_json):
            with open(model_json) as f:  # close the handle (was leaked)
                model_json_file = json.load(f)
            model_name = model_json_file["_name_or_path"]
            tokenizer = AutoTokenizer.from_pretrained(model_name,
                                                      fast_tokenizer=True)
        else:
            # Bug fix: `tokenizer` was previously left undefined when the
            # local path had no config.json; fall back to the path itself.
            tokenizer = AutoTokenizer.from_pretrained(path,
                                                      fast_tokenizer=True)
    else:
        # Not a local directory: treat `path` as a hub model id.
        tokenizer = AutoTokenizer.from_pretrained(path, fast_tokenizer=True)
    # Reuse EOS as the padding token (the tokenizer may not define one).
    tokenizer.pad_token = tokenizer.eos_token
    # Load config and weights from the provided path; cast to half precision.
    # (Call from_pretrained on the Auto class directly instead of first
    # instantiating a throwaway randomly-initialized model via from_config.)
    model_config = AutoConfig.from_pretrained(path)
    model = AutoModelForCausalLM.from_pretrained(path,
                                                 from_tf=bool(".ckpt" in path),
                                                 config=model_config).half()
    # Align the model's end/pad token ids with the tokenizer's EOS id.
    model.config.end_token_id = tokenizer.eos_token_id
    model.config.pad_token_id = model.config.eos_token_id
    # Resize embeddings in case the tokenizer vocabulary size differs.
    model.resize_token_embeddings(len(tokenizer))
    # Wrap everything in a text-generation pipeline.
    generator = pipeline("text-generation",
                         model=model,
                         tokenizer=tokenizer,
                         device="cuda:0")
    return generator


def get_user_input(user_input):
    """Prompt for one line of input and append it to the dialogue.

    Args:
        user_input: The dialogue accumulated so far.

    Returns:
        Tuple of (updated dialogue, is_quit, is_clear) where the two flags
        report whether the user typed the 'quit' or 'clear' command.
    """
    raw = input("Enter input (type 'quit' to exit, 'clear' to clean memory): ")
    turn = f"Human: {raw}\n Assistant: "
    return f"{user_input} {turn}", raw == "quit", raw == "clear"

# Run one generation step and return the raw pipeline output.
def get_model_response(generator, user_input, max_new_tokens):
    """Generate the model's reply for the dialogue so far.

    Args:
        generator: Callable text-generation pipeline.
        user_input: Full dialogue prompt accumulated so far.
        max_new_tokens: Cap on new tokens generated for this reply.

    Returns:
        The pipeline's output (a list of generated-text dicts).
    """
    return generator(user_input, max_new_tokens=max_new_tokens)

# Post-process a generation so only the current round's reply remains.
def process_response(response, num_rounds):
    """Trim the generated text to the current dialogue round.

    Args:
        response: Pipeline output; ``response[0]["generated_text"]`` is used.
        num_rounds: Number of dialogue rounds completed so far.

    Returns:
        The generated text with the end-of-sequence marker removed and
        truncated before any extra "Human: " turn the model invented.
    """
    text = str(response[0]["generated_text"])
    # Drop the end-of-sequence marker emitted by the model.
    text = text.replace("<|endoftext|></s>", "")
    # Each "Human: " occurrence marks the start of a dialogue turn.
    turn_starts = [m.start() for m in re.finditer("Human: ", text)]
    # More occurrences than rounds means the model began hallucinating the
    # next human turn; cut the text just before it.
    if len(turn_starts) > num_rounds:
        text = text[:turn_starts[num_rounds]]
    return text


def main(args):
    """Run the interactive chat loop over the trained actor model."""
    # 1. Build the text-generation pipeline from the trained model path.
    generator = get_generator(args.path)
    # 2. Fix the random seed for reproducible generations.
    # (Bug fix: this line was previously `/2、...` — a stray `/` that made
    # the whole file a syntax error.)
    set_seed(42)

    user_input = ""
    num_rounds = 0
    # 3. Loop forever, prompting for user input on each iteration.
    while True:
        num_rounds += 1
        # Append the user's new turn to the accumulated dialogue.
        user_input, quit, clear = get_user_input(user_input)
        # Typing "quit" ends the program.
        if quit:
            break
        # Typing "clear" resets the dialogue history and round counter.
        if clear:
            user_input, num_rounds = "", 0
            continue
        # Generate a reply and trim it to the current round only.
        response = get_model_response(generator, user_input,
                                      args.max_new_tokens)
        output = process_response(response, num_rounds)

        print("-" * 30 + f" Round {num_rounds} " + "-" * 30)
        print(f"{output}")
        # Carry the trimmed output forward as context for the next round.
        user_input = f"{output}\n\n"


if __name__ == "__main__":
    # Silence warnings about `max_new_tokens` and `max_length` being set
    logging.getLogger("transformers").setLevel(logging.ERROR)

    # Parse CLI arguments and start the interactive chat loop.
    args = parse_args()
    main(args)

# Example:
"""
 Human: what is internet explorer?
 Assistant:
Internet Explorer is an internet browser developed by Microsoft. It is primarily used for browsing the web, but can also be used to run some applications. Internet Explorer is often considered the best and most popular internet browser currently available, though there are many other options available.

 Human: what is edge?
 Assistant:
 Edge is a newer version of the Microsoft internet browser, developed by Microsoft. It is focused on improving performance and security, and offers a more modern user interface. Edge is currently the most popular internet browser on the market, and is also used heavily by Microsoft employees.
"""
