import time

# Record wall-clock time so the heavy `torch` import below can be measured.
_t = time.time()
import torch

# Report how long importing torch took (seconds, "+%6.3f" format).
print(f"+{time.time() - _t:>6.3f}", "LOAD torch")

import argparse
from transformers import AutoTokenizer,Qwen2ForCausalLM

import baize_bf16_modeling_qwen2
import baize_config
import baize_gp_util

from transformers.trainer_utils import set_seed

# Fix the RNG seed so generation is reproducible across runs.
set_seed(1234)

DEFAULT_CKPT_PATH = baize_config.DEFAULT_MODEL_PATH
WELCOME_MSG = baize_config.WELCOME_MSG
# BUG FIX: the original assigned WELCOME_MSG here (copy-paste slip). Prefer
# the real HELP_MSG from baize_config when it exists; fall back to the old
# behavior (WELCOME_MSG) so nothing breaks if the attribute is absent.
HELP_MSG = getattr(baize_config, "HELP_MSG", baize_config.WELCOME_MSG)


def _load_model_tokenizer(args):
    """Load the tokenizer and the hybrid fp32/bf16 model from the checkpoint.

    Prints how long each load took and returns ``(model, tokenizer)``.
    """
    started = time.time()
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path)
    print(f"+{time.time() - started:>6.3f}", "LOAD Tokenizer")

    started = time.time()
    hybrid = baize_bf16_modeling_qwen2.BaizeFp32Bf16HybridQwen2ForCausalLM.from_pretrained(
        args.checkpoint_path
    )
    hybrid = hybrid.eval()

    # Attach a plain fp32 Qwen2 model to the hybrid wrapper via its
    # set_fp32model hook (both loaded from the same checkpoint).
    reference = Qwen2ForCausalLM.from_pretrained(args.checkpoint_path).eval()
    hybrid.set_fp32model(reference)

    print(f"+{time.time() - started:>6.3f}", "LOAD Model")

    return hybrid, tokenizer


def get_args():
    """Build and parse the command-line arguments for the chat demo.

    Returns:
        argparse.Namespace with checkpoint_path, share, inbrowser,
        server_port and server_name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--checkpoint-path",
        type=str,
        default=DEFAULT_CKPT_PATH,
        help="模型权重位置，默认为 %(default)r",
    )
    # BUG FIX: the original used `type=bool`, which treats ANY non-empty
    # string (including "False") as True. A store_true flag gives the
    # intended off-by-default boolean switch.
    parser.add_argument(
        "--share",
        action="store_true",
        default=False,
    )
    # NOTE(review): with action="store_true" AND default=True this flag can
    # never be turned off; kept unchanged to preserve behavior — confirm
    # whether default=False was intended.
    parser.add_argument(
        "--inbrowser",
        action="store_true",
        default=True,
        help="自动在浏览器中打开",
    )
    parser.add_argument("--server-port", type=int, default=8000, help="服务端口")
    parser.add_argument("--server-name", type=str, default="0.0.0.0")

    return parser.parse_args()


def main():
    """Run a simple chat REPL: read a query, stream a reply, keep history."""
    history = []
    response = ""

    args = get_args()
    model, tokenizer = _load_model_tokenizer(args)

    print(WELCOME_MSG)

    while True:
        query = input("USER: ")
        print(f"\nUSER: {query}")

        try:
            # Drain the stream; only the final value bound to `response`
            # (the last yielded item) is kept for the history entry.
            for response in baize_gp_util.chat_stream(
                model, tokenizer, query, history, debug=True
            ):
                pass
        except KeyboardInterrupt:
            # Ctrl+C during generation aborts this turn, not the program;
            # the interrupted turn is not added to history.
            print("[WARNING] Generation interrupted")
            continue

        history.append((query, response))


if __name__ == "__main__":
    main()
