"""
deepspeed --include=localhost:1  daemon_ds.py --deepspeed --deepspeed_config ds_config.json


运行多个daemon.py的办法

比如用0-3这4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')
import time
import json
import asyncio
from PyCmpltrtok.util_mongo import get_history
from PyCmpltrtok.auth.mongo.conn import conn
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import uvicorn
import deepspeed
import torch

# import logging
# logger_qw = logging.getLogger('transformers_modules.Qwen-1_8B-Chat.modeling_qwen')

print('Importing transformers ...')
from transformers import AutoConfig, GenerationConfig, AutoTokenizer, AutoModelForCausalLM
print('Importing over.')

# Connect to MongoDB at import time and smoke-test the connection.
sep('MongoDB')
mongo = conn('local')  # NOTE(review): presumably 'local' selects a connection profile in PyCmpltrtok.auth.mongo.conn — confirm
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'user_xxxx', limit=1)  # smoke test: fail fast here if the DB is unreachable
sep('MongoDB OK')


def load_model():
    """Load the Qwen chat model and tokenizer and wrap the model with a
    DeepSpeed inference engine (fp16, single-GPU tensor parallelism).

    Returns:
        tuple: ``(model, tokenizer)`` — ``model`` is the DeepSpeed
        kernel-injected module (``ds_engine.module``) and ``tokenizer``
        the matching Hugging Face tokenizer.

    Raises:
        ValueError: if an invalid precision option is passed to the
            internal ``float_set`` helper.
    """
    print('-------------------------------------------------------')
    print('正在加载模型……')
    # Alternative checkpoints, kept for reference:
    # model_name = "THUDM/chatglm2-6b-int4"
    # model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
    # model_name = "Qwen/Qwen-1_8B-Chat"
    # model_name = "/mnt/d/_dell7590_root/sync/1_usb/N1/large_sci.com.models/hf/Qwen-1_8B-Chat"  # WSL
    model_name = "/home/yunpeng/models/hf/Qwen-7B-Chat/93a65d3"  # ASUS NEW
    # model_name = "models/hf/Qwen-1_8B-Chat"  # ASUS NEW, ln -s

    def float_set(config, option):
        """Set exactly one of config.bf16/fp16/fp32 True per `option`."""
        if option not in ('bf16', 'fp16', 'fp32'):
            # FIX: previously this only printed a warning and fell through
            # with all three flags False — an invalid precision silently
            # produced an inconsistent config. Fail fast instead.
            raise ValueError(
                "Invalid option. Please choose one from 'bf16', 'fp16' and 'fp32'."
            )
        config.bf16 = option == 'bf16'
        config.fp16 = option == 'fp16'
        config.fp32 = option == 'fp32'

    config = AutoConfig.from_pretrained(
        model_name,
        trust_remote_code=True,
    )
    # NOTE: if you use the old version of model file, please remove the comments below
    # config.use_flash_attn = False
    float_set(config, "fp16")
    generation_config = GenerationConfig.from_pretrained(
        model_name, trust_remote_code=True
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        config=config,
        low_cpu_mem_usage=True,
        trust_remote_code=True
    )
    model = model.eval()
    # Extend the usable context when the checkpoint supports dynamic NTK scaling.
    if hasattr(model.config, "use_dynamic_ntk") and model.config.use_dynamic_ntk:
        model.config.max_sequence_length = 16384
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True
    )
    # Keep special-token ids consistent between tokenizer and model config.
    tokenizer.eos_token_id = config.eos_token_id
    tokenizer.bos_token_id = config.bos_token_id
    tokenizer.pad_token_id = generation_config.pad_token_id
    model.config.eos_token_id = tokenizer.eos_token_id
    model.config.bos_token_id = tokenizer.bos_token_id
    model.config.pad_token_id = tokenizer.pad_token_id

    # Wrap the model with DeepSpeed's inference kernels (single GPU, fp16).
    ds_engine = deepspeed.init_inference(
        model,
        # 'mp_size' is deprecated; tensor_parallel.tp_size replaces it
        # (https://github.com/microsoft/DeepSpeed/issues/2558).
        tensor_parallel={"tp_size": 1},
        dtype=torch.half,
        checkpoint=None,
        replace_method='auto',
        replace_with_kernel_inject=True,
    )
    # The injected module is what callers actually invoke (chat_stream, etc.).
    model = ds_engine.module

    sep()
    sep('model')
    print(model)
    sep()
    sep('model.config')
    print(model.config)
    sep()
    sep('tokenizer')
    print(tokenizer)
    sep()
    sep()
    print('模型已经加载完毕。')
    return model, tokenizer


if '__main__' == __name__:
    import argparse

    # CLI: port to bind and an optional ASGI root path (for reverse proxies).
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--port', help='port of the daemon FastAPI service', default=7750, type=int)
    parser.add_argument('--root-path', help='Root path of the service', default="", type=str, dest='root_path')
    args = parser.parse_args()

    port = args.port
    root_path = args.root_path

    # Only forward root_path to FastAPI when one was actually supplied.
    if root_path:
        root_path_kwargs = {
            'root_path': root_path,
        }
    else:
        root_path_kwargs = {}

    app = FastAPI(
        title="My API",
        version='0.1.0',
        **root_path_kwargs,
    )

    @app.post("/stream_chat")
    async def stream_chat(request: Request):
        """Stream a chat completion for the request's 'input', using the
        MongoDB chat history of 'username' as conversation context.

        Responds with a stream of NUL-terminated JSON chunks, each of the
        form {"input": ..., "output": <partial answer so far>}.

        Relies on module globals `model` and `tokenizer`, which are
        assigned below (after this definition but before uvicorn starts).
        """
        # Parse the request body as JSON.
        req_json = await request.json()  # request JSON
        # Extract the user's message and identity.
        xinput = req_json['input']
        username = req_json['username']
        print('input:', xinput)
        print('username:', username)

        # Fetch the chat history: a sequence of (user_input, model_output) pairs.
        xlog = get_history(mdb, username, more_info=False)

        # Model inference.
        print('-------------history-----------------')
        for i, (xin, xout) in enumerate(xlog):
            print(i, '>>>>', xin)
            print(i, '<<<<', xout)
        print('-------------this turn---------------')
        print('>>>>', '>>>>', xinput)
        # xgenerator = model.stream_chat(tokenizer, xinput, history=xlog)  # ChatGLM2-6B
        xgenerator = model.chat_stream(tokenizer, xinput, history=xlog)  # QWEN 1.8B int4

        def get_generator():
            # Wrap each partial output as a JSON chunk; NUL byte delimits
            # chunks so the client can split the stream.
            for x in xgenerator:
                res_dict = dict()
                res_dict['input'] = xinput
                res_dict['output'] = x
                yield json.dumps(res_dict).encode('utf8') + b"\0"

        generator = get_generator()

        # https://fastapi.tiangolo.com/advanced/custom-response/#streamingresponse
        return StreamingResponse(generator)

    # Order matters: load the (slow) model after the route is defined but
    # before the server starts accepting requests; the endpoint reads these
    # module globals at call time.
    model, tokenizer = load_model()
    uvicorn.run(app, host='0.0.0.0', port=port, root_path=root_path)
