"""
运行多个daemon.py的办法

比如用0-3 4个显卡卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')
import time
import os
import json
import asyncio
from PyCmpltrtok.util_mongo import get_history
import pymongo as pm
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import uvicorn

app = FastAPI()
os.environ['M_HOST'] = '127.0.0.1'
os.environ['M_PORT'] = '27017'
os.environ['M_USER'] = 'root'
os.environ['M_PWD'] = 'p1983mdA1-ei'

print('Importing transformers ...')
from transformers import AutoConfig, GenerationConfig, AutoTokenizer, AutoModelForCausalLM
print('Importing over.')

# 连接Mongodb
sep('MongoDB')
mongo = pm.MongoClient(
    os.environ['M_HOST'], int(os.environ['M_PORT']), serverSelectionTimeoutMS=3000,
    username=os.environ['M_USER'], password=os.environ['M_PWD'], 
)
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'user_xxxx', limit=1)  # try it
sep('MongoDB OK')


def load_model(model_name="/home/yunpeng/models/hf/Qwen-1_8B-Chat-Int4"):
    """Load a causal-LM checkpoint and its tokenizer onto the GPU.

    Args:
        model_name: HuggingFace repo id or local checkpoint path.
            Defaults to the local Qwen-1_8B-Chat-Int4 snapshot (ASUS box),
            preserving the original hard-coded behavior.

    Returns:
        (model, tokenizer): the model in eval mode on CUDA, and its tokenizer
        with special-token ids synced to the model config.

    Raises:
        ValueError: if an invalid precision option is requested internally.
    """
    print('-------------------------------------------------------')
    print('正在加载模型……')
    # Alternative checkpoints kept for reference:
    # model_name = "THUDM/chatglm2-6b-int4"
    # model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
    # model_name = "Qwen/Qwen-1_8B-Chat"
    # model_name = "/mnt/d/_dell7590_root/sync/1_usb/N1/large_sci.com.models/hf/Qwen-1_8B-Chat"  # WSL
    # model_name = "models/hf/Qwen-1_8B-Chat"  # ASUS NEW, ln -s

    def float_set(config, option):
        """Enable exactly one of config.bf16 / fp16 / fp32.

        Fails fast on an unknown option instead of silently leaving all
        three flags False (the previous version only printed a warning).
        """
        if option not in ('bf16', 'fp16', 'fp32'):
            raise ValueError(
                "Invalid option. Please choose one from 'bf16', 'fp16' and 'fp32'."
            )
        config.bf16 = option == 'bf16'
        config.fp16 = option == 'fp16'
        config.fp32 = option == 'fp32'

    config = AutoConfig.from_pretrained(
        model_name,
        trust_remote_code=True,
    )
    # NOTE: if you use the old version of model file, please remove the comments below
    # config.use_flash_attn = False
    float_set(config, "fp16")
    generation_config = GenerationConfig.from_pretrained(
        model_name, trust_remote_code=True
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        config=config,
        low_cpu_mem_usage=True,
        trust_remote_code=True
    )
    model = model.eval()
    # Extend the usable context when the checkpoint supports dynamic NTK scaling.
    if hasattr(model.config, "use_dynamic_ntk") and model.config.use_dynamic_ntk:
        model.config.max_sequence_length = 16384
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True
    )
    # Keep special-token ids consistent between tokenizer and model config.
    tokenizer.eos_token_id = config.eos_token_id
    tokenizer.bos_token_id = config.bos_token_id
    tokenizer.pad_token_id = generation_config.pad_token_id
    model.config.eos_token_id = tokenizer.eos_token_id
    model.config.bos_token_id = tokenizer.bos_token_id
    model.config.pad_token_id = tokenizer.pad_token_id
    model = model.cuda()
    sep()
    sep('model')
    print(model)
    sep()
    sep('model.config')
    print(model.config)
    sep()
    sep('tokenizer')
    print(tokenizer)
    sep()
    sep()
    print('模型已经加载完毕。')
    return model, tokenizer


@app.post("/stream_chat")
async def stream_chat(request: Request):
    
    # 接收输入
    req_json = await request.json()  # 请求json
    # 获取输入
    xinput = req_json['input']
    username = req_json['username']
    print('input:', xinput)
    print('username:', username)

    # 获取聊天历史
    xlog = get_history(mdb, username, more_info=False, no_none=True)
    
    # 模型推理
    print('-------------history-----------------')
    for i, (xin, xout) in enumerate(xlog):
        print(i, '>>>>', xin)
        print(i, '<<<<', xout)
    print('-------------this turn---------------')
    print('>>>>', '>>>>', xinput)
    # xgenerator = model.stream_chat(tokenizer, xinput, history=xlog)  # ChatGLM2-6B
    xgenerator = model.chat_stream(tokenizer, xinput, history=xlog)  # QWEN 1.8B int4
    
    async def get_generator():
        for x in xgenerator:
            await asyncio.sleep(0.000001)
            res_dict = dict()
            res_dict['input'] = xinput
            res_dict['output'] = x
            yield json.dumps(res_dict).encode('utf8') + b"\0"
            
    generator = get_generator()
    # https://fastapi.tiangolo.com/advanced/custom-response/#streamingresponse
    return StreamingResponse(generator)


if __name__ == '__main__':
    # Load the model once at startup, then serve the FastAPI app forever.
    # `model` and `tokenizer` become module-level globals used by /stream_chat.
    model, tokenizer = load_model()
    uvicorn.run(app, host='0.0.0.0', port=7760)
