"""
运行多个daemon.py的办法

比如用0-3 4个显卡卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')  # early startup marker so slow imports are visible in the log
import time
import json
import asyncio
from PyCmpltrtok.util_mongo import get_history
import pymongo as pm
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import uvicorn

from http import HTTPStatus
import dashscope
from auth_tmp import api_key

# Authenticate the DashScope SDK with the key loaded from auth_tmp.
dashscope.api_key = api_key


# Connect to MongoDB (chat-history storage).
sep('MongoDB')
# NOTE(review): credentials are hard-coded here — move them to env vars / config.
mongo = pm.MongoClient(
    '127.0.0.1', 27017, serverSelectionTimeoutMS=3000,
    username='root', password='p1983mdA1-ei',
)
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'user_xxxx', limit=1)  # smoke test: fail fast if MongoDB is unreachable
sep('MongoDB OK')

app = FastAPI()

# DashScope model used for all generations.
model = dashscope.Generation.Models.qwen_turbo
# Placeholder substituted for empty history entries (see stream_chat: the API
# requires non-empty content for user/assistant messages).
IDLE = '暂时离线，请稍等。'


@app.post("/stream_chat")
async def stream_chat(request: Request):
    
    # 接收输入
    req_json = await request.json()  # 请求json
    # 获取输入
    xinput = req_json['input']
    username = req_json['username']
    print('input:', xinput)
    print('username:', username)

    # 获取聊天历史
    xlog = get_history(mdb, username, more_info=False)
    
    # 模型推理
    print('-------------history-----------------')
    for i, (xin, xout) in enumerate(xlog):
        print(i, '>>>>', f'|{xin}|')
        print(i, '<<<<', f'|{xout}|')
    print('-------------this turn---------------')
    print('>>>>', '>>>>', f'|{xinput}|')
    
    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'},]
    for xin, xout in xlog:
        # Role must be user or assistant and Content length must be greater than 0
        if xin is None or len(xin) == 0:
            xin = IDLE
        if xout is None or len(xout) == 0:
            xout = IDLE
        messages.append({'role': 'user', 'content': xin})
        messages.append({'role': 'assistant', 'content': xout})
    messages.append({'role': 'user', 'content': xinput})
    
    # xgenerator = model.stream_chat(tokenizer, xinput, history=xlog)  # ChatGLM2-6B
    # xgenerator = model.chat_stream(tokenizer, xinput, history=xlog)  # QWEN 1.8B int4
    
    responses = dashscope.Generation.call(
        dashscope.Generation.Models.qwen_turbo,
        messages=messages,
        result_format='message',  # 将返回结果格式设置为 message
        stream=True,  # 设置输出方式为流式输出
        incremental_output=True,  # 增量式流式输出
    )
    
    def get_generator():
        x = ''
        for response in responses:
            if response.status_code == HTTPStatus.OK:
                res_dict = dict()
                res_dict['input'] = xinput
                x += response.output.choices[0]['message']['content']
                res_dict['output'] = x
                yield json.dumps(res_dict, ensure_ascii=False).encode('utf8') + b"\0"
            else:
                print(response)
                break
            
    generator = get_generator()
    # https://fastapi.tiangolo.com/advanced/custom-response/#streamingresponse
    return StreamingResponse(generator)


if __name__ == '__main__':
    import argparse

    # CLI: only the listening port is configurable.
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        '--port',
        type=int,
        default=7750,
        help='port of the daemon FastAPI service',
    )
    cli_args = arg_parser.parse_args()

    # Serve the FastAPI app on all interfaces.
    uvicorn.run(app, host='0.0.0.0', port=cli_args.port)
