"""
运行多个daemon.py的办法

比如用0-3 4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')
import time
import json
import asyncio
from PyCmpltrtok.util_mongo import get_history
import pymongo as pm
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep
from auth_tmp import api_key

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import uvicorn
from zhipuai import ZhipuAI

# Connect to MongoDB (used for per-user chat history)
sep('MongoDB')
mongo = pm.MongoClient(
    '127.0.0.1', 27017, serverSelectionTimeoutMS=3000,
    username='root', password='p1983mdA1-ei',  # NOTE(review): hardcoded credentials — move to env/config
)
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'user_xxxx', limit=1)  # smoke test: fail fast at startup if MongoDB is unreachable
sep('MongoDB OK')

app = FastAPI()

# ZhipuAI client for GLM model inference (original comment said "Mistral" — it is ZhipuAI)
client = ZhipuAI(api_key=api_key)


@app.post("/stream_chat")
async def stream_chat(request: Request):
    
    # 接收输入
    req_json = await request.json()  # 请求json
    # 获取输入
    xinput = req_json['input']
    username = req_json['username']
    print('input:', xinput)
    print('username:', username)

    # 获取聊天历史
    xlog = get_history(mdb, username, more_info=False)
    
    # 模型推理
    print('-------------history-----------------')
    for i, (xin, xout) in enumerate(xlog):
        print(i, '>>>>', xin)
        print(i, '<<<<', xout)
    print('-------------this turn---------------')
    print('>>>>', '>>>>', xinput)
    
    messages = [{
        'role': 'system',
        'content': '你是一个乐于解答各种问题的助手，你的任务是为用户提供专业、准确、有见地的建议。',
    }]
    for xin, xout in xlog:
        messages.append({'role': "user", 'content':xin})
        messages.append({'role': "assistant", 'content': xout})
    messages.append({'role': "user", 'content':xinput})
    
    # xgenerator = model.stream_chat(tokenizer, xinput, history=xlog)  # ChatGLM2-6B
    # xgenerator = model.chat_stream(tokenizer, xinput, history=xlog)  # QWEN 1.8B int4
    
    response = client.chat.completions.create(
        model="glm-4",  # 填写需要调用的模型名称
        messages=messages,
        stream=True,
    )
    
    async def wrapper():
        # async for chunk in response:   # TypeError: 'async for' requires an object with __aiter__ method, got StreamResponse
        for chunk in response: 
            await asyncio.sleep(0.000001)  # Important for concurrency
            yield chunk.choices[0].delta.content
    
    async def get_generator():
        x = ''
        async for piece in wrapper(): 
            res_dict = dict()
            res_dict['input'] = xinput
            x += piece
            res_dict['output'] = x
            yield json.dumps(res_dict, ensure_ascii=False).encode('utf8') + b"\0"
            
    generator = get_generator()
    # https://fastapi.tiangolo.com/advanced/custom-response/#streamingresponse
    return StreamingResponse(generator)


if __name__ == '__main__':
    import argparse

    # CLI: --port selects the port this FastAPI daemon listens on.
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        '--port', type=int, default=7750,
        help='port of the daemon FastAPI service',
    )
    cli_args = arg_parser.parse_args()

    # Bind on all interfaces so other hosts can reach the daemon.
    uvicorn.run(app, host='0.0.0.0', port=cli_args.port)
