"""
运行多个daemon.py的办法

比如用0-3这4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')  # printed before the heavy imports below so a slow startup is visible
import time
import json
import asyncio
import os
import sys
import traceback
from PyCmpltrtok.util_mongo import get_history
from PyCmpltrtok.auth.mongo.conn import conn
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep

from fastapi import FastAPI, Request, Body
from fastapi.responses import StreamingResponse
import uvicorn

import langchain
langchain.verbose = True
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI, AzureOpenAI, Anthropic
from langchain.memory import ConversationBufferMemory

import logging

logging.basicConfig(level=logging.INFO, )

# Connect to MongoDB and sanity-check the connection by fetching one history
# record, so a bad DB configuration fails fast before the web app starts.
sep('MongoDB')
mongo = conn('local')
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'user_xxxx', limit=1)  # try it
sep('MongoDB OK')

app = FastAPI()

# Placeholder text substituted for empty/None turns in stored history,
# because the model backend rejects zero-length messages.
IDLE = '暂时离线，请稍等。'
# English original of the Chinese prompt below, kept for reference:
# prompt = 'The following is a friendly conversation between a human and an AI. '
# 'The AI is talkative and provides lots of specific details from its context. '
# 'If the AI does not know the answer to a question, it truthfully says it does not know.\n\n'
# 'Current conversation:\n'
# '{history}\n'
# 'Human: {input}\n'
# 'AI:'
# Conversation prompt with {history} and {input} slots, filled by LLMChain's memory.
prompt = '下面是用户和AI的一段友好的对话。'\
'AI非常健谈并且根据上下文提供了很多有用的细节。'\
'如果AI不知道问题的答案，它就诚实地说它不知道。\n\n'\
'当前的对话:\n'\
'{history}\n'\
'用户: {input}\n'\
'AI:'
chat_prompt = PromptTemplate.from_template(prompt)


# @app.post("/stream_chat")
async def stream_chat(
    xinput: str = Body("", description="用户输入", examples=["恼羞成怒"]), 
    username: str = Body(None, description="用户名", examples=["user_xxxx"])
):
    """Stream a chat completion for `xinput`, using `username`'s saved history as context.

    Returns a StreamingResponse whose chunks are NUL-terminated UTF-8 JSON
    objects of the form {"input": <xinput>, "output": <accumulated reply so far>}.
    """
    print('input:', xinput)
    print('username:', username)

    # Fetch the stored conversation history for this user from MongoDB.
    xlog = get_history(mdb, username, more_info=False)

    # Debug dump: prior turns, then the current input.
    print('-------------history-----------------')
    for i, (xin, xout) in enumerate(xlog):
        print(i, '>>>>', f'|{xin}|')
        print(i, '<<<<', f'|{xout}|')
    print('-------------this turn---------------')
    print('>>>>', '>>>>', f'|{xinput}|')

    # Rebuild LangChain conversation memory from the history. Empty/None
    # turns are replaced with the IDLE placeholder because the backend
    # requires non-empty user/assistant content.
    memory = ConversationBufferMemory(
        human_prefix='用户',
        ai_prefix='AI',
    )
    for xin, xout in xlog:
        if not xin:
            xin = IDLE
        if not xout:
            xout = IDLE
        memory.chat_memory.add_user_message(xin)
        memory.chat_memory.add_ai_message(xout)

    # Async iterator callback lets us consume tokens as they are generated.
    callback = AsyncIteratorCallbackHandler()
    model = ChatOpenAI(
        streaming=True,
        verbose=True,
        callbacks=[callback],
        openai_api_key="EMPTY",
        # Local OpenAI-compatible endpoint (e.g. Xinference) serving the model.
        openai_api_base="http://127.0.0.1:9997/v1",
        model_name="qwen2.5-instruct",
        temperature=0.3,
        max_tokens=2048,
    )
    chain = LLMChain(prompt=chat_prompt, llm=model, memory=memory)

    async def the_task():
        # Run the chain; tokens arrive through `callback`. Broad catch is
        # deliberate at this task boundary: log and keep serving.
        try:
            await chain.acall({"input": xinput})
        except Exception as e:
            # Bug fix: was print(print(...)) which sent the traceback to
            # stdout and then logged `None` to stderr.
            print(traceback.format_exc(), file=sys.stderr, flush=True)
            msg = f"Caught exception: {e}"
            print(f'{e.__class__.__name__}: {msg}', file=sys.stderr, flush=True)
        finally:
            # Signal the aiter to stop so the response stream terminates.
            callback.done.set()

    async def get_generator():
        task = asyncio.create_task(the_task())
        acc = ''
        async for token in callback.aiter():
            acc += token
            res_dict = {'input': xinput, 'output': acc}
            # NUL byte delimits chunks so the client can split the stream.
            yield json.dumps(res_dict, ensure_ascii=False).encode('utf8') + b"\0"
        await task  # settle the producer task (exceptions were handled inside)

    return StreamingResponse(get_generator())


# Register the route via a plain call (instead of decorating the function)
# so the handler object itself stays undecorated.
app.post('/stream_chat')(stream_chat)

if '__main__' == __name__:
    import argparse
    
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--port', help='port of the daemon FastAPI service', default=7750, type=int)
    args = parser.parse_args()

    # Serve on all interfaces; run one daemon process per GPU (see module docstring).
    port = args.port
    uvicorn.run(app, host='0.0.0.0', port=port, log_level='debug')
