from PyCmpltrtok.common import sep
import langchain
langchain.verbose = True
from langchain.prompts.chat import ChatMessagePromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from pydantic import BaseModel, Field
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains import LLMChain
import traceback
import sys
import asyncio

sep('chat message prompt tpl, user')
# Jinja2-style user message template (Chinese): an instruction section, a
# known-information section and a question section, with {{ context }} /
# {{ question }} placeholders filled in at chain time.
xraw = (
    '<指令>根据已知信息，简洁和专业的来回答问题。如果无法从中得到答案，请说 “根据已知信息无法回答该问题”，'
    '不允许在答案中添加编造成分，答案请使用中文。 </指令>\n'
    '<已知信息>{{ context }}</已知信息>\n'
    '<问题>{{ question }}</问题>\n'
)

# Toy "retrieved" context the model is expected to reason from.
context = '1 + a = 10\nx ** 2 = a'
query = 'x = ? 请详述解题过程。'

# Template format name passed to langchain; matches the {{ ... }} syntax above.
xtype = 'jinja2'

tpl = ChatMessagePromptTemplate.from_template(xraw, xtype, role='user')
print(tpl)

sep('history')


class History(BaseModel):
    """A single chat-history message (pydantic model).

    Can be constructed from a dict, e.g.::

        h = History(**{"role": "user", "content": "你好"})

    The original docstring also claims it can be converted to a tuple, e.g.
    ``h.to_msy_tuple = ("human", "你好")`` — NOTE(review): no such method is
    defined in this chunk (and the name looks like a typo for
    ``to_msg_tuple``); verify against the full class elsewhere.
    """
    # Speaker role, e.g. 'user' or 'assistant'; Field(...) marks it required.
    role: str = Field(...)
    # Message text; required.
    content: str = Field(...)
    
    
# A short fake dialogue to replay ahead of the real question.
history = [
    History(role='user', content='Hi~~'),
    History(role='assistant', content='Eng Heng?'),
]
print(history)

sep('chat message prompt tpl, assistant')
# One message template per history entry, each keeping its original role.
tpl2 = [
    ChatMessagePromptTemplate.from_template(msg.content, xtype, role=msg.role)
    for msg in history
]
print(tpl2)

sep('chat prompt tpl')
# Final prompt: replayed history followed by the templated user question.
chat_prompt = ChatPromptTemplate.from_messages([*tpl2, tpl])
print(chat_prompt)

sep('LLM')


async def infer():
    """Run the chat chain with streaming on and print tokens as they arrive.

    The chain is driven in a background task; tokens are consumed here via the
    async-iterator callback handler. Any chain error is dumped to stderr, and
    the handler is always marked done so the token loop terminates.
    """
    handler = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(
        streaming=True,
        verbose=True,
        callbacks=[handler],
        openai_api_key="EMPTY",
        openai_api_base="http://192.168.31.227:20000/v1",
        model_name='qwen-api',
        temperature=0.0,
        max_tokens=4096,
    )
    chain = LLMChain(prompt=chat_prompt, llm=llm)

    async def drive_chain():
        # Drive the LLM call; errors go to stderr, and done is set no matter
        # what so the consumer loop below is never left hanging.
        try:
            await chain.acall({'context': context, 'question': query})
        except Exception:
            print(traceback.format_exc(), flush=True, file=sys.stderr)
        finally:
            handler.done.set()

    producer = asyncio.create_task(drive_chain())

    # Consume streamed tokens until the handler signals completion.
    async for token in handler.aiter():
        print(token, flush=True, end='')

    await producer
    
    
if __name__ == '__main__':
    # Guard the entry point so importing this module does not immediately
    # fire a network call to the LLM endpoint; behavior as a script is unchanged.
    asyncio.run(infer())
