import asyncio
import json
import traceback
from typing import AsyncIterable, Awaitable

from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate

from server.chat.utils import History

# Connection settings for the local OpenAI-compatible inference server.
IP = '127.0.0.1'
PORT = '20000'  # kept as a string: only ever interpolated into the base URL

# Request parameters sent with every chat completion.
model_name = 'Qwen-1_8B-Chat-Int4'
temperature = 0.0   # deterministic decoding
max_tokens = 2048   # per-response token cap


async def wrap_done(fn: Awaitable, event: asyncio.Event):
    """Wrap an awaitable with a event to signal when it's done or an exception is raised."""
    try:
        await fn
    except Exception as e:
        print(e)
        # TODO: handle exception
        msg = f"Caught exception: {e}"
        print(f'{e.__class__.__name__}: {msg}', exc_info=e)
    finally:
        # Signal the aiter to stop.
        event.set()


async def chat_iterator(query: str) -> AsyncIterable[str]:
    """Stream the model's answer to *query* as JSON-encoded token chunks.

    Runs the LLM chain as a background task while iterating the streaming
    callback's token queue, yielding one JSON string per token.
    """
    # The streaming handler MUST be attached to the model's callbacks so
    # tokens are pushed into its async iterator as they arrive.
    handler = AsyncIteratorCallbackHandler()

    llm = ChatOpenAI(
        streaming=True,
        verbose=True,
        callbacks=[handler],  # ESSENTIAL !
        openai_api_key="EMPTY",
        openai_api_base=f"http://{IP}:{PORT}/v1",  # /v1 !
        model_name=model_name,
        temperature=temperature,
        max_tokens=max_tokens,
        # openai_proxy=config.get("openai_proxy"),
    )

    # Single-turn prompt: the raw user input is substituted for '{{ input }}'.
    user_msg = History(role="user", content='{{ input }}').to_msg_template(False)
    prompt = ChatPromptTemplate.from_messages([user_msg])

    chain = LLMChain(prompt=prompt, llm=llm, memory=None)

    # Kick off the chain in the background; tokens flow through `handler`.
    worker = asyncio.create_task(chain.acall({'input': query}))
    print('after asyncio.create task chain acall')

    print('before async for')
    async for piece in handler.aiter():
        yield json.dumps({
            'text': piece,
            'message_id': '0x0001',
        }, ensure_ascii=False)
    print('after async for')

    print('before await task')
    await worker
    print('await task over')


async def main():
    """Drive ``chat_iterator`` with a demo prompt and print the streamed text."""
    demo_query = '你是一个讲故事的人，请讲一个50字的科幻故事。'

    # Each yielded chunk is a JSON string; print just its 'text' token.
    async for chunk in chat_iterator(demo_query):
        print(json.loads(chunk)['text'], end='', flush=True)
    print(flush=True)
    print('Output over')


if __name__ == '__main__':
    # Guard the entry point so merely importing this module does not start a
    # network-dependent chat run (the original executed unconditionally).
    asyncio.run(main())
    print('asyncio run main over')
