
import chatglm_cpp
from PIL import Image
from tool_register import get_tools, run_function, get_tool_desc
from loguru import logger

# Load the GGML-quantized ChatGLM3 model once at import time.
# NOTE(review): heavy I/O side effect at module level; the path is relative
# to the process working directory — confirm it matches deployment layout.
pipeline = chatglm_cpp.Pipeline("../chatglm3-ggml.bin")

def run_conversation(query: str, stream=False, tools=None, max_retry=5):
    """Run a (possibly tool-augmented) chat turn and return the final reply text.

    Repeatedly calls the model; whenever the model emits a tool call, executes
    it via ``run_function`` and feeds the result back as an ``observation``
    message, up to ``max_retry`` rounds.

    Args:
        query: The user's question.
        stream: Accepted for API compatibility but currently ignored — the
            pipeline is always called with ``stream=False`` (see note below).
        tools: Optional tool description string, injected as a system message.
        max_retry: Maximum number of tool-call rounds before giving up.

    Returns:
        The model's final reply content, or ``None`` if no final reply was
        produced within ``max_retry`` rounds.
    """
    # Cap tool output so a huge observation can't blow the context window.
    OBSERVATION_MAX_LENGTH = 1024

    messages = []
    if tools:
        messages.append(chatglm_cpp.ChatMessage(role="system", content=tools))
    messages.append(chatglm_cpp.ChatMessage(role="user", content=query))

    for _ in range(max_retry):
        reply = pipeline.chat(
            messages,
            max_length=2048,
            max_context_length=2048,
            do_sample=True,  # BUG FIX: was do_sample=0.95 — a float where a bool flag is expected
            top_k=0,
            top_p=0.8,
            temperature=0.8,
            repetition_penalty=1.0,
            num_threads=0,  # 0 lets the backend pick the thread count
            stream=False,  # NOTE(review): the `stream` parameter is not honored here —
                           # streaming would change the return type; confirm before wiring it up.
        )

        # No tool call -> this is the model's final answer.
        if not reply.tool_calls:
            logger.info(f"Final Reply: \n{reply.content}")
            return reply.content

        # Keep the assistant's tool-call message in the history so the model
        # can see its own call when it reads the observation next round.
        messages.append(reply)

        # The model is expected to emit exactly one tool call per round;
        # unpacking raises loudly if that assumption breaks.
        (tool_call,) = reply.tool_calls
        if tool_call.type == "function":
            logger.info(f"function: \n{tool_call.function}")
            observation = run_function(tool_call.function.name, tool_call.function.arguments)
        else:
            # BUG FIX: previously `observation` was left unbound for
            # non-function tool calls, causing a NameError below.
            observation = f"Unsupported tool call type: {tool_call.type}"
        logger.info(f"observation: \n{observation}")

        # ChatMessage content must be a string; tools may return other types.
        if not isinstance(observation, str):
            observation = str(observation)
        if len(observation) > OBSERVATION_MAX_LENGTH:
            observation = observation[:OBSERVATION_MAX_LENGTH] + " [TRUNCATED]"
        messages.append(chatglm_cpp.ChatMessage(role="observation", content=observation))

    # BUG FIX: previously fell off the loop silently; make exhaustion visible.
    logger.warning(f"No final reply after {max_retry} tool-call rounds")
    return None

if __name__ == "__main__":
    # Demo: ask the model a question that should trigger a tool call
    # (e.g. the random-number tool registered in tool_register).
    logger.info("\n=========== next conversation ===========")
    run_conversation("生成一个随机数", tools=get_tool_desc(), stream=True)
