
import requests
from .conversation.conversation import Conversation
from .conversation.base import Content,ContentText

class LLMClient:

    def __init__(self,url:str="127.0.0.1:7002") -> None:
        self.url = f"http://{url}/predict"
        pass
    
    def _extract_after_last_inst(self,text:str):
        # 找到最后一个 `[/INST]` 标记的位置
        last_index = text.rfind('[/INST]')
        
        # 如果找到标记，则截取其后的字符串
        if last_index != -1:
            return text[last_index + len('[/INST]'):]
        else:
            return ''  # 如果未找到标记，则返回空字符串

    
    def Predict(self, conversation: Conversation):
        # 对话数据 json
        json_data = conversation.to_json()
        data = { "json" : (None, json_data) }

        # 获取图像列表
        images_list = conversation.get_image_path_list()
        has_images = len(images_list) > 0
        if has_images:
            files_list = [
                ("images", open(image_path, "rb")) for image_path in images_list
            ]
            # print(files_list)

        # 发送请求
        if has_images:
            response = requests.post(self.url, data=data, files=files_list)
        else:
            response = requests.post(self.url, data=data)

        json_output = response.json()
        output = str(json_output["result"])
        # # 打印返回的结果
        # print(response.json())
        # print(output)

        real_output = self._extract_after_last_inst(output)
        real_output = real_output.strip()

        conversation.Add(Content(
            role="assistant",
            content=[ContentText(real_output)]
        ))

        return real_output,conversation

GlobalLLMClient = LLMClient("127.0.0.1:7002")
