import re

from common.config import conf
import ollama

from llm.llm import LLM


class OllamaAPI(LLM):
    """LLM backend that talks to an Ollama server via the `ollama` client."""

    def __init__(self):
        super().__init__()
        # All connection/generation settings come from config, with defaults.
        self.model_api = conf().get("ollama_api", "http://localhost:11434")
        self.model_name = conf().get("model_name", "llama3.1:8b")
        self.model_options = conf().get("ollama_options", {"top_p": 0.9})
        self.model_client = ollama.Client(host=self.model_api)

    def reply(self, messages):
        """Return the complete (non-streamed) answer, with newlines stripped."""
        response = self.model_client.chat(
            model=self.model_name,
            messages=messages,
            stream=False,
            options=self.model_options,
        )
        return response['message']['content'].replace('\n', '')

    def reply_stream(self, messages):
        """Stream the answer, yielding one sentence-like fragment at a time.

        Incoming chunks are buffered and split on the CJK punctuation marks
        ，。！？; whatever remains without a terminator is flushed at the end.
        """
        stream = self.model_client.chat(
            model=self.model_name,
            messages=messages,
            stream=True,
            options=self.model_options,
        )
        # Compile once; reused on every chunk of the stream.
        sentence_re = re.compile(r'[^，。！？]*[，。！？]')
        pending = ""
        for part in stream:
            piece = part['message']['content']
            if not piece:
                continue
            pending += piece
            match = sentence_re.search(pending)
            while match:
                yield match.group()
                pending = pending[match.end():]
                match = sentence_re.search(pending)
        if pending.strip():
            yield pending


if __name__ == '__main__':
    # Manual smoke test: requires a reachable Ollama server.
    api = OllamaAPI()
    conversation = [
        {'role': 'system', 'content': '请你扮演一个知识百科,用简短的回复回复内容,回答不要超过100字'},
        {"role": "user", "content": "你好"},
    ]
    print(api.reply(conversation))
