import requests
import json
import re
import itertools


class OllamaClinet:
    """Streaming chat client for a local Ollama server.

    Each instance represents one conversation: ``chat``/``chat_print`` keep
    the running message history on the instance, while ``single_chat`` /
    ``single_chat_print`` are stateless one-shot calls.

    NOTE(review): the class name contains a typo ("Clinet" for "Client").
    It is kept unchanged because other code in this file refers to it by
    this exact name; renaming would break callers.
    """

    # Compiled once: matches any of the Markdown marker chars #, -, `, *.
    # (The original passed re.UNICODE, which is a no-op for these literal
    # ASCII characters, so dropping it preserves behavior.)
    _MARKDOWN_RE = re.compile(r'[#\-`*]')

    def __init__(
            self,
            host="localhost",
            port="11434",
            model="qwen3:4b-instruct",
            keep_alive=300,
            think=False,
            temperature=0.6,
            num_ctx=4096,
            top_p=0.8,
            top_k=20,
            repeat_penalty=1,
            is_default_options=True,
            clean_markdown=False,
            timeout=None,
            ):
        """Configure the connection and sampling options.

        Args:
            host, port: address of the Ollama server.
            model: model tag to run.
            keep_alive: seconds the model stays loaded after the request.
            think: forward Ollama's "think" flag.
            temperature, num_ctx: always sent in "options".
            top_p, top_k, repeat_penalty: only sent when
                ``is_default_options`` is False.
            is_default_options: True → send the minimal options dict and let
                the server use its defaults for the rest.
            clean_markdown: strip Markdown marker characters from each chunk.
            timeout: optional requests timeout in seconds (connect/read);
                ``None`` keeps the original wait-forever behavior.
        """
        self.url = f"http://{host}:{port}/api/chat"
        self.model = model
        self.keep_alive = keep_alive
        self.think = think
        # Sampling options forwarded verbatim in the request's "options" field.
        self.options = {
            "temperature": temperature,
            "num_ctx": num_ctx,
        }
        if not is_default_options:
            self.options.update({
                "top_p": top_p,
                "top_k": top_k,
                "repeat_penalty": repeat_penalty,
            })
        self.clean_markdown = clean_markdown
        self.timeout = timeout
        self.history = []

    def _clean_markdown(self, text):
        """Return *text* with every #, -, ` and * character removed."""
        return self._MARKDOWN_RE.sub('', text)

    def _stream_response(self, message, add_history=False):
        """Yield the assistant's reply chunk by chunk (streaming generator).

        With ``add_history=True`` the concatenated assistant reply is
        appended to ``self.history`` once the stream is fully consumed.

        Raises:
            requests.HTTPError: if the server answers with an error status.
        """
        data = {
            "model": self.model,
            "keep_alive": self.keep_alive,
            "messages": message,
            "stream": True,
            "think": self.think,
            "options": self.options,
        }
        with requests.post(self.url, json=data, stream=True,
                           timeout=self.timeout) as response:
            # Fail fast on HTTP errors instead of silently yielding nothing.
            response.raise_for_status()
            assistant_content = ''
            for line in response.iter_lines():
                # Blank keep-alive lines can appear on flaky networks; skip them.
                if not line:
                    continue
                # Each non-empty line is one JSON object, e.g.
                # {"model":...,"message":{"role":"assistant","content":"字"},"done":false}
                chunk = json.loads(line)
                if "message" not in chunk:
                    continue
                content = chunk["message"]["content"]
                if self.clean_markdown:
                    content = self._clean_markdown(content)
                if add_history:
                    assistant_content += content
                yield content
            if add_history:
                self.history.append(
                    {'role': 'assistant', 'content': assistant_content})

    def _drain(self, generator, is_printing):
        """Concatenate a chunk generator into one string, optionally echoing
        each chunk to stdout as it arrives."""
        pieces = []
        for chunk in generator:
            if is_printing:
                print(chunk, end="", flush=True)
            pieces.append(chunk)
        return ''.join(pieces)

    def single_chat(self, user_prompt, system_prompt=''):
        """One-shot chat with no memory; returns a chunk generator."""
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_prompt})
        return self._stream_response(messages)

    def single_chat_print(self, user_prompt, system_prompt='', is_printing=True):
        """One-shot chat; returns the full reply as a single string."""
        return self._drain(self.single_chat(user_prompt, system_prompt),
                           is_printing)

    def chat(self, user_prompt, system_prompt=''):
        """Multi-turn chat with memory; returns a chunk generator.

        History is updated eagerly (before the stream is consumed); the
        assistant's reply is appended once the generator is exhausted.
        """
        if system_prompt:
            # Keep at most one system prompt in the history.
            self.history = [msg for msg in self.history
                            if msg["role"] != "system"]
            self.history.append({"role": "system", "content": system_prompt})
        self.history.append({"role": "user", "content": user_prompt})
        return self._stream_response(self.history, add_history=True)

    def chat_print(self, user_prompt, system_prompt='', is_printing=True):
        """Multi-turn chat; returns the full reply as a single string."""
        return self._drain(self.chat(user_prompt, system_prompt), is_printing)


class NaturalLanguageProcessing:
    """Routes user input to an LLM client and re-chunks the streamed reply
    into sentence-sized pieces (sized for speech synthesis)."""

    def __init__(
            self,
            system_name='小智',
            # Extensible: an OpenAI-backed client can be swapped in as long
            # as it exposes the same interface as OllamaClinet.
            llm_client=OllamaClinet
            ):
        self.system_name = system_name
        # Instantiate the client class for this session.
        self.llm_client = llm_client()

    def _merger(self, response, is_printing=False):
        """Merge token-sized chunks into larger, punctuation-aligned pieces.

        Two break levels are used; shorter pieces suit CosyVoice speech
        output. A piece is emitted once it reaches the level's chunk count
        AND the current chunk ends in one of that level's break characters.
        """
        # Chunk-count thresholds for the two break levels.
        level_strong = 8   # break on sentence-ending punctuation
        level_weak = 14    # break on weaker separators
        # The two character sets are disjoint, so the checks can't collide.
        strong_breaks = {'\n', ';', '。', '；', ',', '，', '！', '!', '?', '？'}
        weak_breaks = {' ', '：', ':', '、', '”', '）', ')'}
        count = 0
        buffer = ''
        for chunk in response:
            if is_printing:
                print(chunk, end="", flush=True)
            count += 1
            buffer += chunk
            last = chunk[-1] if chunk else ''
            if ((count >= level_weak and last in weak_breaks)
                    or (count >= level_strong and last in strong_breaks)):
                yield buffer
                buffer = ''
                count = 0
        # Bug fix: flush any trailing text that never hit a break character;
        # previously the tail of the reply was silently dropped.
        if buffer:
            yield buffer

    def _choice_mode(self, input_text):
        """Dispatch to instruction mode on the wake phrase, else chat mode."""
        if input_text.startswith("你好小智"):
            return self._instruction_mode(input_text)
        return self._chat_mode(input_text)

    def _instruction_mode(self, input_text):
        """Ask the LLM to handle the input as a command."""
        return self.llm_client.chat(input_text, system_prompt='进入指令模式')

    def _chat_mode(self, input_text):
        """Ask the LLM to handle the input as free conversation."""
        return self.llm_client.chat(input_text, system_prompt='进入对话模式')

    def print_something(self):
        """Demo: stream one reply and print the merged pieces line by line."""
        response = self._choice_mode("你好，写一篇文章,100字。")
        for piece in self._merger(response):
            print(piece)


if __name__ == "__main__":
    # Manual smoke test: needs a local Ollama server listening on 11434.
    nlp = NaturalLanguageProcessing()
    nlp.print_something()