import asyncio
import json
import re
import uuid
from typing import Any, AsyncGenerator, Dict, List, Optional

from langchain.callbacks.manager import CallbackManager
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Ollama

from config.index import Config

class OllamaChatBase:
    """Ollama-backed chat helper with per-session conversation memory.

    Keeps one LangChain ``ConversationChain`` per session id, streams model
    output as SSE-style ``{"event", "data"}`` dicts, and filters
    ``<think>...</think>`` reasoning blocks out of the stored history so they
    never re-enter later prompts.
    """

    def __init__(self, verbose: bool = False):
        """Read Ollama host/model/temperature from config and build the LLM.

        Args:
            verbose: when True, echo prompts and streamed chunks to stdout.
        """
        self.cfg = Config()
        self.host = self.cfg.get('ollama.host')
        self.model = self.cfg.get('ollama.model')
        self.verbose = verbose

        # Initialize the model client; an empty CallbackManager disables
        # LangChain's default callback output.
        self.llm = Ollama(
            base_url=self.host,
            model=self.model,
            callback_manager=CallbackManager([]),
            temperature=self.cfg.get('ollama.temperature'),
        )

        # Custom prompt template. The template text is a runtime string sent
        # to the model (Chinese system prompt) and is kept verbatim.
        self.prompt_template = PromptTemplate(
            input_variables=["history", "input"],
            template="""你是一个专业的AI助手，请用中文回答用户的问题。

当前对话历史：
{history}

用户: {input}
助手:"""
        )

        # One ConversationChain (which owns the memory) per session id.
        self.conversations: Dict[str, ConversationChain] = {}

    def _filter_think_content(self, text: str) -> str:
        """Strip all ``<think>...</think>`` blocks (including newlines) from *text*."""
        return re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL).strip()

    def _get_conversation_chain(self, session_id: str) -> ConversationChain:
        """Return the session's chain, creating and caching it on first use."""
        if session_id not in self.conversations:
            # Prefixes must match the roles used in the prompt template.
            memory = ConversationBufferMemory(
                human_prefix="用户",
                ai_prefix="助手"
            )
            self.conversations[session_id] = ConversationChain(
                llm=self.llm,
                memory=memory,
                prompt=self.prompt_template,
                verbose=self.verbose
            )
        return self.conversations[session_id]

    def clear_history(self, session_id: str):
        """Drop the stored memory and chain for *session_id* (no-op if absent)."""
        if session_id in self.conversations:
            self.conversations[session_id].memory.clear()
            del self.conversations[session_id]

    async def stream_chat(self,
                         query: str,
                         session_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
        """Stream a chat answer as ``{"event", "data"}`` dicts.

        Yields ``content`` chunks while the model streams, then one final
        ``done`` event; on failure yields a single ``error`` event instead of
        raising.

        Args:
            query: the user's message.
            session_id: stable id for multi-turn memory. When omitted, a
                throw-away session is created for this call and discarded
                afterwards.
        """
        # BUG FIX: anonymous sessions previously used "temp_" + str(id(query))
        # -- id() values are reused after GC (collision-prone) and the created
        # ConversationChain was never removed from self.conversations, leaking
        # one chain per anonymous call. Use a UUID and clean up in `finally`.
        is_temp = session_id is None
        if is_temp:
            session_id = "temp_" + uuid.uuid4().hex
        try:
            # Get or create the session's conversation chain.
            conversation = self._get_conversation_chain(session_id)

            if self.verbose:
                print(f"\nUser Query: {query}")

            # Build the prompt from stored history, with <think> blocks removed.
            history = conversation.memory.load_memory_variables({})
            history_str = history.get("history", "")
            if isinstance(history_str, str):
                history_str = self._filter_think_content(history_str)

            prompt = self.prompt_template.format(
                history=history_str,
                input=query
            )

            if self.verbose:
                print("\n--- Prompt ---")
                print(prompt)
                print("-------------")

            # Stream directly from the LLM; the chain object is only used as a
            # memory holder, generation bypasses it for native token streaming.
            full_response = ""
            async for chunk in self.llm.astream(prompt):
                if self.verbose:
                    print(chunk, end="", flush=True)
                full_response += chunk
                yield {
                    "event": "message",
                    "data": json.dumps({
                        "content": chunk,
                        "type": "content"
                    }, ensure_ascii=False)
                }

            # Persist only the filtered answer so reasoning tags never leak
            # back into later prompts.
            filtered_response = self._filter_think_content(full_response)
            conversation.memory.save_context({"input": query}, {"output": filtered_response})

            # Completion marker for the client.
            yield {
                "event": "message",
                "data": json.dumps({
                    "content": "",
                    "type": "done"
                }, ensure_ascii=False)
            }

        except Exception as e:
            error_msg = str(e)
            if self.verbose:
                print(f"\nError: {error_msg}")
            yield {
                "event": "error",
                "data": json.dumps({
                    "error": error_msg,
                    "type": "error"
                }, ensure_ascii=False)
            }
        finally:
            # Throw-away sessions must not accumulate in self.conversations.
            if is_temp:
                self.clear_history(session_id)