from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from pathlib import Path
import json
from config import MODEL_TYPE, CHROMA_CONFIG
from models.document import Knowledge
from chromadb import PersistentClient
from chromadb.config import Settings
from llm.model_factory import ModelFactory
from typing import Union, List, Optional, Dict, Any
from llm.model_service import ModelService
from llm.model_registry import ModelRegistry
from model_config import DEFAULT_EMBED_PROVIDER, DEFAULT_EMBED_MODEL

class ChatService:
    """Retrieval-augmented chat service.

    Combines a chat model client (from ModelFactory), an embedding service
    (ModelService/ModelRegistry) and a Chroma vector store to answer user
    questions from a knowledge base, streaming the response.
    """

    def __init__(self):
        """Create the service with the default model client."""
        self.model_client = ModelFactory.create_model()

    def get_available_models(self) -> List[str]:
        """Return the model names currently offered by the model client.

        Returns:
            List[str]: available model names; an empty list on any backend
            error (best-effort -- a listing failure must not break callers).
        """
        try:
            return self.model_client.get_available_models()
        except Exception as e:
            print(f"Error fetching models: {e}")
            return []

    def get_embeddings(self, text: Union[str, List[str]], model_name: Optional[str] = None) -> List[List[float]]:
        """Embed text with the configured embedding provider.

        Args:
            text: a single string or a list of strings to embed.
            model_name: optional combined model name; split by
                ModelRegistry.split_model_name into (provider, model).

        Returns:
            List[List[float]]: one embedding vector per input text.
        """
        model_tuple = ModelRegistry.split_model_name(model_name)
        return ModelService.call(
            provider_name=model_tuple[0],
            model_name=model_tuple[1],
            mode="embed",
            stream=False,
            text=text,
        )

    def get_retriever(self, collection_name: str, embedding_model: Optional[str] = None, top_k: int = 3):
        """Build a Chroma retriever over a collection.

        Args:
            collection_name: Chroma collection to search.
            embedding_model: embedding model name forwarded to get_embeddings.
            top_k: number of documents the retriever returns.

        Returns:
            A LangChain retriever with search_kwargs={"k": top_k}.
        """
        # Adapter exposing the embed_documents/embed_query interface that
        # LangChain's Chroma wrapper expects from an embedding function.
        class _EmbeddingAdapter:
            def __init__(self, embed_fn, model_name):
                self.embed_fn = embed_fn
                self.model_name = model_name

            def embed_documents(self, texts: List[str]) -> List[List[float]]:
                return self.embed_fn(texts, self.model_name)

            def embed_query(self, text: str) -> List[float]:
                # The embedding service returns a list of vectors even for a
                # single query string, so take the first (and only) vector.
                return self.embed_fn(text, self.model_name)[0]

        vector_db = Chroma(
            client=PersistentClient(
                path=str(Path(CHROMA_CONFIG['path']).resolve()),
                settings=Settings(anonymized_telemetry=False)
            ),
            collection_name=collection_name,
            embedding_function=_EmbeddingAdapter(self.get_embeddings, embedding_model),
        )
        return vector_db.as_retriever(search_kwargs={"k": top_k})

    def stream_chat(self, question: str, knowledge: "Knowledge", model_name: Optional[str] = None, top_k: int = 3):
        """Answer a question from the knowledge base, streaming chunks.

        Args:
            question: the user question.
            knowledge: knowledge-base descriptor providing collection_name
                and embedding_model.
            model_name: optional combined chat model name; split by
                ModelRegistry.split_model_name.
            top_k: number of documents to retrieve.

        Yields:
            str: response chunks with newlines encoded as "[[BR]]" so they
            survive SSE transport; a fallback message when nothing is
            retrieved; an error message on failure.
        """
        try:
            # Retrieve the documents relevant to the question.
            retriever = self.get_retriever(
                collection_name=knowledge.collection_name,
                embedding_model=knowledge.embedding_model,
                top_k=top_k
            )
            docs = retriever.get_relevant_documents(question)

            if not docs:
                yield "我无法从知识库中找到相关信息。"
                return

            # Build the context block: number each document and separate them
            # so the model can tell retrieved passages apart.
            # Bug fix: interpolate doc.page_content, not the Document object
            # itself, which would leak the object repr/metadata into the prompt.
            context = "\n".join(
                f"【文档{i}】：\n{doc.page_content}\n\n"
                for i, doc in enumerate(docs, 1)
            )

            prompt = f"【指令】根据已知信息，简洁和专业的来回答问题。如果无法从中得到答案，请说 “根据已知信息无法回答该问题”，不允许在答案中添加编造成分，答案请使用中文。\n\n【已知信息】{context}"

            prompt = self.truncate_prompt(prompt)  # keep within the model's context limit

            messages = [
                    {"role": "system", "content": prompt },
                    {"role": "user", "content": question }
                ]

            model_tuple = ModelRegistry.split_model_name(model_name)
            # Generate the answer with the chat model.
            response = self.model_client.chat(
                messages=messages,
                model=model_tuple[1],
                stream=True  # enable streaming response
            )

            def encode_newline(chunk):
                # Replace every "\n" with "[[BR]]" so SSE framing does not
                # swallow the line breaks on the wire.
                return chunk.replace('\n', '[[BR]]')

            if isinstance(response, (str, bytes)):
                # Non-streaming backend: a single payload string.
                yield encode_newline(response)
            else:
                # Streaming backend: forward chunks as they arrive.
                for chunk in response:
                    if chunk:
                        yield encode_newline(chunk)

        except Exception as e:
            yield f"发生错误: {str(e)}"

    def generate_streaming_response(self, chat_service, question, knowledge, model_name, top_k):
        """Produce an SSE-formatted streaming answer for a question.

        Args:
            chat_service: the ChatService instance to use (kept in the
                signature for backward compatibility; typically ``self``).
            question: the user question.
            knowledge: knowledge-base descriptor providing collection_name
                and embedding_model.
            model_name: model name passed straight to the client's generate().
                NOTE(review): unlike stream_chat, this is not split with
                ModelRegistry.split_model_name -- confirm callers pass the
                bare model name here.
            top_k: number of documents to retrieve.

        Yields:
            str: "data: ..." SSE frames, always terminated by "data: [DONE]".
        """
        try:
            # Retrieve the documents relevant to the question.
            retriever = chat_service.get_retriever(
                collection_name=knowledge.collection_name,
                embedding_model=knowledge.embedding_model,
                top_k=top_k
            )
            docs = retriever.get_relevant_documents(question)
            context = "\n".join(doc.page_content for doc in docs)

            # Build the prompt (runtime string kept verbatim).
            prompt = f"""基于以下内容回答问题。如果无法从提供的内容中找到答案，请说'我无法从提供的内容中找到答案'。

    文档内容：
    {context}

    问题：{question}

    回答："""

            # Stream the generation as SSE data frames.
            for chunk in chat_service.model_client.generate(
                prompt=prompt,
                model=model_name,
                stream=True
            ):
                if chunk:
                    yield f"data: {chunk}\n\n"

            yield "data: [DONE]\n\n"

        except Exception as e:
            print(f"Streaming error: {e}")
            yield f"data: Error occurred: {str(e)}\n\n"
            yield "data: [DONE]\n\n"

    def truncate_prompt(self, prompt: str, max_length: int = 4000) -> str:
        """Truncate a prompt to a length budget on sentence boundaries.

        Args:
            prompt: the input prompt text.
            max_length: maximum allowed length in characters (default 4000).

        Returns:
            str: the prompt unchanged if it fits; otherwise the longest
            prefix of whole sentences within max_length, suffixed with
            "...(content truncated)". If even the first sentence exceeds
            max_length, falls back to a hard character cut so some content
            is always kept (previously this path returned no content at all).
        """
        if len(prompt) <= max_length:
            return prompt

        # Split on common sentence endings (ASCII and CJK), keeping the
        # delimiter attached to its sentence.
        sentences = []
        current = ""
        for char in prompt:
            current += char
            if char in '.!?。！？' and current.strip():
                sentences.append(current.strip())
                current = ""
        if current.strip():  # any trailing text without a terminator
            sentences.append(current.strip())

        # Accumulate whole sentences while they fit the budget.
        result = ""
        for sentence in sentences:
            if len(result + sentence) > max_length:
                break
            result += sentence + " "
        result = result.strip()

        # Bug fix: if the very first sentence was longer than max_length,
        # result is empty -- hard-cut instead of returning nothing.
        if not result:
            result = prompt[:max_length]

        # Mark that content was dropped.
        if len(result) < len(prompt):
            result += "...(content truncated)"

        return result