from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.llms.base import LLM
from langchain.chains import ConversationalRetrievalChain
import requests
import os
from typing import Any, List, Optional
from pydantic import Field

class GuijiLLM(LLM):
    """LangChain LLM wrapper for the SiliconFlow ("guiji") chat-completions API."""
    # Bearer token sent with every request.
    api_key: str = Field(description="硅基流动API密钥")
    # Chat model identifier passed to the API.
    model_name: str = Field(default="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")
    # Fixed chat-completions endpoint.
    api_url: str = "https://api.siliconflow.cn/v1/chat/completions"

    def __init__(self, api_key: str, model_name: str = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"):
        # Keep the positional constructor for backward compatibility with
        # existing callers; pydantic validation still runs via super().
        super().__init__(api_key=api_key, model_name=model_name)

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Send a single-turn chat completion request and return the reply text.

        Args:
            prompt: The user message to send.
            stop: Optional stop sequences; forwarded to the API so chains
                that rely on them behave correctly (previously ignored).

        Raises:
            Exception: If the API responds with a non-200 status code.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}]
        }
        # Forward stop sequences when provided.
        if stop:
            data["stop"] = stop

        # A timeout prevents the whole chain from hanging on a stalled connection.
        response = requests.post(self.api_url, headers=headers, json=data, timeout=60)
        if response.status_code == 200:
            return response.json()["choices"][0]["message"]["content"]
        else:
            raise Exception(f"API调用失败: {response.text}")

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses to label this LLM type.
        return "guiji"

class LocalRAGSystem:
    """Local retrieval-augmented generation (RAG) system.

    Embeds ``.txt`` documents with a sentence-transformers model, stores the
    chunks in a persistent Chroma vector database, and answers questions via
    a ConversationalRetrievalChain backed by the SiliconFlow LLM.
    """

    def __init__(self, persist_directory="./chroma_db", guiji_api_key=None):
        """Initialize embeddings and the LLM client.

        Args:
            persist_directory: Directory where the Chroma database is persisted.
            guiji_api_key: SiliconFlow API key; falls back to the
                ``GUIJI_API_KEY`` environment variable.

        Raises:
            ValueError: If no API key is supplied by argument or environment.
        """
        # Embedding model used for both documents and queries.
        self.embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2"
        )
        # The vector store is created lazily in load_documents().
        self.persist_directory = persist_directory
        self.vectorstore = None

        # Resolve the API key exactly once: explicit argument wins over the
        # environment (the original read the env var twice — check and use).
        api_key = guiji_api_key or os.getenv("GUIJI_API_KEY")
        if not api_key:
            raise ValueError("必须提供硅基流动API密钥")

        self.llm = GuijiLLM(
            api_key=api_key,
            model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
        )
        # Built lazily on first query() or via initialize_qa_chain().
        self.qa_chain = None

    def load_documents(self, documents_dir):
        """Load every ``.txt`` file under *documents_dir* into the vector store.

        Files are read as UTF-8 first; if that fails, a single retry assumes
        GBK encoding. The resulting documents are chunked and written to a
        persistent Chroma database.
        """
        def _load(encoding):
            # One loader per encoding attempt; avoids duplicating the config.
            return DirectoryLoader(
                documents_dir,
                glob="**/*.txt",
                loader_cls=TextLoader,
                loader_kwargs={'encoding': encoding}
            ).load()

        try:
            documents = _load('utf-8')
        except Exception:
            # TextLoader wraps decode errors in RuntimeError, so this catch
            # cannot be narrowed to UnicodeDecodeError; retry once with GBK.
            # If GBK also fails, the exception propagates to the caller.
            documents = _load('gbk')

        # Split into overlapping chunks so retrieval returns focused passages.
        text_splitter = CharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        split_docs = text_splitter.split_documents(documents)

        # Build (or rebuild) the persistent vector store from the chunks.
        self.vectorstore = Chroma.from_documents(
            documents=split_docs,
            embedding=self.embeddings,
            persist_directory=self.persist_directory
        )
        self.vectorstore.persist()
        print(f"成功加载 {len(split_docs)} 个文档片段")

    def initialize_qa_chain(self):
        """Create the conversational retrieval chain.

        Raises:
            Exception: If load_documents() has not been called yet.
        """
        if not self.vectorstore:
            raise Exception("请先加载文档!")

        self.qa_chain = ConversationalRetrievalChain.from_llm(
            llm=self.llm,
            retriever=self.vectorstore.as_retriever(),
            return_source_documents=True,
            verbose=True
        )

    def query(self, query_text, chat_history=None):
        """Retrieve relevant documents and generate an answer.

        Args:
            query_text: The user's question.
            chat_history: Optional list of prior (question, answer) turns;
                defaults to an empty history.

        Returns:
            Dict with ``answer`` (the generated reply) and
            ``source_documents`` (the retrieved passages).
        """
        # Lazily build the chain so callers may skip initialize_qa_chain().
        if not self.qa_chain:
            self.initialize_qa_chain()

        chat_history = chat_history or []

        result = self.qa_chain({
            "question": query_text,
            "chat_history": chat_history
        })

        return {
            "answer": result["answer"],
            "source_documents": result["source_documents"]
        }