import os
import uuid
from typing import List, Optional, Dict, Any
from pydantic import BaseModel
from fastapi import UploadFile, HTTPException
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
import chromadb
from openai import OpenAI

# SiliconFlow API client (OpenAI-compatible endpoint). Reads
# SILICONFLOW_API_KEY / SILICONFLOW_BASE_URL from the environment.
# NOTE(review): `client` is not referenced anywhere else in this module's
# visible code — presumably imported and used by other modules; confirm.
client = OpenAI(
    api_key=os.getenv("SILICONFLOW_API_KEY"),
    base_url=os.getenv("SILICONFLOW_BASE_URL")
)


class KnowledgeBase:
    """Manage a Chroma-backed document knowledge base.

    Ingests uploaded PDF/TXT/MD files, splits them into overlapping text
    chunks, embeds them with a HuggingFace sentence-transformer model, and
    supports similarity search over the stored chunks. Configuration is
    taken from environment variables with defaults.
    """

    def __init__(self):
        # Environment-driven configuration with sensible defaults.
        self.vector_db_path = os.getenv("VECTOR_DB_PATH", "./vector_store")
        self.collection_name = os.getenv("CHROMA_COLLECTION_NAME", "knowledge_base")
        self.embedding_model = os.getenv("EMBEDDING_MODEL",
                                         "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

        # Embedding model, pinned to CPU. NOTE: model weights are loaded
        # here, so constructing this class is slow on first run.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=self.embedding_model,
            model_kwargs={'device': 'cpu'}
        )

        # Recursive splitter: ~1000-char chunks with 200-char overlap to
        # preserve context across chunk boundaries.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )

        # Persistent Chroma vector store backing this knowledge base.
        self.vector_store = self._init_vector_store()

    def _init_vector_store(self) -> Chroma:
        """Create (or reopen) the persistent Chroma collection."""
        return Chroma(
            collection_name=self.collection_name,
            embedding_function=self.embeddings,
            persist_directory=self.vector_db_path
        )

    async def add_document(self, file: UploadFile) -> Dict[str, Any]:
        """Ingest an uploaded file into the knowledge base.

        The upload is spooled to a uniquely-named temporary file, loaded
        with a format-appropriate loader (.pdf via PyPDFLoader; .txt/.md
        via TextLoader), split into chunks, and embedded into the vector
        store.

        Returns a status dict with the chunk count and file size.
        Raises HTTPException(400) for unsupported file formats and
        HTTPException(500) for any other processing failure.
        """
        file_extension = os.path.splitext(file.filename)[1].lower()
        temp_path = f"./temp_{uuid.uuid4()}{file_extension}"

        try:
            content = await file.read()
            with open(temp_path, "wb") as buffer:
                buffer.write(content)

            # Pick a loader by extension; reject anything else up front.
            if file_extension == '.pdf':
                loader = PyPDFLoader(temp_path)
            elif file_extension in ['.txt', '.md']:
                loader = TextLoader(temp_path, encoding='utf-8')
            else:
                raise HTTPException(status_code=400, detail="不支持的文件格式")

            documents = loader.load()
            chunks = self.text_splitter.split_documents(documents)
            self.vector_store.add_documents(chunks)

            return {
                "status": "success",
                "message": f"成功添加文档 {file.filename}",
                "chunks_count": len(chunks),
                "file_size": len(content)
            }
        except HTTPException:
            # Bug fix: previously the blanket handler below re-wrapped the
            # deliberate 400 (unsupported format) as a 500. Re-raise
            # HTTPExceptions untouched so their status codes survive.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"文档处理失败: {str(e)}")
        finally:
            # Bug fix: previously the temp file leaked whenever loading,
            # splitting, or embedding raised. Clean up on every path.
            if os.path.exists(temp_path):
                os.remove(temp_path)

    def search_similar(self, query: str, k: int = 5) -> List[Document]:
        """Return the k most similar stored chunks for *query*.

        Raises HTTPException(500) if the underlying vector search fails.
        """
        try:
            return self.vector_store.similarity_search(query, k=k)
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"搜索失败: {str(e)}")

    def get_collection_info(self) -> Dict[str, Any]:
        """Return knowledge-base statistics (count, collection name, path).

        On failure returns an {"error": ...} dict rather than raising,
        so callers can render the message directly.
        """
        try:
            # NOTE: reaches into Chroma's private `_collection` handle to
            # get the document count — may break across library versions.
            collection = self.vector_store._collection
            count = collection.count()
            return {
                "document_count": count,
                "collection_name": self.collection_name,
                "vector_db_path": self.vector_db_path
            }
        except Exception as e:
            return {"error": f"获取统计信息失败: {str(e)}"}


# Module-level singleton. NOTE: constructing KnowledgeBase loads the
# embedding model and opens the Chroma store, so importing this module
# performs heavy work at import time.
knowledge_base = KnowledgeBase()