from langchain_ollama import OllamaEmbeddings
from langchain_chroma import Chroma
import settings
import os
from langchain_community.document_loaders import TextLoader, PyPDFLoader, UnstructuredWordDocumentLoader, UnstructuredHTMLLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

class ZLRagAction():
    """Manage a persistent Chroma vector store backed by Ollama embeddings.

    Supports ingesting text / PDF / Word / HTML files as embedded chunks
    and deleting all chunks that originated from a given source file.
    """

    def __init__(self):
        # Chinese-optimized embedding model served by a local Ollama instance.
        self.embedding = OllamaEmbeddings(model="shaw/dmeta-embedding-zh:latest")
        self.persist_directory = settings.CHROMA_PRESIST_DIRECTORY
        self.collection_name = "fastapi_agent"
        self.vector_db = Chroma(
            collection_name=self.collection_name,
            embedding_function=self.embedding,
            persist_directory=self.persist_directory,
        )

    async def file_to_vector(self, file_path: str):
        """Load ``file_path``, split it into chunks and add them to the vector DB.

        Args:
            file_path: Path to a .txt/.text, .pdf, .doc/.docx, .html/.htm file.

        Raises:
            Exception: if the file extension is not supported.
        """
        # Normalize case so ".TXT" / ".Pdf" etc. are also recognized.
        ext = os.path.splitext(file_path)[-1].lower()
        # BUG FIX: the original condition `ext == ".txt" or ".text"` was always
        # truthy (a non-empty string literal), so every file — including PDFs —
        # was routed to TextLoader and the error branch was unreachable.
        if ext in (".txt", ".text"):
            loader = TextLoader(file_path, encoding="utf-8")
        elif ext == ".pdf":
            loader = PyPDFLoader(file_path)
        elif ext in (".doc", ".docx"):
            loader = UnstructuredWordDocumentLoader(file_path)
        elif ext in (".html", ".htm"):
            loader = UnstructuredHTMLLoader(file_path)
        else:
            raise Exception(f"不支持文件类型{ext}")
        docs = loader.load()
        # Regex separators prefer Chinese sentence/clause punctuation before
        # falling back to whitespace and character-level splits.
        text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n", "(?<=[。！？])", "(?<=[，；、])", " ", ""],
            keep_separator=True,
            is_separator_regex=True,
            chunk_size=500,
            chunk_overlap=50
        )
        splitted_text = text_splitter.split_documents(docs)
        # Use the async API so this coroutine does not block the event loop
        # (consistent with `adelete` in delete_file_vector below).
        await self.vector_db.aadd_documents(splitted_text)

    async def delete_file_vector(self, file_path: str):
        """Delete every stored chunk whose `source` metadata equals ``file_path``."""
        result = self.vector_db.get(where={"source": file_path})
        # Guard against a missing/empty "ids" entry before issuing the delete.
        ids = result.get("ids", [])
        if ids:
            await self.vector_db.adelete(ids)


