"""
1. Read one or more product-design templates.
2. Write feature vectors to CSV; each label is a text chunk.
3. At retrieval time, search the features from the file.
Q1: If the vector DB is organized around a single template, each input should be
    one module — but then how should chunking be defined?
    If organized across multiple templates, the input should be the template
    description; calls then go through an extra indirection layer, the DB needs
    an extra access-path column, and the token count becomes too large.
"""
from sys import path
path.append("../")

from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_chroma import Chroma

from utils import utils
FILE_DIR_PATH = utils.getRootDir() + '/knowledge/'

class LanchainRAG:
    """Minimal RAG helper: load a PDF, split it into overlapping character
    chunks, index them in an in-memory Chroma collection, and retrieve the
    most relevant chunks for a query as a single context string.
    """

    def __init__(self, model_name: str, filepath: str = None):
        """
        Args:
            model_name: HuggingFace embedding model name (e.g. 'BAAI/bge-m3').
            filepath: Path to the PDF knowledge file to index. Defaults to the
                project's knowledge/templete.pdf, matching the original
                hard-coded behavior, so existing callers are unaffected.
        """
        self.embedding_model_name = model_name
        self.embeddings = HuggingFaceEmbeddings(model_name=model_name)
        # Bug fix: filepath was stored but never used (loadFile hard-coded the
        # path); it is now honored, with the old path as the default.
        self.filepath = filepath if filepath is not None else FILE_DIR_PATH + 'templete.pdf'
        self.db_case = None

        self.initDB()

    def initDB(self):
        """Build the in-memory vector store from the configured PDF file."""
        pages = self.loadFile()
        chunks = self.splitPages(pages)
        self.db_case = Chroma.from_documents(chunks, self.embeddings, collection_name='temp')
        # NOTE(review): Chroma has no useful repr; to inspect the store, query
        # self.db_case._collection.count() or run a test similarity_search.

    def search(self, query_text: str, topK: int = 5):
        """Return the topK chunks most similar to query_text, joined into one
        newline-separated context string for prompt injection."""
        relevant_chunks = self.db_case.similarity_search(query_text, topK)
        context = "\n\n".join(
            f"Document {i+1}:\n" + "\n" + chunk.page_content
            for i, chunk in enumerate(relevant_chunks)
        )
        return context

    def loadFile(self):
        """Load the PDF at self.filepath and return its pages as Documents.

        Bug fix: the original iterated `loader.alazy_load()` — an *async*
        generator — with a plain `for`, which raises TypeError at runtime.
        Use the synchronous `lazy_load()` instead.
        """
        loader = PyPDFLoader(self.filepath)
        return list(loader.lazy_load())  # intended to yield one design-module text per page

    # Chunking strategy (translated note): either let the model split by size
    # limit and text similarity, or pre-split by table-of-contents/module
    # boundaries, then use the pieces as input chunks / labels for features.
    def splitPages(self, pages):
        """Split loaded pages into 1000-char chunks with 50-char overlap."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=50,
            length_function=len,
        )
        return text_splitter.split_documents(pages)

# Script entry: build the index from the template PDF and run a sample query.
# Bug fix: the original call omitted the required `filepath` argument, raising
# TypeError on import; supply the knowledge-dir template path explicitly.
if __name__ == "__main__":
    rag = LanchainRAG('BAAI/bge-m3', FILE_DIR_PATH + 'templete.pdf')
    context = rag.search('设计目标怎么做')
    print(context)