r'''
RAG demo pipeline: build a vector store from local documents, retrieve the
chunk most relevant to a question, augment the prompt with it, and generate
an answer with a local LLM.
    Corpus:    D:\code\other\LLMs\third\tiny-universe\content\TinyRAG\data\datafiles
    Embedding: bge-base-zh-v1.5
    LLM:       qwen0.5
'''

import os.path
import sys

from RAG.VectorBase import VectorStore
from RAG.utils import ReadFiles
from RAG.LLM import OpenAIChat, InternLMChat
from RAG.Embeddings import JinaEmbedding, ZhipuEmbedding,UserEmbedding

def get_database():
    """Build (or load) the document vector database.

    If a persisted store exists under ``./data/storage`` it is loaded
    directly; otherwise every file under ``./data/datafiles`` is split into
    overlapping chunks, each chunk is embedded with ``UserEmbedding``, and
    the resulting vectors are persisted for later runs.

    Returns:
        VectorStore: store holding the chunk texts and their embeddings.
    """
    storage_path = './data/storage'  # single source of truth for the cache location

    embedding = UserEmbedding()  # model mapping text chunks to feature vectors
    vector = VectorStore(bd_model=embedding)

    # BUGFIX: the original condition ended in `and False`, a debug leftover
    # that permanently disabled the cache and re-embedded the corpus on
    # every run.
    if os.path.exists(storage_path):
        vector.load_vector(storage_path)
    else:
        # Chunks of at most 100 tokens with a 20-token overlap between
        # consecutive chunks.
        docs = ReadFiles('./data/datafiles').get_content(max_token_len=100, cover_content=20)
        print(docs)
        vector.document = docs
        vector.get_vector(embedding)  # embed every chunk
        # Persist vectors + chunk texts so the next run can skip embedding.
        vector.persist(path=storage_path)
    return vector

# Step 1 — build (or load) the document vector database.
store = get_database()

# Step 2 — retrieve: fetch the single most relevant chunk for the question.
question = '静态交通检测算法逻辑是什么'
top_chunk = store.query(question, k=1)[0]

# Step 3 — augment: append the retrieved context to the question.
augmented_question = f'{question} 基于以下信息： {top_chunk}'
print(augmented_question)

# Step 4 — generate: run the local LLM on the augmented prompt.
sys.path.append(r'D:\code\other\LLMs\algorithms')
from infer import get_llm_infer

answer = get_llm_infer()(augmented_question, is_streamer=False)
print(answer)


