import os

from pptx import Presentation

from llama_index.core import Document, Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM

# Collect all PowerPoint (.pptx) files in a directory.
def get_ppt_files_in_directory(directory_path):
    """Return full paths of the .pptx files directly inside *directory_path*.

    The extension check is case-insensitive, so files saved as ``.PPTX``
    are also picked up (the original check missed them). Subdirectories
    are not searched.
    """
    return [
        os.path.join(directory_path, file_name)
        for file_name in os.listdir(directory_path)
        if file_name.lower().endswith(".pptx")
    ]

# Pull the visible text out of a PowerPoint file.
def extract_text_from_ppt(ppt_path):
    """Return the text of every shape on every slide, one shape per line.

    Shapes without a ``text`` attribute (pictures, charts, ...) are
    skipped. The result ends with a trailing newline when any text
    was found, and is the empty string otherwise.
    """
    presentation = Presentation(ppt_path)
    lines = [
        shape.text + "\n"
        for slide in presentation.slides
        for shape in slide.shapes
        if hasattr(shape, "text")
    ]
    return "".join(lines)

# Directory holding the source PowerPoint decks.
ppt_directory = "/mnt/workspace/llama-index/data-list"

# Gather every .pptx path in the directory.
ppt_files = get_ppt_files_in_directory(ppt_directory)

# Extract the text of each deck and wrap it in a llama-index Document.
# BUG FIX: VectorStoreIndex.from_documents() expects Document objects,
# not raw strings — the original appended plain str values, which the
# indexer cannot consume.
documents = []
for ppt_file in ppt_files:
    text = extract_text_from_ppt(ppt_file)
    documents.append(Document(text=text, metadata={"file_path": ppt_file}))

# Embedding model, loaded from a local checkpoint.
embed_model = HuggingFaceEmbedding(
    model_name="/mnt/workspace/llm/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
    local_files_only=True,  # load only from local disk; never hit the network
)
Settings.embed_model = embed_model


# Local chat LLM.
# NOTE(review): trust_remote_code=True ALLOWS executing code shipped in the
# model repository (the original comments claimed the opposite). Qwen models
# require it; keep the model directory trusted.
llm = HuggingFaceLLM(
    model_name="/mnt/workspace/llm/Qwen/Qwen-1_8B-Chat",
    tokenizer_name="/mnt/workspace/llm/Qwen/Qwen-1_8B-Chat",
    model_kwargs={"trust_remote_code": True},
    tokenizer_kwargs={"trust_remote_code": True},
)
Settings.llm = llm

# Build the vector index; embeddings come from Settings.embed_model.
index = VectorStoreIndex.from_documents(documents)

# Persist the index (vectors + docstore) to the default local storage dir.
index.storage_context.persist()

# Query engine over the index, answered by Settings.llm.
query_engine = index.as_query_engine()

# Run a sample query.
rsp = query_engine.query("利率3.3%起的有哪些银行")
print(f"得到的结果：{rsp}")