from pdf2image import convert_from_path
from pathlib import Path
from call_model import get_image_infos, call_embedding_model
from database import save_faiss
import numpy as np

# Read the information in a PDF and store it in the vector database.
def insert_pdf_infos(filepath, prompt, faiss, faiss_path, conn, cursor):
    """Extract per-page information from a PDF and index it for retrieval.

    Renders each PDF page to a PNG under ``<dir>/<pdf stem>/<page>.png``,
    summarizes the page with the vision model (``get_image_infos``), stores
    the record in the sqlite ``origin`` table, embeds the summary and adds it
    to the faiss index (keyed by the sqlite rowid), then persists the index.

    Args:
        filepath: Path to the source PDF.
        prompt: Prompt passed to the vision model for each page image.
        faiss: A faiss ID-mapped index (must support ``add_with_ids``).
        faiss_path: Where ``save_faiss`` should persist the index.
        conn: Open sqlite connection (used to commit).
        cursor: Cursor on ``conn`` (used for INSERT/SELECT and ``lastrowid``).
    """
    pdf_path = Path(filepath)
    # Page images live in a sibling directory named after the PDF stem.
    page_dir = pdf_path.parent / pdf_path.stem
    page_dir.mkdir(parents=True, exist_ok=True)

    images = convert_from_path(filepath)

    for index, image in enumerate(images):
        image_file = page_dir / f"{index}.png"
        image_path = str(image_file)

        # A page counts as processed only once its record is in sqlite — not
        # merely when its PNG exists on disk. (Checking the filesystem alone
        # permanently skipped pages whenever a crash, empty model response,
        # or failed embedding happened after image.save but before INSERT.)
        cursor.execute(
            "SELECT 1 FROM origin WHERE path = ? LIMIT 1", (image_path,)
        )
        if cursor.fetchone() is not None:
            print("文件已存在")
            continue

        # Write the page image only if a previous run didn't already.
        if not image_file.is_file():
            image.save(image_file)

        # Summarize this page's content with the vision model.
        print(f"---------------------{image_path}--------------------------")
        content, tokens, cost_time = get_image_infos(image_path, prompt)
        print("content:", content)
        print("tokens:", tokens)
        print("time:", cost_time)

        # Empty content: nothing to store or embed; the page will be
        # retried on the next run since no DB record exists for it.
        if not content:
            continue

        # Record the page in sqlite; the rowid becomes the faiss ID.
        cursor.execute('''
            INSERT INTO origin (path, content, tokens, time)
            VALUES (?, ?, ?, ?)
        ''', (image_path, content, tokens, cost_time))
        conn.commit()
        last_id = cursor.lastrowid

        # Embed the summary and index it under the sqlite rowid.
        embedding = call_embedding_model(content)
        if embedding is not None:
            faiss.add_with_ids(embedding, np.array([last_id]))
            # Persist after every addition so a crash loses at most one page.
            save_faiss(faiss, faiss_path)