from pdf2image import convert_from_path
from paddleocr import PaddleOCR
import numpy as np
import cv2
import paddle
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings,OllamaLLM
from langchain.docstore.document import Document
# 在bash中还是需要安装faiss的：pip install faiss-gpu
from langchain_community.vectorstores import FAISS
from tqdm import tqdm
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA

# Use an Ollama-served model as the embedding model for the vector store.
embeddings = OllamaEmbeddings(model="deepseek-r1:7b", base_url="http://localhost:11434")

# Alternative: Sentence-Transformers pretrained model (better performance);
# expect ~2-3 minutes to run the parsing step.
# embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# Ollama variant: smaller footprint, but Ollama runs it unexpectedly slowly;
# still much faster than deepseek (deepseek ~6 hours vs ~0.5 hours for this one).
# embeddings = OllamaEmbeddings(model="all-minilm:33m", base_url="http://localhost:11434")

# LLM: an Ollama-served model chosen for testing.
# ollama_llm = OllamaLLM(model="qwen2.5:7b", base_url="http://localhost:11434")

print(paddle.device.is_compiled_with_cuda())  # whether this Paddle build supports CUDA
# Force execution on the GPU.
paddle.device.set_device("gpu")  # select the GPU device
print(paddle.device.get_device())  # current device (CPU / GPU)

ocr = PaddleOCR(use_angle_cls=True, lang="ch", use_gpu=True)  # enable Chinese OCR with angle classification

def pdf_to_text_paddle(pdf_path):
    """OCR every page of a (scanned) PDF with PaddleOCR and return its text.

    Args:
        pdf_path: Path to the PDF file to read.

    Returns:
        str: All recognized words, space-separated, with a newline appended
        after each page.
    """
    images = convert_from_path(pdf_path)
    # Collect fragments and join once at the end instead of repeated
    # string concatenation (avoids the quadratic `text += ...` pattern).
    pieces = []
    for img in images:
        # pdf2image yields PIL images; PaddleOCR wants a numpy array,
        # so convert and swap RGB -> BGR (OpenCV channel order).
        if not isinstance(img, np.ndarray):
            img = np.array(img)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        result = ocr.ocr(img, cls=True)
        # `result` holds one entry per detected text region; entries can be
        # None/empty for blank pages, hence the truthiness guards.
        for line in result:
            if line:
                for word in line:
                    if word:
                        # word[1] is (recognized_text, confidence)
                        pieces.append(word[1][0] + " ")
        pieces.append("\n")
    return "".join(pieces)

# Read the content of the scanned PDF via PaddleOCR.
pdf_text = pdf_to_text_paddle("./test_pdf/test-imgs.pdf")
# print(pdf_text)

# Wrap the full text in a single Document. (Iterating the string directly,
# as before, created one Document per *character*, which made every chunk a
# single character and broke retrieval.)
documents = [Document(page_content=pdf_text)]

# Splitter: try paragraph, newline, then space boundaries, in that order.
splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", " "],
    chunk_size=500,
    chunk_overlap=50,
    add_start_index=True  # record each chunk's start offset in its source doc
)
# all_splits = splitter.split_documents(documents)

# Splitting with a progress bar (one tick per source document).
all_splits = []
for doc in tqdm(documents, desc="Splitting documents", unit="document"):
    all_splits.extend(splitter.split_documents([doc]))

# Build the FAISS index from the chunks and persist it to disk.
vector_store = FAISS.from_documents(all_splits, embeddings)
# NOTE(review): the second argument is the index *name*; FAISS appends
# ".faiss"/".pkl", so the files on disk become index.bin.faiss / index.bin.pkl.
vector_store.save_local("./test_vsdata/", "index.bin")

# Everything above this point builds and saves the vector-store files.
'''
此处上方为生成vector-stores文件操作
'''

# # Load path: assumes the index file already exists (single-index case).
# vector_store = FAISS.load_local("./test_vsdata/", embeddings, 
#                                 index_name="index.bin",
#                                 allow_dangerous_deserialization=True)


# # Initialize the retriever.
# retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 3})  # fetch the 3 most similar documents


# # Define the RAG workflow: combine retrieval with generation.
# qa_chain = RetrievalQA.from_chain_type(llm=ollama_llm, chain_type="stuff", retriever=retriever)

# # Generate a response for a user query.
# def rag_query(query: str):
#     response = qa_chain.invoke(query)
#     return response

# # Test: pass in a query and generate an answer.
# query = "注册表注入技术的优缺点分析"
# result = rag_query(query)
# print(result)