# Author :   atg
# coding=utf-8
# Time   ：2025/6/6 19:59

import os
import numpy as np
import json
import dashscope
from openai import OpenAI
import fitz
from ai_core import API_KEY
from sklearn.metrics.pairwise import cosine_similarity

# os.environ['DASHSCOPE_API_KEY'] = "your API_KEY"

os.environ['DASHSCOPE_API_KEY'] = API_KEY


# Extract text from a PDF file --- the data source

# def extract_text_from_pdf(pdf_path):
#     # 打开PDF文件
#     document = fitz.open(pdf_path)
#     all_text = ""  # 初始化一个空字符串存储提取出的文本
#
#     # 遍历PDF中的每一页
#     for page_num in range(document.page_count):
#         page = document.get_page(page_num)  # 获取页面
#         text = page.get_text("text")  # 从页面提取文本
#         all_text += text  # 将提取出的文本追加到all_text字符串
#
#     return all_text  # 返回提取出的文本


#  Extract all text content from a PDF file

def extract_text_from_pdf(pdf_path):
    """Extract all text content from a PDF file.

    Args:
        pdf_path (str): Path to the PDF file.

    Returns:
        str: All extracted text, with pages joined by newlines.

    Raises:
        FileNotFoundError: If the file does not exist.
        RuntimeError: If the file cannot be read or is not a valid PDF.
    """
    pages = []

    try:
        # Context manager guarantees the document handle is closed.
        with fitz.open(pdf_path) as document:
            for page_index in range(len(document)):
                page_text = document.load_page(page_index).get_text("text")
                # Skip pages that yield no text (e.g. image-only pages).
                if page_text:
                    pages.append(page_text)

    except FileNotFoundError:
        raise FileNotFoundError(f"PDF文件未找到: {pdf_path}")
    except Exception as e:
        raise RuntimeError(f"读取PDF文件失败: {str(e)}")

    return "\n".join(pages)


# Split the text into small overlapping chunks to improve retrieval accuracy.

def chunk_text(text_input, chunk_size=1000, overlap_size=100):
    """Split *text_input* into chunks of at most *chunk_size* characters,
    where consecutive chunks overlap by *overlap_size* characters.

    Args:
        text_input (str): The text to split.
        chunk_size (int): Maximum length of each chunk; must be positive.
        overlap_size (int): Characters shared between consecutive chunks;
            must satisfy 0 <= overlap_size < chunk_size.

    Returns:
        list[str]: The chunks in document order (empty for empty input).

    Raises:
        ValueError: If chunk_size is not positive, or overlap_size is
            negative or >= chunk_size (the latter would otherwise make the
            step non-positive, raising an opaque range() error or silently
            producing no chunks).
    """
    if chunk_size <= 0:
        raise ValueError(f"chunk_size must be positive, got {chunk_size}")
    if overlap_size < 0 or overlap_size >= chunk_size:
        raise ValueError(
            f"overlap_size must satisfy 0 <= overlap_size < chunk_size, "
            f"got overlap_size={overlap_size}, chunk_size={chunk_size}"
        )

    # Step between chunk starts; positive by the validation above.
    step = chunk_size - overlap_size
    return [text_input[i:i + chunk_size]
            for i in range(0, len(text_input), step)]


# Create embedding vectors for text chunks.
def create_embeddings(texts, model="text-embedding-v3"):
    """Embed one or more texts via the DashScope-compatible embeddings API.

    Args:
        texts (str | list[str]): A single text or a list of texts.
        model (str): Name of the embedding model.

    Returns:
        list[list[float]]: One embedding vector per input text.
    """
    # Normalize a lone string to a one-element list so both the API call
    # and the return shape are uniform.
    if isinstance(texts, str):
        texts = [texts]
    completed = client.embeddings.create(
        model=model,
        input=texts,
        encoding_format="float")

    # Read the vectors straight off the response objects instead of
    # round-tripping the whole response through JSON serialization.
    return [item.embedding for item in completed.data]


# Use cosine similarity to find the text chunks most relevant to the user query.

def semantic_search(query, chunks, embeddings=None, k=2):
    """Return the *k* chunks most similar to *query*.

    Args:
        query (str): The query string.
        chunks (list[str]): Candidate text chunks.
        embeddings (list[list[float]] | None): Precomputed chunk embeddings;
            computed on the fly when None.
        k (int): Number of chunks to return.

    Returns:
        list[str]: The top-k chunks, most similar first.

    Raises:
        ValueError: If *embeddings* is given but its length differs from
            that of *chunks*.
    """
    if embeddings is None:
        embeddings = create_embeddings(chunks)
    elif len(chunks) != len(embeddings):
        # Explicit exception instead of `assert`, which is stripped under -O.
        raise ValueError("chunks和embeddings的长度必须相同")

    query_embedding = create_embeddings(query)[0]  # embed the query

    # One vectorized call computes every similarity at once, instead of
    # calling cosine_similarity per chunk inside a Python loop.
    scores = cosine_similarity([query_embedding], embeddings)[0]

    # Indices of the k highest scores, best first (stable on ties, matching
    # the original stable descending sort).
    top_k_indices = sorted(range(len(chunks)), key=lambda i: scores[i],
                           reverse=True)[:k]
    return [chunks[i] for i in top_k_indices]


# Initialize the OpenAI-compatible client pointed at DashScope's endpoint.
# NOTE: module-level on purpose — create_embeddings() reads this global.
client = OpenAI(api_key=API_KEY, base_url="https://dashscope.aliyuncs.com/compatible-mode/v1")

pdf_path = "基于DeepSeek构建本地RAG知识库的研究.pdf"

# Extract the raw text of the PDF (the retrieval data source).
extract_text = extract_text_from_pdf(pdf_path)

# Split the text into overlapping chunks for embedding and retrieval.
chunks = chunk_text(extract_text)

# print("文本分块到的数量:", len(chunks))
# print(chunks[0])

# Retrieve and print the chunks most relevant to the query.
query = "deepseek是什么?"
top_chunks = semantic_search(query, chunks)
for i, chunk in enumerate(top_chunks):
    print(f"{i + 1}. {chunk}")
