import fitz
import os
import numpy as np
import json
import time
from openai import OpenAI

"""
pip install PyMuPDF
F:\workspace\py_project\rag-lai\all-rag-techniques-main
"""
# 设置 API 变量
os.environ["OPENAI_API_KEY"] = "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf"
# 初始化OpenAI客户端
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # 获取环境变量
)

# Extract all text from a PDF file.
def extract_text_from_pdf(pdf_path):
    """Return the concatenated plain text of every page in the PDF.

    Opens the document in a context manager so the file handle is always
    released (the previous version never closed it). Pages are iterated
    in order and their "text"-mode extraction is joined into one string.

    Args:
        pdf_path: Path to the PDF file on disk.

    Returns:
        A single string with the text of all pages, in page order.
    """
    with fitz.open(pdf_path) as mypdf:
        # A fitz Document iterates over its pages in order.
        return "".join(page.get_text("text") for page in mypdf)

# Split long text into chunks of n characters, consecutive chunks sharing
# `overlap` characters.
def chunk_text(text, n, overlap):
    """Split *text* into overlapping chunks.

    Args:
        text: The string to split.
        n: Maximum characters per chunk.
        overlap: Characters shared between consecutive chunks; must be < n.

    Returns:
        List of chunk strings (empty list for empty text). The final chunk
        may be shorter than n.

    Raises:
        ValueError: If overlap >= n. The previous version either crashed
            with an opaque range() error (overlap == n) or silently
            returned [] (overlap > n).
    """
    if overlap >= n:
        raise ValueError("overlap must be smaller than chunk size n")
    step = n - overlap
    return [text[i:i + n] for i in range(0, len(text), step)]

# Generate an embedding vector for each text chunk; time.sleep throttles the
# call rate to avoid hitting the API rate limit.
def create_embeddings(texts, model="BAAI/bge-m3"):
    """Return one embedding vector per input text.

    Args:
        texts: Iterable of strings to embed.
        model: Embedding model name passed to the API.

    Returns:
        List of embedding vectors (lists of floats), in input order.
    """
    embeddings = []
    for text in texts:
        # Throttle requests to stay under the provider's rate limit.
        time.sleep(0.03)
        response = client.embeddings.create(model=model, input=text)
        embeddings.append(response.data[0].embedding)
    # Debug prints of the full embedding matrix removed — they flooded stdout
    # with thousands of floats on every call.
    return embeddings

# Compute the similarity of two vectors with the cosine-similarity formula.
# The similarity is in the range [-1, 1].
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two vectors.

    Args:
        vec1, vec2: Numeric array-likes of equal length.

    Returns:
        Similarity in [-1, 1]; 0.0 when either vector has zero norm
        (the previous version divided by zero, producing nan plus a
        runtime warning).
    """
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if norm_product == 0:
        return 0.0
    return np.dot(vec1, vec2) / norm_product

# Semantic search:
# embed the query, compute similarity between the query embedding and each
# chunk embedding, then return the k most-similar chunks.
def semantic_search(query, text_chunks, embeddings, k=5):
    """Return the *k* text chunks most similar to *query*.

    Args:
        query: Question string to embed and match.
        text_chunks: List of chunk strings, parallel to *embeddings*.
        embeddings: One embedding vector per chunk.
        k: Number of top chunks to return.

    Returns:
        List of up to k chunk strings, ordered by descending similarity.
    """
    # Convert the query embedding to an ndarray once, outside the loop
    # (previously re-converted on every comparison).
    query_vec = np.asarray(create_embeddings([query])[0])
    # (index, similarity) for every chunk.
    scores = [
        (i, cosine_similarity(query_vec, np.asarray(chunk_emb)))
        for i, chunk_emb in enumerate(embeddings)
    ]
    # Sort by similarity, best first.
    scores.sort(key=lambda pair: pair[1], reverse=True)
    # Map the top-k indices back to their chunks.
    return [text_chunks[i] for i, _ in scores[:k]]

# Generate an AI chat response.
def generate_response(system_prompt, user_message, model="Qwen/Qwen2.5-1.5B-Instruct"):
    """Send a system prompt plus a user message to the chat-completion API.

    temperature=0 keeps the output as deterministic as the model allows.
    Returns the raw API response object; callers read
    ``response.choices[0].message.content``.
    """
    # system message sets the assistant's behavior; user message carries the query.
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    return client.chat.completions.create(
        model=model,
        temperature=0,
        messages=conversation,
    )

# --- Main pipeline: minimal RAG over a single PDF ---
# Read the PDF and process its text.
pdf_path = "data/AI_Information.pdf"
# Extract text from the PDF and split it into smaller, overlapping chunks.
extracted_text = extract_text_from_pdf(pdf_path)
text_chunks = chunk_text(extracted_text, 1000, 200)
print(text_chunks)
# Embed every chunk — this is the searchable knowledge base.
embeddings = create_embeddings(text_chunks)
# Load the validation questions and ideal answers.
# NOTE(review): opened without an explicit encoding — relies on the platform
# default; confirm the file is UTF-8 on the deployment machine.
with open('data/val.json') as f:
    data = json.load(f)
# Run semantic search for the first question only.
query = data[0]['question']
top_chunks = semantic_search(query, text_chunks, embeddings, k=2)
for i, chunk in enumerate(top_chunks):
    print(f"Context {i + 1}:\n{chunk}\n=====================================")
# System prompt: force the model to answer strictly from the retrieved context.
system_prompt = ("You are an AI assistant that strictly answers based on the given context. "
                 "If the answer cannot be derived directly from the provided context, respond with: "
                 "'I do not have enough information to answer that.'")
# Build the user message: the retrieved contexts followed by the question.
user_prompt = "\n".join([f"Context {i + 1}:\n{chunk}\n=====================================\n" for i, chunk in enumerate(top_chunks)])
user_prompt = f"{user_prompt}\n Question: {query}"
# Generate the AI answer from the retrieved context.
ai_response = generate_response(system_prompt, user_prompt)
# Evaluate the AI answer against the ideal answer by asking the model for a
# similarity score. (The prompt below is a runtime string — in Chinese it asks:
# "directly compute the text similarity, 0.00% to 100.00%" — left as-is.)
evaluate_system_prompt = ("直接算文本相似程度有多少？0.00%到100.00%")
evaluation_prompt = f"AI Response:\n{ai_response.choices[0].message.content}\nTrue Response: {data[0]['ideal_answer']}\n{evaluate_system_prompt}\n"
evaluation_response = generate_response(evaluate_system_prompt, evaluation_prompt)
# Print the evaluation result.
print(evaluation_response.choices[0].message.content)
