import fitz
import os
import numpy as np
import json
from openai import OpenAI

# SECURITY(review): an API key is committed to source control — it should be
# revoked and supplied via the environment or a secrets manager instead.
# setdefault keeps the original fallback behaviour for users with no key set,
# but no longer clobbers a key the user has already exported.
os.environ.setdefault("OPENAI_API_KEY", "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf")
# Initialize an OpenAI-compatible client pointed at the SiliconFlow endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # read back from the environment
)

def extract_text_from_pdf(pdf_path):
    """Extract and concatenate the plain text of every page of a PDF.

    Args:
        pdf_path: Filesystem path to the PDF file.

    Returns:
        A single string containing the text of all pages in document order.
    """
    pages = []
    # Context manager guarantees the document handle is closed even if
    # extraction raises (the original never closed the document).
    with fitz.open(pdf_path) as doc:
        for page in doc:
            pages.append(page.get_text("text"))
    # Single join instead of repeated += (quadratic on large documents).
    return "".join(pages)

def chunk_text(text, n, overlap):
    """Split *text* into chunks of up to *n* characters with *overlap*
    characters shared between consecutive chunks.

    Args:
        text: The string to split; an empty string yields an empty list.
        n: Maximum chunk length in characters (must be positive).
        overlap: Characters shared between adjacent chunks (0 <= overlap < n).

    Returns:
        List of chunk strings covering the whole input in order.

    Raises:
        ValueError: If n <= 0 or overlap is not in [0, n).
            (The original silently returned [] when overlap > n because the
            range step went negative — losing all of the text.)
    """
    if n <= 0:
        raise ValueError("chunk size n must be positive")
    if overlap < 0 or overlap >= n:
        raise ValueError("overlap must satisfy 0 <= overlap < n")
    step = n - overlap
    return [text[i:i + n] for i in range(0, len(text), step)]

def create_embeddings(text, model="BAAI/bge-m3"):
    """Request embeddings for *text* (a string or a list of strings).

    Returns the raw embeddings API response object; individual vectors
    live in ``response.data[i].embedding``.
    """
    return client.embeddings.create(model=model, input=text)

def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two 1-D numeric vectors.

    Args:
        vec1, vec2: Array-likes of equal length.

    Returns:
        Float in [-1, 1]; 0.0 when either vector has zero norm (the
        original divided by zero there and produced nan plus a runtime
        warning).
    """
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if denom == 0.0:
        return 0.0
    return np.dot(vec1, vec2) / denom
# Context-enriched retrieval: rank chunks by similarity to the query and
# return the best match(es) together with their neighbouring chunks.
def context_enriched_search(query, text_chunks, embeddings, k=1, context_size=1):
    """Retrieve the top-k most similar chunks plus surrounding context.

    Args:
        query: The question string to embed and search with.
        text_chunks: List of chunk strings (same order as *embeddings*).
        embeddings: List of embedding objects exposing an ``.embedding`` list.
        k: Number of top-matching chunks whose neighbourhoods are returned.
           (The original accepted but ignored this parameter and always
           used only the single best match; behaviour at the default k=1
           is unchanged.)
        context_size: Number of neighbouring chunks to include on each side.

    Returns:
        List of chunk strings, de-duplicated, in order of first selection.
    """
    # Embed the query once.
    query_vec = np.array(create_embeddings(query).data[0].embedding)
    # Score every chunk as (index, similarity).
    scores = [
        (i, cosine_similarity(query_vec, np.array(item.embedding)))
        for i, item in enumerate(embeddings)
    ]
    # Highest similarity first.
    scores.sort(key=lambda pair: pair[1], reverse=True)
    # Collect the context window around each of the top-k chunks,
    # de-duplicating indices while preserving first-appearance order.
    selected = []
    seen = set()
    for idx, _ in scores[:k]:
        start = max(0, idx - context_size)
        end = min(len(text_chunks), idx + context_size + 1)
        for j in range(start, end):
            if j not in seen:
                seen.add(j)
                selected.append(text_chunks[j])
    return selected

def generate_response(system_prompt, user_message, model="Qwen/Qwen2-1.5B-Instruct"):
    """Send one system + user message pair to the chat-completions API.

    temperature=0 keeps the output deterministic across evaluation runs.
    Returns the raw API response object; the answer text lives in
    ``response.choices[0].message.content``.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    return client.chat.completions.create(
        model=model,
        temperature=0,
        messages=messages,
    )

# --- Script driver: build the index, retrieve, answer, and self-evaluate. ---

# 1) Load the source document and split it into overlapping chunks
#    (1000 chars per chunk, 200 shared) so sentences crossing a boundary
#    appear intact in at least one chunk.
pdf_path = "data/AI_Information.pdf"
extracted_text = extract_text_from_pdf(pdf_path)
text_chunks = chunk_text(extracted_text, 1000, 200)
print("Number of text chunks:", len(text_chunks))
# 2) Embed every chunk in a single API call (input is a list of strings).
response = create_embeddings(text_chunks)
# 3) Load the validation set and use its first question as the query.
#    NOTE(review): assumes data/val.json is a list of objects with
#    'question' and 'ideal_answer' keys — confirm against the data file.
with open('data/val.json') as f:
    data = json.load(f)
query = data[0]['question']
# Context-enriched retrieval (query, chunks, chunk embeddings).
top_chunks = context_enriched_search(query, text_chunks, response.data, k=1, context_size=1)
print("Query:", query)
# Show the retrieved context blocks.
for i, chunk in enumerate(top_chunks):
    print(f"Context {i + 1}:\n{chunk}\n=====================================")
# 4) Answer strictly from the retrieved context.
system_prompt = ("You are an AI assistant that strictly answers based on the given context. "
                 "If the answer cannot be derived directly from the provided context, "
                 "respond with: 'I do not have enough information to answer that.'")
user_prompt = "\n".join([f"Context {i + 1}:\n{chunk}\n=====================================\n" for i, chunk in enumerate(top_chunks)])
user_prompt = f"{user_prompt}\nQuestion: {query}"
ai_response = generate_response(system_prompt, user_prompt)
# 5) LLM-as-judge: score the generated answer against the ideal answer
#    on a 0..1 scale and print the verdict.
evaluate_system_prompt = ("You are an intelligent evaluation system tasked with assessing the AI assistant's responses. "
                          "If the AI assistant's response is very close to the true response, assign a score of 1. "
                          "If the response is incorrect or unsatisfactory in relation to the true response, assign a score of 0. "
                          "If the response is partially aligned with the true response, assign a score of 0.00~1.00.")
evaluation_prompt = f"User Query: {query}\nAI Response:\n{ai_response.choices[0].message.content}\nTrue Response: {data[0]['ideal_answer']}\n{evaluate_system_prompt}"
evaluation_response = generate_response(evaluate_system_prompt, evaluation_prompt)
print(evaluation_response.choices[0].message.content)