import fitz
import os
import numpy as np
import json
from openai import OpenAI
from tqdm import tqdm

# Configure the API key as an environment variable.
# NOTE(review): a live-looking API key is hardcoded in source — move it to a
# real environment variable / secrets manager and rotate the key; committing
# credentials like this is a security risk.
os.environ["OPENAI_API_KEY"] = "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf"
# Initialize the OpenAI client, pointed at the SiliconFlow-compatible endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # reads back the variable set above
)
# 首尾拼接
def extract_text_from_pdf(pdf_path):
    """Extract the full text of a PDF, with pages separated by single spaces.

    Args:
        pdf_path: Filesystem path to the PDF file.

    Returns:
        All page text concatenated, stripped of leading/trailing whitespace.
    """
    mypdf = fitz.open(pdf_path)
    try:
        # str.join avoids the quadratic `+=` concatenation of the naive loop;
        # a single space between pages matches the original per-page " " suffix
        # (which was stripped off the end anyway).
        return " ".join(page.get_text("text") for page in mypdf).strip()
    finally:
        # Release the document handle even if text extraction raises.
        mypdf.close()
# 拆分文本
def chunk_text(text, n, overlap):
    chunks = []
    # 从0开始，步长n-overlap，直到文本结束
    for i in range(0, len(text), n - overlap):
        #
        chunks.append(text[i:i + n])
    return chunks  # Return the list of text chunks

def create_embeddings(texts, model="BAAI/bge-large-en-v1.5"):
    """Embed a batch of texts; returns one numpy vector per input text."""
    api_result = client.embeddings.create(model=model, input=texts)
    # Convert each returned embedding into a numpy array, preserving order.
    vectors = []
    for item in api_result.data:
        vectors.append(np.array(item.embedding))
    return vectors

def cosine_similarity(vec1, vec2):
    """Cosine similarity between two 1-D numpy vectors.

    Args:
        vec1: First vector.
        vec2: Second vector.

    Returns:
        Similarity in [-1, 1]; 0.0 when either vector has zero norm (the
        unguarded division would yield nan, which silently corrupts the
        argsort-based ranking downstream).
    """
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if denom == 0.0:
        return 0.0
    return np.dot(vec1, vec2) / denom
# k = number of chunks to retrieve.
def retrieve_relevant_chunks(query, text_chunks, chunk_embeddings, k=5):
    """Return the k chunks most similar to the query, most similar first."""
    q_vec = create_embeddings([query])[0]
    # Score every stored chunk embedding against the query embedding.
    scores = []
    for emb in chunk_embeddings:
        scores.append(cosine_similarity(q_vec, emb))
    # argsort sorts ascending; take the last k indices and reverse them so
    # the highest-similarity chunk comes first.
    best = np.argsort(scores)[-k:][::-1]
    return [text_chunks[idx] for idx in best]

def generate_response(query, system_prompt, retrieved_chunks, model="Qwen/Qwen2-1.5B-Instruct"):
    """Answer `query` with the chat model, grounded on the retrieved chunks."""
    # Number each retrieved chunk so the model can cite distinct contexts.
    numbered = []
    for idx, chunk in enumerate(retrieved_chunks):
        numbered.append(f"Context {idx + 1}:\n{chunk}")
    context = "\n".join(numbered)
    user_prompt = f"{context}\n\nQuestion: {query}"
    # temperature=0 keeps the answer deterministic for the evaluation step.
    completion = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    return completion.choices[0].message.content
def _ask_evaluator(prompt, label):
    """Send one evaluation prompt to the judge model and parse its reply.

    Args:
        prompt: Fully formatted evaluation prompt.
        label: Metric name ("faithfulness"/"relevancy") used in the warning
            printed when the reply cannot be parsed.

    Returns:
        The reply parsed as float, or 0.0 when parsing fails.
    """
    completion = client.chat.completions.create(
        model="Qwen/Qwen2-1.5B-Instruct",
        temperature=0,
        messages=[
            {"role": "system", "content": "You are an objective evaluator. Return ONLY the numerical score."},
            {"role": "user", "content": prompt}
        ]
    )
    try:
        return float(completion.choices[0].message.content.strip())
    except ValueError:
        print(f"Warning: Could not parse {label} score, defaulting to 0")
        return 0.0


# Score an answer for faithfulness and relevancy (question, answer, reference).
def evaluate_response(question, response, true_answer):
    """Judge an AI response with an LLM on faithfulness and relevancy.

    Args:
        question: The user query.
        response: The AI-generated answer under evaluation.
        true_answer: The reference (ideal) answer.

    Returns:
        Tuple ``(faithfulness_score, relevancy_score)``; each is one of
        SCORE_FULL / SCORE_PARTIAL / SCORE_NONE, or 0.0 on a parse failure.
    """
    # Fill the pre-defined templates (question, answer, reference, score scale).
    faithfulness_prompt = FAITHFULNESS_PROMPT_TEMPLATE.format(
        question=question,
        response=response,
        true_answer=true_answer,
        full=SCORE_FULL,
        partial=SCORE_PARTIAL,
        none=SCORE_NONE
    )
    # Relevancy is judged against the question only — no reference answer.
    relevancy_prompt = RELEVANCY_PROMPT_TEMPLATE.format(
        question=question,
        response=response,
        full=SCORE_FULL,
        partial=SCORE_PARTIAL,
        none=SCORE_NONE
    )
    # The two judge calls were previously duplicated inline; the shared
    # request + parse logic now lives in _ask_evaluator.
    faithfulness_score = _ask_evaluator(faithfulness_prompt, "faithfulness")
    relevancy_score = _ask_evaluator(relevancy_prompt, "relevancy")
    return faithfulness_score, relevancy_score


# Path to the source document to index.
pdf_path = "data/AI_Information.pdf"
extracted_text = extract_text_from_pdf(pdf_path)
# Candidate chunk sizes (characters per chunk) to compare.
chunk_sizes = [128, 256, 512]
# Split the text once per candidate size; overlap is 20% of the chunk size.
text_chunks_dict = {size: chunk_text(extracted_text, size, size // 5) for size in chunk_sizes}
# Report how many chunks each size produces.
for size, chunks in text_chunks_dict.items():
    print(f"Chunk Size: {size}, Number of Chunks: {len(chunks)}")
# Embed every chunk list; tqdm shows progress over the three sizes.
chunk_embeddings_dict = {size: create_embeddings(chunks) for size, chunks in tqdm(text_chunks_dict.items(), desc="Generating Embeddings")}
with open('data/val.json') as f:
    data = json.load(f)
# Use the first validation item's question as the query.
# NOTE(review): assumes val.json is a list of {"question", "ideal_answer"} dicts — confirm.
query = data[0]['question']
# For each chunk size, retrieve the chunks most similar to the query.
retrieved_chunks_dict = {size: retrieve_relevant_chunks(query, text_chunks_dict[size], chunk_embeddings_dict[size]) for size in chunk_sizes}
# System prompt forcing the model to answer strictly from the supplied context.
system_prompt = ("You are an AI assistant that strictly answers based on the given context. "
                 "If the answer cannot be derived directly from the provided context, "
                 "respond with: 'I do not have enough information to answer that.'")
# Generate one answer per chunk size from its retrieved context.
ai_responses_dict = {size: generate_response(query, system_prompt, retrieved_chunks_dict[size]) for size in chunk_sizes}
print(ai_responses_dict[256])
# Discrete scores the judge model is instructed to return.
SCORE_FULL = 1.0
SCORE_PARTIAL = 0.5
SCORE_NONE = 0.0
# Define strict evaluation prompt templates
# Faithfulness: does the answer agree with the reference answer's facts?
FAITHFULNESS_PROMPT_TEMPLATE = """
Evaluate the faithfulness of the AI response compared to the true answer.
User Query: {question}
AI Response: {response}
True Answer: {true_answer}

Faithfulness measures how well the AI response aligns with facts in the true answer, without hallucinations.

INSTRUCTIONS:
- Score STRICTLY using only these values:
    * {full} = Completely faithful, no contradictions with true answer
    * {partial} = Partially faithful, minor contradictions
    * {none} = Not faithful, major contradictions or hallucinations
- Return ONLY the numerical score ({full}, {partial}, or {none}) with no explanation or additional text.
"""

# Relevancy: does the answer actually address the question? (no reference used)
RELEVANCY_PROMPT_TEMPLATE = """
Evaluate the relevancy of the AI response to the user query.
User Query: {question}
AI Response: {response}

Relevancy measures how well the response addresses the user's question.

INSTRUCTIONS:
- Score STRICTLY using only these values:
    * {full} = Completely relevant, directly addresses the query
    * {partial} = Partially relevant, addresses some aspects
    * {none} = Not relevant, fails to address the query
- Return ONLY the numerical score ({full}, {partial}, or {none}) with no explanation or additional text.
"""

# Reference answer paired with the first validation question.
true_answer = data[0]['ideal_answer']
# Judge the answers produced with 256- and 128-character chunks.
faithfulness, relevancy = evaluate_response(query, ai_responses_dict[256], true_answer)
faithfulness2, relevancy2 = evaluate_response(query, ai_responses_dict[128], true_answer)
print(f"Faithfulness Score (Chunk Size 256): {faithfulness}")
print(f"Relevancy Score (Chunk Size 256): {relevancy}")
# NOTE(review): print(f"\n") emits two newlines (the "\n" plus print's own).
print(f"\n")
print(f"Faithfulness Score (Chunk Size 128): {faithfulness2}")
print(f"Relevancy Score (Chunk Size 128): {relevancy2}")