import fitz
import os
import numpy as np
import json
from openai import OpenAI

# Configure API credentials.
# NOTE(security): a live-looking API key is hard-coded and committed to source,
# and it clobbers any OPENAI_API_KEY already set in the environment. Move this
# secret out of the code (env var / .env / secret manager) and rotate the key.
os.environ["OPENAI_API_KEY"] = "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf"
# Initialize the OpenAI-compatible client against the SiliconFlow endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # read back the key set above
)

def extract_text_from_pdf(pdf_path):
    """Extract the plain text of every page in a PDF.

    Args:
        pdf_path: Path to the PDF file to read.

    Returns:
        All page text joined with single spaces, with leading/trailing
        whitespace stripped.
    """
    page_texts = []
    # Fix: the original left the document handle open (resource leak);
    # the context manager guarantees it is closed even if extraction fails.
    with fitz.open(pdf_path) as doc:
        for page in doc:
            page_texts.append(page.get_text("text"))
    # join() avoids the quadratic cost of repeated string concatenation
    # while producing the same single-space-separated result.
    return " ".join(page_texts).strip()

def get_embedding(text, model="BAAI/bge-large-en-v1.5"):
    """Embed a single piece of text and return the vector as a NumPy array.

    Relies on the module-level `client` configured at import time.
    """
    result = client.embeddings.create(model=model, input=text)
    first_item = result.data[0]
    return np.array(first_item.embedding)

def cosine_similarity(vec1, vec2):
    """Return the cosine similarity between two vectors (1.0 = same direction).

    Note: produces NaN if either vector has zero norm, matching NumPy division.
    """
    dot_product = np.dot(vec1, vec2)
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return dot_product / norm_product
def compute_breakpoints(similarities, method="percentile", threshold=90):
    """Locate semantic breakpoints: indices whose similarity falls below a cutoff.

    Args:
        similarities: Sequence of adjacent-sentence similarity scores.
        method: One of 'percentile', 'standard_deviation', 'interquartile'.
        threshold: Percentile rank for 'percentile', number of standard
            deviations for 'standard_deviation'; ignored for 'interquartile'.

    Returns:
        Sorted list of indices where similarity < cutoff.

    Raises:
        ValueError: If `method` is not one of the three supported names.
    """
    if method == "percentile":
        cutoff = np.percentile(similarities, threshold)
    elif method == "standard_deviation":
        cutoff = np.mean(similarities) - threshold * np.std(similarities)
    elif method == "interquartile":
        # Classic Tukey lower fence: Q1 - 1.5 * IQR.
        q1, q3 = np.percentile(similarities, [25, 75])
        cutoff = q1 - 1.5 * (q3 - q1)
    else:
        raise ValueError("Invalid method. Choose 'percentile', 'standard_deviation', or 'interquartile'.")
    # Indices below the cutoff, in ascending order.
    below_cutoff = np.flatnonzero(np.asarray(similarities) < cutoff)
    return below_cutoff.tolist()
def split_into_chunks(sentences, breakpoints):
    """Group sentences into chunks, cutting after each breakpoint index.

    Args:
        sentences: List of sentence strings (already split on '. ').
        breakpoints: Ascending indices; each chunk ends at a breakpoint
            (inclusive) and gets a trailing period restored.

    Returns:
        List of chunk strings; the final chunk has no period appended.
    """
    # Chunk start positions: 0, then one past each breakpoint.
    bounds = [0, *(bp + 1 for bp in breakpoints)]
    pieces = [
        ". ".join(sentences[lo:hi]) + "."
        for lo, hi in zip(bounds, bounds[1:])
    ]
    # Remainder after the last breakpoint (period intentionally not re-added,
    # mirroring the original behavior).
    pieces.append(". ".join(sentences[bounds[-1]:]))
    return pieces
def create_embeddings(text_chunks):
    """Embed every chunk via `get_embedding`; one API call per chunk."""
    return list(map(get_embedding, text_chunks))
def semantic_search(query, text_chunks, chunk_embeddings, k=5):
    """Return the k chunks most similar to the query, best match first.

    Args:
        query: Natural-language question to embed and match.
        text_chunks: Candidate chunk strings, aligned with `chunk_embeddings`.
        chunk_embeddings: Precomputed embedding vectors for the chunks.
        k: Number of top matches to return.
    """
    query_vec = get_embedding(query)
    scores = [cosine_similarity(query_vec, emb) for emb in chunk_embeddings]
    # Descending sort of indices by score, then keep the top k.
    ranked = np.argsort(scores)[::-1][:k]
    return [text_chunks[idx] for idx in ranked]
def generate_response(system_prompt, user_message, model="Qwen/Qwen2.5-72B-Instruct"):
    """Run one system+user chat turn and return the raw ChatCompletion response.

    Callers extract text via `response.choices[0].message.content`.
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    # temperature=0 keeps answers deterministic for evaluation runs.
    return client.chat.completions.create(
        model=model,
        temperature=0,
        messages=conversation,
    )

# --- End-to-end semantic-chunking RAG pipeline (runs at import) ---
pdf_path = "data/AI_Information.pdf"
extracted_text = extract_text_from_pdf(pdf_path)
# Split the text into sentences on ". " boundaries.
sentences = extracted_text.split(". ")
print("Number of text chunks:", len(sentences))
# Embed every sentence (one API call per sentence — slow for large documents).
embeddings = [get_embedding(sentence) for sentence in sentences]
# Cosine similarity between each pair of adjacent sentences.
similarities = [cosine_similarity(embeddings[i], embeddings[i + 1]) for i in range(len(embeddings) - 1)]
# Breakpoints = positions whose similarity falls below the 90th-percentile cutoff
# (i.e. the ~10% weakest adjacent-sentence links become chunk boundaries).
breakpoints = compute_breakpoints(similarities, method="percentile", threshold=90)
# Cut the sentence list into semantic chunks at those breakpoints.
text_chunks = split_into_chunks(sentences, breakpoints)
print(f"Number of semantic chunks: {len(text_chunks)}")
chunk_embeddings = create_embeddings(text_chunks)
# Validation set: assumed to be a JSON list of {'question', 'ideal_answer'}
# records — confirm against data/val.json.
with open('data/val.json') as f:
    data = json.load(f)
query = data[0]['question']
# Retrieve the top-2 chunks most relevant to the query.
top_chunks = semantic_search(query, text_chunks, chunk_embeddings, k=2)
print(f"Query: {query}")
for i, chunk in enumerate(top_chunks):
    print(f"Context {i+1}:\n{chunk}\n{'='*40}")
system_prompt = ("You are an AI assistant that strictly answers based on the given context. "
                 "If the answer cannot be derived directly from the provided context, respond with: "
                 "'I do not have enough information to answer that.'")
# Build the user prompt: retrieved contexts followed by the question.
user_prompt = "\n".join([f"Context {i + 1}:\n{chunk}\n=====================================\n" for i, chunk in enumerate(top_chunks)])
user_prompt = f"{user_prompt}\nQuestion: {query}"
ai_response = generate_response(system_prompt, user_prompt)
# LLM-as-judge evaluation: score the answer against the dataset's ideal answer.
evaluate_system_prompt = ("You are an intelligent evaluation system tasked with assessing the AI assistant's responses. "
                          "If the AI assistant's response is very close to the true response, assign a score of 1. "
                          "If the response is incorrect or unsatisfactory in relation to the true response, assign a score of 0. "
                          "If the response is partially aligned with the true response, assign a score of 0.00~1.00")
evaluation_prompt = f"User Query: {query}\nAI Response:\n{ai_response.choices[0].message.content}\nTrue Response: {data[0]['ideal_answer']}\n{evaluate_system_prompt}"
evaluation_response = generate_response(evaluate_system_prompt, evaluation_prompt)
print(evaluation_response.choices[0].message.content)