import fitz
import os
import numpy as np
import json
from openai import OpenAI
import re
from tqdm import tqdm

# --- API client setup -----------------------------------------------------
# SECURITY WARNING: a live-looking API key is committed to source control.
# Rotate this key and supply the real one via the environment or a secrets
# manager instead of hard-coding it here.
# setdefault keeps an externally provided OPENAI_API_KEY instead of
# silently overwriting it (the original always clobbered it).
os.environ.setdefault("OPENAI_API_KEY", "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf")
# Initialize the OpenAI-compatible client against the SiliconFlow endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # read the key back from the environment
)
def extract_text_from_pdf(pdf_path):
    """Extract the plain text of every page of a PDF, in page order.

    Args:
        pdf_path: Path to a PDF file readable by PyMuPDF (fitz).

    Returns:
        The concatenated "text" extraction of all pages as one string.
    """
    mypdf = fitz.open(pdf_path)
    try:
        # join() avoids quadratic += concatenation on large documents
        return "".join(mypdf[page_num].get_text("text")
                       for page_num in range(mypdf.page_count))
    finally:
        mypdf.close()  # FIX: the original leaked the open document handle
def chunk_text(text, n, overlap):
    """Split `text` into chunks of up to `n` characters.

    Consecutive chunks share `overlap` characters, so the window advances by
    `n - overlap` each step. The final chunks may be shorter than `n`.

    Args:
        text: The string to split (empty string yields an empty list).
        n: Maximum chunk length in characters; must be positive.
        overlap: Characters shared between consecutive chunks; must satisfy
            0 <= overlap < n.

    Returns:
        List of chunk strings covering `text` in order.

    Raises:
        ValueError: If `n <= 0` or `overlap` is negative or >= `n`
            (the original would raise an opaque error from range() instead).
    """
    if n <= 0:
        raise ValueError(f"chunk size n must be positive, got {n}")
    if overlap < 0 or overlap >= n:
        raise ValueError(f"overlap must satisfy 0 <= overlap < n, got {overlap}")
    step = n - overlap  # hoisted window stride
    return [text[i:i + n] for i in range(0, len(text), step)]
# Use the LLM to generate questions answerable from a chunk of text
def generate_questions(text_chunk, num_questions, model="Qwen/Qwen2-1.5B-Instruct"):
    """Ask the chat model for `num_questions` questions about `text_chunk`.

    Only lines that still end with '?' after stripping a leading
    "1. "-style number are kept, so chatter from the model is dropped.
    """
    system_prompt = (
        "You are an expert at generating relevant questions from text. "
        "Create concise questions that can be answered using only the provided text. "
        "Focus on key information and concepts."
    )
    # Request a plain numbered list so the reply can be parsed line by line.
    user_prompt = f"""
    Based on the following text, generate {num_questions} different questions that can be answered using only this text:
    {text_chunk}
    Format your response as a numbered list of questions only, with no additional text.
    """
    completion = client.chat.completions.create(
        model=model,
        temperature=0.7,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    raw_lines = completion.choices[0].message.content.strip().split('\n')
    # Remove "N. " numbering, then keep only lines that look like questions.
    stripped = (re.sub(r'^\d+\.\s*', '', line.strip()) for line in raw_lines)
    return [question for question in stripped if question and question.endswith('?')]
def create_embeddings(text, model="BAAI/bge-m3"):
    """Request embeddings for `text` from the embeddings API.

    Returns the full API response; vectors live at ``response.data[i].embedding``.
    NOTE(review): an identical function is re-defined later in this file and
    shadows this one at import time (behavior is unchanged since they match).
    """
    return client.embeddings.create(model=model, input=text)

class SimpleVectorStore:
    """Minimal in-memory vector store with brute-force cosine-similarity search.

    Stores embedding vectors alongside their source texts and optional
    per-item metadata dicts, kept in three parallel lists.
    """

    def __init__(self):
        self.vectors = []   # one np.ndarray per stored item
        self.texts = []     # the original text of each item
        self.metadata = []  # a metadata dict per item (never None)

    def add_item(self, text, embedding, metadata=None):
        """Store one text with its embedding vector and optional metadata dict."""
        self.vectors.append(np.array(embedding))
        self.texts.append(text)
        self.metadata.append(metadata or {})

    def similarity_search(self, query_embedding, k=3):
        """Return the `k` stored items most cosine-similar to `query_embedding`.

        Results are dicts with keys "text", "metadata", "similarity", sorted
        by similarity in DESCENDING order (the original comment claimed
        ascending, contradicting the reverse=True sort).

        Fixes vs original: the query norm is hoisted out of the loop, and a
        zero-norm vector scores 0.0 instead of producing a NaN division.
        """
        if not self.vectors:
            return []
        query_vector = np.array(query_embedding)
        query_norm = np.linalg.norm(query_vector)  # loop-invariant, computed once
        similarities = []
        for i, vector in enumerate(self.vectors):
            denom = query_norm * np.linalg.norm(vector)
            score = float(np.dot(query_vector, vector) / denom) if denom else 0.0
            similarities.append((i, score))
        # sort descending so the best matches come first
        similarities.sort(key=lambda pair: pair[1], reverse=True)
        # materialize the top-k hits with their original text and metadata
        return [
            {
                "text": self.texts[idx],
                "metadata": self.metadata[idx],
                "similarity": score,
            }
            for idx, score in similarities[:k]
        ]
# (PDF path, chunk size, overlap size, questions generated per chunk)
def process_document(pdf_path, chunk_size=1000, chunk_overlap=200, questions_per_chunk=3):
    """Build a vector store over a PDF.

    Each chunk is embedded and stored, and additionally each LLM-generated
    question about the chunk is embedded and stored with a back-reference
    to its source chunk. Returns ``(text_chunks, vector_store)``.
    """
    # Extract and split the source document
    text_chunks = chunk_text(extract_text_from_pdf(pdf_path), chunk_size, chunk_overlap)
    print(f"Created {len(text_chunks)} text chunks")
    store = SimpleVectorStore()
    for idx, chunk in enumerate(tqdm(text_chunks, desc="Processing Chunks")):
        # embed and store the raw chunk itself
        store.add_item(
            text=chunk,
            embedding=create_embeddings(chunk).data[0].embedding,
            metadata={"type": "chunk", "index": idx},
        )
        # embed and store every generated question, linked back to its chunk
        for question in generate_questions(chunk, num_questions=questions_per_chunk):
            store.add_item(
                text=question,
                embedding=create_embeddings(question).data[0].embedding,
                metadata={"type": "question", "chunk_index": idx, "original_chunk": chunk},
            )
    return text_chunks, store

def semantic_search(query, vector_store, k=3):
    """Embed `query` and return its `k` nearest items from `vector_store`."""
    embedding = create_embeddings(query).data[0].embedding
    return vector_store.similarity_search(embedding, k=k)

# search_results items: {"text", "metadata", "similarity"}
def prepare_context(search_results):
    """Assemble the context string for answer generation.

    Direct chunk hits come first; then chunks reached via a matched question
    are appended, skipping chunk indices that were already included. Chunks
    are joined with blank lines.
    """
    seen_indices = set()
    sections = []
    # First pass: chunks that matched the query directly.
    for hit in search_results:
        meta = hit["metadata"]
        if meta["type"] == "chunk":
            seen_indices.add(meta["index"])
            sections.append(f"Chunk {meta['index']}:\n{hit['text']}")
    # Second pass: chunks referenced by matched questions, deduplicated
    # against the direct hits above.
    for hit in search_results:
        meta = hit["metadata"]
        if meta["type"] == "question" and meta["chunk_index"] not in seen_indices:
            seen_indices.add(meta["chunk_index"])
            sections.append(
                f"Chunk {meta['chunk_index']} (referenced by question '{hit['text']}'):\n{meta['original_chunk']}")
    full_context = "\n\n".join(sections)
    print("\n这是完整资料\n"+full_context)
    return full_context

def generate_response(query, context, model="Qwen/Qwen2-1.5B-Instruct"):
    """Answer `query` strictly from `context` via the chat API; returns the text.

    NOTE(review): a second, differently-shaped `generate_response` is defined
    later in this file and shadows this one at import time.
    """
    system_prompt = ("You are an AI assistant that strictly answers based on the given context. "
                     "If the answer cannot be derived directly from the provided context, respond with: "
                     "'I do not have enough information to answer that.'")
    user_prompt = f"""
        Context:
        {context}
        Question: {query}
        Please answer the question based only on the context provided above. Be concise and accurate.
    """
    # temperature=0 for deterministic, context-grounded answers
    chat = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    return chat.choices[0].message.content

def evaluate_response(query, response, reference_answer, model="Qwen/Qwen2-1.5B-Instruct"):
    """Have an LLM judge score `response` against `reference_answer` (0-1 rubric).

    Returns the judge's free-text verdict (score plus justification).
    """
    evaluate_system_prompt = """You are an intelligent evaluation system tasked with assessing AI responses.
        Compare the AI assistant's response to the true/reference answer, and evaluate based on:
        1. Factual correctness - Does the response contain accurate information?
        2. Completeness - Does it cover all important aspects from the reference?
        3. Relevance - Does it directly address the question?
        Assign a score from 0 to 1:
        - 1.0: Perfect match in content and meaning
        - 0.8: Very good, with minor omissions/differences
        - 0.6: Good, covers main points but misses some details
        - 0.4: Partial answer with significant omissions
        - 0.2: Minimal relevant information
        - 0.0: Incorrect or irrelevant
        Provide your score with justification.
    """
    evaluation_prompt = f"""
        User Query: {query}
        AI Response:
        {response}
        Reference Answer:
        {reference_answer}
        Please evaluate the AI response against the reference answer.
    """
    # temperature=0 keeps the judge deterministic
    judgement = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system", "content": evaluate_system_prompt},
            {"role": "user", "content": evaluation_prompt},
        ],
    )
    return judgement.choices[0].message.content

def create_embeddings(text, model="BAAI/bge-m3"):
    """Request embeddings for `text` and return the raw API response.

    NOTE(review): duplicate of the earlier `create_embeddings`; this
    definition shadows it, but as both are identical callers are unaffected.
    """
    return client.embeddings.create(model=model, input=text)

def cosine_similarity(vec1, vec2):
    """Return the cosine of the angle between `vec1` and `vec2` (1.0 = parallel)."""
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return np.dot(vec1, vec2) / denom

def generate_response(system_prompt, user_message, model="Qwen/Qwen2-1.5B-Instruct"):
    """Send one system+user chat exchange and return the assistant's reply text.

    FIX: this redefinition shadows the earlier `generate_response(query,
    context)`, so the script's call resolves here — but the original body
    returned the raw API response object, making the script print and
    evaluate the object's repr instead of the answer. Returning the message
    content restores the string contract of the function it shadows.
    """
    response = client.chat.completions.create(
        model=model,
        temperature=0,  # deterministic output
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message}
        ]
    )
    return response.choices[0].message.content






# --- Script entry point ---------------------------------------------------
pdf_path = "data/AI_Information.pdf"
# Build the augmented vector store (chunks + generated questions)
text_chunks, vector_store = process_document(
    pdf_path,
    chunk_size=1000,
    chunk_overlap=200,
    questions_per_chunk=3
)
print(f"Vector store contains {len(vector_store.texts)} items")
# Read the validation set as UTF-8 explicitly, so the script does not
# depend on the platform's default encoding.
with open('data/val.json', encoding="utf-8") as f:
    data = json.load(f)
query = data[0]['question']
# Retrieve the top-k hits (each: text, metadata type, similarity score)
search_results = semantic_search(query, vector_store, k=3)
print("Query:", query)
print("\nSearch Results:\n")
chunk_results = []
question_results = []
# Partition the hits by entry type: "chunk" vs generated "question"
for result in search_results:
    if result["metadata"]["type"] == "chunk":
        chunk_results.append(result)
    else:
        question_results.append(result)
print("\nRelevant Document Chunks:")
# Show the matched document chunks (truncated preview)
for i, result in enumerate(chunk_results):
    print(f"Context {i + 1} (similarity: {result['similarity']:.4f}):")
    print(result["text"][:300] + "...")
    print("=====================================")
# Show the matched generated questions with their source chunk index
print("\nMatched Questions:")
for i, result in enumerate(question_results):
    print(f"Question {i + 1} (similarity: {result['similarity']:.4f}):")
    print(result["text"])
    chunk_idx = result["metadata"]["chunk_index"]
    print(f"From chunk {chunk_idx}")
    print("=====================================")
# Build the answer context from the combined hits
context = prepare_context(search_results)
response_text = generate_response(query, context)
# FIX: the `generate_response` defined later in this file shadows the earlier
# one and (as written) returns the raw API response object; extract the
# answer text in that case so a string is always printed and evaluated.
if not isinstance(response_text, str):
    response_text = response_text.choices[0].message.content
print("\nQuery:", query)
print("\nResponse:")
print(response_text)
reference_answer = data[0]['ideal_answer']
# Have the LLM judge score the answer against the reference
evaluation = evaluate_response(query, response_text, reference_answer)
print("\nEvaluation:")
print(evaluation)