import fitz
import os
import numpy as np
import json
from openai import OpenAI
import re
from tqdm import tqdm

# API credentials.
# SECURITY NOTE(review): a real-looking API key is hard-coded below; it is now
# committed to source and should be rotated. setdefault (instead of the
# original unconditional assignment) lets an externally supplied
# OPENAI_API_KEY take precedence over the inline fallback.
os.environ.setdefault("OPENAI_API_KEY", "sk-aswwzdvvimeybiiokqebixpkhbmcftlbgkubssfuodifqjcf")
# OpenAI-compatible client pointed at the SiliconFlow endpoint.
client = OpenAI(
    base_url="https://api.siliconflow.cn/v1",
    api_key=os.getenv("OPENAI_API_KEY")  # read back from the environment
)

class SimpleVectorStore:
    """Minimal in-memory vector store with cosine-similarity retrieval."""

    def __init__(self):
        self.vectors = []   # embedding vectors (np.ndarray), parallel to texts
        self.texts = []     # original text chunks
        self.metadata = []  # per-chunk metadata dicts

    def add_item(self, text, embedding, metadata=None):
        """Store one (text, embedding, metadata) triple.

        Args:
            text: The original text chunk.
            embedding: Sequence of floats; converted to a numpy array.
            metadata: Optional dict of extra info; defaults to an empty dict.
        """
        self.vectors.append(np.array(embedding))
        self.texts.append(text)
        self.metadata.append(metadata or {})

    def similarity_search(self, query_embedding, k=3):
        """Return the top-k stored items most cosine-similar to the query.

        Args:
            query_embedding: Sequence of floats for the query.
            k: Maximum number of results to return.

        Returns:
            List of dicts with "text", "metadata" and "similarity" keys,
            sorted by similarity descending; [] when the store is empty.
        """
        if not self.vectors:
            return []
        query_vector = np.array(query_embedding)
        # Hoist the query norm: it is loop-invariant, but the original
        # recomputed it once per stored vector.
        query_norm = np.linalg.norm(query_vector)
        similarities = []
        for idx, vector in enumerate(self.vectors):
            score = np.dot(query_vector, vector) / (query_norm * np.linalg.norm(vector))
            similarities.append((idx, score))
        # list.sort is stable, so equal scores keep insertion order,
        # matching the original behavior.
        similarities.sort(key=lambda pair: pair[1], reverse=True)
        return [
            {
                "text": self.texts[idx],
                "metadata": self.metadata[idx],
                "similarity": score,
            }
            for idx, score in similarities[:k]
        ]

def create_embeddings(text, model="BAAI/bge-m3"):
    """Embed a string or a list of strings with the given embedding model.

    Args:
        text: A single string or a list of strings.
        model: Embedding model identifier.

    Returns:
        One embedding (list of floats) when given a string; a list of
        embeddings when given a list.
    """
    batch = text if isinstance(text, list) else [text]
    response = client.embeddings.create(model=model, input=batch)
    vectors = [item.embedding for item in response.data]
    return vectors[0] if isinstance(text, str) else vectors

def extract_text_from_pdf(pdf_path):
    """Extract the plain text of every page of a PDF, concatenated in order.

    Args:
        pdf_path: Path to the PDF file.

    Returns:
        A single string with all page texts joined together.
    """
    # Context manager closes the document handle even on error (the original
    # never closed it), and join avoids quadratic string concatenation.
    with fitz.open(pdf_path) as doc:
        return "".join(page.get_text("text") for page in doc)

def chunk_text(text, n=1000, overlap=200):
    """Split text into overlapping chunks of at most n characters.

    Args:
        text: The input string.
        n: Maximum chunk length in characters.
        overlap: Number of characters shared between consecutive chunks.

    Returns:
        List of chunk strings; [] for empty input.

    Raises:
        ValueError: If overlap >= n (the original silently returned [] when
            overlap > n because the range step went negative).
    """
    if overlap >= n:
        raise ValueError("overlap must be smaller than chunk size n")
    step = n - overlap
    return [text[i:i + n] for i in range(0, len(text), step)]

def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):
    """Index a PDF: extract its text, chunk it, embed the chunks, and load
    everything into a SimpleVectorStore.

    Args:
        pdf_path: Path to the PDF file.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Characters shared between consecutive chunks.

    Returns:
        A populated SimpleVectorStore.
    """
    print("Extracting text from PDF...")
    full_text = extract_text_from_pdf(pdf_path)
    print("Chunking text...")
    chunks = chunk_text(full_text, chunk_size, chunk_overlap)
    print(f"Created {len(chunks)} text chunks")
    print("Creating embeddings for chunks...")
    embeddings = create_embeddings(chunks)
    store = SimpleVectorStore()
    for position, (chunk, vector) in enumerate(zip(chunks, embeddings)):
        store.add_item(
            text=chunk,
            embedding=vector,
            metadata={"index": position, "source": pdf_path}
        )
    print(f"Added {len(chunks)} chunks to the vector store")
    return store

def transformed_search(query, vector_store, transformation_type, top_k=3):
    """Retrieve chunks for a query after applying one transformation technique.

    Args:
        query: Original user query.
        vector_store: SimpleVectorStore to search.
        transformation_type: "rewrite", "step_back", "decompose", or anything
            else to search with the untransformed query.
        top_k: Number of results to return.

    Returns:
        List of result dicts from the vector store.
    """
    print(f"Transformation type: {transformation_type}")
    print(f"Original query: {query}")
    if transformation_type == "decompose":
        # Search each sub-query separately, then merge and deduplicate.
        sub_queries = decompose_query(query)
        print("Decomposed into sub-queries:")
        for number, sub_q in enumerate(sub_queries, 1):
            print(f"{number}. {sub_q}")
        merged = []
        for embedding in create_embeddings(sub_queries):
            merged.extend(vector_store.similarity_search(embedding, k=2))
        # Keep only the best-scoring hit per distinct text.
        best_by_text = {}
        for hit in merged:
            previous = best_by_text.get(hit["text"])
            if previous is None or hit["similarity"] > previous["similarity"]:
                best_by_text[hit["text"]] = hit
        ranked = sorted(best_by_text.values(), key=lambda h: h["similarity"], reverse=True)
        return ranked[:top_k]
    if transformation_type == "rewrite":
        search_query = rewrite_query(query)
        print(f"Rewritten query: {search_query}")
    elif transformation_type == "step_back":
        search_query = generate_step_back_query(query)
        print(f"Step-back query: {search_query}")
    else:
        search_query = query
    return vector_store.similarity_search(create_embeddings(search_query), k=top_k)

def generate_response(query, context, model="Qwen/Qwen2-1.5B-Instruct"):
    """Generate an answer to the query grounded strictly in the given context.

    Args:
        query: User question.
        context: Retrieved passages to answer from.
        model: Chat model identifier.

    Returns:
        The model's answer, stripped of surrounding whitespace.
    """
    system_prompt = ("You are a helpful AI assistant. Answer the user's question based only on the provided context. "
                     "If you cannot find the answer in the context, state that you don't have enough information.")
    user_prompt = f"""
        Context:
        {context}
        Question: {query}
        Please provide a comprehensive answer based only on the context above.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    # temperature=0 keeps the output deterministic.
    completion = client.chat.completions.create(model=model, temperature=0, messages=messages)
    return completion.choices[0].message.content.strip()

def rag_with_query_transformation(pdf_path, query, transformation_type=None):
    """Run the end-to-end RAG pipeline, optionally transforming the query.

    Args:
        pdf_path: PDF to index.
        query: User question.
        transformation_type: None, "rewrite", "step_back", or "decompose".

    Returns:
        Dict with the original query, transformation type, retrieved context,
        and the generated response.
    """
    vector_store = process_document(pdf_path)
    if transformation_type:
        retrieved = transformed_search(query, vector_store, transformation_type)
    else:
        retrieved = vector_store.similarity_search(create_embeddings(query), k=3)
    passages = [f"PASSAGE {i + 1}:\n{item['text']}" for i, item in enumerate(retrieved)]
    context = "\n\n".join(passages)
    # Generate the grounded answer from the assembled context.
    answer = generate_response(query, context)
    return {
        "original_query": query,
        "transformation_type": transformation_type,
        "context": context,
        "response": answer
    }

def compare_responses(results, reference_answer, model="Qwen/Qwen2-1.5B-Instruct"):
    """Ask the LLM to score each technique's response against a reference
    answer and print the evaluation.

    Args:
        results: Dict mapping technique name to its RAG result dict.
        reference_answer: Ground-truth answer to compare against.
        model: Chat model identifier.
    """
    system_prompt = """You are an expert evaluator of RAG systems. 
    Your task is to compare different responses generated using various query transformation techniques 
    and determine which technique produced the best response compared to the reference answer."""
    # Assemble the comparison body with join instead of repeated +=.
    sections = [f"Reference Answer: {reference_answer}\n\n"]
    for technique, result in results.items():
        sections.append(f"{technique.capitalize()} Query Response:\n{result['response']}\n\n")
    comparison_text = "".join(sections)
    user_prompt = f"""
    {comparison_text}
    Compare the responses generated by different query transformation techniques to the reference answer.
    For each technique (original, rewrite, step_back, decompose):
    1. Score the response from 1-10 based on accuracy, completeness, and relevance
    2. Identify strengths and weaknesses
    Then rank the techniques from best to worst and explain which technique performed best overall and why.
    """
    evaluation = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )
    print("\n===== EVALUATION RESULTS =====")
    print(evaluation.choices[0].message.content)
    print("=============================")

def evaluate_transformations(pdf_path, query, reference_answer=None):
    """Run RAG once per transformation technique and optionally compare them.

    Args:
        pdf_path: PDF to index.
        query: User question.
        reference_answer: If given, an LLM-based comparison is printed.

    Returns:
        Dict mapping technique name ("original", "rewrite", "step_back",
        "decompose") to its RAG result dict.
    """
    results = {}
    # None means "no transformation"; it is reported as "original".
    for technique in [None, "rewrite", "step_back", "decompose"]:
        label = technique if technique else "original"
        print(f"\n===== Running RAG with {label} query =====")
        outcome = rag_with_query_transformation(pdf_path, query, technique)
        results[label] = outcome
        print(f"Response with {label} query:")
        print(outcome["response"])
        print("=" * 50)
    if reference_answer:
        compare_responses(results, reference_answer)
    return results

def rewrite_query(original_query, model="Qwen/Qwen2-1.5B-Instruct"):
    """Rewrite a query to be more specific and detailed for retrieval.

    Args:
        original_query: The user's original query.
        model: Chat model identifier.

    Returns:
        The rewritten query string.
    """
    system_prompt = ("You are an AI assistant specialized in improving search queries. "
                     "Your task is to rewrite user queries to be more specific, detailed, "
                     "and likely to retrieve relevant information.")
    user_prompt = f"""
    Rewrite the following query to make it more specific and detailed. Include relevant terms and concepts that might help in retrieving accurate information.
    Original query: {original_query}
    Rewritten query:
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    # temperature 0.0 for a deterministic rewrite.
    completion = client.chat.completions.create(model=model, temperature=0.0, messages=messages)
    return completion.choices[0].message.content.strip()

def generate_step_back_query(original_query, model="Qwen/Qwen2-1.5B-Instruct"):
    """Generate a broader "step-back" version of a query to retrieve
    background information.

    Args:
        original_query: The user's original query.
        model: Chat model identifier.

    Returns:
        The broader query string.
    """
    system_prompt = ("You are an AI assistant specialized in search strategies. "
                     "Your task is to generate broader, more general versions of specific "
                     "queries to retrieve relevant background information.")
    user_prompt = f"""
    Generate a broader, more general version of the following query that could help retrieve useful background information.
    Original query: {original_query}
    Step-back query:
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    # Slightly higher temperature allows some variation in the broadening.
    completion = client.chat.completions.create(model=model, temperature=0.1, messages=messages)
    return completion.choices[0].message.content.strip()

def decompose_query(original_query, num_subqueries=4, model="Qwen/Qwen2-1.5B-Instruct"):
    """Decompose a complex query into simpler sub-queries via the LLM.

    Args:
        original_query: The complex user query.
        num_subqueries: Number of sub-queries to request from the model.
        model: Chat model identifier.

    Returns:
        List of sub-query strings parsed from the model's numbered output.
    """
    system_prompt = ("You are an AI assistant specialized in breaking down complex questions. "
                     "Your task is to decompose complex queries into simpler sub-questions that, "
                     "when answered together, address the original query.")
    user_prompt = f"""
    Break down the following complex query into {num_subqueries} simpler sub-queries. Each sub-query should focus on a different aspect of the original question.
    Original query: {original_query}
    Generate {num_subqueries} sub-queries, one per line, in this format:
    1. [First sub-query]
    2. [Second sub-query]
    And so on...
    """
    response = client.chat.completions.create(
        model=model,
        temperature=0.2,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )
    content = response.choices[0].message.content.strip()
    # Parse numbered lines ("1. ...", "2. ...") with a regex. The original
    # startswith check only recognized items 1-9 (range(1, 10)), so it
    # silently dropped sub-queries whenever num_subqueries >= 10.
    sub_queries = []
    for line in content.split("\n"):
        match = re.match(r"\s*\d+\.\s*(.+)", line)
        if match:
            sub_queries.append(match.group(1).strip())
    return sub_queries
# Demo: show each query-transformation technique on a sample question.
original_query = "What are the impacts of AI on job automation and employment?"
print("Original Query:", original_query)
# 1) Rewrite the user question to be more specific and detailed.
rewritten_query = rewrite_query(original_query)
print("\n1. Rewritten Query:")
print(rewritten_query)
# 2) Broaden the user question into a step-back query for background info.
step_back_query = generate_step_back_query(original_query)
print("\n2. Step-back Query:")
print(step_back_query)
# 3) Decompose the user question into simpler sub-queries.
sub_queries = decompose_query(original_query, num_subqueries=4)
print("\n3. Sub-queries:")
for i, query in enumerate(sub_queries, 1):
    print(f"   {i}. {query}")
# Evaluation: load the first validation item (expects a list of dicts with
# "question" and "ideal_answer" keys) and compare all techniques on it.
# NOTE(review): runs at import time and performs network I/O; consider a
# `if __name__ == "__main__":` guard.
with open('data/val.json') as f:
    data = json.load(f)
query = data[0]['question']
reference_answer = data[0]['ideal_answer']
pdf_path = "data/AI_Information.pdf"
evaluation_results = evaluate_transformations(pdf_path, query, reference_answer)