from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model used to encode user queries into vectors.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent on-disk ChromaDB vector store; the "rag" collection is assumed
# to already contain the indexed document chunks (indexed elsewhere).
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the sentence-transformer model and return it as a plain list of floats."""
    embedding = model.encode(query)
    return embedding.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Query the vector store for the chunks closest to *query_embedding*.

    Parameters
    ----------
    query_embedding : list[float]
        Embedding vector of the user query.
    n_results : int
        Maximum number of chunks to retrieve.

    Returns
    -------
    list[str]
        The retrieved document chunks for this single query.

    Raises
    ------
    ValueError
        If the collection returns no documents. The original code called
        exit(1) here, which silently terminated the whole process with no
        message; raising keeps the function usable as library code and
        gives callers a traceback.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    documents = results.get("documents")
    if not documents or not documents[0]:
        raise ValueError("No related chunks found in the 'rag' collection for this query.")
    return documents[0]


def single_rag_iteration(query):
    """Run one retrieve-then-generate pass for *query*.

    Returns a ``(answer, related_chunks)`` tuple: the LLM answer and the
    list of context chunks that were stuffed into the prompt.
    """
    embedding = get_query_embedding(query)
    chunks = retrieve_related_chunks(embedding, n_results=3)
    joined_context = "\n".join(chunks)
    # Prompt: "Known information: <context> ... answer the user's question: <query>"
    prompt = f"已知信息:{joined_context}\n\n 请根据上下文内容回答用户的问题:{query}"
    return ollama_qa(prompt), chunks


def build_iterative_query(original_query, previous_answer):
    """Compose the next-round query: the original question plus the previous answer as context."""
    return f"{original_query}\n\n,基于上一次的回答:{previous_answer}"


def iterative_rag_generation(original_query, max_iterations=2):
    """Run up to *max_iterations* RAG passes, feeding each answer back into the next query.

    Returns ``(final_answer, iteration_results)`` where *iteration_results*
    is a list of dicts recording, for every round, the query used, the
    answer produced and the context chunks retrieved.
    """
    history = []
    query = original_query
    for round_no in range(1, max_iterations + 1):
        # One retrieve-then-generate pass with the current query.
        answer, chunks = single_rag_iteration(query)
        history.append(
            {
                "iteration": round_no,       # 1-based round counter
                "query": query,              # query used this round
                "answer": answer,            # LLM answer this round
                "context_chunks": chunks,    # retrieved context chunks
            }
        )
        # Next round restates the original question together with this answer.
        query = build_iterative_query(original_query, answer)
    return history[-1]["answer"], history


if __name__ == "__main__":
    # Demo entry point: answer a sample HR question ("How do I request
    # marriage leave?") using two iterative RAG rounds, then print the result.
    user_question = "婚假怎么请？"
    final_answer, _history = iterative_rag_generation(user_question, max_iterations=2)
    print(final_answer)
