from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model shared by every query in this module.
# NOTE(review): this must be the same model that produced the vectors already
# stored in the collection, or similarity search is meaningless — confirm.
model = SentenceTransformer("all-MiniLM-L6-v2")

# On-disk Chroma store; get_or_create avoids failing when the "rag"
# collection does not exist yet (e.g. on first run).
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the module-level model and return a plain list of floats."""
    vector = model.encode(query)
    # .tolist() converts numpy scalars to native Python floats.
    return vector.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Return the top-``n_results`` document chunks most similar to the embedding.

    Queries the module-level Chroma ``collection`` with a single query vector.

    Raises:
        ValueError: if the collection returns no documents for the query.
    """
    results = collection.query(
        query_embeddings=[query_embedding], n_results=n_results
    )
    # Chroma returns one list of documents per query embedding; we sent one.
    documents = results.get("documents")
    if not documents or not documents[0]:
        # Previously this called exit(1) with no message, silently killing the
        # whole process — raise instead so callers can see and handle the cause.
        raise ValueError("No related chunks found for the given query embedding.")
    return documents[0]


def generate_pseudo_document(query):
    """Draft a hypothetical answer to *query* via the LLM (HyDE technique)."""
    prompt = (
        "请针对以下问题编写一段详细的回答，包括相关的背景信息和关键的概念:"
        f"{query}"
    )
    return doubao_qa(prompt)


def merge_query_with_pseudo_document(query, pseudo_document):
    """Append the pseudo document to the query as labelled background context."""
    return "\n\n".join([query, f"相关背景信息:{pseudo_document}"])


if __name__ == "__main__":

    user_query = input("请输入你的问题:")

    # HyDE step: let the LLM draft a hypothetical answer first, then search
    # with query+draft — the draft usually matches stored chunks better than
    # the bare question does.
    draft = generate_pseudo_document(user_query)
    print(f"生成伪文档:{draft}")

    enriched = merge_query_with_pseudo_document(user_query, draft)
    embedding = get_query_embedding(enriched)
    print(f"query_embedding:{len(embedding)}")

    # Retrieve the top-3 chunks and stitch them into a single context string.
    chunks = retrieve_related_chunks(embedding, 3)
    print(f"related_chunks:{chunks}")
    context = "\n".join(chunks)
    print(f"query_context:{context}")

    # Answer the ORIGINAL question, grounded in the retrieved context.
    final_prompt = f"已知信息:{context}\n\n请根据上述的内容回答用户下面的问题:{user_query}"
    print(f"prompt:{final_prompt}")
    print(f"answer:{doubao_qa(final_prompt)}")
