from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model used to vectorize queries for similarity search.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent on-disk vector store; the "rag" collection holds the indexed chunks.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the sentence-transformer model and return it as a plain list."""
    vector = model.encode(query)
    return vector.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Query the vector store for the chunks most similar to the embedding.

    Args:
        query_embedding: Embedding vector (list of floats) for one query.
        n_results: Maximum number of chunks to retrieve.

    Returns:
        List of matching document chunks; empty list when nothing matches.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    related_chunks = results.get("documents")
    # Fix: the original called exit(1) here, terminating the whole process
    # with no message whenever a single query had no matches. Returning an
    # empty list lets the caller decide how to handle a miss.
    if not related_chunks or not related_chunks[0]:
        return []
    return related_chunks[0]


def summarize_queries_from_context(history, last_question):
    """Condense a multi-turn conversation into knowledge-base retrieval queries.

    Asks the local LLM to emit one query per line, then splits the raw
    response into a clean list of query strings.

    Args:
        history: Prior conversation turns as a list of strings.
        last_question: The user's latest question.

    Returns:
        List of query strings, one per non-empty line of the LLM output.
    """
    prompt = """
        请根据以下多轮会话的内容，归纳出1到多条适合知识库检索的query,每条query需要单独输出
        历史会话 {history}
        用户最新的问题{last_question}
        请输出所有检索到的query,每行一条
    """.format(
        history="\n".join(history), last_question=last_question
    )
    raw = ollama_qa(prompt)
    print(f"归纳出了{raw}")
    # Fix: the LLM returns a single raw string, but callers iterate the
    # result per query ("for q in queries"), which would otherwise yield
    # individual characters. Split into one query per non-empty line,
    # as the prompt requests.
    return [line.strip() for line in raw.splitlines() if line.strip()]


if __name__ == "__main__":
    history = [
        "用户：我最近买了个智能手表。",
        "AI：请问您购买的是哪个品牌或型号？",
        "用户：是Alpha智能手表，去年买的。",
        "AI：请问有什么可以帮您解答的问题吗？",
    ]
    latest_question = "保修期还有多久？"
    # 1.智能归纳多轮对话的上下文，生成检索query列表
    queries = summarize_queries_from_context(history, latest_question)
    # 2.针对每条query进行分别检索
    all_chunks = []
    for q in queries:
        query_embedding = get_query_embedding(q)
        related_chunks = retrieve_related_chunks(query_embedding, 3)
        all_chunks.extend(related_chunks)
    query_context = "\n".join(all_chunks)
    print(f"query_context:{query_context}")
    prompt = f"已知信息:{query_context}\n\n请根据上述的内容回答用户下面的问题:{latest_question}"
    print(f"prompt:{prompt}")
    answer = doubao_qa(prompt)
    print(f"answer:{answer}")
