from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model used to vectorize queries.
# NOTE(review): all-MiniLM-L6-v2 typically yields 384-dim embeddings — confirm
# against the ingestion side so query and document vectors match.
model = SentenceTransformer("all-MiniLM-L6-v2")

# On-disk Chroma vector store; the "rag" collection is assumed to have been
# populated by a separate ingestion step before this script runs.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* with the module-level sentence-transformer model and
    return the embedding as a plain Python list of floats."""
    embedding = model.encode(query)
    return embedding.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Fetch the document chunks nearest to *query_embedding* from Chroma.

    Args:
        query_embedding: embedding vector (list of floats) for one query.
        n_results: maximum number of chunks to retrieve.

    Returns:
        The list of retrieved document chunks for the single query.

    Raises:
        RuntimeError: if the collection returned no documents (e.g. nothing
            has been ingested into the "rag" collection yet).
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    related_chunks = results.get("documents")
    # Bug fix: the original called exit(1) here, silently terminating the
    # whole process from inside a helper with no diagnostic. Raise a
    # descriptive exception instead so the failure is visible and callers
    # can handle it if they choose.
    if not related_chunks or not related_chunks[0]:
        raise RuntimeError("No related chunks found in the 'rag' collection")
    return related_chunks[0]


def extract_abstract_stem(query):
    """Ask the local Ollama model to abstract a math/physics word problem:
    strip characters, animals, and story background, keeping only the core
    quantitative relations and the question. Returns the stripped result."""
    prompt = f"""
    请将下面的数学或物理题目进行题干抽象，去除人物、动物、故事背景等情节，只保留核心数量关系和求解目标，输出标准题干
    题目:{query}
    """
    return ollama_qa(prompt).strip()


if __name__ == "__main__":
    query = "王阿姨去市场买菜，从家到市场的距离是10公里。刚出发时，王阿姨骑电动车的速度是v千米/小时，骑到一半时电量不足，速度降为v-5千米/小时。已知她总共用了1小时到达市场，请问王阿姨出发时的速度v是多少千米/小时？"
    # Cheap keyword heuristic: if the question looks like a math/physics word
    # problem, abstract its stem first so narrative details (names, story
    # background) don't pollute the retrieval embedding.
    education_markers = ("题目", "速度", "距离", "用时", "收集", "求", "解")
    is_education = any(marker in query for marker in education_markers)
    query_for_retrieval = extract_abstract_stem(query) if is_education else query

    # Embed the (possibly abstracted) query and pull the top-3 nearest chunks.
    query_embedding = get_query_embedding(query_for_retrieval)
    print(f"query_embedding:{len(query_embedding)}")
    related_chunks = retrieve_related_chunks(query_embedding, 3)
    print(f"related_chunks:{related_chunks}")

    # Build the grounded prompt and answer with the original (unabstracted)
    # question so the LLM sees the full context the user asked about.
    query_context = "\n".join(related_chunks)
    print(f"query_context:{query_context}")
    prompt = f"已知信息:{query_context}\n\n请根据上述的内容回答用户下面的问题:{query}"
    print(f"prompt:{prompt}")
    answer = doubao_qa(prompt)
    print(f"answer:{answer}")
