from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
import numpy as np

# Sentence embedding model shared by all encoding helpers below.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent Chroma vector store on disk; the "rag" collection holds the
# pre-indexed document chunks that retrieve_related_chunks() queries.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* into an embedding vector (plain Python list).

    Kept for API compatibility; identical behavior to get_embedding(),
    to which it now delegates instead of duplicating the encode call.
    """
    return get_embedding(query)


def retrieve_related_chunks(query_embedding, n_results=3):
    """Return the documents in the "rag" collection nearest to *query_embedding*.

    Args:
        query_embedding: embedding vector (list of floats) for a single query.
        n_results: maximum number of chunks to retrieve.

    Returns:
        List of document strings for the single query embedding.

    Raises:
        RuntimeError: when no documents are found. The original code called
            ``exit(1)`` here, killing the whole process silently; raising an
            exception is recoverable by callers and carries a message.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    # Chroma returns a list-of-lists: one inner list per query embedding.
    documents = results.get("documents")
    if not documents or not documents[0]:
        raise RuntimeError("No related chunks found in the 'rag' collection")
    return documents[0]


def generate_num_hypothetical_docs(query, num_hypothetical_docs):
    """Generate up to *num_hypothetical_docs* hypothetical answers (HyDE step).

    Each answer is produced by the LLM from a different prompt perspective
    (academic, applied, conceptual, economic). Only four perspectives exist,
    so the count is effectively capped at 4 by the slice.

    Args:
        query: the user question to answer from each perspective.
        num_hypothetical_docs: how many perspectives to use.

    Returns:
        List of LLM-generated answer strings, in perspective order.
    """
    perspectives = [
        f"请从学术研究的角度，针对以下的问题编写一段详细的回答：{query}",
        f"请从实际应用研究的角度，针对以下的问题编写一段详细的回答：{query}",
        f"请从基础概念研究的角度，针对以下的问题编写一段详细的回答：{query}",
        f"请从经济价格研究的角度，针对以下的问题编写一段详细的回答：{query}",
    ]
    # Comprehension replaces the original append loop, whose enumerate index
    # was never used.
    return [doubao_qa(perspective) for perspective in perspectives[:num_hypothetical_docs]]


def get_embedding(text):
    """Encode *text* with the shared sentence-transformer model.

    Returns the embedding as a plain Python list of floats.
    """
    vector = model.encode(text)
    return vector.tolist()


def calculate_average_embedding(embeddings):
    """Return the element-wise mean of a list of equal-length vectors.

    Args:
        embeddings: non-empty list of embedding vectors (lists of floats).

    Returns:
        The averaged embedding as a plain Python list.

    Raises:
        ValueError: if *embeddings* is empty. Without this guard,
            ``np.mean`` on an empty array emits a RuntimeWarning and
            silently returns NaN.
    """
    if not embeddings:
        raise ValueError("embeddings must be a non-empty list of vectors")
    # Stack into a 2-D array and average across rows (axis=0).
    return np.mean(np.array(embeddings), axis=0).tolist()


def hyde_query_embedding(query, num_hypothetical_docs=3):
    """Build a HyDE-enhanced embedding for *query*.

    HyDE: embed the raw query together with several LLM-generated
    hypothetical answers, then average the vectors. The averaged vector
    tends to land closer to relevant documents than the raw query alone.

    Args:
        query: the user question.
        num_hypothetical_docs: how many hypothetical answers to generate
            (capped at 4 by the available perspectives).

    Returns:
        The averaged embedding as a plain Python list.
    """
    # 1. Generate hypothetical answer documents from several perspectives.
    hypothetical_docs = generate_num_hypothetical_docs(query, num_hypothetical_docs)
    # 2. Embed the original query plus every hypothetical document.
    #    (The original loop carried an unused enumerate index.)
    embeddings = [get_embedding(query)]
    embeddings.extend(get_embedding(doc) for doc in hypothetical_docs)
    # 3. Average everything into a single query vector.
    return calculate_average_embedding(embeddings)


if __name__ == "__main__":
    query = input("请输入你的问题:")
    # HYDE增强查询向量
    query_embedding = hyde_query_embedding(query, num_hypothetical_docs=3)
    print(f"query_embedding:{len(query_embedding)}")
    related_chunks = retrieve_related_chunks(query_embedding, 3)
    print(f"related_chunks:{related_chunks}")
    query_context = "\n".join(related_chunks)
    print(f"query_context:{query_context}")
    prompt = f"已知信息:{query_context}\n\n请根据上述的内容回答用户下面的问题:{query}"
    print(f"prompt:{prompt}")
    answer = doubao_qa(prompt)
    print(f"answer:{answer}")
