from sentence_transformers import SentenceTransformer
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa

# Sentence-embedding model; must match the model used when the collection
# was populated, or query/document vectors will live in different spaces.
model = SentenceTransformer("all-MiniLM-L6-v2")

# Persistent on-disk ChromaDB store; the "rag" collection is assumed to be
# pre-populated with document chunks by a separate ingestion step.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Encode *query* into a dense vector.

    Returns the embedding as a plain Python list so it can be passed
    directly to ChromaDB's query API.
    """
    vector = model.encode(query)
    return vector.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Query the vector store and return documents passing a similarity cutoff.

    Args:
        query_embedding: query vector as a plain list of floats.
        n_results: maximum number of candidates to fetch before filtering.

    Returns:
        A list of document strings whose (1 - distance) score exceeds the
        hard-coded threshold; may be empty.
    """
    results = collection.query(
        query_embeddings=[query_embedding],
        n_results=n_results,
        include=["documents", "distances"],
    )
    # Per the original author, "distances" are L2 (Euclidean) distances:
    # smaller distance means more similar.
    scores = results.get("distances")[0]
    # Tune to trade precision for recall (original note suggested 0.7;
    # with the 1 - score test below, a LOWER threshold keeps MORE results).
    threshold = 0.1
    # NOTE(review): 1 - distance is only a meaningful similarity for a
    # bounded metric such as cosine distance; for unbounded L2 distances
    # this filter can reject everything. Confirm the collection's
    # distance metric ("hnsw:space") before trusting this cutoff.
    filtered_result = [
        doc
        for doc, score in zip(results["documents"][0], scores)
        if 1 - score > threshold
    ]
    return filtered_result


def query_enhancement(query, iteration):
    """Rewrite *query* into up to three sharper retrieval queries.

    The LLM is asked for three newline-separated rewrites; blank lines
    and any echoed "原问题" line are dropped. *iteration* is accepted for
    interface compatibility but not used here.

    Returns:
        A list of at most three rewritten query strings (possibly empty).
    """
    enhancement_prompt = f"""
    请对以下的问题进行查询重写，生成更精确的检索查询，要求
    1. 保持原问题的核心含义 
    2. 添加更多的关键字和细节
    3. 使用更具体的表达方式

    原问题:{query}
    要求生成3个增强后的查询,用换行符分隔
    """
    raw_output = doubao_qa(enhancement_prompt)
    if not raw_output:
        return []
    # Keep non-blank lines that don't echo the original question back.
    queries = [
        line.strip()
        for line in raw_output.strip().split("\n")
        if line.strip() and not line.startswith("原问题")
    ]
    return queries[:3]


def judge_document_relevance(query, documents):
    """Ask the LLM which of *documents* are relevant to *query*.

    The model is prompted to answer "文档N:[yes/no]" per document, one per
    line; output lines are matched to *documents* positionally.

    Args:
        query: the user question.
        documents: candidate document strings to judge.

    Returns:
        The subset of *documents* whose corresponding judgment line
        contains "yes" (possibly empty).
    """
    # Build the document listing outside the f-string: a backslash inside
    # an f-string expression is a SyntaxError before Python 3.12 (PEP 701).
    doc_listing = "\n".join(f"文档{i+1}:{doc}" for i, doc in enumerate(documents))
    judge_prompt = f"""
    请判断以下文档是否和用户问题相关，请只回答"yes"或"no"，不要那么严格，有点相关性就可以，比如病假
    用户问题{query}
    文档内容:
    {doc_listing}

    请逐个判断每个文档的相关性，格式如下：
    文档1:[yes/no]
    文档2:[yes/no]
    ...

    不同的的文档用换行符\n分隔
   
    """
    judgment = doubao_qa(judge_prompt)
    relevant_docs = []
    if judgment:
        lines = judgment.strip().split("\n")
        for i, line in enumerate(lines):
            # Positional match: line i judges documents[i]; guard against
            # the model emitting more lines than documents.
            if "yes" in line and i < len(documents):
                relevant_docs.append(documents[i])
    return relevant_docs


def generate_answer_with_context(query, context):
    """Generate an answer to *query* grounded strictly in *context*.

    Args:
        query: the user question.
        context: newline-joined relevant document chunks.

    Returns:
        The LLM's answer string (or whatever doubao_qa returns on failure).
    """
    prompt = f"""
        请根据上下文信息回答用户问题，要求
        1.答案必须基于提供的上下文信息
        2.答案要准确，完整，有条理
        上下文信息:
        {context}
        用户问题：
        {query}
        答案:
    """
    return doubao_qa(prompt)


def verify_answer_credibility(query, context, answer):
    """Self-verify an answer on two axes: grounding and completeness.

    Runs two independent LLM judgments: whether *answer* is supported by
    *context*, and whether it fully answers *query*.

    Returns:
        (has_support, is_complete): truthy when the respective judgment
        contains "有依据" / "很完整".
    """
    support_check_prompt = f"""
    请判断以下的答案能否在提供的上下文中找到线索，请只回答"有依据"或"无依据"
    用户问题:{query}
    上下文信息:{context}
    生成的答案:{answer}
    """
    support_result = doubao_qa(support_check_prompt)

    complete_check_prompt = f"""
    请判断以下的答案能否完整的回答了用户的问题，请只回答"很完整"或"不完整"
    用户问题:{query}
    生成的答案:{answer}
    判断 这个答案是否完整回答了用户的问题?
    """
    # Bug fix: the completeness check previously re-sent the support prompt,
    # so "很完整" could never appear and is_complete was always False.
    complete_result = doubao_qa(complete_check_prompt)

    has_support = support_result and "有依据" in support_result
    is_complete = complete_result and "很完整" in complete_result
    return has_support, is_complete


def self_rag_pipeline(query, max_iteration):
    """Self-RAG loop: retrieve, judge relevance, answer, self-verify.

    Iterates up to *max_iteration* rounds, rewriting the query via
    query_enhancement() whenever retrieval, relevance judgment, or answer
    verification fails.

    Args:
        query: the user question.
        max_iteration: maximum number of refine/retry rounds (>= 1).

    Returns:
        A verified answer string, or a Chinese failure message.
    """
    for iteration in range(1, max_iteration + 1):
        # 1. Embed the current query and retrieve candidate chunks.
        query_embedding = get_query_embedding(query)
        related_chunks = retrieve_related_chunks(query_embedding, n_results=5)

        # 2. Nothing retrieved: try each enhanced query until one hits.
        if not related_chunks:
            for candidate in query_enhancement(query, iteration):
                candidate_embedding = get_query_embedding(candidate)
                related_chunks = retrieve_related_chunks(candidate_embedding, n_results=5)
                if related_chunks:
                    query = candidate  # keep the rewrite that worked
                    break
            if not related_chunks:
                print("经过了增强查询，还是未找到相关的文档")
                return "经过了增强查询，还是未找到相关的文档"

        # 3. LLM-based relevance filtering of the retrieved chunks.
        relevant_docs = judge_document_relevance(query, related_chunks)
        print(relevant_docs)
        if not relevant_docs:
            if iteration >= max_iteration:
                return "抱歉，无法找到相关信息来回答你的问题"
            enhanced_queries = query_enhancement(query, iteration)
            if enhanced_queries:
                query = enhanced_queries[0]
            # Bug fix: the original fell through and generated an answer
            # from an empty context when enhancement produced no queries;
            # always retry the next iteration instead.
            continue

        # 4. Generate an answer grounded in the relevant documents.
        context = "\n".join(relevant_docs)
        answer = generate_answer_with_context(query, context)

        # 5. Self-verify the answer for grounding and completeness.
        has_support, is_complete = verify_answer_credibility(query, context, answer)
        if has_support and is_complete:
            return answer
        if not has_support:
            # Unsupported answer: regenerate a few times on the same context.
            for _ in range(3):
                new_answer = generate_answer_with_context(query, context)
                has_support, is_complete = verify_answer_credibility(
                    query, context, new_answer
                )
                if has_support and is_complete:
                    # Bug fix: the original returned the stale first answer
                    # here instead of the freshly verified one.
                    return new_answer
            # Still unsupported after 3 tries: refine the query and retry.
            if iteration < max_iteration:
                enhanced_queries = query_enhancement(query, iteration)
                if enhanced_queries:
                    query = enhanced_queries[0]
                    continue
        else:
            # Supported but incomplete: refine the query and retry.
            enhanced_queries = query_enhancement(query, iteration)
            if enhanced_queries:
                query = enhanced_queries[0]
                continue
    return "抱歉，经过多轮迭代，还是无法生成满意的答案"


if __name__ == "__main__":
    query = "假期的适用对象?"
    final_answer = self_rag_pipeline(query, max_iteration=3)
    print(final_answer)
