import os
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
import tiktoken
import langchain
from langchain_core.documents import Document

print(langchain.__version__)

# =========================
# API configuration
# =========================
DEFAULT_API_BASE = "https://api.siliconflow.cn/v1/"
# SECURITY: hard-coded API key checked into source — rotate this key and load
# it from an environment variable / secrets manager instead of a literal.
DEFAULT_API_KEY = "sk-pdfifkpjdlxvyvgkerbluaotktpznsmpbcvskjauotenxgvz"
DEFAULT_LLM_MODEL = "Qwen/Qwen2.5-7B-Instruct"
DEFAULT_EMBED_MODEL = "BAAI/bge-m3"

os.environ["OPENAI_API_KEY"] = DEFAULT_API_KEY
os.environ["OPENAI_API_BASE"] = DEFAULT_API_BASE

# =========================
# Tuning parameters
# =========================
THRESHOLD = 0.75       # max acceptable retrieval score (FAISS distance; lower = closer)
TOKEN_BUDGET = 1000    # cap on total tokens of retrieved context fed to the LLM
MODEL_NAME = DEFAULT_LLM_MODEL

# =========================
# Corpus
# =========================
docs = [
    "LangChain 是一个用于构建基于大型语言模型的应用程序的框架。",
    "RAG（检索增强生成）结合了信息检索和生成模型，用于改进问答效果。",
    "FAISS 是一个高效的向量相似度搜索库，用于大规模向量检索。",
    "Token 预算控制有助于防止模型输入过长而产生错误。"
]


# =========================
# Embedding + vector store
# =========================
# Reuse the module-level constants rather than re-hardcoding the key/base URL.
# NOTE: the embedding model previously hard-coded "BAAI/bge-large-zh-v1.5",
# silently contradicting DEFAULT_EMBED_MODEL; the constant is now the single
# source of truth.
embeddings = OpenAIEmbeddings(
    model=DEFAULT_EMBED_MODEL,
    openai_api_key=DEFAULT_API_KEY,
    openai_api_base=DEFAULT_API_BASE,
)

documents = [Document(page_content=d) for d in docs]

# Builds the FAISS index up-front; this calls the embedding API at import time.
vectorstore = FAISS.from_documents(documents, embeddings)

# =========================
# 检索函数（阈值+token预算）
# =========================
def retrieve_with_constraints(query: str, k: int = 5,
                              threshold: float = THRESHOLD,
                              token_budget: int = TOKEN_BUDGET) -> list:
    """Retrieve documents for *query*, filtered by score and a token budget.

    Args:
        query: natural-language search string.
        k: number of candidates to pull from the vector store.
        threshold: maximum acceptable score. NOTE(review): FAISS scores here
            appear to be distances (lower = more similar), so ``score <
            threshold`` keeps the closest documents — confirm against the
            store's distance strategy.
        token_budget: maximum cumulative token count across returned docs.

    Returns:
        The top-ranked documents, in retrieval order, whose cumulative token
        count fits within ``token_budget``.
    """
    scored = vectorstore.similarity_search_with_score(query, k=k)
    candidates = [doc for doc, score in scored if score < threshold]

    # cl100k_base matches GPT-3.5/4; only an approximation for Qwen, but
    # adequate for budget control. tiktoken caches encodings internally.
    enc = tiktoken.get_encoding("cl100k_base")
    selected = []
    total_tokens = 0
    for doc in candidates:
        n_tokens = len(enc.encode(doc.page_content))
        if total_tokens + n_tokens > token_budget:
            # Stop at the first overflow so higher-ranked docs are preferred.
            break
        selected.append(doc)
        total_tokens += n_tokens
    return selected

# =========================
# RAG生成逻辑
# =========================
def rag_chat(query: str):
    """Run the full RAG pipeline for *query*: retrieve context, then ask the LLM.

    Returns the model's answer text, or the raw response object when it
    carries no ``content`` attribute.
    """
    retrieved = retrieve_with_constraints(query)
    context_text = "\n\n".join(d.page_content for d in retrieved)

    template = PromptTemplate(
        input_variables=["context", "question"],
        template="""
你是一个智能助手。根据以下已检索到的文档内容回答用户问题。
文档内容：
{context}

问题：
{question}

请用中文简洁准确地回答。
"""
    )
    rendered_prompt = template.format(context=context_text, question=query)

    chat_model = ChatOpenAI(
        model_name=MODEL_NAME,
        temperature=0,
        openai_api_base=DEFAULT_API_BASE,
        openai_api_key=DEFAULT_API_KEY
    )

    response = chat_model.invoke(rendered_prompt)
    # Chat models return a message object; fall back to the raw value otherwise.
    return getattr(response, "content", response)

# =========================
# 测试
# =========================
if __name__ == "__main__":
    # Smoke-test the pipeline with a single sample question.
    user_question = "RAG 是什么？"
    print("💬 最终回答：", rag_chat(user_question))
