import json  
import os  
# from openai import OpenAI  

# LangChain 相关库  
# 需要安装: pip install langchain langchain-openai faiss-cpu tiktoken  
from langchain_community.vectorstores import FAISS  
# from langchain_openai import OpenAIEmbeddings  
from langchain.schema import Document

# 从零构建RAG意图识别系统

# --- 0. Setup ---
# Toy knowledge base assumed to have been produced by the first
# (data-construction) step of the pipeline: each entry pairs an example
# user query with its labelled intent and extracted slot values.
_KB_ENTRIES = [
    ("明天天气怎么样？", "查询天气", {"city": "默认", "time": "明天"}),
    ("查一下北京的天气", "查询天气", {"city": "北京", "time": "今天"}),
    ("后天上海会下雨吗", "查询天气", {"city": "上海", "time": "后天"}),
    ("给我放一首周杰伦的歌", "播放音乐", {"artist": "周杰伦", "song": "任意"}),
    ("我想听七里香", "播放音乐", {"artist": "周杰伦", "song": "七里香"}),
    ("来点音乐", "播放音乐", {"artist": "任意", "song": "任意"}),
    ("订一张明天去上海的机票", "预订机票", {"departure_city": "当前城市", "destination_city": "上海", "date": "明天"}),
    ("从北京到广州的航班", "预订机票", {"departure_city": "北京", "destination_city": "广州", "date": "今天"}),
    ("购买后天西安飞成都的经济仓位", "预订机票", {"departure_city": "西安", "destination_city": "成都", "date": "后天"}),
]

# Expand the compact tuple table into the dict format the rest of the
# pipeline expects.
knowledge_base = [
    {"query": query, "intent": intent, "slots": slots}
    for query, intent, slots in _KB_ENTRIES
]

# --- 配置 API ---  
# LangChain 会自动从环境变量 `OPENAI_API_KEY` 和 `OPENAI_BASE_URL` 读取配置  
# 建议使用环境变量，而不是在代码中硬编码  
# os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY_HERE"  
# os.environ["OPENAI_BASE_URL"] = "YOUR_API_BASE_URL_HERE"

# 为方便演示，我们在这里实例化 client 和 embeddings  
# 替换为你的API Key和Base URL  
# api_key = "YOUR_API_KEY_HERE"
# base_url = "YOUR_API_BASE_URL_HERE"

# client = OpenAI(api_key=api_key, base_url=base_url)  
# embeddings = OpenAIEmbeddings(openai_api_key=api_key, openai_api_base_url=base_url)

# Local LLM served by Ollama. temperature=0 for deterministic classification,
# reasoning disabled, and format="json" so the model is constrained to emit
# valid JSON (matching the NLU extraction task below).
from langchain_ollama import ChatOllama
llm = ChatOllama(model="qwen3:8b", temperature=0, reasoning=False, format="json")

# Local embedding model used to vectorize the knowledge-base queries for FAISS.
from langchain_ollama import OllamaEmbeddings
embeddings = OllamaEmbeddings(model="bge-m3:latest")

# --- 1. Build the vector knowledge base with LangChain ---
print("Step 1: Building vector store with LangChain...")

# Wrap each knowledge-base entry in a LangChain Document: the example query
# becomes the searchable page_content, while the label travels along as
# metadata. Slots are JSON-serialized because metadata values must be
# str/int/float/bool.
documents = []
for entry in knowledge_base:
    documents.append(
        Document(
            page_content=entry["query"],
            metadata={"intent": entry["intent"], "slots": json.dumps(entry["slots"])},
        )
    )

# FAISS.from_documents embeds every document and builds the index in one call.
try:
    vector_store = FAISS.from_documents(documents, embeddings)
    print("Vector store built successfully with FAISS.")
except Exception as e:
    print(f"Error building vector store: {e}")
    # Downstream retrieval checks for a missing store and degrades gracefully.
    vector_store = None

def retrieve_examples_langchain(user_query, k=3):
    """Retrieve the k most similar knowledge-base examples for a query.

    Args:
        user_query (str): The user's input (or assembled dialogue context).
        k (int): Number of nearest-neighbour examples to return.

    Returns:
        list[dict]: Dicts with "query", "intent" and "slots" keys, matching
        the original knowledge-base entry format. Empty list if the vector
        store failed to build.
    """
    # Bug fix: the original printed a literal leading "n" because the
    # backslash of "\n" had been lost.
    print(f"\nStep 2: Retrieving examples for query: '{user_query}' with LangChain")
    # `is None` is the precise check: the store is either a FAISS instance
    # or None (set by the build step's error path).
    if vector_store is None:
        print("Vector store is not available.")
        return []

    # similarity_search returns LangChain Document objects.
    retrieved_docs = vector_store.similarity_search(user_query, k=k)

    # Convert Documents back to plain dicts for downstream functions
    # (slots were JSON-encoded when stored as metadata).
    examples = [
        {
            "query": doc.page_content,
            "intent": doc.metadata["intent"],
            "slots": json.loads(doc.metadata["slots"]),
        }
        for doc in retrieved_docs
    ]
    print(f"Retrieved {len(examples)} examples.")
    return examples

def build_prompt_with_rag(user_query, examples):
    """Build a dynamic few-shot NLU prompt from retrieved examples.

    Args:
        user_query (str): The query the LLM should analyse.
        examples (list[dict]): Retrieved entries, each with "query",
            "intent" and "slots" keys.

    Returns:
        str: The complete prompt text to send to the LLM.
    """
    print("\nStep 3: Building dynamic prompt with retrieved examples...")
    examples_str = "\n".join(
        f"// 示例\n用户输入: {ex['query']}\n"
        f"输出: {json.dumps({'intent': ex['intent'], 'slots': ex['slots']}, ensure_ascii=False)}"
        for ex in examples
    )

    # Fix: the original triple-quoted string embedded the source code's
    # 4-space indentation (plus stray trailing spaces) into the prompt sent
    # to the LLM; this template is properly dedented.
    prompt = f"""你是一个任务型对话机器人的NLU（自然语言理解）引擎。
你的任务是根据用户最新的提问，识别出用户的意图（intent）并抽取出相应的槽位（slots）。

请严格参考下面提供的示例，理解如何进行意图识别和槽位抽取。

{examples_str}

---
现在，请处理以下用户的最新提问。
请严格按照JSON格式输出，不要包含任何其他解释。

用户输入: {user_query}
输出:
"""
    print("Prompt built.")
    return prompt

def recognize_intent_with_rag(user_query):
    """Run the complete RAG intent-recognition pipeline for one query.

    Retrieves similar labelled examples, assembles a few-shot prompt,
    calls the LLM, and parses its JSON answer.

    Args:
        user_query (str): The user's input (or assembled dialogue context).

    Returns:
        dict: The parsed intent/slots JSON from the LLM, or
        {"error": "..."} if the call or JSON parsing failed.
    """
    # 1. Retrieve similar examples (LangChain-backed version).
    examples = retrieve_examples_langchain(user_query)

    # 2. Assemble the few-shot prompt.
    prompt = build_prompt_with_rag(user_query, examples)
    print("\n--- Final Prompt to LLM ---")
    print(prompt)
    print("---------------------------")

    # 3. Ask the LLM for the final recognition.
    print("\nStep 4: Calling LLM for final recognition...")
    try:
        messages = [{"role": "user", "content": prompt}]
        response = llm.invoke(input=messages)
        print("LLM call successful.")
        # ChatOllama was configured with format="json", so the content
        # should be a JSON document.
        return json.loads(response.content)
    except Exception as e:
        print(f"An error occurred during LLM call: {e}")
        return {"error": str(e)}

# --- 最终测试 ---  
# if vector_store:  
#     test_query_1 = "帮我找一首林俊杰的歌"
#     result_1 = recognize_intent_with_rag(test_query_1)  
#     print(f"\n--- Result for '{test_query_1}' ---")  
#     print(json.dumps(result_1, indent=2, ensure_ascii=False))

#     test_query_2 = "后天广州天气如何"
#     result_2 = recognize_intent_with_rag(test_query_2)  
#     print(f"\n--- Result for '{test_query_2}' ---")  
#     print(json.dumps(result_2, indent=2, ensure_ascii=False))

def assemble_context(history, current_query):
    """Concatenate recent dialogue history with the latest query for retrieval.

    Args:
        history (list of dicts): Turns of the form
            {"role": "user" or "assistant", "content": "..."}.
        current_query (str): The latest user input.

    Returns:
        str: The combined retrieval context: a "对话历史:" section listing
        the recent turns, followed by "最新提问: <current_query>".
    """
    # Keep only the last few turns (2 user+assistant exchanges) so the
    # retrieval context stays short.
    recent_history = history[-4:]

    lines = []
    for turn in recent_history:
        # Bug fix: the original compared against "user " (trailing space),
        # so every user turn was mislabelled as the assistant (助手).
        role = "用户" if turn["role"] == "user" else "助手"
        lines.append(f"{role}: {turn['content']}\n")
    history_str = "".join(lines)

    return f"对话历史:\n{history_str}\n最新提问: {current_query}"

# --- Example: multi-turn dialogue demo ---
# A two-turn booking dialogue; the latest user utterance ("明天") only makes
# sense together with the preceding history.
history = [
    {"role": "user", "content": "帮我订一张去北京的票"},
    {"role": "assistant", "content": "好的，什么时候出发？"}
]
current_query = "明天"

# Fold the recent history and the new utterance into one retrieval context.
context = assemble_context(history, current_query)
print("--- Context for RAG Retrieval ---")
print(context)

# Run the end-to-end recognition only if the vector store built successfully.
if vector_store:
    result_1 = recognize_intent_with_rag(context)
    print(f"\n--- Result for '{context}' ---")
    print(json.dumps(result_1, indent=2, ensure_ascii=False))

