import json
from fastapi import APIRouter, Query
from fastapi.responses import JSONResponse
from typing import Optional,List
from langchain_ollama import OllamaLLM, OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (TextLoader,CSVLoader,
                                                  DirectoryLoader,UnstructuredHTMLLoader,
                                                  JSONLoader,PyPDFLoader)
router = APIRouter()

# Ollama LLM configuration.
OLLAMA_BASE_URL = "http://172.16.21.38:11436"
OLLAMA_MODEL = "qwen3:0.6b"

ollama = OllamaLLM(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)

# FAISS vector-retrieval configuration.
# Directory holding the knowledge-base documents (replace with your own).
DOCUMENT_DIR = "./docs"

# Embeddings instance.
# BUG FIX: `model` is a required field on langchain_ollama.OllamaEmbeddings;
# constructing it with only `base_url` raises a validation error at import
# time. Reuse the same model the Ollama server hosts.
embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)

# Load every .txt file under DOCUMENT_DIR (recursive glob).
loader = DirectoryLoader(DOCUMENT_DIR, glob="**/*.txt", loader_cls=TextLoader)
documents = loader.load()

# Build the FAISS index at import time.
# NOTE(review): this performs embedding network calls on startup and fails
# if ./docs is missing or empty — consider lazy init in a startup hook.
vector_store = FAISS.from_documents(documents, embeddings)

# ------------------------------
# Conversation-context store
# ------------------------------
# Maps session_id -> list of {"role": ..., "content": ...} message dicts.
# In-memory only: lost on restart and not shared across worker processes.
conversation_contexts = {}

# ------------------------------
# 接口：基于 FAISS + Ollama 的上下文问答
# ------------------------------
@router.post("/qa_with_faiss")
async def qa_with_faiss(
    session_id: str = Query(..., description="每个用户/会话的唯一ID"),
    question: str = Query(..., description="用户输入的问题"),
    top_k: int = Query(3, description="检索相关文档的数量")
):
    """结合 FAISS 向量检索 + Ollama 上下文的问答接口"""
    # 1. 检索相关文档
    retrieved_docs = vector_store.similarity_search(question, k=top_k)
    retrieved_texts = "\n".join([doc.page_content for doc in retrieved_docs])

    # 2. 获取当前会话上下文
    if session_id not in conversation_contexts:
        conversation_contexts[session_id] = []

    # 3. 构建 Ollama 上下文
    # 先把检索到的文档作为系统提示
    system_prompt = f"以下是相关知识库内容，请参考回答用户问题：\n{retrieved_texts}"
    messages = [{"role": "system", "content": system_prompt}] + conversation_contexts[session_id]
    messages.append({"role": "user", "content": question})

    # 4. 调用 Ollama
    response = ollama.generate(messages=messages)

    # 5. 更新上下文
    conversation_contexts[session_id].append({"role": "user", "content": question})
    conversation_contexts[session_id].append({"role": "assistant", "content": response.text})

    return JSONResponse({
        "session_id": session_id,
        "answer": response.text,
        "retrieved_docs": [doc.page_content for doc in retrieved_docs],
        "context_length": len(conversation_contexts[session_id])
    })

# ------------------------------
# 接口：清理上下文
# ------------------------------
@router.post("/clear_context")
async def clear_context(session_id: str = Query(..., description="要清理的会话ID")):
    if session_id in conversation_contexts:
        del conversation_contexts[session_id]
        return JSONResponse({"status": "success", "message": f"上下文已清理: {session_id}"})
    return JSONResponse({"status": "fail", "message": f"未找到会话: {session_id}"})
