import json
from fastapi import APIRouter, Query
from fastapi.responses import JSONResponse
from typing import Optional, List
from langchain_ollama import OllamaLLM, OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
    TextLoader,
    CSVLoader,
    DirectoryLoader,
    UnstructuredHTMLLoader,
    JSONLoader,
    PyPDFLoader
)
import os
router = APIRouter()

# Base URL of the remote Ollama service.
# NOTE(review): hard-coded private IP — consider moving to env/config.
OLLAMA_BASE_URL = "http://172.16.21.38:11436"
# Model name used for both generation (OllamaLLM) and embeddings (OllamaEmbeddings).
OLLAMA_MODEL = "qwen3:0.6b"

# Shared Ollama LLM instance, created once at import time and reused across requests.
ollama = OllamaLLM(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)

# Directory where the FAISS vector index is persisted between requests.
# NOTE(review): absolute Windows path — breaks on other machines; consider making configurable.
FAISS_INDEX_PATH = "H:/DWORPLACETEST/PythonWebTemplate/AIAgentBase/app/testapi/D4RAG/faiss_index"



@router.get("/test")
async def test():
    """Health-check endpoint: respond with a static greeting payload."""
    payload = {"message": "Hello, World!"}
    return JSONResponse(content=payload)

def _load_source_documents():
    """Load the example documents that seed the FAISS index.

    Only called when the persisted index does not exist yet; the original
    code loaded these files on every request even when they were unused.
    """
    loaders = [
        TextLoader("H:/DWORPLACETEST/PythonWebTemplate/AIAgentBase/app/testapi/D4RAG/examples/example.md", encoding="utf-8"),
        UnstructuredHTMLLoader("H:/DWORPLACETEST/PythonWebTemplate/AIAgentBase/app/testapi/D4RAG/examples/loader.html"),
    ]
    docs = []
    for loader in loaders:
        docs.extend(loader.load())
    return docs


def _get_vectorstore(embeddings):
    """Return the FAISS vector store, building and persisting it on first use.

    If an index file already exists under FAISS_INDEX_PATH it is loaded and
    the (potentially slow) document loading step is skipped entirely.
    """
    os.makedirs(FAISS_INDEX_PATH, exist_ok=True)
    index_file = os.path.join(FAISS_INDEX_PATH, "index.faiss")

    if os.path.exists(index_file):
        # Reuse the persisted index; no need to touch the source documents.
        return FAISS.load_local(
            FAISS_INDEX_PATH,
            embeddings,
            # The index is produced locally by this module, not untrusted input,
            # so deserialization is acceptable here.
            allow_dangerous_deserialization=True
        )

    # First run: build the index from the source documents and persist it.
    vectorstore = FAISS.from_documents(_load_source_documents(), embeddings)
    vectorstore.save_local(FAISS_INDEX_PATH)
    return vectorstore


@router.get("/rag_demo2")
async def rag_demo2(query: str = Query(..., description="用户问题")):
    """RAG demo: load/build vector store -> retrieve -> answer with the LLM.

    Query params:
        query: the user's question, forwarded to the RetrievalQA chain.

    Returns:
        JSON with the original query, the LLM answer, and the metadata of
        the retrieved source documents.

    NOTE(review): the langchain calls below are blocking (disk + HTTP to
    Ollama) inside an ``async def`` — they will block the event loop.
    Consider a plain ``def`` endpoint (FastAPI runs it in a threadpool)
    or async chain invocation.
    """
    # Embeddings client for the remote Ollama service (cheap to construct).
    embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)

    # Load the persisted index, or build it from documents on first use.
    vectorstore = _get_vectorstore(embeddings)

    # Retriever over the vector store (default search settings).
    retriever = vectorstore.as_retriever()

    # RAG QA chain: retrieve relevant chunks, then let the LLM answer.
    qa = RetrievalQA.from_chain_type(
        llm=ollama,
        retriever=retriever,
        return_source_documents=True
    )

    result = qa.invoke({"query": query})

    return JSONResponse(content={
        "query": query,
        "answer": result["result"],
        "sources": [doc.metadata for doc in result["source_documents"]]
    })
