import os
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from langchain_community.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import ZhipuAIEmbeddings
from langchain_community.vectorstores import FAISS
import tempfile

from dotenv import load_dotenv

from langchain_openai import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

# Load environment variables (e.g. ZHIPUAI_API_KEY) from the local env file
# before any request handler reads them via os.getenv().
load_dotenv(".env.local")

app = FastAPI()

# Allow cross-origin requests (needed if a browser front-end calls this API).
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# wide open — restrict origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global in-memory vector store shared across requests (simple demo setup;
# persist to disk for production). Populated by /api/upload, read by /api/ask.
vectorstore = None
retriever = None


# Upload a document and build the vector store from it.
@app.post("/api/upload")
def upload_doc(file: UploadFile = File(...)):
    """Ingest an uploaded txt/pdf/docx file into the global FAISS index.

    The file is spooled to a named temp file (the loaders need a real path),
    loaded, split into overlapping chunks, embedded with ZhipuAI, and stored
    in the module-level ``vectorstore``/``retriever`` used by /api/ask.

    Returns a JSON message with the chunk count, or a 400 JSON error for
    unsupported file types.
    """
    global vectorstore, retriever
    suffix = file.filename.split(".")[-1].lower()
    with tempfile.NamedTemporaryFile(delete=False, suffix=f".{suffix}") as tmp:
        tmp.write(file.file.read())
        tmp_path = tmp.name
    # try/finally so the temp file is removed even when loading fails
    # (the original leaked it if loader.load() raised).
    try:
        # Pick the loader matching the file extension.
        if suffix == "txt":
            loader = TextLoader(tmp_path, encoding="utf-8")
        elif suffix == "pdf":
            loader = PyPDFLoader(tmp_path)
        elif suffix == "docx":
            loader = Docx2txtLoader(tmp_path)
        else:
            return JSONResponse(status_code=400, content={"error": "不支持的文件类型"})
        docs = loader.load()
    finally:
        os.remove(tmp_path)
    # Split into 500-char chunks with 50-char overlap for retrieval.
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    splits = splitter.split_documents(docs)
    # Build the vector index; replaces any previously uploaded document.
    embeddings = ZhipuAIEmbeddings(api_key=os.getenv("ZHIPUAI_API_KEY"))
    vectorstore = FAISS.from_documents(splits, embeddings)
    retriever = vectorstore.as_retriever()
    return {"msg": f"文档已上传并处理，共 {len(splits)} 段"}


# Question-answering endpoint.
@app.post("/api/ask")
def ask_question(question: str = Form(...)):
    """Answer a question against the previously uploaded document.

    Retrieves the most relevant chunks from the global retriever, feeds them
    plus the question to GLM-4 (via the OpenAI-compatible endpoint), and
    returns the model's answer.

    Returns ``{"answer": ...}``, or a 400 JSON error when no document has
    been uploaded yet.
    """
    global retriever
    if retriever is None:
        return JSONResponse(status_code=400, content={"error": "请先上传文档"})
    docs = retriever.invoke(question)
    context = "\n".join(doc.page_content for doc in docs)

    llm = ChatOpenAI(
        temperature=0.95,
        model="glm-4",
        openai_api_key=os.getenv("ZHIPUAI_API_KEY"),
        openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
    )

    # BUG FIX: the original f-string spliced user-controlled text (document
    # chunks and the question) directly into the template source, so any
    # literal "{" or "}" in the document crashed template parsing and allowed
    # prompt-template injection. Pass them as template variables instead.
    prompt = ChatPromptTemplate(
        messages=[
            SystemMessagePromptTemplate.from_template(
                "已知信息：{context}\n\n请根据上述内容回答用户的问题。"
            ),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
    )

    # NOTE(review): a fresh memory per request means chat_history is always
    # empty here; hoist the memory to module scope (or key it per session)
    # if multi-turn conversation is actually desired.
    # input_key is required now that the chain has two inputs.
    memory = ConversationBufferMemory(
        memory_key="chat_history", input_key="question", return_messages=True
    )
    conversation = LLMChain(
        llm=llm,
        prompt=prompt,
        verbose=True,
        memory=memory,
    )
    result = conversation.invoke({"question": question, "context": context})
    print(result)
    return {"answer": result["text"]}


# Health-check endpoint.
@app.get("/")
def root():
    """Liveness probe confirming the API process is up."""
    status_payload = {"msg": "LangChain 文档问答系统 API 正常运行"}
    return status_payload
