import os

from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
# import fitz  # PyMuPDF

from dotenv import load_dotenv

load_dotenv()

app = FastAPI()

# 跨域 添加 CORS 中间件
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # 允许的原点
    allow_credentials=True,
    allow_methods=["*"],  # 允许所有方法
    allow_headers=["*"],  # 允许所有头部
)

# 创建一个 LangChain LLM 和文档检索 QA 链
llm = OpenAI(temperature=0)
embeddings = OpenAIEmbeddings()
vectorstore = None
qa_chain = None

# 在本地设置 Chroma 数据库路径
chroma_db = Chroma(persist_directory="db")


# 加载pdf, 创建langchain链
def load_pdf_and_create_qa_chain(pdf_path: str):
    # load
    global vectorstore, qa_chain
    loader = PyPDFLoader(pdf_path)
    documents = loader.load()

    # 使用文本切分器进行处理
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    split_docs = text_splitter.split_documents(documents)

    # 保存文档向量到 Chroma 数据库
    vectorstore = Chroma.from_documents(split_docs, embeddings, persist_directory="db")
    vectorstore.persist()  # 持久化到本地
    qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever())


# 上传pdf
@app.post("/upload_pdf/")
async def upload_pdf(file: UploadFile = File(...)):
    pdf_path = f"files/temp_{file.filename}"
    with open(pdf_path, "wb") as f:
        content = await file.read()
        f.write(content)

    load_pdf_and_create_qa_chain(pdf_path)
    return {"message": "PDF uploaded and processed successfully."}


from pydantic import BaseModel


# 定义 Pydantic 模型
class Item(BaseModel):
    question: str


# 根据历史数据问答
@app.post("/ask/")
async def ask_question(item: Item):
    if qa_chain is None:
        return {"error": "No PDF has been processed yet. Please upload a PDF first."}

    answer = qa_chain.run(item.question + ',用中文回答')
    # return {"answer": answer["result"]}
    return {"answer": answer}


# 定义请求体模型
class UserInput(BaseModel):
    input: str


# 初始化 OpenAI 模型
recommend_llm = OpenAI(temperature=0.7)

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# 定义 prompt 模板
prompt_template = PromptTemplate(
    input_variables=["user_input"],
    template="根据以下用户需求推荐合适的岗位: {user_input}"
)

# 创建一个 LLMChain
chain = LLMChain(llm=recommend_llm, prompt=prompt_template)


# 根据用户输入, 推荐岗位
@app.post("/recommend")
async def get_recommendation(user_input: UserInput):
    # 获取用户输入
    input_text = user_input.input
    # 获取用户的简评
    simple_commend = qa_chain.run("当前用户的简评")
    print(simple_commend)

    # 使用链条生成推荐
    try:
        recommendations = chain.run(simple_commend + ',\n' + input_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    # 返回推荐结果
    return {"recommendations": recommendations}
