from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from langchain.document_loaders import PyPDFLoader, CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
# import fitz  # PyMuPDF

from dotenv import load_dotenv

load_dotenv()

app = FastAPI()

# Enable CORS so the browser front-ends listed below can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:3000",
        "http://localhost:81",
    ],  # allowed origins (local dev front-ends)
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)

# Create the LangChain LLM and the document-retrieval QA chain.
# llm = OpenAI(temperature=0, model='gpt-4o')
llm = OpenAI(temperature=0)  # temperature=0 for deterministic answers
embeddings = OpenAIEmbeddings()
vectorstore = None  # set at startup (below) or after a PDF upload
qa_chain = None  # RetrievalQA over `vectorstore`; stays None until a store exists

# Best-effort: reopen a previously persisted Chroma store from "db/" so the
# QA chain survives restarts. On failure both globals simply remain None.
try:
    vectorstore = Chroma(persist_directory="db", embedding_function=embeddings)
    qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever())
    print("Successfully loaded vector store from persistent storage.")
except Exception as e:
    print(f"Failed to load vector store: {e}")


# 在本地设置 Chroma 数据库路径
# chroma_db = Chroma(persist_directory="db")


# 加载pdf, 创建langchain链
def load_pdf_and_create_qa_chain(pdf_path: str):
    """Index a PDF into the persistent Chroma store and rebuild the QA chain.

    Rebinds the module-level ``vectorstore`` and ``qa_chain`` globals so the
    other endpoints immediately see the freshly indexed document.
    """
    global vectorstore, qa_chain

    # Load the PDF and split it into overlapping chunks sized for retrieval.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(PyPDFLoader(pdf_path).load())

    # Embed the chunks into Chroma and flush the index to the "db" directory.
    vectorstore = Chroma.from_documents(chunks, embeddings, persist_directory="db")
    vectorstore.persist()

    # Fresh retrieval-QA chain over the new store.
    qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever())


# 上传pdf
@app.post("/upload_pdf/")
async def upload_pdf(file: UploadFile = File(...)):
    """Accept an uploaded PDF, save it under files/, and (re)build the QA chain.

    Returns a small JSON acknowledgement; raises HTTPException(500) if the
    file cannot be written or indexed.
    """
    import os

    # basename() strips any path components a malicious client could smuggle
    # into the filename (e.g. "../../etc/passwd"), so the upload always lands
    # inside the files/ directory.
    safe_name = os.path.basename(file.filename or "upload.pdf")
    os.makedirs("files", exist_ok=True)  # previously crashed if files/ was missing
    pdf_path = os.path.join("files", f"temp_{safe_name}")

    try:
        with open(pdf_path, "wb") as f:
            f.write(await file.read())
        load_pdf_and_create_qa_chain(pdf_path)
    except Exception as e:
        # Surface a readable error instead of an unhandled 500 traceback.
        raise HTTPException(status_code=500, detail=f"Failed to process PDF: {e}")

    return {"message": "PDF uploaded and processed successfully.", "res": "ok"}


# ----------------------------------------------------------


from pydantic import BaseModel


# Pydantic request body for the (currently disabled) /ask/ endpoint.
class Item(BaseModel):
    question: str  # free-form question to ask against the indexed documents


# 简历分析, 根据历史数据问答
# @app.post("/ask/")
# async def ask_question(item: Item):
#     if qa_chain is None:
#         return {"error": "No PDF has been processed yet. Please upload a PDF first."}
#
#     answer = qa_chain.run(item.question + ',用中文回答')
#     # return {"answer": answer["result"]}
#     return {"answer": answer}


# ----------------------------------------------------------

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


# Request body shared by the /recommend and /job endpoints.
class UserInput(BaseModel):
    input: str  # free-form user text


# LLM for job recommendations; temperature 0.7 allows some variety in output.
# recommend_llm = OpenAI(temperature=0.7, model="gpt-3.5-turbo")  # Change to a supported model
recommend_llm = OpenAI(temperature=0.7)  # model= dropped; default completion model

# Prompt template (Chinese: "recommend suitable positions based on the
# following user needs: {user_input}").
prompt_template = PromptTemplate(
    input_variables=["user_input"],
    template="根据以下用户需求推荐合适的岗位: {user_input}"
)

# Chain wiring the recommendation prompt into the LLM.
chain = LLMChain(llm=recommend_llm, prompt=prompt_template)


@app.post("/recommend")
async def get_recommendation(user_input: UserInput):
    """Recommend jobs for the user.

    First asks the resume QA chain for a short profile summary, then feeds
    summary + the user's request into the recommendation chain.

    Raises:
        HTTPException 400: no resume has been indexed yet (qa_chain is None).
        HTTPException 500: either chain fails at runtime.
    """
    input_text = user_input.input

    # Explicit guard: previously a missing chain surfaced as an opaque 500
    # ("'NoneType' object has no attribute 'run'") via the except below.
    if qa_chain is None:
        raise HTTPException(
            status_code=400,
            detail="No PDF has been processed yet. Please upload a PDF first.",
        )

    # Short profile summary of the indexed resume (Chinese prompt).
    try:
        simple_commend = qa_chain.run("当前用户的简评")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error in qa_chain: {str(e)}")

    print(simple_commend)

    # Combine the summary with the user's request and generate recommendations.
    try:
        recommendations = chain.run(simple_commend + ',\n' + input_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error in generating recommendations: {str(e)}")

    return {"recommendations": recommendations}


# ----------------------------------------------------------
# LLM for the job Q&A endpoint.
# NOTE(review): OpenAI() is the *completions* wrapper, while "gpt-4o" is a
# chat model; the other chains in this file removed the model= argument for
# that reason ("Change to a supported model"). Confirm this call actually
# works, or switch to ChatOpenAI.
job_llm = OpenAI(temperature=0.7, model="gpt-4o")

# Prompt template (Chinese: "you are a professional career advisor; answer
# job-related questions based on the user's input: {user_input}").
prompt_template_job = PromptTemplate(
    input_variables=["user_input"],
    template="你是一个专业的就业指导专家, 请根据用户输入, 回答就业岗位等相关问题: {user_input}"
)

# Chain wiring the career-advisor prompt into the LLM.
chain_job = LLMChain(llm=job_llm, prompt=prompt_template_job)


# Job Q&A endpoint.
@app.post("/job")
async def get_job_qa(user_input: UserInput):
    """Answer a job/career question via the career-advisor prompt chain."""
    question = user_input.input

    # Run the chain; any failure becomes a readable 500 response.
    try:
        reply = chain_job.run(question)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    return {"answer": reply}


# ----------------------------------------------------------
# 模拟面试 ws
from fastapi import WebSocket, WebSocketDisconnect
import json
from langchain.chat_models import ChatOpenAI

# Chat model used by the mock-interview websocket below.
chat_model = ChatOpenAI(model="gpt-4o")

# Reopen the same persistent Chroma directory ("db") with its own embedder.
chroma_db = Chroma(persist_directory="db", embedding_function=OpenAIEmbeddings())

# Retrieval-QA chain backing the interview conversation.
qa_chain2 = RetrievalQA.from_chain_type(llm=chat_model, chain_type="stuff", retriever=chroma_db.as_retriever())


# Mock-interview websocket: keeps the full conversation in memory and
# replays the whole transcript to the model on every turn.
@app.websocket("/ws/interview")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    history = []  # alternating user / interviewer turns, oldest first

    try:
        while True:
            user_msg = await websocket.receive_text()
            history.append(f"用户输入：{user_msg}")

            # Rebuild the transcript and ask the QA chain to continue it as
            # the interviewer (Chinese system-style prompt).
            transcript = "\n".join(history)
            prompt = f"你是一名面试官。请根据用户输入进行面试：\n\n{transcript}"
            answer = qa_chain2(prompt)['result']

            history.append(f"面试官回复：{answer}")
            await websocket.send_text(json.dumps({"response": answer}))
    except WebSocketDisconnect:
        print("Client disconnected")


import sqlite3
from langchain.schema import Document


# Embed a list of raw text strings with the module-level OpenAI embedder.
# NOTE: `embeddings_db` is defined further down the file — resolved at call
# time, so this works as long as it isn't called before that assignment runs.
def embedding_function(texts):
    return embeddings_db.embed_documents(texts)


def embed_and_store_data(data):
    """Embed each DB row as one Document and add it to the Chroma store.

    Each row's columns are joined into a single space-separated string, which
    becomes the document text.
    """
    rows_as_text = [" ".join(str(col) for col in row) for row in data]
    docs = [Document(page_content=text) for text in rows_as_text]

    # Precompute the embeddings via the shared embedding function.
    vectors = embedding_function(rows_as_text)

    # Store the documents together with their precomputed embeddings.
    chroma_db.add_documents(documents=docs, embeddings=vectors)


# 连接到 SQLite 数据库
# todo: 连接到 Mysql 数据库
# def load_data_from_sqlite(db_path):
#     conn = sqlite3.connect(db_path)
#     cursor = conn.cursor()
#     cursor.execute(
#         '''
#         SELECT
#             s.id AS student_id,
#             s.studentNumber,
#             s.name AS student_name,
#             s.class,
#             s.enrollmentYear,
#             s.gpa,
#             g.id AS grade_id,
#             g.subject,
#             g.score,
#             g.rank,
#             g.gradePoint,
#             r.id AS resume_id,
#             r.name AS resume_name,
#             r.createdAt,
#             r.updatedAt
#         FROM
#             students s
#         LEFT JOIN
#             grades g ON s.id = g.studentId
#         LEFT JOIN
#             resumes r ON s.id = r.studentId
#         '''
#     )
#     rows = cursor.fetchall()
#     conn.close()
#     return rows


# Shared embedder for the SQLite -> Chroma ingestion helpers above.
embeddings_db = OpenAIEmbeddings()


# 在 FastAPI 启动时加载数据
# @app.on_event("startup")
# async def startup_event():
    # data = load_data_from_sqlite("../web/resume-system/prisma/dev.db")
    # print(data)
    # embed_and_store_data(data)
