import json
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File
import os
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from pydantic import BaseModel


# NOTE(review): PascalCase ("RagItem") would be conventional for a class name;
# kept as-is because the /ragv1 endpoint below references this exact name.
class rag_item(BaseModel):
    """Request body for the /ragv1 RAG endpoint."""
    # The user's question, answered against the loaded document context.
    request: str
    # Name of the Ollama model to run (e.g. "qwen2.5").
    model: str
    # Streaming flag; not read by any handler visible in this file yet
    # (see TODO #4 below the upload endpoint).
    flow: bool = False

app = FastAPI()

# Directory where uploaded files are stored.
UPLOAD_FOLDER = 'uploads'  # created below so handlers can assume it exists
# Create the upload directory at import time; exist_ok makes this idempotent.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)


@app.post("/upload")
async def upload_file(file: UploadFile = File(...)):
    """Save an uploaded file into UPLOAD_FOLDER and report success.

    Security: the client controls ``file.filename``, so only its basename is
    used when building the destination path — a name like ``../../etc/x``
    must not be able to escape the upload directory (path traversal).
    """
    # Strip any directory components from the client-supplied name.
    safe_name = os.path.basename(file.filename or "")
    if not safe_name:
        return "上传失败:文件名无效"
    file_location = os.path.join(UPLOAD_FOLDER, safe_name)
    with open(file_location, "wb") as file_object:
        file_object.write(await file.read())
    return f"文件 '{file.filename}' 上传成功"


@app.get("/chat_stream")
async def chat_stream(request: str):
    """Stream an answer to *request* as Server-Sent Events (SSE).

    The context is the fixed markdown document below; each token produced by
    the LLM chain is wrapped in a small JSON envelope and emitted as one
    SSE ``data:`` frame.
    """
    markdown_path = "uploads/lol14.19版本改动.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    docs = loader.load()
    # NOTE(review): load_dotenv runs on every request; harmless but could be
    # hoisted to application startup.
    load_dotenv()
    llm = Ollama(model="qwen2.5")  # model is fixed for this endpoint
    prompt = ChatPromptTemplate.from_template("""仅根据提供的上下文回答以下问题:

       <context>
       {context}
       </context>

       Question: {input}""")

    llm_chain = prompt | llm
    token_stream = llm_chain.stream({
        "input": request,
        "context": docs
    })

    def sse_events():
        # Accumulate the full answer only for the debug print at the end.
        answer = ""
        for token in token_stream:
            payload = {"code": "200", "msg": "ok", "data": token}
            yield f"data: {json.dumps(payload,ensure_ascii=False)}\n\n"
            answer += token
        print(answer)

    return StreamingResponse(sse_events(), media_type="text/event-stream")


# TODO  Planned features:
#       1. Validate files before use (existence check partially done below)
#       2. Answer against a specific document selected via @filename
#       3. Return the index of the supporting document paragraph
#       4. Stream the answer back (see /chat_stream)
#       5. Allow deleting previously uploaded documents
#       6. "Thinking..." progress indicator
@app.post("/ragv1")
async def rag(item: rag_item):
    """Answer ``item.request`` using the demo markdown file as context.

    The Ollama model named by ``item.model`` is prompted with the document
    contents and the question; the raw model output string is returned.
    """
    markdown_path = "uploads/demo.md"
    # Fail fast with a readable message instead of an unhandled 500 when the
    # context document has not been uploaded yet (TODO #1).
    if not os.path.exists(markdown_path):
        return "文档不存在,请先上传 uploads/demo.md"
    loader = UnstructuredMarkdownLoader(markdown_path)

    docs = loader.load()
    llm = Ollama(model=item.model)
    prompt = ChatPromptTemplate.from_template("""仅根据提供的上下文回答以下问题:

    <context>
    {context}
    </context>

    Question: {input}""")

    document_chain = prompt | llm

    result = document_chain.invoke({
        "input": item.request,
        "context": docs
    })

    return result



# Run a local development server only when executed directly (not on import).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
