import os

import uvicorn
from fastapi import FastAPI, File, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from langchain.schema import HumanMessage
from langchain_community.chat_models import ChatOpenAI
from pinecone.grpc import PineconeGRPC as Pinecone
from pydantic import BaseModel

from pdf_parser import handle_pdf_upload  # local helper: chunk + embed an uploaded PDF
app = FastAPI()

# Allow cross-origin requests so the separate frontend can reach this API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# too permissive for production — restrict origins before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Deepseek / Pinecone configuration.
# SECURITY: these credentials were hard-coded in source. Read them from the
# environment instead; the old literals remain only as fallbacks so existing
# deployments keep working — rotate the leaked keys and drop the fallbacks.
api_key = os.environ.get("DEEPSEEK_API_KEY", "sk-e929d8ee897344e69fc3e7e3e5d4f745")
api_url = os.environ.get("DEEPSEEK_API_BASE", "https://api.deepseek.com")
pc = Pinecone(
    api_key=os.environ.get(
        "PINECONE_API_KEY",
        "pcsk_7FjwAR_HXQ9RNcBfMrbtRqHcojo6AEsZgNDzC5ZQedeBVhXHS7UpxpYLapgiL7o9ufbjUg",
    )
)

# Deepseek chat model via the OpenAI-compatible API; streaming is enabled so
# /chat can forward tokens to the client as they are produced.
llm = ChatOpenAI(
    model_name="deepseek-chat",
    openai_api_key=api_key,
    openai_api_base=api_url,
    temperature=0,
    streaming=True,
)
def build_prompt(query, knowledge_results):
    """Build the retrieval-augmented prompt sent to the chat model.

    Args:
        query: The user's question, interpolated verbatim.
        knowledge_results: Pre-joined text of the retrieved knowledge
            snippets (the caller joins them with newlines).

    Returns:
        The prompt string. The template text is byte-identical to the
        original so downstream model behavior is unchanged.
    """
    return f"""
    你是一个智能助手，请根据以下相关知识回答用户的问题。

    相关知识：
    {knowledge_results}

    用户问题：
    {query}

    请根据相关知识回答问题：
    """

class Question(BaseModel):
    """Request body schema for POST /chat."""

    question: str  # the raw user question to be answered against the knowledge base


# NOTE: to exercise this endpoint manually, use the auto-generated /docs UI.
@app.post("/upload")
async def upload_file(file: UploadFile = File(...)):
    """Ingest an uploaded PDF into the knowledge base.

    Returns a success message with the number of text chunks processed, or
    an actual HTTP 500 on failure. (The original returned a `(dict, 500)`
    tuple, which FastAPI serializes as a 200 response containing a JSON
    array — the status code was silently ignored.)
    """
    try:
        num_chunks = handle_pdf_upload(file)  # chunk/embed/index the PDF
        return {"message": f"文件上传成功，处理了 {num_chunks} 个文本块"}
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": "处理文件时出现错误", "details": str(e)},
        )


# Chat endpoint: retrieval-augmented answer streamed back as plain text.
@app.post("/chat")
async def chat(question: Question):
    """Answer a question using Pinecone retrieval + Deepseek streaming.

    Pipeline: embed the question, fetch the top-3 most similar chunks from
    the "chatbot" index, build a grounded prompt, then stream model tokens
    to the client as text/plain. On failure returns a real HTTP 500 (the
    original returned a `(dict, 500)` tuple, which FastAPI serializes as a
    200 response containing a JSON array).
    """
    try:
        # Embed the question with the same model used at ingest time.
        query_embedding = pc.inference.embed(
            model="multilingual-e5-large",
            inputs=[question.question],
            parameters={"input_type": "query"},
        )
        index = pc.Index("chatbot")
        # Vector similarity search. NOTE: top_k results can include entirely
        # unrelated chunks — a rerank/score-threshold step is still needed.
        results = index.query(
            namespace="example-namespace",
            vector=query_embedding[0].values,
            top_k=3,  # keep the 3 most similar knowledge chunks
            include_values=False,
            include_metadata=True,
        )
        # Join the retrieved chunk texts into a single context string.
        combined_text = "\n".join(
            match['metadata']['text'] for match in results['matches']
        )
        print(combined_text)

        prompt = build_prompt(question.question, combined_text)
        messages = [HumanMessage(content=prompt)]

        async def generate():
            """Yield model chunks as they arrive; log the full answer once done."""
            full_response = ""
            async for response in llm.astream(messages):
                chunk = response.content + "\n"
                full_response += chunk
                yield chunk  # forward each chunk to the client immediately
            # Log the complete answer (newlines stripped) after streaming ends.
            print("Full response:", full_response.replace("\n", ""))

        return StreamingResponse(generate(), media_type="text/plain")
    except Exception as e:
        print(f"Error in chat endpoint: {str(e)}")
        return JSONResponse(
            status_code=500,
            content={"error": "处理您的问题时出现错误，请稍后重试。", "details": str(e)},
        )

if __name__ == "__main__":
    # Bind on all interfaces so external clients can connect.
    uvicorn.run(app, host="0.0.0.0", port=9002)
    # NOTE: the port here is 9002 — the frontend must target 9002 to reach this FastAPI app.