from flask import Flask, request, Response, render_template
# Fixed imports: ConversationBufferMemory and ConversationChain live in the
# `langchain` package (langchain.memory / langchain.chains); neither is
# importable from `langchain_community` the way the original wrote it, so the
# module failed at import time.
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI

app = Flask(__name__)

# Chat model with token-by-token streaming enabled.
llm = ChatOpenAI(model="gpt-3.5-turbo", streaming=True)

# NOTE(review): one process-wide memory/chain means ALL clients share a single
# conversation history — confirm this is intended (use per-session memory
# otherwise).
memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, memory=memory, verbose=True)

@app.route("/")
def index():
    """Serve the chat front-end page."""
    page = render_template("index.html")
    return page

@app.route("/chat_stream", methods=["POST"])
def chat_stream():
    """Stream the model's reply as Server-Sent Events (SSE).

    Expects a JSON body ``{"message": "..."}`` and returns a
    ``text/event-stream`` response carrying the reply incrementally.
    Once streaming finishes, the full exchange is appended to the shared
    conversation memory so later turns have context.
    """
    payload = request.get_json(silent=True) or {}
    user_input = payload.get("message")
    if not user_input:
        # Guard: request.json.get("message") may be None/empty, which would
        # crash llm.stream below.
        return Response("data: [ERROR] empty message\n\n",
                        status=400, mimetype="text/event-stream")

    # Include prior turns so the model actually sees the saved history.
    # We stream from `llm` directly (ConversationChain does not stream
    # token-by-token), so the history must be passed explicitly; (role, text)
    # tuples avoid importing message classes, and `m.type` is "human"/"ai".
    history = [(m.type, m.content) for m in conversation.memory.chat_memory.messages]
    history.append(("human", user_input))

    def generate():
        parts = []
        for chunk in llm.stream(history):
            delta = chunk.content
            if delta:
                parts.append(delta)
                # SSE framing: every line of the payload needs its own
                # "data:" prefix; a raw newline inside delta would otherwise
                # terminate the event prematurely.
                framed = "\n".join(f"data: {line}" for line in delta.split("\n"))
                yield f"{framed}\n\n"
        # Persist the complete exchange in memory.
        conversation.memory.chat_memory.add_user_message(user_input)
        conversation.memory.chat_memory.add_ai_message("".join(parts))

    return Response(generate(), mimetype="text/event-stream")


if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which combined with host="0.0.0.0" allows remote code execution from
    # any machine that can reach this port — keep debug on only for local
    # development.
    app.run(host="0.0.0.0", port=5000, debug=True)
