from flask import Flask, request, jsonify, Response
from langchain_community.llms import Ollama
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import requests
import os
from search import WebSearcher
import embedding_handle
from flask_cors import CORS  # CORS support so browser front-ends can call this API

app = Flask(__name__)
CORS(app)  # Enable cross-origin requests from browser front-ends.

# Initialize the Ollama model. Model name and server URL are configurable via
# environment variables; the defaults match the previously hard-coded values,
# so behavior is unchanged when the variables are unset.
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "qwen2.5:7b")
OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "http://127.0.0.1:11434")
llm = Ollama(model=OLLAMA_MODEL, base_url=OLLAMA_BASE_URL)
searcher = WebSearcher()

# Live web retrieval used to ground the model's answers.
def search_web(query):
    """Fetch up to 3 web results for *query* and return them re-ranked
    by embedding relevance against the query."""
    raw = searcher.search_web(query=query, num_results=3)
    results = raw.get('results', [])
    return embedding_handle.rank_search_results(query, results)

# Prompt template: instructs the model to answer strictly from the supplied
# search context instead of guessing. The template text is sent verbatim to
# the model (it is runtime data, not a comment), so it stays in Chinese.
template = """你是一个基于最新数据的助手。请严格基于以下上下文回答问题，不要凭空猜测：
{context}

问题：{question}
"""
# {context} receives the ranked web-search results; {question} receives the
# user's query. The filled prompt is what generate_response sends to Ollama.
prompt = PromptTemplate(template=template, input_variables=["context", "question"])

def _sse_event(payload):
    """Format *payload* as one Server-Sent Events message.

    The SSE protocol requires EVERY line of a multi-line payload to carry
    its own 'data: ' prefix; prefixing only the first line (as the old code
    did) corrupts the stream whenever the model's answer contains newlines.
    """
    lines = str(payload).splitlines() or ['']
    return ''.join(f"data: {line}\n" for line in lines) + "\n"

# Generator producing the SSE body for a single question.
def generate_response(query):
    """Yield Server-Sent Events chunks answering *query*.

    Retrieves live web context, formats the RAG prompt, and invokes the
    Ollama model. All failures (search OR model call) are reported as an
    in-band SSE event instead of raising, so the HTTP stream ends cleanly —
    previously a search failure raised inside the generator and aborted
    the response mid-stream.
    """
    try:
        # Retrieve real-time data to ground the answer.
        context = search_web(query)
        # Combine context and question into the final prompt.
        full_prompt = prompt.format(context=context, question=query)
        # Call the Ollama API (blocking; the full answer arrives at once).
        response = llm.invoke(full_prompt)
        yield _sse_event(response)
    except Exception as e:
        # SSE responses are already committed as 200, so errors must be
        # delivered in-band rather than as an HTTP status.
        yield _sse_event(f"调用Ollama API时发生错误：{str(e)}")

@app.route('/ask', methods=['POST'])
def ask():
    """POST /ask — accept a JSON body {"question": "..."} and stream the answer.

    Returns a text/event-stream response on success, or a JSON 400 error
    when the body is missing, not valid JSON, or the question is empty.
    """
    # silent=True: without it, a non-JSON body makes get_json() return None
    # (or raise), and data.get(...) then crashes with AttributeError → 500
    # instead of the intended 400.
    data = request.get_json(silent=True) or {}
    question = data.get('question', '')

    # Reject non-string or whitespace-only questions as empty.
    if not isinstance(question, str) or not question.strip():
        return jsonify({"status": "error", "message": "问题不能为空"}), 400

    return Response(generate_response(question), mimetype='text/event-stream')

if __name__ == "__main__":
    # Development entry point: serve the API at http://localhost:9000.
    app.run(host="localhost", port=9000)