import os
import pandas as pd
from flask import Flask, request, jsonify
from domain_knowledge import KnowledgeBase
from reader import Reader

app = Flask(__name__)

# Load the in-domain FAQ corpus into a DataFrame.
# NOTE(review): file_excel_path is unused here — presumably consumed elsewhere; verify before removing.
data_dir = "./datas"
file_name = "NIOPhone产品咨询类FAQ-达芬奇产品咨询类.csv"
file_excel_name = "NIOPhone产品咨询类FAQ.xlsx"
file_path = os.path.join(data_dir, file_name)
file_excel_path = os.path.join(data_dir, file_excel_name)
df = pd.read_csv(file_path, sep=',', encoding='utf-8')
# Select and rename the columns of interest in one chained expression.
# Chaining .rename() on the column selection (instead of rename(inplace=True)
# on a slice) avoids pandas' SettingWithCopyWarning and yields an
# independent DataFrame.
corpus_df = df[['咨询问题（问题主干）', '话术', '关机']].rename(
    columns={'咨询问题（问题主干）': 'question', '话术': 'answer', '关机': 'question_type'}
)
print("raw df.shape=", df.shape)

embedding_model_name = "/b4-ai/share_model_zoo/embedding/BAAI/bge-base-zh-v1.5/"
llm_model_name = "/b4-ai/share_model_zoo/LLM/Qwen/Qwen1.5-4B-Chat"

device = "cuda"
# Guard against interface failures caused by a failed local model load.
model_service_type = "local"
knowledge_engine = KnowledgeBase(corpus_df, embedding_model_name, device)
reader = Reader(llm_model_name, device, model_service_type=model_service_type)


@app.route('/query', methods=['POST'])
def query():
    """Answer a user question via retrieval-augmented generation.

    Expects a JSON body of the form {"question": "<text>"}.
    Returns JSON {"question": ..., "answer": ...} on success, or a
    400 JSON error when the body is missing or has no usable question.
    """
    # silent=True: return None instead of raising on a missing/invalid JSON body,
    # so we can answer with a clean, explicit 400 instead of Flask's opaque one.
    payload = request.get_json(silent=True)
    if not payload or not payload.get('question'):
        return jsonify({"error": "request body must be JSON with a non-empty 'question' field"}), 400
    user_question = payload['question']
    # Retrieve candidate documents from the vector index, then let the LLM
    # compose the final answer grounded on those documents.
    retrieved_docs = knowledge_engine.retrieved_docs_vector(user_question)
    answer = reader.rag_with_llm(retrieved_docs, user_question, model_service_type)
    return jsonify({"question": user_question, "answer": answer})


if __name__ == "__main__":
    # Bind to all interfaces so the service is reachable from other hosts.
    serve_host, serve_port = "0.0.0.0", 5601
    app.run(host=serve_host, port=serve_port)
