from flask import Flask, request, jsonify, render_template
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
# Assumes the llm and retriever initialization code already exists elsewhere.

app = Flask(__name__)

# Initialize the question-answering chain once at import time so every request
# reuses the same chain instance.
# NOTE(review): a module-level init means any model/retriever failure inside
# LangchainQA().create_chain() aborts the import — confirm this is intended.
from assessment_assistant import LangchainQA
qa_chain = LangchainQA().create_chain()


@app.route('/')
def index():
    """Serve the front-end page for the QA assistant."""
    page = render_template('index.html')
    return page

@app.route('/ask', methods=['POST'])
def ask_question():
    """Handle POST /ask: run the submitted question through the QA chain.

    Expects a JSON body ``{"question": "..."}``.  Returns JSON with the
    answer, truncated source-document snippets, and a ``success`` flag.
    Errors are reported with an appropriate HTTP status code (400 for a
    missing/empty question, 500 for unexpected failures).
    """
    try:
        # silent=True returns None (instead of raising) for a missing or
        # non-JSON body, so a malformed request cannot surface as a 500.
        data = request.get_json(silent=True) or {}
        question = (data.get('question') or '').strip()

        if not question:
            # 400 Bad Request; include the success flag for consistency
            # with the other response shapes.
            return jsonify({'error': '问题不能为空', 'success': False}), 400

        # Invoke the QA chain.
        # NOTE(review): assumes the chain's output dict uses the 'answer'
        # key and optionally 'source_documents' — confirm against
        # LangchainQA.create_chain().
        result = qa_chain({"question": question})

        # Summarize source documents, clipping long passages to 200 chars.
        source_docs = [
            {
                'content': (doc.page_content[:200] + "..."
                            if len(doc.page_content) > 200
                            else doc.page_content),
                'metadata': doc.metadata,
            }
            for doc in result.get('source_documents', [])
        ]

        return jsonify({
            'answer': result['answer'],
            'sources': source_docs,
            'success': True,
        })

    except Exception as e:
        # 500 so clients can distinguish failure without parsing the body.
        return jsonify({'error': str(e), 'success': False}), 500

if __name__ == '__main__':
    # Development entry point only.  WARNING: debug=True enables the Werkzeug
    # interactive debugger (allows arbitrary code execution if exposed) and
    # auto-reload — never run this way in production; use a WSGI server
    # (gunicorn/uwsgi) instead.
    app.run(debug=True)