import os
from flask import Flask, request, jsonify
from flask_cors import CORS
# Point cache downloads at a writable tmp dir unless the caller already set one.
# This is done in the Dockerfile now, but leaving for local dev consistency
# NOTE: this must run BEFORE the model-module imports below, since those
# modules load their models (and hence touch the cache) at import time.
os.environ["XDG_CACHE_HOME"] = os.environ.get("XDG_CACHE_HOME", "/tmp/.cache")
# Import handlers from other server files
# Each sibling module exposes a request handler plus its loaded model object
# (the model references are used by the health check in index()).
from whisper_server import handle_transcribe, model as whisper_model
from qgen_server import handle_generate_questions, qg_model
from qamatcher_server import handle_match_question, matcher_model
app = Flask(__name__)
# Configure CORS to allow all origins
CORS(app, resources={r"/*": {"origins": "*"}})
@app.route('/')
def index():
    """Health check: report server liveness and which models loaded."""
    # A model reference of None means its module failed to load the model.
    model_status = {
        'whisper': whisper_model is not None,
        'question-generator': qg_model is not None,
        'question-matcher': matcher_model is not None,
    }
    payload = {
        'message': 'VoiceQ AI Server is running!',
        'models_loaded': model_status,
    }
    return jsonify(payload)
@app.route('/transcribe', methods=['POST'])
def transcribe():
    """Delegate POST /transcribe to the whisper_server handler."""
    response = handle_transcribe()
    return response
@app.route('/generate-questions', methods=['POST'])
def generate_questions():
    """Delegate POST /generate-questions to the qgen_server handler."""
    response = handle_generate_questions()
    return response
@app.route('/match-question', methods=['POST'])
def match_question():
    """Delegate POST /match-question to the qamatcher_server handler."""
    response = handle_match_question()
    return response
# Local-development entry point only; the Docker container runs the app
# through its own server command instead of this block.
if __name__ == '__main__':
    # PORT comes from the environment when provided; 5001 is the local default.
    # NOTE(review): debug=True on 0.0.0.0 is acceptable only because this
    # path is never taken in the container.
    app.run(
        host='0.0.0.0',
        port=int(os.environ.get("PORT", 5001)),
        debug=True,
    )