|
from flask import Flask, render_template,request, redirect,url_for, jsonify , session |
|
from helper_functions import predict_class , prepare_text , inference , predict , align_predictions_with_sentences , load_models |
|
import fitz |
|
import os, shutil |
|
import torch |
|
import tempfile |
|
from pydub import AudioSegment |
|
import logging |
|
|
|
app = Flask(__name__)

app.config['UPLOAD_FOLDER'] = 'static/uploads'

# Make sure the upload directory exists up front; file.save() in the upload
# routes fails with FileNotFoundError on a fresh checkout otherwise.
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# Model handles shared by every route; populated once by init_app() at import time.
global_model = None      # document classifier consumed by predict_class()
global_neptune = None    # sentence-level model consumed by predict()
global_tokenizer = None  # NOTE(review): declared but never assigned anywhere -- stays None
global_pipe = None       # speech-to-text pipeline consumed by the /voice route


def init_app():
    """Load the ML models once at startup and cache them in module globals.

    Called at import time (below) so every request handler sees loaded models.
    """
    global global_model, global_neptune, global_pipe
    print("Loading models...")
    global_model, global_neptune, global_pipe = load_models()
    print("Models loaded successfully!")


init_app()
|
|
|
@app.route("/")
def home():
    """Render the landing page with empty prediction placeholders."""
    # Renamed from `predict_class`, which shadowed the imported function of
    # the same name inside this handler.
    predicted_class = ""
    class_probabilities = dict()
    chart_data = dict()
    return render_template('pdf.html', class_probabilities=class_probabilities,
                           predicted_class=predicted_class, chart_data=chart_data)
|
|
|
@app.route('/pdf')
def pdf():
    """Render the PDF-classification page with empty prediction placeholders."""
    # Renamed from `predict_class`, which shadowed the imported function of
    # the same name inside this handler.
    predicted_class = ""
    class_probabilities = dict()
    chart_data = dict()
    return render_template('pdf.html', class_probabilities=class_probabilities,
                           predicted_class=predicted_class, chart_data=chart_data)
|
|
|
@app.route('/pdf/upload', methods=['POST'])
def treatment():
    """Handle a PDF upload: extract its text, classify it, render the result.

    Saves the upload under UPLOAD_FOLDER, extracts text page by page with
    PyMuPDF (fitz), runs the document classifier, builds the Chart.js payload,
    then wipes the upload folder so files do not accumulate between requests.
    """
    global global_model, global_tokenizer
    if request.method == 'POST':
        file = request.files['file']
        # Build the destination path portably instead of manual "/" concatenation.
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
        file.save(filepath)

        # Extract text page by page, tagging each page for readability.
        extracted_text = ""
        pdf_document = fitz.open(filepath)
        try:
            for page_num in range(len(pdf_document)):
                page = pdf_document.load_page(page_num)
                extracted_text += f"\nPage {page_num + 1}:\n{page.get_text()}"
        finally:
            # Close even if text extraction raises.
            pdf_document.close()

        predicted_class, class_probabilities = predict_class([extracted_text], global_model)

        # Keys of class_probabilities look like (label, ?, color) tuples,
        # given the [0]/[2] indexing -- TODO confirm against predict_class.
        chart_data = {
            'datasets': [{
                'data': list(class_probabilities.values()),
                'backgroundColor': [key[2] for key in class_probabilities.keys()],
                'borderColor': [key[2] for key in class_probabilities.keys()]
            }],
            'labels': [key[0] for key in class_probabilities.keys()]
        }
        # Was print(predict_class), which printed the imported function object
        # rather than the prediction.
        print(predicted_class)
        print(chart_data)

        # Clean the upload folder. Loop variable renamed so it no longer
        # shadows the uploaded file's name above.
        for entry in os.listdir(app.config['UPLOAD_FOLDER']):
            entry_path = os.path.join(app.config['UPLOAD_FOLDER'], entry)
            try:
                if os.path.isfile(entry_path) or os.path.islink(entry_path):
                    os.unlink(entry_path)
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path)
            except Exception as e:
                # Best-effort cleanup: log and keep going.
                print('Failed to delete %s. Reason: %s' % (entry_path, e))

        return render_template('pdf.html', extracted_text=extracted_text,
                               class_probabilities=class_probabilities,
                               predicted_class=predicted_class, chart_data=chart_data)
    return render_template('pdf.html')
|
|
|
|
|
|
|
@app.route('/sentence', methods=['GET', 'POST'])
def sentence():
    """Classify a single sentence submitted via the form on sentence.html."""
    global global_model, global_tokenizer

    # GET: just show the form.
    if request.method != 'POST':
        return render_template('sentence.html')

    text = [request.form['text']]
    predicted_class, class_probabilities = predict_class(text, global_model)

    # Keys appear to be (label, ?, color) tuples, given the [0]/[2] indexing.
    keys = list(class_probabilities.keys())
    chart_data = {
        'datasets': [{
            'data': list(class_probabilities.values()),
            'backgroundColor': [k[2] for k in keys],
            'borderColor': [k[2] for k in keys]
        }],
        'labels': [k[0] for k in keys]
    }
    print(chart_data)

    # Best-effort sweep of the upload folder: log failures and continue.
    for filename in os.listdir(app.config['UPLOAD_FOLDER']):
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

    return render_template('response_sentence.html', text=text,
                           class_probabilities=class_probabilities,
                           predicted_class=predicted_class, chart_data=chart_data)
|
|
|
|
|
@app.route("/voice_backup")
def slu_backup():
    """Debug route: run the full voice pipeline on a bundled sample PDF."""
    input_file = "static/uploads/2022.jep-architectures-neuronales.pdf"

    # Extract text page by page, tagging each page for readability.
    extracted_text = ""
    pdf_document = fitz.open(input_file)
    try:
        for page_num in range(len(pdf_document)):
            page = pdf_document.load_page(page_num)
            extracted_text += f"\nPage {page_num + 1}:\n{page.get_text()}"
    finally:
        # Close even if text extraction raises.
        pdf_document.close()

    inference_batch, sentences = inference(extracted_text)
    # Bug fix: predict() takes the model as its second argument, as in the
    # /voice route (predict(inference_batch, global_neptune)); it was omitted here.
    predictions = predict(inference_batch, global_neptune)
    sentences_prediction = align_predictions_with_sentences(sentences, predictions)
    predicted_class, class_probabilities = predict_class([extracted_text], global_model)

    # Keys of class_probabilities look like (label, ?, color) tuples,
    # given the [0]/[2] indexing -- TODO confirm against predict_class.
    chart_data = {
        'datasets': [{
            'data': list(class_probabilities.values()),
            'backgroundColor': [key[2] for key in class_probabilities.keys()],
            'borderColor': [key[2] for key in class_probabilities.keys()]
        }],
        'labels': [key[0] for key in class_probabilities.keys()]
    }
    print(class_probabilities)
    print(chart_data)
    print(sentences_prediction)
    return render_template('voice_backup.html', extracted_text=extracted_text,
                           class_probabilities=class_probabilities,
                           predicted_class=predicted_class,
                           chart_data=chart_data,
                           sentences_prediction=sentences_prediction)
|
|
|
# Root-logger setup so the debug traces in the /voice route are emitted.
logging.basicConfig(level=logging.DEBUG)
|
|
|
@app.route("/voice", methods=['GET', 'POST'])
def slu():
    """Voice route: transcribe an uploaded audio blob, then classify the text.

    POST expects an 'audio' file field. The blob is written to a temporary
    .wav file, transcribed via global_pipe, and the transcript is run through
    the sentence-level model and the document classifier before rendering
    voice.html. GET renders the empty page. Errors return JSON with 400/500.
    """
    global global_neptune, global_pipe, global_model

    if request.method == 'POST':
        logging.debug("Received POST request")
        audio_file = request.files.get('audio')

        # Guard clause: bail out early when no file arrived.
        if not audio_file:
            logging.error("No audio file received")
            return jsonify({'error': 'No audio file received'}), 400

        logging.debug(f"Received audio file: {audio_file.filename}")

        # Persist to a named temp file so the ASR pipeline can read a path.
        # Capture the path BEFORE save() so cleanup always runs; the original
        # leaked the temp file when save() itself raised.
        # NOTE(review): assumes the uploaded blob is already WAV -- confirm
        # against the client-side recorder.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_audio:
            temp_audio_path = temp_audio.name
            try:
                audio_file.save(temp_audio)
            except Exception:
                os.unlink(temp_audio_path)
                raise

        logging.debug(f"Saved audio to temporary file: {temp_audio_path}")

        try:
            # Speech-to-text.
            result = global_pipe(temp_audio_path)
            extracted_text = result["text"]
            logging.debug(f"Transcribed text: {extracted_text}")

            # Sentence-level predictions plus whole-document classification.
            inference_batch, sentences = inference(extracted_text)
            predictions = predict(inference_batch, global_neptune)
            sentences_prediction = align_predictions_with_sentences(sentences, predictions)
            predicted_class, class_probabilities = predict_class([extracted_text], global_model)

            # Keys of class_probabilities look like (label, ?, color) tuples,
            # given the [0]/[2] indexing -- TODO confirm against predict_class.
            chart_data = {
                'datasets': [{
                    'data': list(class_probabilities.values()),
                    'backgroundColor': [key[2] for key in class_probabilities.keys()],
                    'borderColor': [key[2] for key in class_probabilities.keys()]
                }],
                'labels': [key[0] for key in class_probabilities.keys()]
            }

            response_data = {
                'extracted_text': extracted_text,
                'class_probabilities': class_probabilities,
                'predicted_class': predicted_class,
                'chart_data': chart_data,
                'sentences_prediction': sentences_prediction
            }
            logging.debug(f"Prepared response data: {response_data}")

            return render_template('voice.html',
                                   class_probabilities=class_probabilities,
                                   predicted_class=predicted_class,
                                   chart_data=chart_data,
                                   sentences_prediction=sentences_prediction)

        except Exception as e:
            logging.error(f"Error processing audio: {str(e)}")
            return jsonify({'error': str(e)}), 500

        finally:
            # Always remove the temp file, success or failure.
            os.unlink(temp_audio_path)

    logging.debug("Received GET request")
    return render_template('voice.html',
                           class_probabilities={},
                           predicted_class=[""],
                           chart_data={},
                           sentences_prediction={})
|
|
|
if __name__ == '__main__':
    # Development server only (debug=True enables the reloader and the
    # interactive debugger); use a WSGI server in production.
    app.run(debug=True)