# Imports
from flask import Flask, render_template, request, redirect, url_for, flash, session, send_from_directory
import os
import logging

from utility.utils import extract_text_from_images, Data_Extractor, json_to_llm_str, process_extracted_text, process_resume_data
from backup.backup import NER_Model
from paddleocr import PaddleOCR

# Configure logging (console only)
logging.basicConfig(
    level=logging.INFO,
    handlers=[
        logging.StreamHandler()
    ]
)

# Flask app
app = Flask(__name__)
app.secret_key = 'your_secret_key'  # TODO: replace with a strong secret in production
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['RESULT_FOLDER'] = 'uploads/'

UPLOAD_FOLDER = 'static/uploads/'
RESULT_FOLDER = 'static/results/'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(RESULT_FOLDER, exist_ok=True)
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['RESULT_FOLDER'], exist_ok=True)

# Point PaddleOCR's home directory at a writable location
os.environ['PADDLEOCR_HOME'] = os.path.join(app.config['UPLOAD_FOLDER'], '.paddleocr')


@app.route('/')
def index():
    uploaded_files = session.get('uploaded_files', [])
    logging.info(f"Accessed index page, uploaded files: {uploaded_files}")
    return render_template('index.html', uploaded_files=uploaded_files)


@app.route('/upload', methods=['POST'])
def upload_file():
    if 'files' not in request.files:
        flash('No file part')
        logging.warning("No file part found in the request")
        return redirect(request.url)

    files = request.files.getlist('files')  # Accept multiple files
    if not files or all(file.filename == '' for file in files):
        flash('No selected files')
        logging.warning("No files selected for upload")
        return redirect(request.url)

    uploaded_files = []
    for file in files:
        if file and file.filename:
            filename = file.filename
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            uploaded_files.append(filename)
            logging.info(f"Uploaded file: {filename}")

    session['uploaded_files'] = uploaded_files
    flash('Files successfully uploaded')
    logging.info(f"Files successfully uploaded: {uploaded_files}")
    return redirect(url_for('index'))


@app.route('/remove_file')
def remove_file():
    uploaded_files = session.get('uploaded_files', [])
    for filename in uploaded_files:
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        if os.path.exists(file_path):  # Only remove files that still exist
            os.remove(file_path)
            logging.info(f"Removed file: {filename}")
        else:
            logging.warning(f"File not found for removal: {filename}")

    session.pop('uploaded_files', None)
    flash('Files successfully removed')
    logging.info("All uploaded files removed")
    return redirect(url_for('index'))


@app.route('/process', methods=['POST'])
def process_file():
    uploaded_files = session.get('uploaded_files', [])
    if not uploaded_files:
        flash('No files selected for processing')
        logging.warning("No files selected for processing")
        return redirect(url_for('index'))

    # Build the list of file paths for text extraction
    file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in uploaded_files]
    logging.info(f"Processing files: {file_paths}")

    # Initialize defaults so the fallback path below never references undefined names
    extracted_text = {}
    processed_Img = {}
    LLMdata = {}

    try:
        # Extract text from all uploaded images
        extracted_text, processed_Img = extract_text_from_images(file_paths, RESULT_FOLDER)
        logging.info(f"Extracted text: {extracted_text}")
        logging.info(f"Processed images: {processed_Img}")

        # Call the Gemma model API to get the structured professional data
        llmText = json_to_llm_str(extracted_text)
        logging.info(f"LLM text: {llmText}")
        LLMdata = Data_Extractor(llmText)
        logging.info(f"LLM data: {LLMdata}")
    except Exception as e:
        logging.error(f"Error during LLM processing: {e}")
        logging.info("Running backup model...")
        # Fall back to the NER model if the LLM call fails
        if extracted_text:
            text = json_to_llm_str(extracted_text)
            LLMdata = NER_Model(text)
            logging.info(f"NER model data: {LLMdata}")
        else:
            logging.warning("No extracted text available for backup model")

    cont_data = process_extracted_text(extracted_text)
    logging.info(f"Contextual data: {cont_data}")

    # Merge the LLM/NER output with the contextual data and raw extracted text
    processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
    logging.info(f"Processed data: {processed_data}")

    session['processed_data'] = processed_data
    session['processed_Img'] = processed_Img
    flash('Data processed and analyzed successfully')
    logging.info("Data processed and analyzed successfully")
    return redirect(url_for('result'))


@app.route('/result')
def result():
    processed_data = session.get('processed_data', {})
    processed_Img = session.get('processed_Img', {})
    logging.info(f"Displaying results: Data - {processed_data}, Images - {processed_Img}")
    return render_template('result.html', data=processed_data, Img=processed_Img)


@app.route('/uploads/<filename>')
def uploaded_file(filename):
    logging.info(f"Serving file: {filename}")
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)


if __name__ == '__main__':
    logging.info("Starting Flask app")
    app.run(debug=True)