Update app.py
--- a/app.py
+++ b/app.py
@@ -5,24 +5,19 @@ import os
 from PIL import Image
 import tempfile
 import json
-import logging
-import sqlite3
-from datetime import datetime
-import base64
-import io
-import time
+import logging  # Optional: for better logging
 
 app = Flask(__name__)
 
-# Configure logging
+# Configure logging (optional but recommended)
 logging.basicConfig(level=logging.INFO)
 
 # Configuration de l'API Gemini
 token = os.environ.get("TOKEN")
 if not token:
     logging.error("API Token (TOKEN environment variable) not found!")
-
-
+    # Handle the error appropriately - maybe exit or raise an exception
+    # For now, we'll let it potentially fail later if the client is used without a key
 genai_client = genai.Client(api_key=token)
 
 SAFETY_SETTINGS = [
@@ -44,89 +39,6 @@ SAFETY_SETTINGS = [
     )
 ]
 
-# --- Database Setup ---
-def init_db():
-    """Initialize the SQLite database"""
-    conn = sqlite3.connect('analysis_data.db')
-    cursor = conn.cursor()
-
-    cursor.execute('''
-        CREATE TABLE IF NOT EXISTS analyses (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
-            image_name TEXT,
-            image_data TEXT,
-            gemini_file_uri TEXT,
-            consignes TEXT,
-            use_deepthink BOOLEAN,
-            model_used TEXT,
-            tableau_output TEXT,
-            dissertation_output TEXT,
-            user_ip TEXT,
-            processing_time REAL
-        )
-    ''')
-
-    conn.commit()
-    conn.close()
-
-def save_analysis_data(image_name, image_data, gemini_file_uri, consignes, use_deepthink, model_used, tableau_output, dissertation_output, user_ip, processing_time):
-    """Save analysis data to database"""
-    conn = sqlite3.connect('analysis_data.db')
-    cursor = conn.cursor()
-
-    cursor.execute('''
-        INSERT INTO analyses
-        (image_name, image_data, gemini_file_uri, consignes, use_deepthink, model_used, tableau_output, dissertation_output, user_ip, processing_time)
-        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
-    ''', (image_name, image_data, gemini_file_uri, consignes, use_deepthink, model_used, tableau_output, dissertation_output, user_ip, processing_time))
-
-    conn.commit()
-    conn.close()
-
-def get_all_analyses():
-    """Get all analyses from database"""
-    conn = sqlite3.connect('analysis_data.db')
-    cursor = conn.cursor()
-
-    cursor.execute('''
-        SELECT id, timestamp, image_name, consignes, use_deepthink, model_used,
-               tableau_output, dissertation_output, user_ip, processing_time, gemini_file_uri
-        FROM analyses
-        ORDER BY timestamp DESC
-    ''')
-
-    analyses = cursor.fetchall()
-    conn.close()
-
-    return analyses
-
-def get_analysis_image(analysis_id):
-    """Get image data for a specific analysis"""
-    conn = sqlite3.connect('analysis_data.db')
-    cursor = conn.cursor()
-
-    cursor.execute('SELECT image_data FROM analyses WHERE id = ?', (analysis_id,))
-    result = cursor.fetchone()
-    conn.close()
-
-    return result[0] if result else None
-
-def cleanup_gemini_file(file_uri):
-    """Clean up uploaded Gemini file"""
-    try:
-        # Extract file name from URI for deletion
-        if file_uri and file_uri.startswith('https://generativelanguage.googleapis.com'):
-            file_name = file_uri.split('/')[-1]
-            genai_client.files.delete(name=file_name)
-            logging.info(f"Cleaned up Gemini file: {file_name}")
-    except Exception as e:
-        logging.warning(f"Could not clean up Gemini file {file_uri}: {e}")
-
-# Initialize database on startup
-init_db()
-
-
 # --- Prompts (Keep them as they are) ---
 prompt_tableau = """
 Traite selon cette méthodologie :
@@ -208,20 +120,20 @@ Voici un exercice a trous présentant la rédaction.. référence textuelle= rep
 
 EXERCICE À TROUS
 
-Le thème de ... (thème du texte) a souvent fait l'objet de nombreuses préoccupations dans le monde littéraire (mais pas que). C
+Le thème de ... (thème du texte) a souvent fait l'objet de nombreuses préoccupations dans le monde littéraire (mais pas que). C’est dans ce cadre que s'inscrit l'extrait ... (titre du texte) qui fait l'objet de notre étude, de l'écrivain... (nom de l'auteur), tiré de son œuvre ... (préciser le genre littéraire de l'œuvre) ... (maison d'édition), en ... (date de publication) à la (aux) page (s) …. Dans ce texte (type du texte) à ton ... (tonalité du texte/facultative), structure, nous verrons en premier lieu,...(axe 1) et en second lieu, ...(axe 2).
 
-Dans son extrait (poème), l
+Dans son extrait (poème), l’auteur met en relief ... (axe 1) à travers ... (sous-axe 1) et ... (sous-axe 2).
 
-S'agissant de ... (sous-axe 1), l
+S'agissant de ... (sous-axe 1), l’écrivain utilise ... (outil d'analyse 1 + référence textuelle) pour montrer ... (interprétation). Aussi (de plus), par l'usage de... (outil d'analyse 2 + référence textuelle), l'écrivain ... (interprétation). Mieux encore, ... (outil d'analyse 3 + référence textuelle) nous donne également la possibilité d’appréhender ... (sous-axe 2).
 De plus, l'homme de lettres emploie ... (outil d'analyse 1 + référence textuelle) pour ... (interprétation). Il se sert aussi de ...(outil d'analyse 2 + référence textuelle) afin de ... (interprétation). Pour continuer sa (description, représentation), le ...(nationalité de l'auteur) se manque pas de faire recours à ... (outil d'analyse 3 référence textuelle) Ici, il s'agit pour l'auteur autour de ... (sous-axe 1) et ... (sous-axe 2).
 
 Après avoir démontré ... (axe 1), voyons à présent ... (axe 2).
 
-En second lieu, le poète (l
-En parlant de ... (sous-axe 2), l'auteur met l'accent en premier sur… (interprétation), comme nous pouvons le voir avec la récurrence de (du/des) ... (rappel du sous-axe 2), le poète (l'auteur) souligne ... (interprétation) toujours dans le même sens de ... (rappel du sous-axe 2) . Il use de ... (outil d
+En second lieu, le poète (l’écrivain ou l’homme de lettres) met en exergue ... (axe 2) en s’appuyant d’une part, sur... (sous-axe 1) et d’autre part, sur... (sous-axe 2). En ce qui concerne ... (sous-axe 1), l'homme de lettres met d'abord en évidence l'aspect (le caractère) ... (interprétation) comme en témoigne l'emploi (l'usage de) ... (outil d’analyse 1 + référence textuelle). Ensuite, ... (outil d'analyse 2 + référence textuelle) dévoile que... (interprétation) Enfin, ... (outil d'analyse 3 + référence textuelle) suggère que… (interprétation) . ... (axe 2) se révèle grâce à ….
+En parlant de ... (sous-axe 2), l'auteur met l'accent en premier sur… (interprétation), comme nous pouvons le voir avec la récurrence de (du/des) ... (rappel du sous-axe 2), le poète (l'auteur) souligne ... (interprétation) toujours dans le même sens de ... (rappel du sous-axe 2) . Il use de ... (outil d’analyse 2 + référence textuelle). Dès lors, on peut déduire que ...(interprétation) utilise ... (outil d’analyse 3 + référence textuelle).
 Ainsi, ... (axe 2) est lié (e) à ... (sous-axe 1) et à ... (sous-axe 2).
 
-Somme toute, ... (titre du texte) organise son sens autour de … (axe 1) et de ... (axe 2). De ces deux centres d
+Somme toute, ... (titre du texte) organise son sens autour de … (axe 1) et de ... (axe 2). De ces deux centres d’intérêt découlent respectivement, d’une part, … (sous-axe 1 de l'axe 1) et ... (sous-axe 2 de l'axe 1) et, d’autre part, … (sous-axe 1 de l'axe 2) et … (sous-axe 2 de l'axe 2). À travers ce texte, ...(nom de l'auteur) nous ... (opinion personnelle). Une telle optique est perceptible dans la logique de... (nom de l'auteur nous permettant de faire un rapprochement thématique), dans son œuvre ...(titre de l’œuvre), dans lequel il aborde… (bref résumé de l'œuvre en question qui peut être facultatif).
 """
 # --- End Prompts ---
 
@@ -230,42 +142,22 @@ generate_config = types.GenerateContentConfig(
 )
 
 # --- Model IDs ---
-MODEL_ID_STANDARD = "gemini-2.5-flash"
-MODEL_ID_DEEPTHINK = "gemini-2.5-flash"
+MODEL_ID_STANDARD = "gemini-2.5-flash"  # Default model
+MODEL_ID_DEEPTHINK = "gemini-2.5-flash"  # Advanced model for DeepThink
 # --- End Model IDs ---
 
-
-
-
-        logging.info(f"Uploading image to Gemini: {image_path}")
-        uploaded_file = genai_client.files.upload(file=image_path)
-
-        # Wait for file to be processed
-        while uploaded_file.state == 'PROCESSING':
-            logging.info("File still processing, waiting...")
-            time.sleep(1)
-            uploaded_file = genai_client.files.get(name=uploaded_file.name)
-
-        if uploaded_file.state == 'FAILED':
-            raise Exception(f"File upload failed: {uploaded_file.error}")
-
-        logging.info(f"File uploaded successfully: {uploaded_file.uri}")
-        return uploaded_file
-    except Exception as e:
-        logging.error(f"Error uploading file to Gemini: {e}")
-        raise
-
-def generate_table_stream(uploaded_file, consignes="", model_id=MODEL_ID_STANDARD):
-    """Génère le tableau d'analyse à partir de l'image uploadée"""
+# --- Stream Generation Functions ---
+def generate_table_stream(image, consignes="", model_id=MODEL_ID_STANDARD):
+    """Génère le tableau d'analyse à partir de l'image en intégrant éventuellement des consignes"""
     prompt = prompt_tableau
     if consignes:
-        prompt += "\n\nConsignes supplémentaires de l'utilisateur :\n" + consignes
+        prompt += "\n\nConsignes supplémentaires de l'utilisateur :\n" + consignes  # Make consignes clearer in prompt
 
     logging.info(f"Generating table using model: {model_id}")
     try:
         response_stream = genai_client.models.generate_content_stream(
-            model=model_id,
-            contents=[prompt,
+            model=model_id,  # Use the passed model_id
+            contents=[prompt, image],
             config=generate_config
         )
 
@@ -276,6 +168,7 @@ def generate_table_stream(uploaded_file, consignes="", model_id=MODEL_ID_STANDAR
         logging.error(f"Error during table generation stream: {e}", exc_info=True)
         yield json.dumps({"type": "error", "error": f"Erreur lors de la génération du tableau: {e}"}) + "\n"
 
+
 def generate_dissertation_stream(tableau, model_id=MODEL_ID_STANDARD):
     """Génère la dissertation basée sur le tableau"""
     prompt = f"""
@@ -285,13 +178,13 @@ def generate_dissertation_stream(tableau, model_id=MODEL_ID_STANDARD):
 {tableau}
 --- Fin du Tableau ---
 
-Écris maintenant une rédaction structurée pour le commentaire composé basé EXCLUSIVEMENT sur le tableau d'analyse fourni ci-dessus.
+Écris maintenant une rédaction structurée pour le commentaire composé basé EXCLUSIVEMENT sur le tableau d'analyse fourni ci-dessus, en suivant strictement le modèle de l'exercice à trous présenté dans la première partie de ce prompt. Assure-toi d'utiliser les "Repérage / Citation" du tableau quand le modèle le demande.
 """
 
     logging.info(f"Generating dissertation using model: {model_id}")
     try:
         response_stream = genai_client.models.generate_content_stream(
-            model=model_id,
+            model=model_id,  # Use the passed model_id
             contents=prompt,
             config=generate_config
         )
@@ -303,55 +196,19 @@ def generate_dissertation_stream(tableau, model_id=MODEL_ID_STANDARD):
         logging.error(f"Error during dissertation generation stream: {e}", exc_info=True)
         yield json.dumps({"type": "error", "error": f"Erreur lors de la génération de la dissertation: {e}"}) + "\n"
 
+# --- End Stream Generation Functions ---
+
+
 # --- Flask Routes ---
 @app.route('/')
 def index():
+    # Assuming your main HTML file is index.html
     return render_template('index.html')
 
-
-
-
-
-
-@app.route('/api/analyses')
-def get_analyses():
-    """API endpoint to get all analyses data"""
-    try:
-        analyses = get_all_analyses()
-        analyses_data = []
-
-        for analysis in analyses:
-            analyses_data.append({
-                'id': analysis[0],
-                'timestamp': analysis[1],
-                'image_name': analysis[2],
-                'consignes': analysis[3],
-                'use_deepthink': analysis[4],
-                'model_used': analysis[5],
-                'tableau_output': analysis[6],
-                'dissertation_output': analysis[7],
-                'user_ip': analysis[8],
-                'processing_time': analysis[9],
-                'gemini_file_uri': analysis[10] if len(analysis) > 10 else None
-            })
-
-        return jsonify(analyses_data)
-    except Exception as e:
-        logging.error(f"Error fetching analyses: {e}")
-        return jsonify({'error': 'Erreur lors de la récupération des données'}), 500
-
-@app.route('/api/analysis/<int:analysis_id>/image')
-def get_analysis_image_endpoint(analysis_id):
-    """API endpoint to get image for a specific analysis"""
-    try:
-        image_data = get_analysis_image(analysis_id)
-        if image_data:
-            return Response(base64.b64decode(image_data), mimetype='image/jpeg')
-        else:
-            return jsonify({'error': 'Image non trouvée'}), 404
-    except Exception as e:
-        logging.error(f"Error fetching image: {e}")
-        return jsonify({'error': 'Erreur lors de la récupération de l\'image'}), 500
+# Removed /free route as it wasn't mentioned in the final HTML
+# @app.route('/free')
+# def free():
+#     return render_template('free.html')
 
 @app.route('/analyze', methods=['POST'])
 def analyze():
@@ -361,173 +218,127 @@ def analyze():
 
     image_file = request.files['image']
     consignes = request.form.get("consignes", "")
+    # Check for the DeepThink flag
     use_deepthink = request.form.get('use_deepthink', 'false').lower() == 'true'
 
     # Select model based on the flag
-
-
-
-
-
-
-
-
-
-
+    if use_deepthink:
+        model_id = MODEL_ID_DEEPTHINK
+        logging.info("DeepThink requested, using advanced model.")
+    else:
+        model_id = MODEL_ID_STANDARD
+        logging.info("Standard analysis requested, using standard model.")
+
+    # Use a temporary file to handle the image upload
+    temp_file = None  # Initialize to None
     try:
-        # Create temporary file
+        # Create a temporary file with a specific suffix if needed, or let NamedTemporaryFile handle it
+        # Using 'delete=False' requires manual cleanup
         with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
-
-
-
-            # Validate image
+            image_file.save(temp_file.name)
+            # Ensure image is valid before proceeding
             try:
-                image = Image.open(
-                image.verify()
-
-
-                # Convert image to base64 for storage
-                img_buffer = io.BytesIO()
-
-                # Convert RGBA to RGB if necessary for JPEG compatibility
-                if image.mode in ('RGBA', 'LA', 'P'):
-                    # Create white background
-                    background = Image.new('RGB', image.size, (255, 255, 255))
-                    if image.mode == 'P':
-                        image = image.convert('RGBA')
-                    background.paste(image, mask=image.split()[-1] if image.mode == 'RGBA' else None)
-                    image = background
-                elif image.mode != 'RGB':
-                    image = image.convert('RGB')
-
-                image.save(img_buffer, format='JPEG', quality=95)
-                image_base64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
-
+                image = Image.open(temp_file.name)
+                image.verify()  # Verify image header
+                # Re-open after verify
+                image = Image.open(temp_file.name)
             except (IOError, SyntaxError) as e:
                 logging.error(f"Invalid image file uploaded: {e}")
                 return jsonify({'error': f'Invalid or corrupted image file: {e}'}), 400
 
-
-
-
-
-
-
-
-
-
-
-
-
-        try:
-            logging.info("Starting table generation stream...")
-
-            # Phase 1: Génération du tableau
-            for chunk_json in generate_table_stream(uploaded_file, consignes, model_id):
-                try:
-                    chunk_data = json.loads(chunk_json)
-                    if chunk_data.get("type") == "error":
-                        logging.error(f"Error received from table stream: {chunk_data.get('error')}")
-                        yield chunk_json
-                        return
-                    elif chunk_data.get("type") == "tableau":
-                        full_tableau_content += chunk_data.get("chunk", "")
-                        yield chunk_json
-                except json.JSONDecodeError:
-                    logging.error(f"Received invalid JSON from table stream: {chunk_json}")
-                    yield json.dumps({"type": "error", "error": "Invalid data received during table generation."}) + "\n"
-                    return
-
-            logging.info("Table generation stream finished.")
-            logging.info("Starting dissertation generation stream...")
-
-            # Phase 2: Génération de la dissertation
-            if full_tableau_content:
-                for chunk_json in generate_dissertation_stream(full_tableau_content, model_id):
+        # Now 'image' holds the PIL Image object
+        # We need to pass the image object to the stream function
+        # Note: google.generativeai often works directly with PIL Image objects
+
+        @stream_with_context
+        def generate():
+            temp_file_path = temp_file.name  # Store path for finally block
+            full_tableau_content = ""  # Accumulate full table content for dissertation
+            try:
+                logging.info("Starting table generation stream...")
+                # Phase 1: Génération du tableau, passing the selected model_id
+                for chunk_json in generate_table_stream(image, consignes, model_id):
                     try:
                         chunk_data = json.loads(chunk_json)
                         if chunk_data.get("type") == "error":
-                            logging.error(f"Error received from
-                            yield chunk_json
-
-
-
+                            logging.error(f"Error received from table stream: {chunk_data.get('error')}")
+                            yield chunk_json  # Forward the error to the client
+                            return  # Stop generation if table fails
+                        elif chunk_data.get("type") == "tableau":
+                            full_tableau_content += chunk_data.get("chunk", "")
+                            yield chunk_json  # Stream chunk to client
                     except json.JSONDecodeError:
-
-
-
-
-
-
-
+                        logging.error(f"Received invalid JSON from table stream: {chunk_json}")
+                        # Decide how to handle: yield error, ignore, etc.
+                        yield json.dumps({"type": "error", "error": "Invalid data received during table generation."}) + "\n"
+                        return  # Stop if data is corrupt
+
+                logging.info("Table generation stream finished.")
+                logging.info("Starting dissertation generation stream...")
+
+                # Phase 2: Génération de la dissertation basée sur le tableau COMPLET, passing the selected model_id
+                if full_tableau_content:  # Only generate if table content exists
+                    for chunk_json in generate_dissertation_stream(full_tableau_content, model_id):
+                        try:
+                            chunk_data = json.loads(chunk_json)
+                            if chunk_data.get("type") == "error":
+                                logging.error(f"Error received from dissertation stream: {chunk_data.get('error')}")
+                                yield chunk_json  # Forward the error
+                                # Decide if you want to return here or let it finish
+                            elif chunk_data.get("type") == "dissertation":
+                                yield chunk_json  # Stream chunk to client
+                        except json.JSONDecodeError:
+                            logging.error(f"Received invalid JSON from dissertation stream: {chunk_json}")
+                            yield json.dumps({"type": "error", "error": "Invalid data received during dissertation generation."}) + "\n"
+                            # Potentially return here too
+                else:
+                    logging.warning("Tableau content is empty, skipping dissertation generation.")
+                    yield json.dumps({"type": "error", "error": "Tableau d'analyse non généré, impossible de créer la dissertation."}) + "\n"
+
+                logging.info("Dissertation generation stream finished.")
 
-            # Save analysis data to database
-            try:
-                end_time = datetime.now()
-                processing_time = (end_time - start_time).total_seconds()
-
-                save_analysis_data(
-                    image_name=image_file.filename,
-                    image_data=image_base64,
-                    gemini_file_uri=uploaded_file.uri,
-                    consignes=consignes,
-                    use_deepthink=use_deepthink,
-                    model_used=model_id,
-                    tableau_output=full_tableau_content,
-                    dissertation_output=full_dissertation_content,
-                    user_ip=user_ip,
-                    processing_time=processing_time
-                )
-                logging.info(f"Analysis data saved successfully for image: {image_file.filename}")
-            except Exception as save_error:
-                logging.error(f"Error saving analysis data: {save_error}")
-
-        except Exception as e:
-            logging.error(f"Error during streaming generation: {e}", exc_info=True)
-            yield json.dumps({"type": "error", "error": f"Une erreur interne est survenue: {e}"}) + "\n"
-
-        # Create response with cleanup
-        response = Response(generate(), content_type='text/event-stream')
-
-        # Setup cleanup function
-        def cleanup():
-            # Clean up temporary file
-            if temp_file_path and os.path.exists(temp_file_path):
-                try:
-                    os.unlink(temp_file_path)
-                    logging.info(f"Cleaned up temp file: {temp_file_path}")
-                except OSError as e:
-                    logging.error(f"Error cleaning up temp file: {e}")
-
-            # Clean up Gemini file (optional - files auto-expire)
-            if uploaded_file:
-                try:
-                    genai_client.files.delete(name=uploaded_file.name)
-                    logging.info(f"Cleaned up Gemini file: {uploaded_file.name}")
             except Exception as e:
-                logging.
+                logging.error(f"Error during streaming generation: {e}", exc_info=True)
+                # Yield a JSON error message for the client
+                yield json.dumps({"type": "error", "error": f"Une erreur interne est survenue: {e}"}) + "\n"
+            finally:
+                # Nettoyer le fichier temporaire in the finally block of generate
+                # Ensure the image object is closed if necessary (PIL handles this reasonably well)
+                # image.close()  # Usually not needed with 'with open' or tempfile context
+                pass  # temp_file is closed by with statement, path needed for unlink
+
+        # Return the streaming response
+        # Make sure temp_file path is accessible for cleanup *after* streaming finishes
+        response = Response(generate(), content_type='text/event-stream')
+
+        # Use response.call_on_close for reliable cleanup after stream finishes/closes
+        if temp_file and os.path.exists(temp_file.name):
+            cleanup_path = temp_file.name
+            response.call_on_close(lambda: os.unlink(cleanup_path) if os.path.exists(cleanup_path) else None)
+            logging.info(f"Scheduled cleanup for temp file: {cleanup_path}")
 
-
-
+
+        return response
 
     except Exception as e:
-        #
-
-
-
-
-
-
-
-
-        try:
-            genai_client.files.delete(name=uploaded_file.name)
-            logging.info(f"Cleaned up Gemini file after error: {uploaded_file.name}")
-        except Exception:
-            pass
-
-        logging.error(f"Error processing upload: {e}", exc_info=True)
+        # Catch errors during file handling or initial PIL processing
+        logging.error(f"Error processing upload before streaming: {e}", exc_info=True)
+        # Ensure cleanup if temp_file was created before the error
+        if temp_file and os.path.exists(temp_file.name):
+            try:
+                os.unlink(temp_file.name)
+                logging.info(f"Cleaned up temp file due to pre-stream error: {temp_file.name}")
+            except OSError as unlink_error:
+                logging.error(f"Error unlinking temp file during error handling: {unlink_error}")
         return jsonify({'error': f'Error processing file: {e}'}), 500
+        # Note: The 'finally' block for the 'with tempfile' is implicitly handled
+        # but explicit cleanup using response.call_on_close is better for streaming
+
+
+# --- End Flask Routes ---
+
 
 if __name__ == '__main__':
-
+    # Set debug=False for production
+    # Use a proper WSGI server like Gunicorn or Waitress in production
+    app.run(debug=True)  # debug=True enables auto-reloading and detailed errors
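
A note on the temp-file handling above: because the response streams, the temporary image must outlive the request handler and may only be deleted once the stream closes, which is what the new `response.call_on_close` hook achieves. Below is a minimal, self-contained sketch of that pattern (not part of the commit), assuming Flask/Werkzeug; the `/demo` route and its payload are hypothetical.

import os
import tempfile

from flask import Flask, Response, stream_with_context

app = Flask(__name__)

@app.route('/demo')  # hypothetical route, for illustration only
def demo():
    # Write the payload to a temp file that must survive past this function.
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
    tmp.write(b"chunk 1\nchunk 2\n")
    tmp.close()

    @stream_with_context
    def generate():
        # The temp file must still exist while chunks are being yielded.
        with open(tmp.name, "rb") as f:
            for line in f:
                yield line

    response = Response(generate(), content_type='text/event-stream')
    cleanup_path = tmp.name
    # Werkzeug runs call_on_close hooks after the response iterator is
    # exhausted or the client disconnects, so the unlink happens only
    # once streaming is over.
    response.call_on_close(
        lambda: os.unlink(cleanup_path) if os.path.exists(cleanup_path) else None
    )
    return response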
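For reference, the /analyze route streams newline-delimited JSON objects tagged with a "type" of "tableau", "dissertation", or "error". A hypothetical client could consume the stream as sketched here, assuming the `requests` package is available and that dissertation messages carry their text under a "chunk" key like the tableau messages do (the diff only shows "chunk" explicitly for the tableau phase).

import json
import requests

def run_analysis(url, image_path, consignes="", use_deepthink=False):
    # Post the image and form fields, keeping the response as a stream.
    with open(image_path, "rb") as f:
        resp = requests.post(
            url,
            files={"image": f},
            data={"consignes": consignes,
                  "use_deepthink": "true" if use_deepthink else "false"},
            stream=True,
        )
    resp.raise_for_status()
    tableau, dissertation = [], []
    for raw in resp.iter_lines():
        if not raw:
            continue  # skip keep-alive blank lines
        msg = json.loads(raw)
        if msg.get("type") == "tableau":
            tableau.append(msg.get("chunk", ""))
        elif msg.get("type") == "dissertation":
            dissertation.append(msg.get("chunk", ""))
        elif msg.get("type") == "error":
            raise RuntimeError(msg.get("error"))
    return "".join(tableau), "".join(dissertation)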