SCGR committed on
Commit 01c5951 · 1 Parent(s): cdacb63

structured logging

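The pattern applied across the backend, reduced to a minimal sketch (an assumed simplification, not the exact file contents): main.py calls logging.basicConfig() once with a stream handler, every other module creates a named module-level logger, and each former print() is mapped to a debug/info/warning/error call.

    import logging

    # entrypoint (main.py): configure the handler and format once
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()],
    )

    # any other module: take a logger named after the module
    logger = logging.getLogger(__name__)

    logger.debug("diagnostic detail")          # former print() debugging
    logger.info("startup milestone reached")   # migrations, service registration
    logger.warning("degraded but continuing")  # e.g. S3 not ready
    logger.error("operation failed")           # e.g. migration failure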
py_backend/app/crud.py CHANGED
@@ -1,9 +1,12 @@
1
  import io, hashlib
 
2
  from typing import Optional, List
3
  from sqlalchemy.orm import Session, joinedload
4
  from . import models, schemas
5
  from fastapi import HTTPException
6
 
 
 
7
  def hash_bytes(data: bytes) -> str:
8
  """Compute SHA-256 hex digest of the data."""
9
  return hashlib.sha256(data).hexdigest()
@@ -88,10 +91,10 @@ def get_image(db: Session, image_id: str):
88
  )
89
 
90
  def create_caption(db: Session, image_id, title, prompt, model_code, raw_json, text, metadata=None, image_count=None):
91
- print(f"Creating caption for image_id: {image_id}")
92
- print(f"Caption data: title={title}, prompt={prompt}, model={model_code}")
93
- print(f"Database session ID: {id(db)}")
94
- print(f"Database session is active: {db.is_active}")
95
 
96
  if metadata:
97
  raw_json["extracted_metadata"] = metadata
@@ -122,11 +125,11 @@ def create_caption(db: Session, image_id, title, prompt, model_code, raw_json, t
122
  # Link caption to image
123
  img.captions.append(caption)
124
 
125
- print(f"About to commit caption to database...")
126
  db.commit()
127
- print(f"Caption commit successful!")
128
  db.refresh(caption)
129
- print(f"Caption created successfully for image: {img.image_id}")
130
  return caption
131
 
132
  def get_caption(db: Session, caption_id: str):
 
1
  import io, hashlib
2
+ import logging
3
  from typing import Optional, List
4
  from sqlalchemy.orm import Session, joinedload
5
  from . import models, schemas
6
  from fastapi import HTTPException
7
 
8
+ logger = logging.getLogger(__name__)
9
+
10
  def hash_bytes(data: bytes) -> str:
11
  """Compute SHA-256 hex digest of the data."""
12
  return hashlib.sha256(data).hexdigest()
 
91
  )
92
 
93
  def create_caption(db: Session, image_id, title, prompt, model_code, raw_json, text, metadata=None, image_count=None):
94
+ logger.debug(f"Creating caption for image_id: {image_id}")
95
+ logger.debug(f"Caption data: title={title}, prompt={prompt}, model={model_code}")
96
+ logger.debug(f"Database session ID: {id(db)}")
97
+ logger.debug(f"Database session is active: {db.is_active}")
98
 
99
  if metadata:
100
  raw_json["extracted_metadata"] = metadata
 
125
  # Link caption to image
126
  img.captions.append(caption)
127
 
128
+ logger.debug(f"About to commit caption to database...")
129
  db.commit()
130
+ logger.debug(f"Caption commit successful!")
131
  db.refresh(caption)
132
+ logger.info(f"Caption created successfully for image: {img.image_id}")
133
  return caption
134
 
135
  def get_caption(db: Session, caption_id: str):
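One possible follow-up, not part of this commit: the new calls pass pre-built f-strings, so the arguments are formatted even when the record is filtered out, and a few messages (e.g. f"Caption commit successful!") keep an f prefix without any placeholders. The standard logger also accepts lazy %-style arguments; a sketch with a placeholder value:

    import logging

    logger = logging.getLogger(__name__)
    image_id = "00000000-0000-0000-0000-000000000000"  # placeholder for illustration

    # %-style arguments are only interpolated if the record is actually emitted
    logger.debug("Creating caption for image_id: %s", image_id)
    logger.debug("Caption commit successful!")  # plain string, no f prefix needed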
py_backend/app/database.py CHANGED
@@ -6,6 +6,8 @@ from sqlalchemy.orm import sessionmaker, declarative_base
6
 
7
  from .config import settings
8
 
 
 
9
  raw_db_url = settings.DATABASE_URL
10
 
11
  if raw_db_url.startswith("psql '") and raw_db_url.endswith("'"):
@@ -18,7 +20,7 @@ if raw_db_url.startswith("postgresql://") and not raw_db_url.startswith("postgre
18
  if "sslmode=" not in raw_db_url and "localhost" not in raw_db_url and "127.0.0.1" not in raw_db_url:
19
  raw_db_url = f"{raw_db_url}{'&' if '?' in raw_db_url else '?'}sslmode=require"
20
 
21
- print(f"database url: {raw_db_url}")
22
 
23
  engine = create_engine(
24
  raw_db_url,
 
6
 
7
  from .config import settings
8
 
9
+ logger = logging.getLogger(__name__)
10
+
11
  raw_db_url = settings.DATABASE_URL
12
 
13
  if raw_db_url.startswith("psql '") and raw_db_url.endswith("'"):
 
20
  if "sslmode=" not in raw_db_url and "localhost" not in raw_db_url and "127.0.0.1" not in raw_db_url:
21
  raw_db_url = f"{raw_db_url}{'&' if '?' in raw_db_url else '?'}sslmode=require"
22
 
23
+ logger.debug(f"database url: {raw_db_url}")
24
 
25
  engine = create_engine(
26
  raw_db_url,
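Two details worth checking against the full file, since only these hunks are shown: the new logger line in database.py assumes logging is already imported at the top of the module, and the URL logged at debug level can embed credentials. A hedged sketch of both points (SQLAlchemy 1.4+ assumed for render_as_string):

    import logging
    from sqlalchemy.engine import make_url

    logger = logging.getLogger(__name__)

    raw_db_url = "postgresql://user:secret@db.example.com:5432/promptaid"  # placeholder
    # hide_password=True masks the credential portion before it reaches the logs
    logger.debug("database url: %s", make_url(raw_db_url).render_as_string(hide_password=True))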
py_backend/app/main.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  import subprocess
 
3
  from datetime import datetime
4
  from pathlib import Path
5
 
@@ -13,6 +14,17 @@ from dotenv import load_dotenv
13
  load_dotenv()
14
 
15
  from app.config import settings
 
 
 
 
 
 
 
 
 
 
 
16
  from app.routers import upload, caption, metadata, models
17
  from app.routers.images import router as images_router
18
  from app.routers.prompts import router as prompts_router
@@ -34,9 +46,9 @@ app.add_middleware(GZipMiddleware, minimum_size=500)
34
  # --------------------------------------------------------------------
35
  @app.middleware("http")
36
  async def log_requests(request: Request, call_next):
37
- print(f"DEBUG: {request.method} {request.url.path}")
38
  response = await call_next(request)
39
- print(f"DEBUG: {request.method} {request.url.path} -> {response.status_code}")
40
  return response
41
 
42
  # --------------------------------------------------------------------
@@ -155,7 +167,7 @@ CANDIDATES = [
155
  Path("/app/app") / "static", # some containers use /app/app
156
  ]
157
  STATIC_DIR = next((p for p in CANDIDATES if p.is_dir()), APP_DIR / "static")
158
- print(f"Serving static from: {STATIC_DIR}")
159
 
160
  # --------------------------------------------------------------------
161
  # Explicit top-level static files
@@ -268,19 +280,19 @@ def spa_fallback(full_path: str):
268
  def run_migrations():
269
  """Run database migrations on startup"""
270
  try:
271
- print("Running database migrations...")
272
  current_dir = os.getcwd()
273
- print(f"Current working directory: {current_dir}")
274
 
275
  try:
276
  result = subprocess.run(["which", "alembic"], capture_output=True, text=True)
277
- print(f"Alembic location: {result.stdout.strip() if result.stdout else 'Not found'}")
278
  except Exception as e:
279
- print(f"Could not check alembic location: {e}")
280
 
281
- print(f"Checking if /app exists: {os.path.exists('/app')}")
282
  if os.path.exists('/app'):
283
- print(f"Contents of /app: {os.listdir('/app')}")
284
 
285
  alembic_paths = [
286
  "alembic.ini",
@@ -292,14 +304,14 @@ def run_migrations():
292
  for path in alembic_paths:
293
  if os.path.exists(path):
294
  alembic_dir = os.path.dirname(path)
295
- print(f"Found alembic.ini at: {path}")
296
  break
297
  if not alembic_dir:
298
- print("Could not find alembic.ini - using current directory")
299
  alembic_dir = current_dir
300
 
301
  try:
302
- print(f"Running alembic upgrade head from: {alembic_dir}")
303
  result = subprocess.run(
304
  ["alembic", "upgrade", "head"],
305
  cwd=alembic_dir,
@@ -307,46 +319,46 @@ def run_migrations():
307
  text=True,
308
  timeout=60,
309
  )
310
- print(f"Alembic return code: {result.returncode}")
311
- print(f"Alembic stdout: {result.stdout}")
312
- print(f"Alembic stderr: {result.stderr}")
313
 
314
  if result.returncode == 0:
315
- print("Database migrations completed successfully")
316
  else:
317
- print("Database migrations failed")
318
- print("Trying fallback: create tables directly...")
319
  try:
320
  from app.database import engine
321
  from app.models import Base
322
  Base.metadata.create_all(bind=engine)
323
- print("Tables created directly via SQLAlchemy")
324
  except Exception as fallback_error:
325
- print(f"Fallback also failed: {fallback_error}")
326
  except Exception as e:
327
- print(f"Error running alembic: {e}")
328
 
329
  except Exception as e:
330
- print(f"Could not run migrations: {e}")
331
 
332
  def ensure_storage_ready():
333
  """Ensure storage is ready before starting the app"""
334
- print(f"Storage provider from settings: '{settings.STORAGE_PROVIDER}'")
335
- print(f"S3 endpoint from settings: '{settings.S3_ENDPOINT}'")
336
- print(f"S3 bucket from settings: '{settings.S3_BUCKET}'")
337
  if settings.STORAGE_PROVIDER == "s3":
338
  try:
339
- print("Checking S3 storage connection...")
340
  from app.storage import _ensure_bucket
341
  _ensure_bucket()
342
- print("S3 storage ready")
343
  except Exception as e:
344
- print(f"Warning: S3 storage not ready: {e}")
345
- print("Storage operations may fail until S3 is available")
346
  elif settings.STORAGE_PROVIDER == "local":
347
- print("Using local storage - no external dependencies")
348
  else:
349
- print(f"Unknown storage provider: {settings.STORAGE_PROVIDER}")
350
 
351
  # --------------------------------------------------------------------
352
  # VLM service registration on startup
@@ -367,45 +379,45 @@ import asyncio
367
  @app.on_event("startup")
368
  async def startup_tasks() -> None:
369
  """Run all startup tasks including migrations, storage setup, and VLM service registration."""
370
- print("Starting application initialization...")
371
 
372
  # Run database migrations
373
- print("Running database migrations...")
374
  run_migrations()
375
 
376
  # Ensure storage is ready
377
- print("Checking storage...")
378
  ensure_storage_ready()
379
 
380
  # Register VLM services
381
- print("Registering VLM services...")
382
 
383
  # Always have a stub as a safe fallback
384
  try:
385
  vlm_manager.register_service(StubVLMService())
386
- print("✓ STUB_MODEL registered")
387
  except Exception as e:
388
- print(f"✗ Failed to register STUB_MODEL: {e}")
389
 
390
  # OpenAI GPT-4V (if configured)
391
  if settings.OPENAI_API_KEY:
392
  try:
393
  vlm_manager.register_service(GPT4VService(settings.OPENAI_API_KEY))
394
- print("✓ GPT-4 Vision service registered")
395
  except Exception as e:
396
- print(f"✗ GPT-4 Vision service failed to register: {e}")
397
  else:
398
- print("○ GPT-4 Vision not configured (OPENAI_API_KEY missing)")
399
 
400
  # Google Gemini (if configured)
401
  if settings.GOOGLE_API_KEY:
402
  try:
403
  vlm_manager.register_service(GeminiService(settings.GOOGLE_API_KEY))
404
- print("✓ Gemini service registered")
405
  except Exception as e:
406
- print(f"✗ Gemini service failed to register: {e}")
407
  else:
408
- print("○ Gemini not configured (GOOGLE_API_KEY missing)")
409
 
410
  # Hugging Face Inference Providers (if configured)
411
  if settings.HF_API_KEY:
@@ -428,35 +440,35 @@ async def startup_tasks() -> None:
428
  public_name=m.m_code, # stable name your UI/DB uses
429
  )
430
  vlm_manager.register_service(svc)
431
- print(f"✓ HF registered: {m.m_code} -> {m.model_id}")
432
  registered += 1
433
  except Exception as e:
434
- print(f"✗ HF model {m.m_code} failed to register: {e}")
435
  else:
436
  skipped += 1
437
 
438
  if registered:
439
- print(f"✓ Hugging Face services registered: {registered}")
440
  else:
441
- print("○ No Hugging Face models registered (none found or all skipped)")
442
  if skipped:
443
- print(f"ℹ HF skipped entries: {skipped}")
444
  finally:
445
  db.close()
446
  else:
447
- print("○ Hugging Face not configured (HF_API_KEY missing)")
448
 
449
- # Kick off lightweight probes in the background (dont block startup)
450
  try:
451
  asyncio.create_task(vlm_manager.probe_all())
452
  except Exception as e:
453
- print(f"Probe scheduling failed: {e}")
454
 
455
- print(f"✓ Available models now: {', '.join(vlm_manager.get_available_models())}")
456
- print(f"✓ Total services: {len(vlm_manager.services)}")
457
 
458
 
459
- print("PromptAid Vision API server ready")
460
- print("Endpoints: /api/images, /api/captions, /api/metadata, /api/models")
461
- print(f"Environment: {settings.ENVIRONMENT}")
462
- print("CORS: localhost + *.hf.space")
 
1
  import os
2
  import subprocess
3
+ import logging
4
  from datetime import datetime
5
  from pathlib import Path
6
 
 
14
  load_dotenv()
15
 
16
  from app.config import settings
17
+
18
+ # Configure logging
19
+ logging.basicConfig(
20
+ level=logging.INFO,
21
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
22
+ handlers=[
23
+ logging.StreamHandler(),
24
+ ]
25
+ )
26
+ logger = logging.getLogger(__name__)
27
+
28
  from app.routers import upload, caption, metadata, models
29
  from app.routers.images import router as images_router
30
  from app.routers.prompts import router as prompts_router
 
46
  # --------------------------------------------------------------------
47
  @app.middleware("http")
48
  async def log_requests(request: Request, call_next):
49
+ logger.debug(f"{request.method} {request.url.path}")
50
  response = await call_next(request)
51
+ logger.debug(f"{request.method} {request.url.path} -> {response.status_code}")
52
  return response
53
 
54
  # --------------------------------------------------------------------
 
167
  Path("/app/app") / "static", # some containers use /app/app
168
  ]
169
  STATIC_DIR = next((p for p in CANDIDATES if p.is_dir()), APP_DIR / "static")
170
+ logger.info(f"Serving static files from: {STATIC_DIR}")
171
 
172
  # --------------------------------------------------------------------
173
  # Explicit top-level static files
 
280
  def run_migrations():
281
  """Run database migrations on startup"""
282
  try:
283
+ logger.info("Running database migrations...")
284
  current_dir = os.getcwd()
285
+ logger.debug(f"Current working directory: {current_dir}")
286
 
287
  try:
288
  result = subprocess.run(["which", "alembic"], capture_output=True, text=True)
289
+ logger.debug(f"Alembic location: {result.stdout.strip() if result.stdout else 'Not found'}")
290
  except Exception as e:
291
+ logger.debug(f"Could not check alembic location: {e}")
292
 
293
+ logger.debug(f"Checking if /app exists: {os.path.exists('/app')}")
294
  if os.path.exists('/app'):
295
+ logger.debug(f"Contents of /app: {os.listdir('/app')}")
296
 
297
  alembic_paths = [
298
  "alembic.ini",
 
304
  for path in alembic_paths:
305
  if os.path.exists(path):
306
  alembic_dir = os.path.dirname(path)
307
+ logger.debug(f"Found alembic.ini at: {path}")
308
  break
309
  if not alembic_dir:
310
+ logger.warning("Could not find alembic.ini - using current directory")
311
  alembic_dir = current_dir
312
 
313
  try:
314
+ logger.info(f"Running alembic upgrade head from: {alembic_dir}")
315
  result = subprocess.run(
316
  ["alembic", "upgrade", "head"],
317
  cwd=alembic_dir,
 
319
  text=True,
320
  timeout=60,
321
  )
322
+ logger.debug(f"Alembic return code: {result.returncode}")
323
+ logger.debug(f"Alembic stdout: {result.stdout}")
324
+ logger.debug(f"Alembic stderr: {result.stderr}")
325
 
326
  if result.returncode == 0:
327
+ logger.info("Database migrations completed successfully")
328
  else:
329
+ logger.error("Database migrations failed")
330
+ logger.warning("Trying fallback: create tables directly...")
331
  try:
332
  from app.database import engine
333
  from app.models import Base
334
  Base.metadata.create_all(bind=engine)
335
+ logger.info("Tables created directly via SQLAlchemy")
336
  except Exception as fallback_error:
337
+ logger.error(f"Fallback also failed: {fallback_error}")
338
  except Exception as e:
339
+ logger.error(f"Error running alembic: {e}")
340
 
341
  except Exception as e:
342
+ logger.error(f"Could not run migrations: {e}")
343
 
344
  def ensure_storage_ready():
345
  """Ensure storage is ready before starting the app"""
346
+ logger.debug(f"Storage provider from settings: '{settings.STORAGE_PROVIDER}'")
347
+ logger.debug(f"S3 endpoint from settings: '{settings.S3_ENDPOINT}'")
348
+ logger.debug(f"S3 bucket from settings: '{settings.S3_BUCKET}'")
349
  if settings.STORAGE_PROVIDER == "s3":
350
  try:
351
+ logger.info("Checking S3 storage connection...")
352
  from app.storage import _ensure_bucket
353
  _ensure_bucket()
354
+ logger.info("S3 storage ready")
355
  except Exception as e:
356
+ logger.warning(f"S3 storage not ready: {e}")
357
+ logger.warning("Storage operations may fail until S3 is available")
358
  elif settings.STORAGE_PROVIDER == "local":
359
+ logger.info("Using local storage - no external dependencies")
360
  else:
361
+ logger.warning(f"Unknown storage provider: {settings.STORAGE_PROVIDER}")
362
 
363
  # --------------------------------------------------------------------
364
  # VLM service registration on startup
 
379
  @app.on_event("startup")
380
  async def startup_tasks() -> None:
381
  """Run all startup tasks including migrations, storage setup, and VLM service registration."""
382
+ logger.info("Starting application initialization...")
383
 
384
  # Run database migrations
385
+ logger.info("Running database migrations...")
386
  run_migrations()
387
 
388
  # Ensure storage is ready
389
+ logger.info("Checking storage...")
390
  ensure_storage_ready()
391
 
392
  # Register VLM services
393
+ logger.info("Registering VLM services...")
394
 
395
  # Always have a stub as a safe fallback
396
  try:
397
  vlm_manager.register_service(StubVLMService())
398
+ logger.info("✓ STUB_MODEL registered")
399
  except Exception as e:
400
+ logger.error(f"✗ Failed to register STUB_MODEL: {e}")
401
 
402
  # OpenAI GPT-4V (if configured)
403
  if settings.OPENAI_API_KEY:
404
  try:
405
  vlm_manager.register_service(GPT4VService(settings.OPENAI_API_KEY))
406
+ logger.info("✓ GPT-4 Vision service registered")
407
  except Exception as e:
408
+ logger.error(f"✗ GPT-4 Vision service failed to register: {e}")
409
  else:
410
+ logger.info("○ GPT-4 Vision not configured (OPENAI_API_KEY missing)")
411
 
412
  # Google Gemini (if configured)
413
  if settings.GOOGLE_API_KEY:
414
  try:
415
  vlm_manager.register_service(GeminiService(settings.GOOGLE_API_KEY))
416
+ logger.info("✓ Gemini service registered")
417
  except Exception as e:
418
+ logger.error(f"✗ Gemini service failed to register: {e}")
419
  else:
420
+ logger.info("○ Gemini not configured (GOOGLE_API_KEY missing)")
421
 
422
  # Hugging Face Inference Providers (if configured)
423
  if settings.HF_API_KEY:
 
440
  public_name=m.m_code, # stable name your UI/DB uses
441
  )
442
  vlm_manager.register_service(svc)
443
+ logger.info(f"✓ HF registered: {m.m_code} -> {m.model_id}")
444
  registered += 1
445
  except Exception as e:
446
+ logger.error(f"✗ HF model {m.m_code} failed to register: {e}")
447
  else:
448
  skipped += 1
449
 
450
  if registered:
451
+ logger.info(f"✓ Hugging Face services registered: {registered}")
452
  else:
453
+ logger.info("○ No Hugging Face models registered (none found or all skipped)")
454
  if skipped:
455
+ logger.info(f"ℹ HF skipped entries: {skipped}")
456
  finally:
457
  db.close()
458
  else:
459
+ logger.info("○ Hugging Face not configured (HF_API_KEY missing)")
460
 
461
+ # Kick off lightweight probes in the background (don't block startup)
462
  try:
463
  asyncio.create_task(vlm_manager.probe_all())
464
  except Exception as e:
465
+ logger.error(f"Probe scheduling failed: {e}")
466
 
467
+ logger.info(f"✓ Available models now: {', '.join(vlm_manager.get_available_models())}")
468
+ logger.info(f"✓ Total services: {len(vlm_manager.services)}")
469
 
470
 
471
+ logger.info("PromptAid Vision API server ready")
472
+ logger.info("Endpoints: /api/images, /api/captions, /api/metadata, /api/models")
473
+ logger.info(f"Environment: {settings.ENVIRONMENT}")
474
+ logger.info("CORS: localhost + *.hf.space")
py_backend/app/routers/caption.py CHANGED
@@ -2,6 +2,7 @@
2
  from fastapi import APIRouter, HTTPException, Depends, Form, Request
3
  from sqlalchemy.orm import Session
4
  from typing import List
 
5
 
6
  from .. import crud, database, schemas, storage
7
  from ..services.vlm_service import vlm_manager
@@ -9,6 +10,7 @@ from ..services.schema_validator import schema_validator
9
  from ..config import settings
10
 
11
  router = APIRouter()
 
12
 
13
  def get_db():
14
  db = database.SessionLocal()
@@ -28,7 +30,7 @@ async def create_caption(
28
  model_name: str | None = Form(None),
29
  db: Session = Depends(get_db),
30
  ):
31
- print(f"DEBUG: Received request - image_id: {image_id}, title: {title}, prompt: {prompt}, model_name: {model_name}")
32
 
33
  img = crud.get_image(db, image_id)
34
  if not img:
@@ -36,24 +38,24 @@ async def create_caption(
36
 
37
  # Get the prompt (explicit by code/label, or active for image type)
38
  if prompt:
39
- print(f"Looking for prompt: '{prompt}' (type: {type(prompt)})")
40
  prompt_obj = crud.get_prompt(db, prompt) or crud.get_prompt_by_label(db, prompt)
41
  else:
42
- print(f"Looking for active prompt for image type: {img.image_type}")
43
  prompt_obj = crud.get_active_prompt_by_image_type(db, img.image_type)
44
 
45
- print(f"Prompt lookup result: {prompt_obj}")
46
  if not prompt_obj:
47
  raise HTTPException(400, f"No prompt found (requested: '{prompt}' or active for type '{img.image_type}')")
48
 
49
  prompt_text = prompt_obj.label
50
  metadata_instructions = prompt_obj.metadata_instructions or ""
51
- print(f"Using prompt text: '{prompt_text}'")
52
- print(f"Using metadata instructions: '{metadata_instructions[:100]}...'")
53
 
54
  # Load image bytes (S3 or local)
55
  try:
56
- print(f"DEBUG: About to call VLM service with model_name: {model_name}")
57
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
58
  response = storage.s3.get_object(
59
  Bucket=settings.S3_BUCKET,
@@ -66,7 +68,7 @@ async def create_caption(
66
  with open(file_path, 'rb') as f:
67
  img_bytes = f.read()
68
  except Exception as e:
69
- print(f"Error reading image file: {e}")
70
  # fallback: try presigned/public URL
71
  try:
72
  url = storage.get_object_url(img.file_key)
@@ -77,7 +79,7 @@ async def create_caption(
77
  resp.raise_for_status()
78
  img_bytes = resp.content
79
  except Exception as fallback_error:
80
- print(f"Fallback also failed: {fallback_error}")
81
  raise HTTPException(500, f"Could not read image file: {e}")
82
 
83
  metadata = {}
@@ -90,24 +92,24 @@ async def create_caption(
90
  db_session=db,
91
  )
92
 
93
- print(f"DEBUG: VLM service result: {result}")
94
- print(f"DEBUG: Result model field: {result.get('model', 'NOT_FOUND')}")
95
 
96
  raw = result.get("raw_response", {})
97
 
98
  # Validate and clean the data using schema validation
99
  image_type = img.image_type
100
- print(f"DEBUG: Validating data for image type: {image_type}")
101
- print(f"DEBUG: Raw data structure: {list(raw.keys()) if isinstance(raw, dict) else 'Not a dict'}")
102
 
103
  cleaned_data, is_valid, validation_error = schema_validator.clean_and_validate_data(raw, image_type)
104
 
105
  if is_valid:
106
- print(f"✓ Schema validation passed for {image_type}")
107
  text = cleaned_data.get("analysis", "")
108
  metadata = cleaned_data.get("metadata", {})
109
  else:
110
- print(f"⚠ Schema validation failed for {image_type}: {validation_error}")
111
  text = result.get("caption", "This is a fallback caption due to schema validation error.")
112
  metadata = result.get("metadata", {})
113
  raw["validation_error"] = validation_error
@@ -115,7 +117,7 @@ async def create_caption(
115
 
116
  used_model = result.get("model", model_name) or "STUB_MODEL"
117
  if used_model == "random":
118
- print(f"WARNING: VLM service returned 'random' as model name, using STUB_MODEL fallback")
119
  used_model = "STUB_MODEL"
120
 
121
  # Fallback info (if any)
@@ -127,7 +129,7 @@ async def create_caption(
127
  }
128
 
129
  except Exception as e:
130
- print(f"VLM error, using fallback: {e}")
131
  text = "This is a fallback caption due to VLM service error."
132
  used_model = "STUB_MODEL"
133
  raw = {"error": str(e), "fallback": True}
@@ -145,8 +147,8 @@ async def create_caption(
145
  )
146
 
147
  db.refresh(caption)
148
- print(f"DEBUG: Caption created, caption object: {caption}")
149
- print(f"DEBUG: caption_id: {caption.caption_id}")
150
  return schemas.CaptionOut.from_orm(caption)
151
 
152
  @router.get(
@@ -158,9 +160,9 @@ def get_all_captions_legacy_format(
158
  db: Session = Depends(get_db),
159
  ):
160
  """Get all images with captions in the old format for backward compatibility"""
161
- print(f"DEBUG: Fetching all captions in legacy format...")
162
  captions = crud.get_all_captions_with_images(db)
163
- print(f"DEBUG: Found {len(captions)} captions")
164
 
165
  result = []
166
  for caption in captions:
@@ -170,7 +172,7 @@ def get_all_captions_legacy_format(
170
  from .upload import convert_image_to_dict
171
  base_url = str(request.base_url).rstrip('/')
172
  url = f"{base_url}/api/images/{image.image_id}/file"
173
- print(f"DEBUG: Generated image URL: {url}")
174
  img_dict = convert_image_to_dict(image, url)
175
 
176
  # Overlay caption fields (legacy shape)
@@ -190,7 +192,7 @@ def get_all_captions_legacy_format(
190
  "updated_at": caption.updated_at,
191
  })
192
  result.append(schemas.ImageOut(**img_dict))
193
- print(f"DEBUG: Returning {len(result)} legacy format results")
194
  return result
195
 
196
  @router.get(
@@ -201,16 +203,16 @@ def get_all_captions_with_images(
201
  db: Session = Depends(get_db),
202
  ):
203
  """Get all captions"""
204
- print(f"DEBUG: Fetching all captions...")
205
  captions = crud.get_all_captions_with_images(db)
206
- print(f"DEBUG: Found {len(captions)} captions")
207
 
208
  result = []
209
  for caption in captions:
210
- print(f"DEBUG: Processing caption {caption.caption_id}, title: {caption.title}, generated: {caption.generated}, model: {caption.model}")
211
  db.refresh(caption)
212
  result.append(schemas.CaptionOut.from_orm(caption))
213
- print(f"DEBUG: Returning {len(result)} formatted results")
214
  return result
215
 
216
  @router.get(
 
2
  from fastapi import APIRouter, HTTPException, Depends, Form, Request
3
  from sqlalchemy.orm import Session
4
  from typing import List
5
+ import logging
6
 
7
  from .. import crud, database, schemas, storage
8
  from ..services.vlm_service import vlm_manager
 
10
  from ..config import settings
11
 
12
  router = APIRouter()
13
+ logger = logging.getLogger(__name__)
14
 
15
  def get_db():
16
  db = database.SessionLocal()
 
30
  model_name: str | None = Form(None),
31
  db: Session = Depends(get_db),
32
  ):
33
+ logger.debug(f"Received request - image_id: {image_id}, title: {title}, prompt: {prompt}, model_name: {model_name}")
34
 
35
  img = crud.get_image(db, image_id)
36
  if not img:
 
38
 
39
  # Get the prompt (explicit by code/label, or active for image type)
40
  if prompt:
41
+ logger.debug(f"Looking for prompt: '{prompt}' (type: {type(prompt)})")
42
  prompt_obj = crud.get_prompt(db, prompt) or crud.get_prompt_by_label(db, prompt)
43
  else:
44
+ logger.debug(f"Looking for active prompt for image type: {img.image_type}")
45
  prompt_obj = crud.get_active_prompt_by_image_type(db, img.image_type)
46
 
47
+ logger.debug(f"Prompt lookup result: {prompt_obj}")
48
  if not prompt_obj:
49
  raise HTTPException(400, f"No prompt found (requested: '{prompt}' or active for type '{img.image_type}')")
50
 
51
  prompt_text = prompt_obj.label
52
  metadata_instructions = prompt_obj.metadata_instructions or ""
53
+ logger.debug(f"Using prompt text: '{prompt_text}'")
54
+ logger.debug(f"Using metadata instructions: '{metadata_instructions[:100]}...'")
55
 
56
  # Load image bytes (S3 or local)
57
  try:
58
+ logger.debug(f"About to call VLM service with model_name: {model_name}")
59
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
60
  response = storage.s3.get_object(
61
  Bucket=settings.S3_BUCKET,
 
68
  with open(file_path, 'rb') as f:
69
  img_bytes = f.read()
70
  except Exception as e:
71
+ logger.error(f"Error reading image file: {e}")
72
  # fallback: try presigned/public URL
73
  try:
74
  url = storage.get_object_url(img.file_key)
 
79
  resp.raise_for_status()
80
  img_bytes = resp.content
81
  except Exception as fallback_error:
82
+ logger.error(f"Fallback also failed: {fallback_error}")
83
  raise HTTPException(500, f"Could not read image file: {e}")
84
 
85
  metadata = {}
 
92
  db_session=db,
93
  )
94
 
95
+ logger.debug(f"VLM service result: {result}")
96
+ logger.debug(f"Result model field: {result.get('model', 'NOT_FOUND')}")
97
 
98
  raw = result.get("raw_response", {})
99
 
100
  # Validate and clean the data using schema validation
101
  image_type = img.image_type
102
+ logger.debug(f"Validating data for image type: {image_type}")
103
+ logger.debug(f"Raw data structure: {list(raw.keys()) if isinstance(raw, dict) else 'Not a dict'}")
104
 
105
  cleaned_data, is_valid, validation_error = schema_validator.clean_and_validate_data(raw, image_type)
106
 
107
  if is_valid:
108
+ logger.debug(f"✓ Schema validation passed for {image_type}")
109
  text = cleaned_data.get("analysis", "")
110
  metadata = cleaned_data.get("metadata", {})
111
  else:
112
+ logger.debug(f"⚠ Schema validation failed for {image_type}: {validation_error}")
113
  text = result.get("caption", "This is a fallback caption due to schema validation error.")
114
  metadata = result.get("metadata", {})
115
  raw["validation_error"] = validation_error
 
117
 
118
  used_model = result.get("model", model_name) or "STUB_MODEL"
119
  if used_model == "random":
120
+ logger.warning(f"VLM service returned 'random' as model name, using STUB_MODEL fallback")
121
  used_model = "STUB_MODEL"
122
 
123
  # Fallback info (if any)
 
129
  }
130
 
131
  except Exception as e:
132
+ logger.warning(f"VLM error, using fallback: {e}")
133
  text = "This is a fallback caption due to VLM service error."
134
  used_model = "STUB_MODEL"
135
  raw = {"error": str(e), "fallback": True}
 
147
  )
148
 
149
  db.refresh(caption)
150
+ logger.debug(f"Caption created, caption object: {caption}")
151
+ logger.debug(f"caption_id: {caption.caption_id}")
152
  return schemas.CaptionOut.from_orm(caption)
153
 
154
  @router.get(
 
160
  db: Session = Depends(get_db),
161
  ):
162
  """Get all images with captions in the old format for backward compatibility"""
163
+ logger.debug(f"Fetching all captions in legacy format...")
164
  captions = crud.get_all_captions_with_images(db)
165
+ logger.debug(f"Found {len(captions)} captions")
166
 
167
  result = []
168
  for caption in captions:
 
172
  from .upload import convert_image_to_dict
173
  base_url = str(request.base_url).rstrip('/')
174
  url = f"{base_url}/api/images/{image.image_id}/file"
175
+ logger.debug(f"Generated image URL: {url}")
176
  img_dict = convert_image_to_dict(image, url)
177
 
178
  # Overlay caption fields (legacy shape)
 
192
  "updated_at": caption.updated_at,
193
  })
194
  result.append(schemas.ImageOut(**img_dict))
195
+ logger.debug(f"Returning {len(result)} legacy format results")
196
  return result
197
 
198
  @router.get(
 
203
  db: Session = Depends(get_db),
204
  ):
205
  """Get all captions"""
206
+ logger.debug(f"Fetching all captions...")
207
  captions = crud.get_all_captions_with_images(db)
208
+ logger.debug(f"Found {len(captions)} captions")
209
 
210
  result = []
211
  for caption in captions:
212
+ logger.debug(f"Processing caption {caption.caption_id}, title: {caption.title}, generated: {caption.generated}, model: {caption.model}")
213
  db.refresh(caption)
214
  result.append(schemas.CaptionOut.from_orm(caption))
215
+ logger.debug(f"Returning {len(result)} formatted results")
216
  return result
217
 
218
  @router.get(
py_backend/app/routers/images.py CHANGED
@@ -1,5 +1,6 @@
1
  import hashlib
2
  import mimetypes
 
3
  from fastapi import APIRouter, HTTPException, Depends
4
  from sqlalchemy.orm import Session
5
  from sqlalchemy import text
@@ -13,6 +14,7 @@ from ..config import settings
13
  from ..services.image_preprocessor import ImagePreprocessor
14
 
15
  router = APIRouter()
 
16
 
17
  def get_db():
18
  db = SessionLocal()
@@ -24,15 +26,15 @@ def get_db():
24
  @router.post("/from-url", response_model=CreateImageFromUrlOut)
25
  async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Depends(get_db)):
26
  try:
27
- print(f"DEBUG: Creating contribution from URL: {payload.url}")
28
- print(f"DEBUG: Payload: {payload}")
29
 
30
  # Check database connectivity
31
  try:
32
  db.execute(text("SELECT 1"))
33
- print("Database connection OK")
34
  except Exception as db_error:
35
- print(f"Database connection failed: {db_error}")
36
  raise HTTPException(status_code=500, detail=f"Database connection failed: {db_error}")
37
 
38
  # Check if required tables exist
@@ -41,9 +43,9 @@ async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Dep
41
  db.execute(text("SELECT 1 FROM event_types LIMIT 1"))
42
  db.execute(text("SELECT 1 FROM spatial_references LIMIT 1"))
43
  db.execute(text("SELECT 1 FROM image_types LIMIT 1"))
44
- print("Required tables exist")
45
  except Exception as table_error:
46
- print(f"Required tables missing: {table_error}")
47
  raise HTTPException(status_code=500, detail=f"Required tables missing: {table_error}")
48
 
49
  if '/api/images/' in payload.url and '/file' in payload.url:
@@ -55,33 +57,33 @@ async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Dep
55
  else:
56
  raise HTTPException(status_code=400, detail="Invalid image URL format")
57
 
58
- print(f"DEBUG: Extracted image_id: {image_id}")
59
 
60
  existing_image = db.query(Images).filter(Images.image_id == image_id).first()
61
  if not existing_image:
62
  raise HTTPException(status_code=404, detail="Source image not found")
63
 
64
- print(f"DEBUG: Found existing image: {existing_image.image_id}")
65
 
66
  try:
67
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
68
- print(f"DEBUG: Using S3 storage, bucket: {settings.S3_BUCKET}")
69
  response = storage.s3.get_object(
70
  Bucket=settings.S3_BUCKET,
71
  Key=existing_image.file_key,
72
  )
73
  data = response["Body"].read()
74
  else:
75
- print(f"DEBUG: Using local storage: {settings.STORAGE_DIR}")
76
  import os
77
  file_path = os.path.join(settings.STORAGE_DIR, existing_image.file_key)
78
  with open(file_path, 'rb') as f:
79
  data = f.read()
80
 
81
  content_type = "image/jpeg"
82
- print(f"DEBUG: Image data size: {len(data)} bytes")
83
  except Exception as e:
84
- print(f"ERROR: Failed to fetch image from storage: {e}")
85
  raise HTTPException(status_code=400, detail=f"Failed to fetch image from storage: {e}")
86
 
87
  if len(data) > 25 * 1024 * 1024:
@@ -97,10 +99,10 @@ async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Dep
97
  )
98
 
99
  # Log preprocessing info
100
- print(f"DEBUG: Image preprocessed: {mime_type} -> {processed_filename}")
101
 
102
  except Exception as e:
103
- print(f"DEBUG: Image preprocessing failed: {str(e)}")
104
  # Fall back to original content if preprocessing fails
105
  processed_data = data
106
  processed_filename = f"contributed.jpg"
@@ -110,11 +112,11 @@ async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Dep
110
  key = upload_bytes(processed_data, filename=processed_filename, content_type=mime_type)
111
  image_url = get_object_url(key, expires_in=86400)
112
 
113
- print(f"DEBUG: Uploaded to key: {key}")
114
- print(f"DEBUG: Generated URL: {image_url}")
115
 
116
  sha = hashlib.sha256(processed_data).hexdigest()
117
- print(f"DEBUG: Generated SHA256: {sha}")
118
 
119
  # Set prompt and schema based on image type
120
  prompt_code = "DEFAULT_CRISIS_MAP"
@@ -166,28 +168,28 @@ async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Dep
166
  std_v_m=payload.std_v_m
167
  )
168
 
169
- print(f"DEBUG: Created Images object: {img}")
170
  db.add(img)
171
  db.flush()
172
- print(f"DEBUG: Flushed to database, image_id: {img.image_id}")
173
 
174
  for c in payload.countries:
175
- print(f"DEBUG: Adding country: {c}")
176
  db.execute(image_countries.insert().values(image_id=img.image_id, c_code=c))
177
 
178
- print(f"DEBUG: About to commit transaction")
179
  db.commit()
180
- print(f"DEBUG: Transaction committed successfully")
181
 
182
  result = CreateImageFromUrlOut(image_id=str(img.image_id), image_url=image_url)
183
- print(f"DEBUG: Returning result: {result}")
184
  return result
185
 
186
  except Exception as e:
187
- print(f"ERROR: Exception in create_image_from_url: {e}")
188
- print(f"ERROR: Exception type: {type(e)}")
189
  import traceback
190
- traceback.print_exc()
191
  db.rollback()
192
  raise HTTPException(status_code=500, detail=f"Failed to create image: {str(e)}")
193
 
 
1
  import hashlib
2
  import mimetypes
3
+ import logging
4
  from fastapi import APIRouter, HTTPException, Depends
5
  from sqlalchemy.orm import Session
6
  from sqlalchemy import text
 
14
  from ..services.image_preprocessor import ImagePreprocessor
15
 
16
  router = APIRouter()
17
+ logger = logging.getLogger(__name__)
18
 
19
  def get_db():
20
  db = SessionLocal()
 
26
  @router.post("/from-url", response_model=CreateImageFromUrlOut)
27
  async def create_image_from_url(payload: CreateImageFromUrlIn, db: Session = Depends(get_db)):
28
  try:
29
+ logger.debug(f"Creating contribution from URL: {payload.url}")
30
+ logger.debug(f"Payload: {payload}")
31
 
32
  # Check database connectivity
33
  try:
34
  db.execute(text("SELECT 1"))
35
+ logger.info("Database connection OK")
36
  except Exception as db_error:
37
+ logger.error(f"Database connection failed: {db_error}")
38
  raise HTTPException(status_code=500, detail=f"Database connection failed: {db_error}")
39
 
40
  # Check if required tables exist
 
43
  db.execute(text("SELECT 1 FROM event_types LIMIT 1"))
44
  db.execute(text("SELECT 1 FROM spatial_references LIMIT 1"))
45
  db.execute(text("SELECT 1 FROM image_types LIMIT 1"))
46
+ logger.info("Required tables exist")
47
  except Exception as table_error:
48
+ logger.error(f"Required tables missing: {table_error}")
49
  raise HTTPException(status_code=500, detail=f"Required tables missing: {table_error}")
50
 
51
  if '/api/images/' in payload.url and '/file' in payload.url:
 
57
  else:
58
  raise HTTPException(status_code=400, detail="Invalid image URL format")
59
 
60
+ logger.debug(f"Extracted image_id: {image_id}")
61
 
62
  existing_image = db.query(Images).filter(Images.image_id == image_id).first()
63
  if not existing_image:
64
  raise HTTPException(status_code=404, detail="Source image not found")
65
 
66
+ logger.debug(f"Found existing image: {existing_image.image_id}")
67
 
68
  try:
69
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
70
+ logger.debug(f"Using S3 storage, bucket: {settings.S3_BUCKET}")
71
  response = storage.s3.get_object(
72
  Bucket=settings.S3_BUCKET,
73
  Key=existing_image.file_key,
74
  )
75
  data = response["Body"].read()
76
  else:
77
+ logger.debug(f"Using local storage: {settings.STORAGE_DIR}")
78
  import os
79
  file_path = os.path.join(settings.STORAGE_DIR, existing_image.file_key)
80
  with open(file_path, 'rb') as f:
81
  data = f.read()
82
 
83
  content_type = "image/jpeg"
84
+ logger.debug(f"Image data size: {len(data)} bytes")
85
  except Exception as e:
86
+ logger.error(f"Failed to fetch image from storage: {e}")
87
  raise HTTPException(status_code=400, detail=f"Failed to fetch image from storage: {e}")
88
 
89
  if len(data) > 25 * 1024 * 1024:
 
99
  )
100
 
101
  # Log preprocessing info
102
+ logger.debug(f"Image preprocessed: {mime_type} -> {processed_filename}")
103
 
104
  except Exception as e:
105
+ logger.debug(f"Image preprocessing failed: {str(e)}")
106
  # Fall back to original content if preprocessing fails
107
  processed_data = data
108
  processed_filename = f"contributed.jpg"
 
112
  key = upload_bytes(processed_data, filename=processed_filename, content_type=mime_type)
113
  image_url = get_object_url(key, expires_in=86400)
114
 
115
+ logger.debug(f"Uploaded to key: {key}")
116
+ logger.debug(f"Generated URL: {image_url}")
117
 
118
  sha = hashlib.sha256(processed_data).hexdigest()
119
+ logger.debug(f"Generated SHA256: {sha}")
120
 
121
  # Set prompt and schema based on image type
122
  prompt_code = "DEFAULT_CRISIS_MAP"
 
168
  std_v_m=payload.std_v_m
169
  )
170
 
171
+ logger.debug(f"Created Images object: {img}")
172
  db.add(img)
173
  db.flush()
174
+ logger.debug(f"Flushed to database, image_id: {img.image_id}")
175
 
176
  for c in payload.countries:
177
+ logger.debug(f"Adding country: {c}")
178
  db.execute(image_countries.insert().values(image_id=img.image_id, c_code=c))
179
 
180
+ logger.debug(f"About to commit transaction")
181
  db.commit()
182
+ logger.debug(f"Transaction committed successfully")
183
 
184
  result = CreateImageFromUrlOut(image_id=str(img.image_id), image_url=image_url)
185
+ logger.debug(f"Returning result: {result}")
186
  return result
187
 
188
  except Exception as e:
189
+ logger.error(f"Exception in create_image_from_url: {e}")
190
+ logger.error(f"Exception type: {type(e)}")
191
  import traceback
192
+ logger.debug(traceback.format_exc())
193
  db.rollback()
194
  raise HTTPException(status_code=500, detail=f"Failed to create image: {str(e)}")
195
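An alternative for the final except block in create_image_from_url above, not what the commit does: logger.exception() records the message at ERROR level and appends the active traceback, collapsing the separate message, type, and traceback lines into one call. A minimal, self-contained sketch:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    def create_image_example():
        try:
            raise ValueError("boom")  # stand-in for the storage/database work
        except Exception:
            # one call: ERROR-level message plus the current traceback
            logger.exception("Exception in create_image_from_url")

    create_image_example()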
 
py_backend/app/routers/prompts.py CHANGED
@@ -1,9 +1,11 @@
1
  from fastapi import APIRouter, Depends
2
  from sqlalchemy.orm import Session
3
  from typing import List
 
4
  from .. import crud, database, schemas
5
 
6
  router = APIRouter()
 
7
 
8
  def get_db():
9
  db = database.SessionLocal()
@@ -15,15 +17,15 @@ def get_db():
15
  @router.get("/", response_model=List[schemas.PromptOut])
16
  def get_prompts(db: Session = Depends(get_db)):
17
  """Get all available prompts"""
18
- print("=== get_prompts called ===")
19
  try:
20
  prompts = crud.get_prompts(db)
21
- print(f"=== Found {len(prompts)} prompts ===")
22
  for prompt in prompts:
23
- print(f" - {prompt.p_code}: {prompt.label} ({prompt.image_type}, active: {prompt.is_active})")
24
  return prompts
25
  except Exception as e:
26
- print(f"=== Error in get_prompts: {e} ===")
27
  raise
28
 
29
  @router.post("/", response_model=schemas.PromptOut)
 
1
  from fastapi import APIRouter, Depends
2
  from sqlalchemy.orm import Session
3
  from typing import List
4
+ import logging
5
  from .. import crud, database, schemas
6
 
7
  router = APIRouter()
8
+ logger = logging.getLogger(__name__)
9
 
10
  def get_db():
11
  db = database.SessionLocal()
 
17
  @router.get("/", response_model=List[schemas.PromptOut])
18
  def get_prompts(db: Session = Depends(get_db)):
19
  """Get all available prompts"""
20
+ logger.debug("get_prompts called")
21
  try:
22
  prompts = crud.get_prompts(db)
23
+ logger.debug(f"Found {len(prompts)} prompts")
24
  for prompt in prompts:
25
+ logger.debug(f" - {prompt.p_code}: {prompt.label} ({prompt.image_type}, active: {prompt.is_active})")
26
  return prompts
27
  except Exception as e:
28
+ logger.error(f"Error in get_prompts: {e}")
29
  raise
30
 
31
  @router.post("/", response_model=schemas.PromptOut)
py_backend/app/routers/upload.py CHANGED
@@ -1,6 +1,7 @@
1
  from fastapi import APIRouter, UploadFile, Form, Depends, HTTPException, Response
2
  from pydantic import BaseModel
3
  import io
 
4
  from sqlalchemy.orm import Session
5
  from .. import crud, schemas, storage, database
6
  from ..config import settings
@@ -13,6 +14,7 @@ import base64
13
  import datetime
14
 
15
  router = APIRouter()
 
16
 
17
  class CopyImageRequest(BaseModel):
18
  source_image_id: str
@@ -49,7 +51,7 @@ def convert_image_to_dict(img, image_url):
49
  try:
50
  countries_list = [{"c_code": c.c_code, "label": c.label, "r_code": c.r_code} for c in img.countries]
51
  except Exception as e:
52
- print(f"Warning: Error processing countries for image {img.image_id}: {e}")
53
  countries_list = []
54
 
55
  captions_list = []
@@ -74,7 +76,7 @@ def convert_image_to_dict(img, image_url):
74
  } for c in img.captions
75
  ]
76
  except Exception as e:
77
- print(f"Warning: Error processing captions for image {img.image_id}: {e}")
78
  captions_list = []
79
 
80
  # Get starred status and other caption fields from first caption for backward compatibility
@@ -116,13 +118,13 @@ def convert_image_to_dict(img, image_url):
116
  try:
117
  thumbnail_url = storage.get_object_url(img.thumbnail_key)
118
  except Exception as e:
119
- print(f"Warning: Error generating thumbnail URL for image {img.image_id}: {e}")
120
 
121
  if hasattr(img, 'detail_key') and img.detail_key:
122
  try:
123
  detail_url = storage.get_object_url(img.detail_key)
124
  except Exception as e:
125
- print(f"Warning: Error generating detail URL for image {img.image_id}: {e}")
126
 
127
  img_dict = {
128
  "image_id": img.image_id,
@@ -585,7 +587,7 @@ async def upload_image(
585
  # Log preprocessing info
586
  preprocessing_info = None
587
  if processed_filename != file.filename:
588
- print(f"Image preprocessed: {file.filename} -> {processed_filename} ({mime_type})")
589
  preprocessing_info = {
590
  "original_filename": file.filename,
591
  "processed_filename": processed_filename,
@@ -603,7 +605,7 @@ async def upload_image(
603
  }
604
 
605
  except Exception as e:
606
- print(f"Image preprocessing failed: {str(e)}")
607
  # Fall back to original content if preprocessing fails
608
  processed_content = content
609
  processed_filename = file.filename
@@ -636,14 +638,14 @@ async def upload_image(
636
 
637
  if thumbnail_result:
638
  thumbnail_key, thumbnail_sha256 = thumbnail_result
639
- print(f"Thumbnail generated and uploaded: key={thumbnail_key}, sha256={thumbnail_sha256}")
640
 
641
  if detail_result:
642
  detail_key, detail_sha256 = detail_result
643
- print(f"Detail version generated and uploaded: key={detail_key}, sha256={detail_sha256}")
644
 
645
  except Exception as e:
646
- print(f"Image resolution processing failed: {str(e)}")
647
  # Continue without processed versions if generation fails
648
 
649
  try:
@@ -710,7 +712,7 @@ async def upload_image(
710
  )
711
 
712
  except Exception as e:
713
- print(f"VLM caption generation failed: {str(e)}")
714
  # Continue without caption if VLM fails
715
 
716
  img_dict = convert_image_to_dict(img, url)
@@ -798,7 +800,7 @@ async def upload_multiple_images(
798
  quality=95
799
  )
800
  except Exception as e:
801
- print(f"Image preprocessing failed: {str(e)}")
802
  processed_content = content
803
  processed_filename = file.filename
804
  mime_type = 'image/png'
@@ -900,7 +902,7 @@ async def upload_multiple_images(
900
  db.commit()
901
 
902
  except Exception as e:
903
- print(f"VLM error: {e}")
904
  # Create fallback caption
905
  fallback_text = f"Analysis of {len(image_bytes_list)} images"
906
  caption = crud.create_caption(
@@ -988,52 +990,52 @@ async def copy_image_for_contribution(
988
  @router.get("/{image_id}/file")
989
  async def get_image_file(image_id: str, db: Session = Depends(get_db)):
990
  """Serve the actual image file"""
991
- print(f"🔍 Serving image file for image_id: {image_id}")
992
 
993
  img = crud.get_image(db, image_id)
994
  if not img:
995
- print(f"Image not found: {image_id}")
996
  raise HTTPException(404, "Image not found")
997
 
998
- print(f"Found image: {img.image_id}, file_key: {img.file_key}")
999
 
1000
  try:
1001
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
1002
- print(f"�� Using S3 storage - serving file content directly")
1003
  try:
1004
  response = storage.s3.get_object(Bucket=settings.S3_BUCKET, Key=img.file_key)
1005
  content = response['Body'].read()
1006
- print(f"Read {len(content)} bytes from S3")
1007
  except Exception as e:
1008
- print(f"Failed to get S3 object: {e}")
1009
  raise HTTPException(500, f"Failed to retrieve image from storage: {e}")
1010
  else:
1011
- print(f"🔍 Using local storage")
1012
  import os
1013
  file_path = os.path.join(settings.STORAGE_DIR, img.file_key)
1014
- print(f"📁 Reading from: {file_path}")
1015
- print(f"📁 File exists: {os.path.exists(file_path)}")
1016
 
1017
  if not os.path.exists(file_path):
1018
- print(f"File not found at: {file_path}")
1019
  raise FileNotFoundError(f"Image file not found: {file_path}")
1020
 
1021
  with open(file_path, 'rb') as f:
1022
  content = f.read()
1023
 
1024
- print(f"Read {len(content)} bytes from file")
1025
 
1026
  import mimetypes
1027
  content_type, _ = mimetypes.guess_type(img.file_key)
1028
  if not content_type:
1029
  content_type = 'application/octet-stream'
1030
 
1031
- print(f"Serving image with content-type: {content_type}, size: {len(content)} bytes")
1032
  return Response(content=content, media_type=content_type)
1033
  except Exception as e:
1034
- print(f"Error serving image: {e}")
1035
  import traceback
1036
- print(f"🔍 Full traceback: {traceback.format_exc()}")
1037
  raise HTTPException(500, f"Failed to serve image file: {e}")
1038
 
1039
  @router.put("/{image_id}")
@@ -1043,15 +1045,15 @@ def update_image_metadata(
1043
  db: Session = Depends(get_db)
1044
  ):
1045
  """Update image metadata (source, type, epsg, image_type, countries)"""
1046
- print(f"DEBUG: Updating metadata for image {image_id}")
1047
- print(f"DEBUG: Metadata received: {metadata}")
1048
 
1049
  img = crud.get_image(db, image_id)
1050
  if not img:
1051
- print(f"DEBUG: Image {image_id} not found in database")
1052
  raise HTTPException(404, "Image not found")
1053
 
1054
- print(f"DEBUG: Found image {image_id} in database")
1055
 
1056
  try:
1057
  if metadata.source is not None:
@@ -1103,17 +1105,17 @@ def update_image_metadata(
1103
  img.std_v_m = metadata.std_v_m
1104
 
1105
  if metadata.countries is not None:
1106
- print(f"DEBUG: Updating countries to: {metadata.countries}")
1107
  img.countries.clear()
1108
  for country_code in metadata.countries:
1109
  country = crud.get_country(db, country_code)
1110
  if country:
1111
  img.countries.append(country)
1112
- print(f"DEBUG: Added country: {country_code}")
1113
 
1114
  db.commit()
1115
  db.refresh(img)
1116
- print(f"DEBUG: Metadata update successful for image {image_id}")
1117
 
1118
  try:
1119
  url = storage.get_object_url(img.file_key)
@@ -1125,7 +1127,7 @@ def update_image_metadata(
1125
 
1126
  except Exception as e:
1127
  db.rollback()
1128
- print(f"DEBUG: Metadata update failed for image {image_id}: {str(e)}")
1129
  raise HTTPException(500, f"Failed to update image metadata: {str(e)}")
1130
 
1131
  @router.delete("/{image_id}")
 
1
  from fastapi import APIRouter, UploadFile, Form, Depends, HTTPException, Response
2
  from pydantic import BaseModel
3
  import io
4
+ import logging
5
  from sqlalchemy.orm import Session
6
  from .. import crud, schemas, storage, database
7
  from ..config import settings
 
14
  import datetime
15
 
16
  router = APIRouter()
17
+ logger = logging.getLogger(__name__)
18
 
19
  class CopyImageRequest(BaseModel):
20
  source_image_id: str
 
51
  try:
52
  countries_list = [{"c_code": c.c_code, "label": c.label, "r_code": c.r_code} for c in img.countries]
53
  except Exception as e:
54
+ logger.warning(f"Error processing countries for image {img.image_id}: {e}")
55
  countries_list = []
56
 
57
  captions_list = []
 
76
  } for c in img.captions
77
  ]
78
  except Exception as e:
79
+ logger.warning(f"Error processing captions for image {img.image_id}: {e}")
80
  captions_list = []
81
 
82
  # Get starred status and other caption fields from first caption for backward compatibility
 
118
  try:
119
  thumbnail_url = storage.get_object_url(img.thumbnail_key)
120
  except Exception as e:
121
+ logger.warning(f"Error generating thumbnail URL for image {img.image_id}: {e}")
122
 
123
  if hasattr(img, 'detail_key') and img.detail_key:
124
  try:
125
  detail_url = storage.get_object_url(img.detail_key)
126
  except Exception as e:
127
+ logger.warning(f"Error generating detail URL for image {img.image_id}: {e}")
128
 
129
  img_dict = {
130
  "image_id": img.image_id,
 
587
  # Log preprocessing info
588
  preprocessing_info = None
589
  if processed_filename != file.filename:
590
+ logger.info(f"Image preprocessed: {file.filename} -> {processed_filename} ({mime_type})")
591
  preprocessing_info = {
592
  "original_filename": file.filename,
593
  "processed_filename": processed_filename,
 
605
  }
606
 
607
  except Exception as e:
608
+ logger.error(f"Image preprocessing failed: {str(e)}")
609
  # Fall back to original content if preprocessing fails
610
  processed_content = content
611
  processed_filename = file.filename
 
638
 
639
  if thumbnail_result:
640
  thumbnail_key, thumbnail_sha256 = thumbnail_result
641
+ logger.info(f"Thumbnail generated and uploaded: key={thumbnail_key}, sha256={thumbnail_sha256}")
642
 
643
  if detail_result:
644
  detail_key, detail_sha256 = detail_result
645
+ logger.info(f"Detail version generated and uploaded: key={detail_key}, sha256={detail_sha256}")
646
 
647
  except Exception as e:
648
+ logger.error(f"Image resolution processing failed: {str(e)}")
649
  # Continue without processed versions if generation fails
650
 
651
  try:
 
712
  )
713
 
714
  except Exception as e:
715
+ logger.error(f"VLM caption generation failed: {str(e)}")
716
  # Continue without caption if VLM fails
717
 
718
  img_dict = convert_image_to_dict(img, url)
 
800
  quality=95
801
  )
802
  except Exception as e:
803
+ logger.debug(f"Image preprocessing failed: {str(e)}")
804
  processed_content = content
805
  processed_filename = file.filename
806
  mime_type = 'image/png'
 
902
  db.commit()
903
 
904
  except Exception as e:
905
+ logger.debug(f"VLM error: {e}")
906
  # Create fallback caption
907
  fallback_text = f"Analysis of {len(image_bytes_list)} images"
908
  caption = crud.create_caption(
 
990
  @router.get("/{image_id}/file")
991
  async def get_image_file(image_id: str, db: Session = Depends(get_db)):
992
  """Serve the actual image file"""
993
+ logger.debug(f"Serving image file for image_id: {image_id}")
994
 
995
  img = crud.get_image(db, image_id)
996
  if not img:
997
+ logger.warning(f"Image not found: {image_id}")
998
  raise HTTPException(404, "Image not found")
999
 
1000
+ logger.debug(f"Found image: {img.image_id}, file_key: {img.file_key}")
1001
 
1002
  try:
1003
  if hasattr(storage, 's3') and settings.STORAGE_PROVIDER != "local":
1004
+ logger.debug(f"Using S3 storage - serving file content directly")
1005
  try:
1006
  response = storage.s3.get_object(Bucket=settings.S3_BUCKET, Key=img.file_key)
1007
  content = response['Body'].read()
1008
+ logger.debug(f"Read {len(content)} bytes from S3")
1009
  except Exception as e:
1010
+ logger.error(f"Failed to get S3 object: {e}")
1011
  raise HTTPException(500, f"Failed to retrieve image from storage: {e}")
1012
  else:
1013
+ logger.debug(f"Using local storage")
1014
  import os
1015
  file_path = os.path.join(settings.STORAGE_DIR, img.file_key)
1016
+ logger.debug(f"Reading from: {file_path}")
1017
+ logger.debug(f"File exists: {os.path.exists(file_path)}")
1018
 
1019
  if not os.path.exists(file_path):
1020
+ logger.error(f"File not found at: {file_path}")
1021
  raise FileNotFoundError(f"Image file not found: {file_path}")
1022
 
1023
  with open(file_path, 'rb') as f:
1024
  content = f.read()
1025
 
1026
+ logger.debug(f"Read {len(content)} bytes from file")
1027
 
1028
  import mimetypes
1029
  content_type, _ = mimetypes.guess_type(img.file_key)
1030
  if not content_type:
1031
  content_type = 'application/octet-stream'
1032
 
1033
+ logger.debug(f"Serving image with content-type: {content_type}, size: {len(content)} bytes")
1034
  return Response(content=content, media_type=content_type)
1035
  except Exception as e:
1036
+ logger.error(f"Error serving image: {e}")
1037
  import traceback
1038
+ logger.debug(f"Full traceback: {traceback.format_exc()}")
1039
  raise HTTPException(500, f"Failed to serve image file: {e}")
1040
 
1041
  @router.put("/{image_id}")
 
1045
  db: Session = Depends(get_db)
1046
  ):
1047
  """Update image metadata (source, type, epsg, image_type, countries)"""
1048
+ logger.debug(f"DEBUG: Updating metadata for image {image_id}")
1049
+ logger.debug(f"DEBUG: Metadata received: {metadata}")
1050
 
1051
  img = crud.get_image(db, image_id)
1052
  if not img:
1053
+ logger.debug(f"DEBUG: Image {image_id} not found in database")
1054
  raise HTTPException(404, "Image not found")
1055
 
1056
+ logger.debug(f"DEBUG: Found image {image_id} in database")
1057
 
1058
  try:
1059
  if metadata.source is not None:
 
1105
  img.std_v_m = metadata.std_v_m
1106
 
1107
  if metadata.countries is not None:
1108
+ logger.debug(f"DEBUG: Updating countries to: {metadata.countries}")
1109
  img.countries.clear()
1110
  for country_code in metadata.countries:
1111
  country = crud.get_country(db, country_code)
1112
  if country:
1113
  img.countries.append(country)
1114
+ logger.debug(f"DEBUG: Added country: {country_code}")
1115
 
1116
  db.commit()
1117
  db.refresh(img)
1118
+ logger.debug(f"DEBUG: Metadata update successful for image {image_id}")
1119
 
1120
  try:
1121
  url = storage.get_object_url(img.file_key)
 
1127
 
1128
  except Exception as e:
1129
  db.rollback()
1130
+ logger.debug(f"DEBUG: Metadata update failed for image {image_id}: {str(e)}")
1131
  raise HTTPException(500, f"Failed to update image metadata: {str(e)}")
1132
 
1133
  @router.delete("/{image_id}")
py_backend/app/services/gpt4v_service.py CHANGED
@@ -4,29 +4,32 @@ import openai
4
  import base64
5
  import asyncio
6
  import json
 
 
 
7
 
8
  class GPT4VService(VLMService):
9
  """GPT-4 Vision service implementation"""
10
 
11
  def __init__(self, api_key: str):
12
  super().__init__("GPT4V", ModelType.GPT4V)
13
- print(f"[DEBUG] GPT4V Service - Initializing with API key: {api_key[:10]}...{api_key[-4:] if api_key else 'None'}")
14
  self.client = openai.OpenAI(api_key=api_key)
15
  self.model_name = "GPT-4O"
16
- print(f"[DEBUG] GPT4V Service - Initialized successfully")
17
 
18
  async def generate_caption(self, image_bytes: bytes, prompt: str, metadata_instructions: str = "") -> Dict[str, Any]:
19
  """Generate caption using GPT-4 Vision"""
20
  try:
21
  # Debug logging
22
  api_key_preview = self.client.api_key[:10] + "..." + self.client.api_key[-4:] if self.client.api_key else "None"
23
- print(f"[DEBUG] GPT4V Service - API Key preview: {api_key_preview}")
24
- print(f"[DEBUG] GPT4V Service - Image size: {len(image_bytes)} bytes")
25
- print(f"[DEBUG] GPT4V Service - Prompt length: {len(prompt)} chars")
26
 
27
  image_base64 = base64.b64encode(image_bytes).decode('utf-8')
28
 
29
- print(f"[DEBUG] GPT4V Service - Making API call to OpenAI...")
30
  response = await asyncio.to_thread(
31
  self.client.chat.completions.create,
32
  model="gpt-4o",
@@ -46,7 +49,7 @@ class GPT4VService(VLMService):
46
  ],
47
  max_tokens=800
48
  )
49
- print(f"[DEBUG] GPT4V Service - API call successful!")
50
 
51
  content = response.choices[0].message.content
52
 
@@ -69,7 +72,7 @@ class GPT4VService(VLMService):
69
  try:
70
  metadata = json.loads(json_str)
71
  except json.JSONDecodeError as e:
72
- print(f"JSON parse error: {e}")
73
  else:
74
  import re
75
  json_match = re.search(r'\{[^{}]*"metadata"[^{}]*\{[^{}]*\}', content)
@@ -101,11 +104,11 @@ class GPT4VService(VLMService):
101
  }
102
 
103
  except Exception as e:
104
- print(f"[DEBUG] GPT4V Service - API call failed: {str(e)}")
105
- print(f"[DEBUG] GPT4V Service - Error type: {type(e).__name__}")
106
  if hasattr(e, 'response'):
107
- print(f"[DEBUG] GPT4V Service - Response status: {getattr(e.response, 'status_code', 'Unknown')}")
108
- print(f"[DEBUG] GPT4V Service - Response body: {getattr(e.response, 'text', 'Unknown')}")
109
  raise Exception(f"GPT-4 Vision API error: {str(e)}")
110
 
111
  async def generate_multi_image_caption(self, image_bytes_list: List[bytes], prompt: str, metadata_instructions: str = "") -> Dict[str, Any]:
@@ -157,7 +160,7 @@ class GPT4VService(VLMService):
157
  try:
158
  metadata = json.loads(json_str)
159
  except json.JSONDecodeError as e:
160
- print(f"JSON parse error: {e}")
161
  else:
162
  import re
163
  json_match = re.search(r'\{[^{}]*"metadata"[^{}]*\{[^{}]*\}', content)
@@ -190,9 +193,9 @@ class GPT4VService(VLMService):
190
  }
191
 
192
  except Exception as e:
193
- print(f"[DEBUG] GPT4V Service - API call failed: {str(e)}")
194
- print(f"[DEBUG] GPT4V Service - Error type: {type(e).__name__}")
195
  if hasattr(e, 'response'):
196
- print(f"[DEBUG] GPT4V Service - Response status: {getattr(e.response, 'status_code', 'Unknown')}")
197
- print(f"[DEBUG] GPT4V Service - Response body: {getattr(e.response, 'text', 'Unknown')}")
198
  raise Exception(f"GPT-4 Vision API error: {str(e)}")
 
4
  import base64
5
  import asyncio
6
  import json
7
+ import logging
8
+
9
+ logger = logging.getLogger(__name__)
10
 
11
  class GPT4VService(VLMService):
12
  """GPT-4 Vision service implementation"""
13
 
14
  def __init__(self, api_key: str):
15
  super().__init__("GPT4V", ModelType.GPT4V)
16
+ logger.debug(f"Initializing with API key: {api_key[:10]}...{api_key[-4:] if api_key else 'None'}")
17
  self.client = openai.OpenAI(api_key=api_key)
18
  self.model_name = "GPT-4O"
19
+ logger.info("Initialized successfully")
20
 
21
  async def generate_caption(self, image_bytes: bytes, prompt: str, metadata_instructions: str = "") -> Dict[str, Any]:
22
  """Generate caption using GPT-4 Vision"""
23
  try:
24
  # Debug logging
25
  api_key_preview = self.client.api_key[:10] + "..." + self.client.api_key[-4:] if self.client.api_key else "None"
26
+ logger.debug(f"API Key preview: {api_key_preview}")
27
+ logger.debug(f"Image size: {len(image_bytes)} bytes")
28
+ logger.debug(f"Prompt length: {len(prompt)} chars")
29
 
30
  image_base64 = base64.b64encode(image_bytes).decode('utf-8')
31
 
32
+ logger.debug(f"Making API call to OpenAI...")
33
  response = await asyncio.to_thread(
34
  self.client.chat.completions.create,
35
  model="gpt-4o",
 
49
  ],
50
  max_tokens=800
51
  )
52
+ logger.info("API call successful!")
53
 
54
  content = response.choices[0].message.content
55
 
 
72
  try:
73
  metadata = json.loads(json_str)
74
  except json.JSONDecodeError as e:
75
+ logger.error(f"JSON parse error: {e}")
76
  else:
77
  import re
78
  json_match = re.search(r'\{[^{}]*"metadata"[^{}]*\{[^{}]*\}', content)
 
104
  }
105
 
106
  except Exception as e:
107
+ logger.error(f"API call failed: {str(e)}")
108
+ logger.error(f"Error type: {type(e).__name__}")
109
  if hasattr(e, 'response'):
110
+ logger.error(f"Response status: {getattr(e.response, 'status_code', 'Unknown')}")
111
+ logger.error(f"Response body: {getattr(e.response, 'text', 'Unknown')}")
112
  raise Exception(f"GPT-4 Vision API error: {str(e)}")
113
 
114
  async def generate_multi_image_caption(self, image_bytes_list: List[bytes], prompt: str, metadata_instructions: str = "") -> Dict[str, Any]:
 
160
  try:
161
  metadata = json.loads(json_str)
162
  except json.JSONDecodeError as e:
163
+ logger.error(f"JSON parse error: {e}")
164
  else:
165
  import re
166
  json_match = re.search(r'\{[^{}]*"metadata"[^{}]*\{[^{}]*\}', content)
 
193
  }
194
 
195
  except Exception as e:
196
+ logger.error(f"API call failed: {str(e)}")
197
+ logger.error(f"Error type: {type(e).__name__}")
198
  if hasattr(e, 'response'):
199
+ logger.error(f"Response status: {getattr(e.response, 'status_code', 'Unknown')}")
200
+ logger.error(f"Response body: {getattr(e.response, 'text', 'Unknown')}")
201
  raise Exception(f"GPT-4 Vision API error: {str(e)}")
py_backend/app/services/image_preprocessor.py CHANGED
@@ -1,8 +1,11 @@
1
  import io
2
  import mimetypes
 
3
  from typing import Tuple, Optional, BinaryIO
4
  from PIL import Image, ImageOps
5
 
 
 
6
  # Import PyMuPDF for PDF processing
7
  try:
8
  import fitz # PyMuPDF for PDF processing
@@ -146,7 +149,7 @@ class ImagePreprocessor:
146
  ImagePreprocessor.PDF_ZOOM_FACTOR = zoom_factor
147
  ImagePreprocessor.PDF_COMPRESS_LEVEL = compress_level
148
 
149
- print(f"PDF processing configured: zoom={ImagePreprocessor.PDF_ZOOM_FACTOR}, "
150
  f"compression={ImagePreprocessor.PDF_COMPRESS_LEVEL}, mode={quality_mode}")
151
 
152
  @staticmethod
@@ -161,7 +164,7 @@ class ImagePreprocessor:
161
  raise ValueError("PDF processing is not available. PyMuPDF is not installed.")
162
 
163
  try:
164
- print(f"Starting PDF processing for {filename}...")
165
 
166
  # Open PDF with PyMuPDF
167
  pdf_document = fitz.open(stream=file_content, filetype="pdf")
@@ -169,7 +172,7 @@ class ImagePreprocessor:
169
  if len(pdf_document) == 0:
170
  raise ValueError("PDF has no pages")
171
 
172
- print(f"PDF opened successfully, processing page 1 of {len(pdf_document)}...")
173
 
174
  # Get first page
175
  page = pdf_document[0]
@@ -178,7 +181,7 @@ class ImagePreprocessor:
178
  zoom = ImagePreprocessor.PDF_ZOOM_FACTOR
179
  mat = fitz.Matrix(zoom, zoom)
180
 
181
- print(f"Rendering page at {zoom}x zoom...")
182
 
183
  # Render page to image with optimized settings
184
  pix = page.get_pixmap(
@@ -187,7 +190,7 @@ class ImagePreprocessor:
187
  colorspace="rgb" # Force RGB colorspace
188
  )
189
 
190
- print(f"Page rendered, size: {pix.width}x{pix.height}")
191
 
192
  # Convert to PIL Image - use more efficient method
193
  img_data = pix.tobytes("png")
@@ -197,7 +200,7 @@ class ImagePreprocessor:
197
  if img.mode in ('RGBA', 'LA', 'P'):
198
  img = img.convert('RGB')
199
 
200
- print(f"Image converted to RGB, mode: {img.mode}")
201
 
202
  # Save to bytes with optimization
203
  output_buffer = io.BytesIO()
@@ -218,12 +221,12 @@ class ImagePreprocessor:
218
  base_name = os.path.splitext(filename)[0]
219
  new_filename = f"{base_name}{new_extension}"
220
 
221
- print(f"PDF processing completed: {filename} -> {new_filename}")
222
 
223
  return output_buffer.getvalue(), new_filename, new_mime_type
224
 
225
  except Exception as e:
226
- print(f"PDF processing failed: {str(e)}")
227
  raise ValueError(f"Failed to process PDF: {str(e)}")
228
 
229
  @staticmethod
 
1
  import io
2
  import mimetypes
3
+ import logging
4
  from typing import Tuple, Optional, BinaryIO
5
  from PIL import Image, ImageOps
6
 
7
+ logger = logging.getLogger(__name__)
8
+
9
  # Import PyMuPDF for PDF processing
10
  try:
11
  import fitz # PyMuPDF for PDF processing
 
149
  ImagePreprocessor.PDF_ZOOM_FACTOR = zoom_factor
150
  ImagePreprocessor.PDF_COMPRESS_LEVEL = compress_level
151
 
152
+ logger.debug(f"PDF processing configured: zoom={ImagePreprocessor.PDF_ZOOM_FACTOR}, "
153
  f"compression={ImagePreprocessor.PDF_COMPRESS_LEVEL}, mode={quality_mode}")
154
 
155
  @staticmethod
 
164
  raise ValueError("PDF processing is not available. PyMuPDF is not installed.")
165
 
166
  try:
167
+ logger.info(f"Starting PDF processing for {filename}...")
168
 
169
  # Open PDF with PyMuPDF
170
  pdf_document = fitz.open(stream=file_content, filetype="pdf")
 
172
  if len(pdf_document) == 0:
173
  raise ValueError("PDF has no pages")
174
 
175
+ logger.debug(f"PDF opened successfully, processing page 1 of {len(pdf_document)}...")
176
 
177
  # Get first page
178
  page = pdf_document[0]
 
181
  zoom = ImagePreprocessor.PDF_ZOOM_FACTOR
182
  mat = fitz.Matrix(zoom, zoom)
183
 
184
+ logger.debug(f"Rendering page at {zoom}x zoom...")
185
 
186
  # Render page to image with optimized settings
187
  pix = page.get_pixmap(
 
190
  colorspace="rgb" # Force RGB colorspace
191
  )
192
 
193
+ logger.debug(f"Page rendered, size: {pix.width}x{pix.height}")
194
 
195
  # Convert to PIL Image - use more efficient method
196
  img_data = pix.tobytes("png")
 
200
  if img.mode in ('RGBA', 'LA', 'P'):
201
  img = img.convert('RGB')
202
 
203
+ logger.debug(f"Image converted to RGB, mode: {img.mode}")
204
 
205
  # Save to bytes with optimization
206
  output_buffer = io.BytesIO()
 
221
  base_name = os.path.splitext(filename)[0]
222
  new_filename = f"{base_name}{new_extension}"
223
 
224
+ logger.info(f"PDF processing completed: {filename} -> {new_filename}")
225
 
226
  return output_buffer.getvalue(), new_filename, new_mime_type
227
 
228
  except Exception as e:
229
+ logger.error(f"PDF processing failed: {str(e)}")
230
  raise ValueError(f"Failed to process PDF: {str(e)}")
231
 
232
  @staticmethod
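A small follow-up this commit does not make: the logger calls above still build their messages with f-strings, which are evaluated even when the record is filtered out by level. Logging's %-style arguments defer formatting until a record is actually emitted, for example:

    # Formatted only if DEBUG is enabled for this logger
    logger.debug("Page rendered, size: %sx%s", pix.width, pix.height)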
py_backend/app/services/thumbnail_service.py CHANGED
@@ -1,9 +1,12 @@
1
  import io
 
2
  from PIL import Image, ImageOps
3
  from typing import Tuple, Optional
4
  import base64
5
  from ..storage import upload_fileobj, get_object_url
6
 
 
 
7
  class ImageProcessingService:
8
  """Service for creating and managing multiple image resolutions"""
9
 
@@ -76,7 +79,7 @@ class ImageProcessingService:
76
  return resized_bytes, resized_filename
77
 
78
  except Exception as e:
79
- print(f"Error creating resized image: {str(e)}")
80
  # Return original content as fallback
81
  return image_content, filename
82
 
@@ -153,7 +156,7 @@ class ImageProcessingService:
153
  return resized_bytes, resized_filename
154
 
155
  except Exception as e:
156
- print(f"Error creating resized image: {str(e)}")
157
  # Return original content as fallback
158
  return image_content, filename
159
 
@@ -227,7 +230,7 @@ class ImageProcessingService:
227
  return resized_key, resized_sha256
228
 
229
  except Exception as e:
230
- print(f"Error uploading resized image: {str(e)}")
231
  return None
232
 
233
  @staticmethod
@@ -278,7 +281,7 @@ class ImageProcessingService:
278
  return resized_key, resized_sha256
279
 
280
  except Exception as e:
281
- print(f"Error uploading resized image: {str(e)}")
282
  return None
283
 
284
  @staticmethod
@@ -318,7 +321,7 @@ class ImageProcessingService:
318
  return uploaded_key, sha256
319
 
320
  except Exception as e:
321
- print(f"Error uploading image bytes: {str(e)}")
322
  return None
323
 
324
  @staticmethod
@@ -357,7 +360,7 @@ class ImageProcessingService:
357
  )
358
 
359
  except Exception as e:
360
- print(f"Error processing image resolutions: {str(e)}")
361
 
362
  return thumbnail_result, detail_result
363
 
 
1
  import io
2
+ import logging
3
  from PIL import Image, ImageOps
4
  from typing import Tuple, Optional
5
  import base64
6
  from ..storage import upload_fileobj, get_object_url
7
 
8
+ logger = logging.getLogger(__name__)
9
+
10
  class ImageProcessingService:
11
  """Service for creating and managing multiple image resolutions"""
12
 
 
79
  return resized_bytes, resized_filename
80
 
81
  except Exception as e:
82
+ logger.error(f"Error creating resized image: {str(e)}")
83
  # Return original content as fallback
84
  return image_content, filename
85
 
 
156
  return resized_bytes, resized_filename
157
 
158
  except Exception as e:
159
+ logger.error(f"Error creating resized image: {str(e)}")
160
  # Return original content as fallback
161
  return image_content, filename
162
 
 
230
  return resized_key, resized_sha256
231
 
232
  except Exception as e:
233
+ logger.error(f"Error uploading resized image: {str(e)}")
234
  return None
235
 
236
  @staticmethod
 
281
  return resized_key, resized_sha256
282
 
283
  except Exception as e:
284
+ logger.error(f"Error uploading resized image: {str(e)}")
285
  return None
286
 
287
  @staticmethod
 
321
  return uploaded_key, sha256
322
 
323
  except Exception as e:
324
+ logger.error(f"Error uploading image bytes: {str(e)}")
325
  return None
326
 
327
  @staticmethod
 
360
  )
361
 
362
  except Exception as e:
363
+ logger.error(f"Error processing image resolutions: {str(e)}")
364
 
365
  return thumbnail_result, detail_result
366
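The commit title says structured logging, but the change so far swaps print for stdlib loggers emitting plain text. If machine-readable records are the eventual goal, a hand-rolled JSON formatter is one dependency-free option; the sketch below is an assumed possible follow-up, not something this commit adds:

    import json
    import logging

    class JsonFormatter(logging.Formatter):
        """Render each log record as a single JSON line."""
        def format(self, record: logging.LogRecord) -> str:
            payload = {
                "time": self.formatTime(record),
                "level": record.levelname,
                "logger": record.name,
                "message": record.getMessage(),
            }
            if record.exc_info:
                payload["exc_info"] = self.formatException(record.exc_info)
            return json.dumps(payload)

    handler = logging.StreamHandler()
    handler.setFormatter(JsonFormatter())
    logging.getLogger().addHandler(handler)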