| """ | |
| Production-ready Flask backend for deployment | |
| Optimized for Hugging Face Spaces / Railway / Render | |
| """ | |
| from flask import Flask, request, jsonify | |
| import io | |
| from PIL import Image | |
| from flask_cors import CORS | |
| import logging | |
| import os | |
| app = Flask(__name__) | |
| CORS(app, resources={r"/predict_pet": {"origins": "*"}}) | |
| # Logging configuration | |
| logging.basicConfig( | |
| level=logging.INFO, | |
| format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' | |
| ) | |
| logger = logging.getLogger(__name__) | |
| # Model global variables | |
| model = None | |
| image_processor = None | |
def load_model():
    """Load model on first request"""
    global model, image_processor

    if model is not None:
        return

    try:
        import torch
        from transformers import AutoImageProcessor, AutoModelForImageClassification

        logger.info("Loading ConvNextV2-large-DogBreed model...")
        model_name = "Pavarissy/ConvNextV2-large-DogBreed"

        # Detect device
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {device}")

        # Load model
        image_processor = AutoImageProcessor.from_pretrained(model_name)
        model = AutoModelForImageClassification.from_pretrained(model_name)
        model = model.to(device)
        model.eval()

        logger.info(f"✓ Model loaded successfully on {device}")
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        raise
@app.route('/', methods=['GET'])  # route decorator restored so the endpoint is reachable; path assumed to be "/"
def health_check():
| """Health check endpoint""" | |
| return jsonify({ | |
| 'status': 'healthy', | |
| 'service': 'Dog Breed Prediction API', | |
| 'model': 'ConvNextV2-large-DogBreed', | |
| 'accuracy': '91.39%', | |
| 'version': '1.0.0' | |
| }) | |
@app.route('/predict_pet', methods=['POST'])  # route decorator restored; path matches the CORS resources config above
def predict_pet():
| """Predict dog breed from uploaded image""" | |
| try: | |
| # Load model if not loaded | |
| load_model() | |
| # Validate request | |
| if 'image' not in request.files: | |
| return jsonify({'error': 'No image file provided'}), 400 | |
| file = request.files['image'] | |
| # Read and validate image | |
| image_bytes = file.read() | |
| pil_image = Image.open(io.BytesIO(image_bytes)) | |
| if pil_image.mode != 'RGB': | |
| pil_image = pil_image.convert('RGB') | |
| # Make prediction | |
| import torch | |
| inputs = image_processor(pil_image, return_tensors="pt") | |
| device = next(model.parameters()).device | |
| inputs = {k: v.to(device) for k, v in inputs.items()} | |
| with torch.no_grad(): | |
| outputs = model(**inputs) | |
| logits = outputs.logits | |
| probs = torch.nn.functional.softmax(logits, dim=-1)[0].cpu() | |
| top_5_probs, top_5_indices = torch.topk(probs, 5) | |
| # Format results | |
| top_5_breeds = [] | |
| for prob, idx in zip(top_5_probs, top_5_indices): | |
| top_5_breeds.append({ | |
| 'breed': model.config.id2label[idx.item()], | |
| 'confidence': float(prob.item()) | |
| }) | |
| logger.info(f"Prediction: {top_5_breeds[0]['breed']} ({top_5_breeds[0]['confidence']:.2%})") | |
| return jsonify({ | |
| 'breed': top_5_breeds[0]['breed'], | |
| 'confidence': top_5_breeds[0]['confidence'], | |
| 'top_5': top_5_breeds, | |
| 'model': 'ConvNextV2-large-DogBreed', | |
| 'accuracy': '91.39%' | |
| }) | |
| except Exception as e: | |
| logger.error(f"Error: {str(e)}", exc_info=True) | |
| return jsonify({'error': str(e)}), 500 | |
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=port, debug=False)
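
# Example requests (illustrative only; assumes the server is running locally on the
# default port 7860 and that a file named "dog.jpg" exists in the current directory):
#
#   curl http://localhost:7860/
#   curl -X POST -F "image=@dog.jpg" http://localhost:7860/predict_pet
#
# The prediction response is JSON with the top breed, its confidence, and a "top_5" list.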