# Hugging Face Space: Gradio app serving a fine-tuned Keras cats-vs-dogs classifier.
import gradio as gr
import tensorflow as tf
from tensorflow import keras
import numpy as np
from PIL import Image

# --- Configuration ---
MODEL_PATH = "cats-vs-dogs-finetuned.keras"
IMAGE_SIZE = (150, 150)  # Adjust this to match the input size your model expects!
CLASS_LABELS = ['Cat', 'Dog']
# --- Load the Model ---
# The Keras model file is loaded from the repository root; Hugging Face
# Spaces makes it available automatically once uploaded.
try:
    model = keras.models.load_model(MODEL_PATH)
    print(f"Model loaded successfully from {MODEL_PATH}")
except Exception as e:
    # Loading can fail during initial setup (e.g. before the weights file
    # has been uploaded). Fall back to a None sentinel so the app still
    # starts and the prediction function can report the problem instead.
    print(f"Error loading model: {e}. Using a placeholder function.")
    model = None
# --- Prediction Function ---
def predict_image(input_img_pil):
    """
    Predict the class (Cat or Dog) of a PIL image.

    Args:
        input_img_pil: A PIL Image object received from Gradio's Image input.

    Returns:
        A dict mapping class labels to probabilities, suitable for Gradio's
        Label output. Returns ``{"Error": 1.0}`` when the model failed to
        load at startup.
    """
    if model is None:
        # Placeholder behavior if model loading failed at startup.
        return {"Error": 1.0}

    # 1. Preprocessing: force 3 RGB channels (uploads may be RGBA PNGs or
    # grayscale images, which would not match the model's input shape),
    # resize to the expected input size, and convert to a float array.
    img_resized = input_img_pil.convert("RGB").resize(IMAGE_SIZE)
    img_array = np.asarray(img_resized, dtype=np.float32)

    # 2. Rescale to [0, 1] and add the batch dimension: Keras models expect
    # input shaped (batch, height, width, channels).
    # NOTE(review): adjust the normalization if the model was trained with a
    # different preprocessing pipeline.
    img_array = img_array / 255.0
    img_array = np.expand_dims(img_array, axis=0)

    # 3. Prediction for the single image in the batch.
    predictions = model.predict(img_array)[0]

    # 4. Format the output for Gradio's Label component:
    # {'label': probability, ...}. Support both common binary-classifier
    # output conventions (the original code assumed two units and crashed
    # with IndexError on single-sigmoid models):
    #   - one sigmoid unit: predictions == [P(Dog)]
    #   - two softmax units: predictions == [P(Cat), P(Dog)]
    if len(predictions) == 1:
        prob_dog = float(predictions[0])
        prob_cat = 1.0 - prob_dog
    else:
        prob_cat = float(predictions[0])
        prob_dog = float(predictions[1])
    return {CLASS_LABELS[0]: prob_cat, CLASS_LABELS[1]: prob_dog}
# --- Gradio Interface Setup ---
# Input: an uploaded image (delivered to predict_image as a PIL Image);
# output: a label component showing class probabilities.
image_input = gr.Image(type="pil", label="Upload a Cat or Dog Image")
label_output = gr.Label(num_top_classes=2, label="Prediction")

# Example images for users to try (upload the files to the Space first).
examples = [
    # To use these, upload files named 'example_cat.jpg' and 'example_dog.jpg'
    # 'example_cat.jpg',
    # 'example_dog.jpg'
]

# Create the Gradio interface.
demo = gr.Interface(
    fn=predict_image,
    inputs=image_input,
    outputs=label_output,
    title="Keras Cat vs Dog Classifier",
    # Interpolate MODEL_PATH so the description always matches the file
    # actually loaded (it previously hard-coded the wrong name,
    # "cat-vs-dog.keras").
    description=(
        "Upload an image of a cat or dog to see the model's prediction. "
        f"The model is loaded from {MODEL_PATH}."
    ),
    theme=gr.themes.Soft(),
    # Optional: pass examples=examples once the example files are uploaded.
)

# Launch the app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()