import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image
import numpy as np
import tensorflow as tf

# Download and load the pre-trained model from the Hugging Face Hub
model_name = "iamsuman/household-waste-EfficientNetV2M"
model_path = hf_hub_download(repo_id=model_name, filename="EfficientNetV2M.h5")
model = tf.keras.models.load_model(model_path)

# Manual mapping from class indices to human-readable labels
index_to_label = {
    0: "battery",
    1: "biological",
    2: "cardboard",
    3: "clothes",
    4: "glass",
    5: "metal",
    6: "paper",
    7: "plastic",
    8: "shoes",
    9: "trash"
}


def classify_image(image):
    # Gradio passes the upload as a NumPy array; convert it to a PIL image
    # and resize it to the input size the model expects.
    image = Image.fromarray(image.astype("uint8"), "RGB")
    image = image.resize((400, 400))

    # Convert back to a NumPy array, add a batch dimension, and apply the
    # preprocessing for the EfficientNetV2 family used by this model.
    image = np.array(image)
    image = np.expand_dims(image, axis=0)
    image = tf.keras.applications.efficientnet_v2.preprocess_input(image)

    # Run inference and take the class with the highest probability.
    preds = model.predict(image)
    predicted_class_idx = np.argmax(preds, axis=-1)[0]

    # Map the predicted index back to its human-readable label.
    label = index_to_label.get(predicted_class_idx, "Unknown Label")
    return label.capitalize()


# Create the Gradio interface
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),  # accepts an image of any size
    outputs=gr.Label(),
    title="Household Waste Classification with EfficientNetV2M",
    description="Upload an image of household waste, and the model will classify it.",
)

# Launch the app
iface.launch()
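
# A minimal sketch of how classify_image could be smoke-tested without the Gradio UI,
# assuming a local test file "sample_waste.jpg" (hypothetical path, not part of this repo).
# Kept commented out so it does not interfere with the app; iface.launch() above blocks
# while the server is running.
#
# sample = np.array(Image.open("sample_waste.jpg").convert("RGB"))
# print(classify_image(sample))  # prints one of the ten capitalized labels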