"""Gradio app that classifies wildlife images at the taxonomic-class level.

Loads a Keras image classifier once (lazily, on first request), preprocesses
an uploaded image to the 224x224 input the network expects, and serves
predictions through a simple Gradio interface.
"""

import gradio as gr
import tensorflow as tf  # noqa: F401  (backend required by the Keras model)
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array, load_img  # noqa: F401
import numpy as np
from PIL import Image  # noqa: F401
import os  # noqa: F401

# Class- and species-level label vocabularies.
# NOTE(review): only labels['class'] is used by the active code path below;
# the species lists are retained for a future hierarchical (species-level)
# prediction step. 'invertebrates', 'lacertilia' and 'testudines' have no
# species list yet — confirm before enabling species-level prediction.
labels = {
    'class': ['amphibia', 'aves', 'invertebrates', 'lacertilia', 'mammalia',
              'serpentes', 'testudines'],
    'serpentes': ["Butler's Gartersnake", "Dekay's Brownsnake",
                  'Eastern Gartersnake', 'Eastern Hog-nosed snake',
                  'Eastern Massasauga', 'Eastern Milksnake',
                  'Eastern Racer Snake', 'Eastern Ribbonsnake',
                  'Gray Ratsnake', "Kirtland's Snake", 'Northern Watersnake',
                  'Plains Gartersnake', 'Red-bellied Snake',
                  'Smooth Greensnake'],
    'mammalia': ['American Mink', 'Brown Rat', 'Eastern Chipmunk',
                 'Eastern Cottontail', 'Long-tailed Weasel', 'Masked Shrew',
                 'Meadow Jumping Mouse', 'Meadow Vole',
                 'N. Short-tailed Shrew', 'Raccoon', 'Star-nosed mole',
                 'Striped Skunk', 'Virginia Opossum', 'White-footed Mouse',
                 'Woodchuck', 'Woodland Jumping Mouse'],
    'aves': ['Common Yellowthroat', 'Gray Catbird', 'Indigo Bunting',
             'Northern House Wren', 'Song Sparrow', 'Sora'],
    'amphibia': ['American Bullfrog', 'American Toad', 'Green Frog',
                 'Northern Leopard Frog'],
}

# Path of the class-level classifier artifact ("classs" sic: must match the
# filename on disk).
MODEL_PATH = r"inceptionv3_classs.h5"

# Module-level cache so the expensive model load happens once, not on every
# request (the original reloaded the .h5 file inside predict() per call).
_model = None


def _get_model():
    """Return the class-level classifier, loading it from disk on first use."""
    global _model
    if _model is None:
        _model = load_model(MODEL_PATH)
    return _model


def preprocess_image(image):
    """Convert a PIL image into a preprocessed (1, 224, 224, 3) batch.

    Parameters
    ----------
    image : PIL.Image.Image
        The image to prepare for the network.

    Returns
    -------
    numpy.ndarray
        Batch of one image, resized to 224x224 and scaled with the
        MobileNetV2 `preprocess_input` transform.
    """
    img = image.resize((224, 224))  # model expects 224x224 input
    img_array = img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
    return preprocess_input(img_array)


def predict(img_path):
    """Classify an uploaded image at the taxonomic-class level.

    Parameters
    ----------
    img_path : PIL.Image.Image
        Despite the name (kept for compatibility), this is the PIL image
        Gradio passes in (``inputs=gr.Image(type="pil")``), not a path.

    Returns
    -------
    str
        The predicted class label, e.g. ``'aves'``.
    """
    batch = preprocess_image(img_path)
    preds = _get_model().predict(batch)
    class_idx = int(np.argmax(preds))
    return labels['class'][class_idx]


# NOTE(review): a commented-out hierarchical species-level prediction path
# (loading f"inceptionv3_{class_label}.h5" per predicted class) and a
# sample-images gallery were removed as dead code; recover them from version
# control if that feature is resurrected.

# Gradio interface: single image in, single class label out.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(label="class_label")],
    title="Image Classification",
    description="Upload an image to classify it into species and class level.",
)

# Guarded launch: importing this module no longer starts the web server.
if __name__ == "__main__":
    iface.launch()