"""Gradio app: classify leaf diseases for corn, wheat, rice, and potato.

Each crop has its own Keras model (loaded once at import time, so requests
don't pay the load cost) and its own class-label list; ``predict_crop``
dispatches on the crop name via a registry dict.
"""

import gradio as gr
import numpy as np
import tensorflow as tf

# Models and label lists are kept as individual module-level names for
# backward compatibility, then gathered into one dispatch registry below.
corn_model = tf.keras.models.load_model("./corn.h5")
wheat_model = tf.keras.models.load_model("./wheat.h5")
rice_model = tf.keras.models.load_model("./rice.h5")
potato_model = tf.keras.models.load_model("./potato.h5")

corn_classes = ['Corn___Common_Rust', 'Corn___Gray_Leaf_Spot',
                'Corn___Healthy', 'Corn___Northern_Leaf_Blight']
wheat_classes = ['Wheat___Brown_Rust', 'Wheat___Healthy', 'Wheat___Yellow_Rust']
rice_classes = ['Rice___Brown_Spot', 'Rice___Healthy',
                'Rice___Leaf_Blast', 'Rice___Neck_Blast']
potato_classes = ['Potato___Early_Blight', 'Potato___Healthy',
                  'Potato___Late_Blight']

IMAGE_SIZE = 224  # models are fed 224x224 inputs (see tf.image.resize below)

# crop name -> (model, class labels); replaces the old 4-way if/elif chain.
CROP_REGISTRY = {
    "Corn": (corn_model, corn_classes),
    "Wheat": (wheat_model, wheat_classes),
    "Rice": (rice_model, rice_classes),
    "Potato": (potato_model, potato_classes),
}


def predict_crop(crop, img):
    """Classify a leaf image for the selected crop.

    Args:
        crop: one of the keys in ``CROP_REGISTRY`` ("Corn", "Wheat",
            "Rice", "Potato"); anything else yields an error message.
        img: image as a numpy array from ``gr.Image(type="numpy")``,
            or None when nothing was uploaded.

    Returns:
        Tuple ``(label_text, confidence_percent)`` feeding the Label and
        Slider outputs; confidence is a plain float rounded to 2 decimals.
    """
    if img is None:
        return "No image uploaded.", 0

    # Validate the crop before doing any image work (the original resized
    # and normalized the image even when the crop name was invalid).
    entry = CROP_REGISTRY.get(crop)
    if entry is None:
        return "Invalid crop selected.", 0
    model, classes = entry

    # Preprocess: resize to the model's input size, scale pixels to [0, 1],
    # and add a leading batch dimension.
    img = tf.image.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
    img = img / 255.0
    img = np.expand_dims(img, axis=0)

    scores = model.predict(img)[0]
    predicted_class = classes[int(np.argmax(scores))]
    # Cast the NumPy scalar to a plain float so the Gradio Slider gets a
    # native Python number.
    confidence = round(100 * float(np.max(scores)), 2)
    return f"Prediction: {predicted_class}", confidence


# Gradio Interface — `demo` stays at module level so hosts (e.g. `gradio
# deploy`) can import it; only launch() is guarded below.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🌾 Crop Disease Classifier")
    gr.Markdown("Select your crop, upload a leaf image, and get a disease prediction with confidence!")
    with gr.Row():
        crop_choice = gr.Dropdown(
            ["Corn", "Wheat", "Rice", "Potato"],
            label="Select your Crop",
            info="Choose the type of crop you want to diagnose."
        )
    with gr.Row():
        image_input = gr.Image(type="numpy", label="Upload Leaf Image")
    with gr.Row():
        predict_button = gr.Button("Predict Disease")
    with gr.Row():
        output_label = gr.Label(label="Prediction Result")
        output_confidence = gr.Slider(0, 100, label="Confidence Level",
                                      interactive=False)
    predict_button.click(
        predict_crop,
        inputs=[crop_choice, image_input],
        outputs=[output_label, output_confidence]
    )

if __name__ == "__main__":
    # Guard launch so importing this module doesn't start a web server.
    demo.launch()