File size: 3,237 Bytes
9f8fcff
dea7ada
 
 
 
9f8fcff
dea7ada
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f8fcff
dea7ada
 
 
 
 
 
 
 
d341b67
dea7ada
 
d341b67
dea7ada
9f8fcff
dea7ada
9f8fcff
dea7ada
9f8fcff
dea7ada
7abc3b5
dea7ada
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import gradio as gr
import torch
from sahi.prediction import ObjectPrediction
from sahi.utils.cv import visualize_object_predictions, read_image
from ultralyticsplus import YOLO, render_result

def yolov8_inference(
    image: str = None,
    model_path: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """
    Run YOLOv8 inference on a single image and return the rendered result.

    Args:
        image: Path to the input image file (Gradio's Image component with
            type="filepath" supplies a path string).
        model_path: Hugging Face model id or local path of the YOLOv8 weights.
        image_size: Inference image size passed to ``predict`` as ``imgsz``.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IOU threshold used by non-maximum suppression.

    Returns:
        The input image with predicted boxes rendered on it.
    """
    model = YOLO(model_path)
    # Apply the UI-selected thresholds to the loaded model.
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    model.overrides['agnostic_nms'] = False  # NMS class-agnostic
    model.overrides['max_det'] = 1000
    image = read_image(image)
    # Bug fix: image_size was previously accepted but never used, so the
    # "Image Size" slider had no effect; forward it as ``imgsz``.
    results = model.predict(image, imgsz=image_size)
    render = render_result(model=model, image=image, result=results[0])

    return render
        

inputs = [
    gr.inputs.Image(type="filepath", label="Input Image"),
    gr.inputs.Dropdown(["foduucom/plant-leaf-detection-and-classification"], 
                       default="foduucom/plant-leaf-detection-and-classification", label="Model"),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

outputs = gr.outputs.Image(type="filepath", label="Output Image")
title = "Leafify: Advanced Plant Leaf Detection and Classification"

description = """Unleash the botanical detective in you with Leafify, powered by the incredible Foduu AI model! πŸŒ±πŸ”¬ Detecting over 45 plant varieties (and counting!), Leafify turns leaf identification into an exciting adventure.

Curious about our secret sauce? The Foduu AI model is like a plant whisperer – it can spot and name plants like a pro. For the juicy deets, swing by the Foduu model page – it's where the AI magic happens!

Oh, but the excitement doesn't stop there. We're plant enthusiasts, just like you! If you're craving even more plant pals in the AI family, or you're thinking, 'Hey, my plants deserve to be famous too!' – drop us a line at info@foduu.com. We're all ears, leaves, and stems!

And, hey, if you heart the universe as much as we do, show some love! Just hit that little heart button – because every time you do, a virtual garden blooms. 🌼🧑 Let's get leafy!"""
examples = [['test-images/plant1.jpg', 'foduucom/plant-leaf-detection-and-classification', 640, 0.25, 0.45], ['test-images/plant2.jpeg', 'foduucom/plant-leaf-detection-and-classification', 640, 0.25, 0.45], ['test-images/plant3.webp', 'foduucom/plant-leaf-detection-and-classification', 1280, 0.25, 0.45]]
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)