import os
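# Install pinned runtime dependencies at startup: CPU-only PyTorch 1.13.1,
# detectron2 built from the official repository, and headless OpenCV.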
os.system("pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --index-url https://download.pytorch.org/whl/cpu")
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
os.system('pip install opencv-python-headless==4.8.1.78')

import gradio as gr
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.data import MetadataCatalog
import numpy as np

# Path to the trained model weights
model_path = './model/keypoint_rcnn_X_101_32x8d_FPN_3x.pth'

number_of_keypoints = 15  # wing landmarks predicted per detected instance

# Setup the configuration for the model
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.MODEL.DEVICE = 'cpu'  # run inference on CPU
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # single object class: "wing"
cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = number_of_keypoints
cfg.TEST.KEYPOINT_OKS_SIGMAS = np.ones(number_of_keypoints, dtype=float).tolist()  # flat list of uniform OKS sigmas, one per keypoint

# Load the trained model weights
cfg.MODEL.WEIGHTS = model_path
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6  # set a custom testing threshold
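# DefaultPredictor wraps the model with detectron2's standard preprocessing;
# by default it expects BGR input, which matches cv2.imread below.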
predictor = DefaultPredictor(cfg)

# Set metadata for visualization
MetadataCatalog.get("spot").set(thing_classes=["wing"])
metadata = MetadataCatalog.get("spot")


def markin(image_path):
    """Run keypoint inference on one image and return the rendered predictions."""
    im = cv2.imread(image_path)  # read as 3-channel BGR; drops any alpha channel
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1],  # Visualizer expects RGB, so flip BGR -> RGB
                   metadata=metadata,
                   # scale=0.9,
                   instance_mode=ColorMode.SEGMENTATION
                   )
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return out.get_image()


# Setup the Gradio interface
demo = gr.Interface(markin,
                    gr.Image(type="filepath", sources=['upload']),
                    "image",
                    examples=[
                        os.path.join(os.path.dirname(__file__), "images/drosophila-wing-1.jpg"),
                        os.path.join(os.path.dirname(__file__), "images/drosophila-wing-2.jpg"),
                        os.path.join(os.path.dirname(__file__), "images/drosophila-wing-3.jpg"),
                        os.path.join(os.path.dirname(__file__), "images/drosophila-wing-4.jpg"),
                        os.path.join(os.path.dirname(__file__), "images/drosophila-wing-5.jpg")
                    ],
                    title='Drosophila wing landmarking',
                    description='Drosophila is a genus of small flies, commonly called fruit flies. These flies are widely used in scientific research, particularly in genetics and evolutionary biology, because they are easy to care for, reproduce rapidly, and have a short generation time. Measuring Drosophila wings is an important part of this research: wing size and shape vary among Drosophila species and strains, and these differences are used to study the genetic basis of wing development, evolution, and related questions. <br> <a href="https://datamarkin.com/models/automated-measurement-of-drosophila-wings" class="navbar-item "> More about project </a>')

if __name__ == "__main__":
    demo.launch()
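
# To run locally (assumes the trained weights in ./model and the example
# images in ./images are present):
#   python app.py
# then open the local URL that Gradio prints.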