from __future__ import annotations

import numpy as np
import torch
import torch.nn as nn

from mmdet.apis import inference_detector, init_detector_from_hf_hub

MODEL_DICT = {
    "faster_rcnn": {"repo_id": "blesot/Faster-R-CNN-Object-detection"},
    "mask_rcnn": {"repo_id": "blesot/Mask-RCNN"},
}


class Model:
    """Thin wrapper around an MMDetection detector loaded from the Hugging Face Hub."""

    def __init__(self, model_name: str, device: str | torch.device):
        self.device = torch.device(device)
        self._load_all_models_once()
        self.model_name = model_name
        self.model = self._load_model(model_name)

    def _load_all_models_once(self) -> None:
        # Instantiate every model once at start-up so that all checkpoints are
        # already downloaded when the user later switches between them.
        for name in MODEL_DICT:
            self._load_model(name)

    def _load_model(self, name: str) -> nn.Module:
        return init_detector_from_hf_hub(MODEL_DICT[name]["repo_id"],
                                         device=self.device)

    def set_model(self, name: str) -> None:
        # Reload only when the requested model differs from the current one.
        if name == self.model_name:
            return
        self.model_name = name
        self.model = self._load_model(name)

    def detect_and_visualize(
        self, image: np.ndarray, score_threshold: float
    ) -> tuple[list[np.ndarray] | tuple[list[np.ndarray],
                                        list[list[np.ndarray]]]
               | dict[str, np.ndarray], np.ndarray]:
        out = self.detect(image)
        vis = self.visualize_detection_results(image, out, score_threshold)
        return out, vis

    def detect(
        self, image: np.ndarray
    ) -> list[np.ndarray] | tuple[
            list[np.ndarray], list[list[np.ndarray]]] | dict[str, np.ndarray]:
        image = image[:, :, ::-1]  # RGB -> BGR
        out = inference_detector(self.model, image)
        return out

    def visualize_detection_results(
            self,
            image: np.ndarray,
            detection_results: list[np.ndarray]
        | tuple[list[np.ndarray], list[list[np.ndarray]]]
        | dict[str, np.ndarray],
            score_threshold: float = 0.3) -> np.ndarray:
        image = image[:, :, ::-1]  # RGB -> BGR
        vis = self.model.show_result(image,
                                     detection_results,
                                     score_thr=score_threshold,
                                     bbox_color=None,
                                     text_color=(200, 200, 200),
                                     mask_color=None)
        return vis[:, :, ::-1]  # BGR -> RGB


class AppModel(Model):
    def run(
        self, model_name: str, image: np.ndarray, score_threshold: float
    ) -> tuple[list[np.ndarray] | tuple[list[np.ndarray],
                                        list[list[np.ndarray]]]
               | dict[str, np.ndarray], np.ndarray]:
        # Switch to the requested model (reloading only if needed), then run
        # detection and return both the raw results and the visualization.
        self.set_model(model_name)
        return self.detect_and_visualize(image, score_threshold)

    def model_list(self) -> list[str]:
        return list(MODEL_DICT.keys())
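

if __name__ == "__main__":
    # Minimal usage sketch of the classes above. The image path "demo.jpg",
    # the output path "demo_out.jpg", and the CPU device are placeholder
    # assumptions; adjust them for your environment.
    import cv2

    app_model = AppModel("faster_rcnn", device="cpu")
    print(app_model.model_list())

    image_bgr = cv2.imread("demo.jpg")  # OpenCV loads images as BGR
    image_rgb = image_bgr[:, :, ::-1]  # the wrapper expects RGB input
    results, visualization = app_model.run("faster_rcnn", image_rgb,
                                           score_threshold=0.3)
    # The visualization is returned in RGB; convert back to BGR for saving.
    cv2.imwrite("demo_out.jpg", visualization[:, :, ::-1])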