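"""Image-similarity model wrapper.

Selects a backbone (CLIP via CLIPModel, anything else via TorchModel) and
relies on BaseModelImageSimilarity for the similarity computation itself.
"""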
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))

import numpy as np
from PIL import Image

from models.base_model import BaseModelImageSimilarity
from utils import configs

# Support both package-style and direct execution: the relative import works
# under `python -m`, the plain import covers running this file as a script.
try:
    from .backbone_model import CLIPModel, TorchModel
except ImportError:
    from backbone_model import CLIPModel, TorchModel


class ImageSimilarity(BaseModelImageSimilarity):
    def __init__(
        self,
        name_model: str,
        freeze_model: bool,
        pretrained_model: bool,
        support_set_method: str,
    ):
        super().__init__(name_model, freeze_model, pretrained_model, support_set_method)
        self.init_model()

    def init_model(self):
        # CLIP gets its dedicated wrapper; every other backbone name goes
        # through the generic TorchModel wrapper.
        if self.name_model == "clip":
            self.model = CLIPModel(
                configs.CLIP_NAME_MODEL, self.freeze_model, self.pretrained_model
            )
        else:
            self.model = TorchModel(
                self.name_model, self.freeze_model, self.pretrained_model
            )
        # Inference only: move to the configured device and switch to eval mode.
        self.model.to(self.device)
        self.model.eval()


if __name__ == "__main__":
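    # Smoke test: instantiate a backbone and compare an image against itself
    # (both paths below point to the same file).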
    model = ImageSimilarity("mobilenetv3_large_100", True, True, "5_shot")
    image1 = np.array(
        Image.open(
            "../../assets/example_images/gon/306e5d35-b301-4299-8022-0c89dc0b7690.png"
        ).convert("RGB")
    )
    image2 = np.array(
        Image.open(
            "../../assets/example_images/gon/306e5d35-b301-4299-8022-0c89dc0b7690.png"
        ).convert("RGB")
    )
    print(model.get_similarity(image1, image2))