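"""Inference script for the SnakeCLEF 2024 challenge.

Loads a fine-tuned timm image classifier from a local checkpoint, predicts a
class for every private test image, and writes one class_id per observation
to a submission CSV.
"""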
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import timm
import torchvision.transforms as T
from PIL import Image
import torch

def is_gpu_available():
    """Check whether a CUDA-capable GPU is visible to PyTorch."""
    return torch.cuda.is_available()


class PytorchWorker:
    """Run inference with a timm/PyTorch image classifier."""

    def __init__(self,
                 model_path: str,
                 model_name: str,
                 number_of_categories: int = 1784):
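        """Load the classifier and build its preprocessing pipeline."""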

        def _load_model(model_name, model_path):

            print("Setting up Pytorch Model")
            self.device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            print(f"Using devide: {self.device}")

            model = timm.create_model(model_name,
                                      num_classes=number_of_categories,
                                      pretrained=False)

            # map_location keeps the checkpoint load working on CPU-only machines.
            model_ckpt = torch.load(model_path, map_location=self.device)
            model.load_state_dict(model_ckpt)

            return model.to(self.device).eval()

        self.model = _load_model(model_name, model_path)

        # Resize to the 336x336 input expected by the checkpoint and normalize to [-1, 1].
        self.transforms = T.Compose([
            T.Resize((336, 336)),
            T.ToTensor(),
            T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

    def predict_image(self, image: Image.Image) -> list:
        """Run inference on a single image.

        :param image: Input image as a PIL Image.
        :return: Nested list of raw logits with shape [1, number_of_categories].
        """

        # Inference only: no gradients are needed.
        with torch.no_grad():
            logits = self.model(
                self.transforms(image).unsqueeze(0).to(self.device))

        return logits.tolist()


def make_submission(test_metadata, model_path, model_name,
                    output_csv_path="./submission.csv",
                    images_root_path="/tmp/data/private_testset"):
    """Run the model over all test images and write the submission CSV."""

    model = PytorchWorker(model_path, model_name)

    predictions = []

    for _, row in tqdm(test_metadata.iterrows(), total=len(test_metadata)):
        image_path = os.path.join(images_root_path, row.image_path)

        test_image = Image.open(image_path).convert("RGB")

        logits = model.predict_image(test_image)

        predictions.append(np.argmax(logits))

    test_metadata["class_id"] = predictions

    # Keep one prediction per observation_id (an observation may have several images).
    user_pred_df = test_metadata.drop_duplicates("observation_id", keep="first")
    user_pred_df[["observation_id", "class_id"]].to_csv(output_csv_path, index=False)

if __name__ == "__main__":

    import zipfile

    # The private test images ship as a zip archive; unpack them before inference.
    with zipfile.ZipFile("/tmp/data/private_testset.zip", 'r') as zip_ref:
        zip_ref.extractall("/tmp/data")

    MODEL_PATH = "pytorch_model.bin"
    MODEL_NAME = "hf-hub:timm/eva02_large_patch14_clip_336.merged2b_ft_inat21"

    metadata_file_path = "./SnakeCLEF2024_TestMetadata.csv"
    test_metadata = pd.read_csv(metadata_file_path)

    # metadata_file_path = "./unit_test.csv"
    # test_metadata = pd.read_csv(metadata_file_path)
    # images_root_path = "SnakeCLEF2023-large_size"

    make_submission(
        test_metadata=test_metadata,
        model_path=MODEL_PATH,
        model_name=MODEL_NAME,
    )