import pandas as pd
import numpy as np
from torch import nn
import os
from tqdm import tqdm
import timm
import torchvision.transforms as T
from PIL import Image
import torch


def is_gpu_available():
    """Check whether a CUDA-capable GPU is available to PyTorch."""
    return torch.cuda.is_available()


class CustomModel(nn.Module):
    def __init__(self, base_model_name, num_classes1, num_classes2):
        super(CustomModel, self).__init__()
        self.base_model = timm.create_model(base_model_name, pretrained=False)
        in_features = self.base_model.get_classifier().in_features
        self.base_model.reset_classifier(0)  # Remove the original classification layer
        self.fc1 = nn.Linear(in_features, num_classes1)  # Binary classification head
        self.fc2 = nn.Linear(in_features, num_classes2)  # Categorical classification head

    def forward(self, x):
        x = self.base_model(x)
        out1 = torch.sigmoid(self.fc1(x))  # Binary output (not used at inference)
        out2 = self.fc2(x)  # Categorical output
        return out2


class PytorchWorker:
    """Run inference using PyTorch."""

    def __init__(self, model_path: str, model_name: str, number_of_categories: int = 1784):

        def _load_model(model_name, model_path):
            print("Setting up Pytorch Model")
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            print(f"Using device: {self.device}")

            model = CustomModel(model_name, 1, number_of_categories)

            # Load the checkpoint onto whichever device is available.
            model_ckpt = torch.load(model_path, map_location=self.device)
            model.load_state_dict(model_ckpt)

            return model.to(self.device).eval()

        self.model = _load_model(model_name, model_path)

        self.transforms = T.Compose([
            T.Resize((256, 256)),
            T.ToTensor(),
            T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])

    def predict_image(self, image: Image.Image) -> list:
        """Run inference on a single image.

        :param image: Input PIL image.
        :return: A nested list of per-class logits.
        """
        with torch.no_grad():
            logits = self.model(self.transforms(image).unsqueeze(0).to(self.device))

        return logits.tolist()


def make_submission(test_metadata, model_path, model_name,
                    output_csv_path="./submission.csv",
                    images_root_path="/tmp/data/private_testset"):
    """Predict a class for every test image and write one row per observation to a submission CSV."""

    model = PytorchWorker(model_path, model_name)

    predictions = []

    for _, row in tqdm(test_metadata.iterrows(), total=len(test_metadata)):
        image_path = os.path.join(images_root_path, row.filename)

        test_image = Image.open(image_path).convert("RGB")

        logits = model.predict_image(test_image)

        predictions.append(np.argmax(logits))

    test_metadata["class_id"] = predictions

    # Keep only the first prediction for each observation.
    user_pred_df = test_metadata.drop_duplicates("observation_id", keep="first")
    user_pred_df[["observation_id", "class_id"]].to_csv(output_csv_path, index=False)


if __name__ == "__main__":

    import zipfile

    with zipfile.ZipFile("/tmp/data/private_testset.zip", 'r') as zip_ref:
        zip_ref.extractall("/tmp/data")

    MODEL_PATH = "best_accuracy_BCE_CE.pth"
    MODEL_NAME = "swinv2_tiny_window16_256.ms_in1k"

    metadata_file_path = "./SnakeCLEF2024-TestMetadata.csv"
    test_metadata = pd.read_csv(metadata_file_path)

    make_submission(
        test_metadata=test_metadata,
        model_path=MODEL_PATH,
        model_name=MODEL_NAME,
        # images_root_path='/home/zeleznyt/mnt/data-ntis/projects/korpusy_cv/SnakeCLEF2024/val/SnakeCLEF2023-medium_size'
    )