File size: 1,357 Bytes
1d70194
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import os

import torch
import torch.nn as nn
from torchvision import models
from transformers import PreTrainedModel, PretrainedConfig

class CustomEfficientNetConfig(PretrainedConfig):
    """Configuration for the custom EfficientNet image classifier.

    Adds a single field, ``num_classes``, on top of the standard
    ``PretrainedConfig`` attributes.
    """

    model_type = "custom_efficientnet"

    def __init__(self, num_classes=2, **kwargs):
        """Build the config; *num_classes* sizes the classifier head (default 2)."""
        # Let the transformers base class consume every other keyword first.
        super().__init__(**kwargs)
        self.num_classes = num_classes

class CustomEfficientNetForImageClassification(PreTrainedModel):
    """torchvision EfficientNet-B0 wrapped as a Hugging Face ``PreTrainedModel``.

    The wrapper exposes the standard HF forward contract (``pixel_values`` in,
    dict with ``loss``/``logits`` out) so the model can be used with
    ``Trainer``-style pipelines.
    """

    config_class = CustomEfficientNetConfig
    base_model_prefix = "efficientnet"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_classes
        # torchvision builds the final linear head sized to num_classes.
        self.efficientnet = models.efficientnet_b0(num_classes=config.num_classes)

    def forward(self, pixel_values, labels=None):
        """Run a forward pass.

        Args:
            pixel_values: batch of images — assumed (batch, 3, H, W), as
                torchvision EfficientNet-B0 expects 3-channel input.
            labels: optional class-index tensor of shape (batch,); when
                given, cross-entropy loss is computed against the logits.

        Returns:
            dict with ``"loss"`` (``None`` when *labels* is ``None``) and
            ``"logits"`` (raw classifier outputs).
        """
        logits = self.efficientnet(pixel_values)
        loss = None
        if labels is not None:
            loss = nn.CrossEntropyLoss()(logits, labels)
        return {"loss": loss, "logits": logits}

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load config and weights from a local checkpoint directory.

        Fixes over the original: ``torch`` is now actually imported at file
        level (the original raised ``NameError`` here); the weight path is
        built portably with ``os.path.join``; ``map_location="cpu"`` lets
        GPU-saved checkpoints load on CPU-only hosts; ``weights_only=True``
        prevents arbitrary-code execution from an untrusted pickle.

        Returns:
            An initialized model with the checkpoint's state dict loaded.
        """
        config = CustomEfficientNetConfig.from_pretrained(
            pretrained_model_name_or_path, *model_args, **kwargs
        )
        model = cls(config)
        state_dict = torch.load(
            os.path.join(pretrained_model_name_or_path, "pytorch_model.bin"),
            map_location="cpu",
            weights_only=True,
        )
        model.load_state_dict(state_dict)
        return model