jacoballessio committed on
Commit
1d70194
1 Parent(s): 6a22967

Upload 5 files

Browse files
config.json CHANGED
@@ -1,67 +1,5 @@
1
  {
2
- "return_dict": true,
3
- "output_hidden_states": false,
4
- "output_attentions": false,
5
- "torchscript": false,
6
- "torch_dtype": "float32",
7
- "use_bfloat16": false,
8
- "tf_legacy_loss": false,
9
- "pruned_heads": {},
10
- "tie_word_embeddings": true,
11
- "chunk_size_feed_forward": 0,
12
- "is_encoder_decoder": false,
13
- "is_decoder": false,
14
- "cross_attention_hidden_size": null,
15
- "add_cross_attention": false,
16
- "tie_encoder_decoder": false,
17
- "max_length": 20,
18
- "min_length": 0,
19
- "do_sample": false,
20
- "early_stopping": false,
21
- "num_beams": 1,
22
- "num_beam_groups": 1,
23
- "diversity_penalty": 0.0,
24
- "temperature": 1.0,
25
- "top_k": 50,
26
- "top_p": 1.0,
27
- "typical_p": 1.0,
28
- "repetition_penalty": 1.0,
29
- "length_penalty": 1.0,
30
- "no_repeat_ngram_size": 0,
31
- "encoder_no_repeat_ngram_size": 0,
32
- "bad_words_ids": null,
33
- "num_return_sequences": 1,
34
- "output_scores": false,
35
- "return_dict_in_generate": false,
36
- "forced_bos_token_id": null,
37
- "forced_eos_token_id": null,
38
- "remove_invalid_values": false,
39
- "exponential_decay_length_penalty": null,
40
- "suppress_tokens": null,
41
- "begin_suppress_tokens": null,
42
- "architectures": [
43
- "CustomEfficientNet"
44
- ],
45
- "finetuning_task": null,
46
- "id2label": {
47
- "0": "LABEL_0",
48
- "1": "LABEL_1"
49
- },
50
- "label2id": {
51
- "LABEL_0": 0,
52
- "LABEL_1": 1
53
- },
54
- "tokenizer_class": null,
55
- "prefix": null,
56
- "bos_token_id": null,
57
- "pad_token_id": null,
58
- "eos_token_id": null,
59
- "sep_token_id": null,
60
- "decoder_start_token_id": null,
61
- "task_specific_params": null,
62
- "problem_type": null,
63
- "_name_or_path": "",
64
- "transformers_version": "4.41.2",
65
  "num_classes": 2,
66
- "model_type": "custom_efficientnet"
67
- }
 
1
  {
2
+ "model_type": "custom_efficientnet",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  "num_classes": 2,
4
+ "transformers_version": "4.41.2"
5
+ }
custom_efficientnet.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
+
+ import torch
+ import torch.nn as nn
+ from torchvision import models
+ from transformers import PreTrainedModel, PretrainedConfig
4
+
5
class CustomEfficientNetConfig(PretrainedConfig):
    """Configuration for the custom EfficientNet image classifier.

    Stores a single model-specific field, ``num_classes`` (default 2);
    all remaining keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "custom_efficientnet"

    def __init__(self, num_classes=2, **kwargs):
        super().__init__(**kwargs)
        # Number of output classes for the classification head.
        self.num_classes = num_classes
10
+
11
class CustomEfficientNetForImageClassification(PreTrainedModel):
    """Torchvision EfficientNet-B0 wrapped in the Hugging Face model API.

    The backbone's classification head is sized to ``config.num_classes``;
    ``forward`` returns a plain dict with ``"loss"`` and ``"logits"`` so the
    model works with the HF ``Trainer``.
    """

    config_class = CustomEfficientNetConfig
    base_model_prefix = "efficientnet"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_classes
        # torchvision builds the whole network, including a linear head with
        # ``num_classes`` outputs (randomly initialized, no pretrained weights).
        self.efficientnet = models.efficientnet_b0(num_classes=config.num_classes)

    def forward(self, pixel_values, labels=None):
        """Run the backbone on ``pixel_values``.

        When ``labels`` is given, also computes cross-entropy loss.
        Returns ``{"loss": loss_or_None, "logits": logits}``.
        """
        logits = self.efficientnet(pixel_values)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits, labels)
        return {"loss": loss, "logits": logits}

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load config and weights from a local checkpoint directory.

        Fixes over the original:
        - ``torch`` is now actually imported (the original called
          ``torch.load`` with only ``torch.nn`` in scope -> NameError);
        - the weights path is built with ``os.path.join`` instead of ``+``;
        - ``map_location="cpu"`` so GPU-saved checkpoints load on CPU-only
          hosts (``PreTrainedModel`` moves the model to device later);
        - ``weights_only=True`` guards against arbitrary-code pickle
          payloads when loading untrusted ``pytorch_model.bin`` files.

        NOTE(review): this override only supports a local directory that
        contains ``pytorch_model.bin`` — hub repo ids are not resolved here.
        """
        config = CustomEfficientNetConfig.from_pretrained(
            pretrained_model_name_or_path, *model_args, **kwargs
        )
        model = cls(config)
        weights_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin")
        state_dict = torch.load(weights_path, map_location="cpu", weights_only=True)
        model.load_state_dict(state_dict)
        return model
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b3aa5cb431970b4a34fddda4069a0be17f15e648ba9020a4b514957742a237f
3
+ size 256569792
preprocessor_config.json CHANGED
@@ -1,47 +1,15 @@
1
  {
2
- "_valid_processor_keys": [
3
- "images",
4
- "do_resize",
5
- "size",
6
- "resample",
7
- "do_center_crop",
8
- "crop_size",
9
- "do_rescale",
10
- "rescale_factor",
11
- "rescale_offset",
12
- "do_normalize",
13
- "image_mean",
14
- "image_std",
15
- "include_top",
16
- "return_tensors",
17
- "data_format",
18
- "input_data_format"
19
- ],
20
- "crop_size": {
21
- "height": 289,
22
- "width": 289
23
- },
24
- "do_center_crop": false,
25
- "do_normalize": true,
26
- "do_rescale": true,
27
  "do_resize": true,
 
 
28
  "image_mean": [
29
  0.485,
30
  0.456,
31
  0.406
32
  ],
33
- "image_processor_type": "EfficientNetImageProcessor",
34
  "image_std": [
35
  0.229,
36
  0.224,
37
  0.225
38
- ],
39
- "include_top": true,
40
- "resample": 0,
41
- "rescale_factor": 0.00392156862745098,
42
- "rescale_offset": false,
43
- "size": {
44
- "height": 600,
45
- "width": 600
46
- }
47
- }
 
1
  {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  "do_resize": true,
3
+ "size": 224,
4
+ "do_normalize": true,
5
  "image_mean": [
6
  0.485,
7
  0.456,
8
  0.406
9
  ],
 
10
  "image_std": [
11
  0.229,
12
  0.224,
13
  0.225
14
+ ]
15
+ }
 
 
 
 
 
 
 
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:146cbd6be07e82ae04954ac25ed6ec4dd0af7acdbc59f35ad50d34afb1b33b3f
3
+ size 16344682