qassim227 committed on
Commit 4cd3bf2
1 Parent(s): 73f6436

End of training

README.md CHANGED
@@ -5,7 +5,6 @@ tags:
  model-index:
  - name: Auto-pharmacy-V3
    results: []
- pipeline_tag: image-to-text
  ---
 
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -45,7 +44,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - Transformers 4.41.2
+ - Transformers 4.39.3
  - Pytorch 2.1.2
- - Datasets 2.19.2
+ - Datasets 2.18.0
- - Tokenizers 0.19.1
+ - Tokenizers 0.15.2
config.json CHANGED
@@ -165,13 +165,13 @@
   "eos_token_id": 2,
   "is_encoder_decoder": true,
   "length_penalty": 2.0,
-  "max_length": 7,
+  "max_length": 11,
   "model_type": "vision-encoder-decoder",
   "no_repeat_ngram_size": 3,
   "num_beams": 8,
   "pad_token_id": 1,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.41.2",
+  "transformers_version": "4.39.3",
   "vocab_size": 50265
 }
generation_config.json CHANGED
@@ -5,10 +5,10 @@
   "early_stopping": true,
   "eos_token_id": 2,
   "length_penalty": 2.0,
-  "max_length": 7,
+  "max_length": 11,
   "no_repeat_ngram_size": 3,
   "num_beams": 8,
   "pad_token_id": 1,
-  "transformers_version": "4.41.2",
+  "transformers_version": "4.39.3",
   "use_cache": false
 }
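
Taken together, the config.json and generation_config.json changes raise the decoder's generation cap from 7 to 11 tokens while keeping the beam-search settings (num_beams 8, length_penalty 2.0, no_repeat_ngram_size 3, early stopping). Below is a minimal sketch of how these settings would be picked up at inference, assuming the standard transformers VisionEncoderDecoderModel API; the repo id and image path are illustrative assumptions, not taken from this commit.

```python
# Sketch only: repo id and image path are assumptions.
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel

repo_id = "qassim227/Auto-pharmacy-V3"          # assumed repo id
model = VisionEncoderDecoderModel.from_pretrained(repo_id)
processor = AutoImageProcessor.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

image = Image.open("prescription.png").convert("RGB")   # hypothetical input image
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# generation_config.json now caps output at 11 tokens instead of 7; the beam-search
# settings (num_beams=8, length_penalty=2.0, no_repeat_ngram_size=3, early_stopping)
# are loaded from the same file when not overridden here.
output_ids = model.generate(pixel_values, max_length=11, num_beams=8)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```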
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3256be4d091c98450c2e5f37574f62b00c226fd2670c21461e180054553a42d6
+oid sha256:ece03b603445e305dc02cbf31d49322e04130efd53e5f3408ad9f5fe4f5490fb
 size 218412460
tokenizer.json CHANGED
@@ -3,7 +3,7 @@
   "truncation": null,
   "padding": {
     "strategy": {
-      "Fixed": 7
+      "Fixed": 11
     },
     "direction": "Right",
     "pad_to_multiple_of": null,
@@ -92,7 +92,6 @@
     "end_of_word_suffix": "",
     "fuse_unk": false,
     "byte_fallback": false,
-    "ignore_merges": false,
     "vocab": {
       "<s>": 0,
       "<pad>": 1,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0cc3a8d8f906ee7170a244d30c0a24e8d8f5b8ce50765457031366cbc3cf1349
-size 5112
+oid sha256:a5caf2b190452b6c68b0dcaf52f458feb351cbb53865fc00934591fcbb4463bf
+size 4920