MohamedAhmedAE committed
Commit 57b875c
1 Parent(s): e6b701b

Training in progress, step 30

Files changed (3):
  1. config.json +10 -12
  2. model.safetensors +2 -2
  3. training_args.bin +1 -1
config.json CHANGED
@@ -8,11 +8,9 @@
   "model_type": "vision-text-dual-encoder",
   "projection_dim": 512,
   "text_config": {
-    "_name_or_path": "roberta-base",
+    "_name_or_path": "allenai/biomed_roberta_base",
     "add_cross_attention": false,
-    "architectures": [
-      "RobertaForMaskedLM"
-    ],
+    "architectures": null,
     "attention_probs_dropout_prob": 0.1,
     "bad_words_ids": null,
     "begin_suppress_tokens": null,
@@ -90,7 +88,7 @@
   "torch_dtype": "float32",
   "transformers_version": "4.41.2",
   "vision_config": {
-    "_name_or_path": "openai/clip-vit-base-patch32",
+    "_name_or_path": "openai/clip-vit-large-patch14-336",
     "add_cross_attention": false,
     "architectures": null,
     "attention_dropout": 0.0,
@@ -111,15 +109,15 @@
     "forced_bos_token_id": null,
     "forced_eos_token_id": null,
     "hidden_act": "quick_gelu",
-    "hidden_size": 768,
+    "hidden_size": 1024,
     "id2label": {
       "0": "LABEL_0",
       "1": "LABEL_1"
     },
-    "image_size": 224,
+    "image_size": 336,
     "initializer_factor": 1.0,
     "initializer_range": 0.02,
-    "intermediate_size": 3072,
+    "intermediate_size": 4096,
     "is_decoder": false,
     "is_encoder_decoder": false,
     "label2id": {
@@ -132,20 +130,20 @@
     "min_length": 0,
     "model_type": "clip_vision_model",
     "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
+    "num_attention_heads": 16,
     "num_beam_groups": 1,
     "num_beams": 1,
     "num_channels": 3,
-    "num_hidden_layers": 12,
+    "num_hidden_layers": 24,
     "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
     "output_scores": false,
     "pad_token_id": null,
-    "patch_size": 32,
+    "patch_size": 14,
     "prefix": null,
     "problem_type": null,
-    "projection_dim": 512,
+    "projection_dim": 768,
     "pruned_heads": {},
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa1cfee1a3f3a86f86c4ccb5f265543a870a49cead43e0806e7e141372f3f4ed
-size 851603588
+oid sha256:ec27fd6b313dfe5163a2352de293480869155443fc2fd2e76aac55f2a7dc2b15
+size 1716360444
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea249cd364526eab937ad6487a7f81bc8fe5346a1416e3119d05e564ae6f62c3
+oid sha256:43d91b1496178ac406352964cea26801514f7d8531618943d63c80852c83fa37
 size 5112