NimaBoscarino committed
Commit 483f4b4
1 Parent(s): bcb3a29
Files changed (3)
  1. README.md +46 -0
  2. config.json +8 -4
  3. tf_model.h5 +3 -0
README.md ADDED
@@ -0,0 +1,46 @@
+---
+tags:
+- generated_from_keras_callback
+model-index:
+- name: clip-vit-large-patch14-336
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information Keras had access to. You should
+probably proofread and complete it, then remove this comment. -->
+
+# clip-vit-large-patch14-336
+
+This model was trained from scratch on an unknown dataset.
+It achieves the following results on the evaluation set:
+
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- optimizer: None
+- training_precision: float32
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.21.3
+- TensorFlow 2.8.2
+- Tokenizers 0.12.1
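The card's usage sections are still stubs, so here is a minimal zero-shot image classification sketch for the TensorFlow weights this commit adds. It uses the standard `TFCLIPModel`/`CLIPProcessor` API from Transformers (the card pins 4.21.3); the repo id is taken from the `_name_or_path` added in the config diff below, and the image URL and captions are placeholder inputs, not part of this commit.

```python
import requests
import tensorflow as tf
from PIL import Image
from transformers import CLIPProcessor, TFCLIPModel

# Load the TF weights (tf_model.h5) and the paired processor.
model = TFCLIPModel.from_pretrained("openai/clip-vit-large-patch14-336")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14-336")

# Placeholder image and candidate captions.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"],
    images=image,
    return_tensors="tf",
    padding=True,
)

outputs = model(**inputs)
logits_per_image = outputs.logits_per_image      # image-text similarity scores
probs = tf.nn.softmax(logits_per_image, axis=1)  # per-caption probabilities
print(probs.numpy())
```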
config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "openai/clip-vit-large-patch14-336",
   "architectures": [
     "CLIPModel"
   ],
@@ -59,7 +60,7 @@
     "pad_token_id": 1,
     "prefix": null,
     "problem_type": null,
-    "projection_dim" : 768,
+    "projection_dim": 768,
     "pruned_heads": {},
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
@@ -68,6 +69,7 @@
     "sep_token_id": null,
     "task_specific_params": null,
     "temperature": 1.0,
+    "tf_legacy_loss": false,
     "tie_encoder_decoder": false,
     "tie_word_embeddings": true,
     "tokenizer_class": null,
@@ -75,7 +77,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.18.0.dev0",
+    "transformers_version": "4.21.3",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
@@ -134,6 +136,7 @@
     "num_attention_heads": 16,
     "num_beam_groups": 1,
     "num_beams": 1,
+    "num_channels": 3,
     "num_hidden_layers": 24,
     "num_return_sequences": 1,
     "output_attentions": false,
@@ -143,7 +146,7 @@
     "patch_size": 14,
     "prefix": null,
     "problem_type": null,
-    "projection_dim" : 768,
+    "projection_dim": 768,
     "pruned_heads": {},
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
@@ -152,6 +155,7 @@
     "sep_token_id": null,
     "task_specific_params": null,
     "temperature": 1.0,
+    "tf_legacy_loss": false,
     "tie_encoder_decoder": false,
     "tie_word_embeddings": true,
     "tokenizer_class": null,
@@ -159,7 +163,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.18.0.dev0",
+    "transformers_version": "4.21.3",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
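Beyond bumping `transformers_version` from 4.18.0.dev0 to 4.21.3, the diff adds fields that newer Transformers releases expect, such as `num_channels` on the vision config and `tf_legacy_loss` on both sub-configs. A quick sketch, assuming Transformers ≥ 4.21, of inspecting the updated values through `CLIPConfig`:

```python
from transformers import CLIPConfig

config = CLIPConfig.from_pretrained("openai/clip-vit-large-patch14-336")

print(config.text_config.projection_dim)       # 768, per the diff above
print(config.vision_config.num_channels)       # 3, the field added here
print(config.vision_config.patch_size)         # 14
print(config.vision_config.num_hidden_layers)  # 24
```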
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d12828ca8f0f3c92194f277b7d893da7f2fb7824d0b99dedb305eb48eb46bb7f
+size 1712454232
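`tf_model.h5` is stored through Git LFS, so the repository itself only tracks this three-line pointer: the `oid` and `size` fields identify the actual ~1.7 GB payload. A sketch, using `huggingface_hub` (one possible client; any LFS-aware tool works), of fetching the real file and checking it against the pointer:

```python
import hashlib
import os

from huggingface_hub import hf_hub_download

# Downloading resolves the LFS pointer to the real weight file.
path = hf_hub_download(
    repo_id="openai/clip-vit-large-patch14-336",
    filename="tf_model.h5",
)

# The pointer's `size` field is the payload's byte count.
assert os.path.getsize(path) == 1712454232

# The `oid` field is the payload's SHA-256 digest.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
print(digest.hexdigest())  # should match the oid above if the download is intact
```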