jxie committed
Commit 4c90f36
Parent: c5d10ff

Upload SMAForSSL

Files changed (2)
  1. config.json +106 -0
  2. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "_name_or_path": null,
+   "architectures": [
+     "SMAForSSL"
+   ],
+   "attention_dropout_prob": 0.0,
+   "cross_attention_widening_factor": 1,
+   "cross_eval_noising_args": null,
+   "cross_train_noising_args": [
+     [
+       "RandomlySelectedCrossAttentionMasking",
+       {
+         "exclude_seen_reconstruction": true,
+         "head_aggregation": "random_mix",
+         "mask_self": true,
+         "masking_ratio": 0.3,
+         "num_per_query": 3,
+         "select_initial_ratio": 1.0,
+         "varying_length": true
+       }
+     ]
+   ],
+   "decoder_attention_channels": 768,
+   "decoder_heads": 1,
+   "decoder_latent_channels": 768,
+   "decoder_type": "cross_attention",
+   "dense_use_bias": true,
+   "drop_path_rate": 0.0,
+   "embedded_channels": 768,
+   "encoder_cross_attention_channels": 768,
+   "encoder_type": "self_attention",
+   "final_project": true,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "initializer_range": 0.02,
+   "input_channels": 3,
+   "input_type": "discrete",
+   "latent_channels": 768,
+   "layer_norm_eps": 1e-12,
+   "layernorm_eps": 1e-12,
+   "loss_fn": "mse",
+   "max_position_embeddings": 512,
+   "model_type": "sma",
+   "num_blocks": 1,
+   "num_cross_attention_heads": 12,
+   "num_discrete_tokens": 262,
+   "num_latents": 256,
+   "num_outputs": 2048,
+   "num_self_attends_per_block": 12,
+   "num_self_attention_heads": 12,
+   "output_channels": 262,
+   "pe_initializer_range": 0.02,
+   "post_decoder_layers": null,
+   "project_after_concat": true,
+   "qk_channels": 768,
+   "self_attention_widening_factor": 1,
+   "share_decoder_queries": true,
+   "share_embedding_weights": true,
+   "teacher_args": {
+     "auxiliary_loss_fn": "mse",
+     "auxiliary_loss_weight": 1.0,
+     "ema_args": {
+       "ema_decay_end": 0.0,
+       "ema_decay_start": 0.0
+     },
+     "eval_transform_args": [
+       [
+         "RandomlySelectedCrossAttentionMasking",
+         {
+           "exclude_seen_reconstruction": true,
+           "head_aggregation": "random_mix",
+           "mask_self": true,
+           "masking_ratio": 0.3,
+           "num_per_query": 3,
+           "select_initial_ratio": 1.0,
+           "varying_length": true
+         }
+       ]
+     ],
+     "mask_replace": 3,
+     "num_layer_target_avg": null,
+     "reconstruction_decoder_args": {
+       "num_heads": 12,
+       "num_outputs": 512,
+       "output_channels": 262,
+       "qk_channels": 768,
+       "query_num_channels": 768,
+       "share_decoder_queries": true,
+       "share_embedding_weights": true,
+       "use_query_residual": true,
+       "v_channels": 768
+     },
+     "reconstruction_loss_fn": "crossentropy",
+     "reconstruction_loss_weight": 1.0,
+     "reconstruction_weighted_loss": false,
+     "target_normalization_fn": "layernorm",
+     "train_transform_args": null
+   },
+   "teacher_name": "ReconstructionTeacher",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0.dev0",
+   "use_decoder": false,
+   "use_position_embeddings": true,
+   "use_query_residual": true,
+   "v_channels": 768
+ }
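For reference, a minimal sketch of inspecting this config once downloaded (hypothetical local path). Since "sma" is a custom model_type that stock transformers 4.26 does not register, AutoConfig will not resolve it without the repo's own modeling code, so plain json is enough here:

import json

# Assumes the config.json from this commit sits in the working directory.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["architectures"])  # ['SMAForSSL']
# Encoder: 12 self-attention layers over 256 latents of width 768.
print(cfg["num_self_attends_per_block"], cfg["num_latents"], cfg["latent_channels"])
# Training-time noising transform and its masking ratio.
name, params = cfg["cross_train_noising_args"][0]
print(name, params["masking_ratio"])  # RandomlySelectedCrossAttentionMasking 0.3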
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e09923c2781cc77d3d1fdcc66805b9ffa049508f04d5d333472ebec17b243f49
+ size 201120305
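The blob above is a Git LFS pointer, not the weights themselves; the ~201 MB payload is fetched by LFS on checkout or via the Hub's download endpoints. A minimal sketch, assuming the real pytorch_model.bin has been downloaded locally, that verifies it against the pointer's sha256 oid and size:

import hashlib
import os

path = "pytorch_model.bin"  # the downloaded payload, not this pointer file
expected_oid = "e09923c2781cc77d3d1fdcc66805b9ffa049508f04d5d333472ebec17b243f49"
expected_size = 201120305

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")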