jxie committed on
Commit 6710be9
1 Parent(s): 064d0b6

Upload SMAForSSL

Files changed (2)
  1. config.json +106 -0
  2. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "_name_or_path": "/iris/u/jwxie/workspace/releases/domain-agnostic-pretraining/examples/saved_models/physics_pretrained/higgs_guided_self_rand_select_masking_recon_small_noise_mask_self_random_mix-normalized-adamw_torch-lr1e-4-wd0.01-ws10000-masking_schedule_length0.25-mr0.2",
+   "architectures": [
+     "SMAForSSL"
+   ],
+   "attention_dropout_prob": 0.0,
+   "cross_attention_widening_factor": 1,
+   "cross_eval_noising_args": null,
+   "cross_train_noising_args": [
+     [
+       "RandomlySelectedCrossAttentionMasking",
+       {
+         "exclude_seen_reconstruction": true,
+         "head_aggregation": "random_mix",
+         "mask_self": true,
+         "masking_ratio": 0.2,
+         "num_per_query": 3,
+         "select_initial_ratio": 1.0,
+         "varying_length": true
+       }
+     ]
+   ],
+   "decoder_attention_channels": 128,
+   "decoder_heads": 1,
+   "decoder_latent_channels": 128,
+   "decoder_type": "cross_attention",
+   "dense_use_bias": true,
+   "drop_path_rate": 0.0,
+   "embedded_channels": 128,
+   "encoder_cross_attention_channels": 128,
+   "encoder_type": "self_attention",
+   "final_project": true,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "initializer_range": 0.02,
+   "input_channels": 1,
+   "input_type": "continuous",
+   "latent_channels": 128,
+   "layer_norm_eps": 1e-12,
+   "layernorm_eps": 1e-12,
+   "loss_fn": "mse",
+   "max_position_embeddings": 28,
+   "model_type": "perceiver_sma",
+   "num_blocks": 1,
+   "num_cross_attention_heads": 8,
+   "num_discrete_tokens": 262,
+   "num_latents": 128,
+   "num_outputs": 2048,
+   "num_self_attends_per_block": 4,
+   "num_self_attention_heads": 8,
+   "output_channels": 262,
+   "pe_initializer_range": 0.02,
+   "post_decoder_layers": null,
+   "project_after_concat": true,
+   "qk_channels": 128,
+   "self_attention_widening_factor": 1,
+   "share_decoder_queries": true,
+   "share_embedding_weights": true,
+   "teacher_args": {
+     "auxiliary_loss_fn": "mse",
+     "auxiliary_loss_weight": 1.0,
+     "ema_args": {
+       "ema_decay_end": 0.0,
+       "ema_decay_start": 0.0
+     },
+     "eval_transform_args": [
+       [
+         "RandomlySelectedCrossAttentionMasking",
+         {
+           "exclude_seen_reconstruction": true,
+           "head_aggregation": "random_mix",
+           "mask_self": true,
+           "masking_ratio": 0.2,
+           "num_per_query": 3,
+           "select_initial_ratio": 1.0,
+           "varying_length": true
+         }
+       ]
+     ],
+     "mask_replace": 3,
+     "num_layer_target_avg": null,
+     "reconstruction_decoder_args": {
+       "num_heads": 8,
+       "num_outputs": 28,
+       "output_channels": 1,
+       "qk_channels": 128,
+       "query_num_channels": 128,
+       "share_decoder_queries": true,
+       "share_embedding_weights": true,
+       "use_query_residual": true,
+       "v_channels": 128
+     },
+     "reconstruction_loss_fn": "mse",
+     "reconstruction_loss_weight": 1.0,
+     "reconstruction_weighted_loss": false,
+     "target_normalization_fn": "layernorm",
+     "train_transform_args": null
+   },
+   "teacher_name": "ReconstructionTeacher",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.0.dev0",
+   "use_decoder": false,
+   "use_position_embeddings": true,
+   "use_query_residual": true,
+   "v_channels": 128
+ }
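Since "model_type" is the custom "perceiver_sma" (not a stock transformers architecture), the config can always be inspected with the standard library alone. A minimal sketch, assuming "config.json" is a local checkout of the file added above; the field values in the comments are taken directly from the diff:

import json

# Read the uploaded config and print a few of the fields shown in the diff.
with open("config.json") as f:
    config = json.load(f)

print(config["model_type"])      # perceiver_sma
print(config["architectures"])   # ['SMAForSSL']
print(config["num_latents"], config["latent_channels"])  # 128 128

# The training-time masking transform is recorded as a (name, kwargs) pair:
name, kwargs = config["cross_train_noising_args"][0]
print(name, kwargs["masking_ratio"])  # RandomlySelectedCrossAttentionMasking 0.2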
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c96639bcf32c123c23acbff5e7bf1b91415d95d2e7c6117c3975bdd6978e65f0
+ size 2458681
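The three lines above are a Git LFS pointer, not the weights themselves: the spec version, the SHA-256 of the real weight file, and its size in bytes. A minimal sketch for checking a resolved (LFS-smudged) download against the pointer; the local filename is an assumption, and the expected values are copied from the pointer:

import hashlib
import os

# Values taken verbatim from the LFS pointer above.
EXPECTED_OID = "c96639bcf32c123c23acbff5e7bf1b91415d95d2e7c6117c3975bdd6978e65f0"
EXPECTED_SIZE = 2458681

path = "pytorch_model.bin"  # assumed local path to the resolved file
assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the LFS pointer"

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("pytorch_model.bin matches the pointer")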