Transformers
PyTorch
flexibert
Inference Endpoints
shikhartuli committed
Commit ea1f6d8
1 Parent(s): 0129efc

Upload model

Files changed (2)
  1. config.json +73 -0
  2. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "_name_or_path": "../models/flexibert-mini/",
+   "architectures": [
+     "FlexiBERTModel"
+   ],
+   "attention_heads_list": [
+     2,
+     2,
+     4,
+     4
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "attention_type": [
+     "sa",
+     "sa",
+     "l",
+     "l"
+   ],
+   "bos_token_id": 0,
+   "conv_kernel_size": 9,
+   "eos_token_id": 2,
+   "ff_dim_list": [
+     [
+       512,
+       512,
+       512
+     ],
+     [
+       512,
+       512,
+       512
+     ],
+     [
+       1024
+     ],
+     [
+       1024
+     ]
+   ],
+   "from_model_dict_hetero": false,
+   "gradient_checkpointing": false,
+   "head_ratio": 2,
+   "hidden_act": "gelu",
+   "hidden_dim_list": [
+     256,
+     256,
+     128,
+     128
+   ],
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "flexibert",
+   "num_attention_heads": 12,
+   "num_groups": 1,
+   "num_hidden_layers": 4,
+   "pad_token_id": 0,
+   "position_embedding_type": "relative_key",
+   "similarity_list": [
+     "sdp",
+     "sdp",
+     "dct",
+     "dct"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
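
For quick inspection, a minimal sketch that reads the per-layer heterogeneous fields out of this config with only the standard library (the local path to config.json is an assumption; loading the actual model additionally requires the FlexiBERT modeling code, since "flexibert" is not a stock Transformers model type):

    import json

    # Path is an assumption: point it at your local clone of this repo.
    with open("config.json") as f:
        cfg = json.load(f)

    # FlexiBERT encoders are heterogeneous: each layer carries its own
    # hidden size, head count, attention operation, similarity metric,
    # and feed-forward stack, indexed in parallel across these lists.
    for i in range(cfg["num_hidden_layers"]):
        print(
            f"layer {i}: hidden={cfg['hidden_dim_list'][i]}, "
            f"heads={cfg['attention_heads_list'][i]}, "
            f"op={cfg['attention_type'][i]}, "
            f"sim={cfg['similarity_list'][i]}, "
            f"ff={cfg['ff_dim_list'][i]}"
        )

Run against the config above, this prints four layers: two "sa" layers with "sdp" similarity and two "l" layers with "dct" similarity, which in FlexiBERT's naming correspond to self-attention with scaled dot-product and linear-transform operations using the discrete cosine transform.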
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc14185b193edcbc45be7fdf07748fd982c2cf2aa57ae3e3445e12ebf3197b09
+ size 64728249
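
pytorch_model.bin is stored as a Git LFS pointer, so the three lines above are metadata, not weights. After fetching the real binary (e.g. with git lfs pull), its integrity can be checked against the pointer; a minimal sketch, assuming the file sits in the working directory:

    import hashlib

    # Values copied from the LFS pointer above.
    EXPECTED_OID = "fc14185b193edcbc45be7fdf07748fd982c2cf2aa57ae3e3445e12ebf3197b09"
    EXPECTED_SIZE = 64728249  # bytes

    h = hashlib.sha256()
    size = 0
    with open("pytorch_model.bin", "rb") as f:  # assumed local path
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
            size += len(chunk)

    assert size == EXPECTED_SIZE, f"size mismatch: got {size} bytes"
    assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
    print("pytorch_model.bin matches its LFS pointer")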