baebee committed on
Commit
8dac359
1 Parent(s): 65c57f6

Upload 4 files

Files changed (3)
  1. config.json +8 -5
  2. tokenizer.json +0 -0
  3. vocab.json +0 -0
config.json CHANGED
@@ -1,10 +1,13 @@
 {
+  "_name_or_path": "roberta-tagalog-base",
   "architectures": [
-    "XLMRobertaForMaskedLM"
+    "RobertaForMaskedLM"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
+  "classifier_dropout": null,
   "eos_token_id": 2,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -12,14 +15,14 @@
   "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
-  "model_type": "xlm-roberta",
+  "model_type": "roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
-  "transformers_version": "4.17.0.dev0",
+  "torch_dtype": "float32",
+  "transformers_version": "4.12.3",
   "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 250002
+  "vocab_size": 30000
 }
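For reference, a minimal sketch of loading the revised config and checking the fields this commit touches. The path "./roberta-tagalog-base" is a placeholder taken from the "_name_or_path" value, not the repository's actual hub id; point it at a local clone containing the uploaded config.json.

    from transformers import AutoConfig

    # Placeholder path; use a local clone of this repo (or its hub id).
    config = AutoConfig.from_pretrained("./roberta-tagalog-base")

    # Fields changed by this commit: the model is now plain RoBERTa
    # rather than XLM-RoBERTa, and the vocabulary shrinks to 30k entries.
    assert config.model_type == "roberta"
    assert config.architectures == ["RobertaForMaskedLM"]
    assert config.vocab_size == 30000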
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
vocab.json ADDED
The diff for this file is too large to render. See raw diff
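Since tokenizer.json and vocab.json are updated alongside config.json, a quick consistency check (again a sketch, with the same placeholder path) is to confirm that the tokenizer shipped in this commit agrees with the config's new vocab_size of 30000, so embedding indices stay in range.

    from transformers import AutoTokenizer

    # Placeholder path; must contain the uploaded tokenizer.json and vocab.json.
    tokenizer = AutoTokenizer.from_pretrained("./roberta-tagalog-base")

    # Expected to match config.vocab_size from this commit: 30000.
    print(len(tokenizer))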