abhijithneilabraham committed
Commit c8f156e
1 Parent(s): b16f2f0

commit from abhijithneilabraham

config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "architectures": [
+     "LongformerForQuestionAnswering"
+   ],
+   "attention_mode": "longformer",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "ignore_attention_mask": false,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 4098,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "sep_token_id": 2,
+   "type_vocab_size": 1,
+   "vocab_size": 50265
+ }
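
The config describes a 12-layer Longformer for extractive question answering, with a 512-token sliding attention window per layer and room for 4,096 input tokens (4,098 position embeddings, since RoBERTa-style models reserve two offset positions). A minimal sketch of loading it with transformers, assuming the committed files sit in a local ./checkpoint directory (this commit page does not name the Hub repo id):

# Sketch: load the committed config and weights via Hugging Face transformers.
# "./checkpoint" is an assumed local path holding the files from this commit;
# substitute the real Hub repo id to load remotely instead.
from transformers import LongformerConfig, LongformerForQuestionAnswering

config = LongformerConfig.from_pretrained("./checkpoint")
assert config.num_hidden_layers == 12
assert list(config.attention_window) == [512] * 12

model = LongformerForQuestionAnswering.from_pretrained("./checkpoint")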
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bbeb5bde8d232d2249dfd0464e147126e0a99f7909c0c239df0cf0de10b98be
+ size 594753583
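
The ~595 MB weights file is stored through Git LFS, so the repository itself holds only this small pointer (spec version, SHA-256 object id, byte size) and the blob is fetched separately. A sketch of downloading just this file with huggingface_hub; the repo_id below is a placeholder, since the commit page does not show it:

# Sketch: resolve the LFS pointer to the actual 594,753,583-byte weights file.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="abhijithneilabraham/longformer-qa",  # hypothetical repo id
    filename="pytorch_model.bin",
)
print(weights_path)  # local cache path of the downloaded blob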
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 4096, "bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1aaa9ee1d6a55f6dad6258ec188427395016fe317568333384705b02b4c00899
+ size 1263
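
training_args.bin is the small (1,263-byte) file that transformers' Trainer saves alongside a checkpoint; it is typically a pickled TrainingArguments object rather than model weights. A hedged sketch of inspecting it, assuming the file has been fetched locally:

# Sketch: unpickle the saved TrainingArguments. weights_only=False is needed
# on recent PyTorch versions because this file is a pickled Python object,
# not a plain tensor state dict. The path is an assumed local location.
import torch

training_args = torch.load("./checkpoint/training_args.bin", weights_only=False)
print(training_args)  # learning rate, batch size, epochs, etc.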
vocab.json ADDED
The diff for this file is too large to render. See raw diff