hosein-m committed on
Commit
faa85bf
1 Parent(s): e6580d3

commit from TehranNLP

Browse files
config.json ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "xlnet-base-cased",
3
+ "architectures": [
4
+ "XLNetForSequenceClassification"
5
+ ],
6
+ "attn_type": "bi",
7
+ "bi_data": false,
8
+ "bos_token_id": 1,
9
+ "clamp_len": -1,
10
+ "d_head": 64,
11
+ "d_inner": 3072,
12
+ "d_model": 768,
13
+ "dropout": 0.1,
14
+ "end_n_top": 5,
15
+ "eos_token_id": 2,
16
+ "ff_activation": "gelu",
17
+ "finetuning_task": "mnli",
18
+ "id2label": {
19
+ "0": "LABEL_0",
20
+ "1": "LABEL_1",
21
+ "2": "LABEL_2"
22
+ },
23
+ "initializer_range": 0.02,
24
+ "label2id": {
25
+ "LABEL_0": 0,
26
+ "LABEL_1": 1,
27
+ "LABEL_2": 2
28
+ },
29
+ "layer_norm_eps": 1e-12,
30
+ "mem_len": null,
31
+ "model_type": "xlnet",
32
+ "n_head": 12,
33
+ "n_layer": 12,
34
+ "pad_token_id": 5,
35
+ "reuse_len": null,
36
+ "same_length": false,
37
+ "start_n_top": 5,
38
+ "summary_activation": "tanh",
39
+ "summary_last_dropout": 0.1,
40
+ "summary_type": "last",
41
+ "summary_use_proj": true,
42
+ "task_specific_params": {
43
+ "text-generation": {
44
+ "do_sample": true,
45
+ "max_length": 250
46
+ }
47
+ },
48
+ "transformers_version": "4.6.1",
49
+ "untie_r": true,
50
+ "use_mems_eval": true,
51
+ "use_mems_train": false,
52
+ "vocab_size": 32000
53
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a93c4528e1defbdb1df41d0bcb547ed6ef06c47e09336c3a3c62831978df972c
3
+ size 469323187
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "<sep>", "pad_token": "<pad>", "cls_token": "<cls>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}, "additional_special_tokens": ["<eop>", "<eod>"]}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"do_lower_case": false, "remove_space": true, "keep_accents": false, "bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "<sep>", "pad_token": "<pad>", "cls_token": "<cls>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<eop>", "<eod>"], "special_tokens_map_file": null, "name_or_path": "xlnet-base-cased"}