GuysTrans committed
Commit 3b7fcfb
1 Parent(s): 5c18dc9

Training in progress, step 500

config.json CHANGED
@@ -39,7 +39,6 @@
     "LABEL_1": 1,
     "LABEL_2": 2
   },
-  "max_length": 26,
   "max_position_embeddings": 1024,
   "model_type": "bart",
   "no_repeat_ngram_size": 3,
@@ -72,5 +71,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.35.0",
   "use_cache": true,
-  "vocab_size": 50266
+  "vocab_size": 50265
 }
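This removes a stale `max_length` generation default and drops `vocab_size` from 50266 to 50265: the extra `[PAD]` token at id 50265 goes away in the tokenizer diffs below. A minimal sketch to confirm that config and tokenizer agree after this commit (the path "." is illustrative; any checkout of this repo works):

```python
from transformers import AutoConfig, AutoTokenizer

ckpt = "."  # illustrative: a local checkout of this repo

config = AutoConfig.from_pretrained(ckpt)
tokenizer = AutoTokenizer.from_pretrained(ckpt)

# Both should now report the standard BART/RoBERTa vocabulary size.
assert config.vocab_size == len(tokenizer) == 50265
```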
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1311ed58235dc53618d408d93c0e21d4a0a5e4ed83d78a26f9d46ef67341f79c
-size 557942512
+oid sha256:442de0b8bd8b7895c3f5a19b2ed7ba3a2a20f686a7abf389228e525e6ed9f3a9
+size 557939436
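The new weight file is 3,076 bytes smaller, consistent with dropping one float32 embedding row for the removed token (768 × 4 = 3,072 bytes, assuming the usual bart-base hidden size of 768, plus a slightly shorter header). A sketch for checking the embedding shapes without loading the full checkpoint; the tensor-name filter assumes the standard BART layout:

```python
import json
import struct

# A safetensors file begins with an 8-byte little-endian header length,
# followed by a JSON header mapping tensor names to dtype/shape/offsets,
# so shapes can be inspected without reading 557 MB of weights.
with open("model.safetensors", "rb") as f:
    header_len = struct.unpack("<Q", f.read(8))[0]
    header = json.loads(f.read(header_len))

# The embedding tables should now have 50265 rows, matching vocab_size.
for name, meta in header.items():
    if "shared" in name or "embed_tokens" in name:
        print(name, meta["shape"])
```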
runs/Nov11_08-51-40_bcf7516a079c/events.out.tfevents.1699692715.bcf7516a079c.47.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92f65e68f0507b249e7d12403baee6454e775b83142af9a2323e3b3970b04933
+size 5672
special_tokens_map.json CHANGED
@@ -1,9 +1,15 @@
 {
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
-  }
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
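The map is rewritten from a single custom `[PAD]` entry to the full standard BART set. A quick sanity check, again with an illustrative local path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # illustrative path

# All seven standard BART special tokens should now be reported.
print(tok.special_tokens_map)
# {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>',
#  'sep_token': '</s>', 'pad_token': '<pad>', 'cls_token': '<s>',
#  'mask_token': '<mask>'}
```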
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,9 +1,10 @@
 {
+  "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -11,7 +12,7 @@
     "1": {
       "content": "<pad>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -19,7 +20,7 @@
     "2": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -27,30 +28,30 @@
     "3": {
       "content": "<unk>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "4": {
       "content": "<mask>",
-      "lstrip": false,
-      "normalized": false,
+      "lstrip": true,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "50265": {
-      "content": "[PAD]",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "[PAD]",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "model_max_length": 1024,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "BartTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
 }
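Besides the special-token entries, the substantive changes are `tokenizer_class` switching from `PreTrainedTokenizerFast` to `BartTokenizer`, `model_max_length` dropping from the "unknown" sentinel (~1e30) to 1024 to match `max_position_embeddings`, and the removal of the extra `[PAD]` token at id 50265. A short check, path illustrative:

```python
from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained(".")  # illustrative path

print(tok.model_max_length)  # 1024 — so truncation=True now caps inputs
                             # at what the position embeddings can hold
print(tok.mask_token_id)     # 4, per the added_tokens_decoder above
print(len(tok))              # 50265 — the [PAD] token at 50265 is gone
```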
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee138337726ba143752a50814777c3260033d85f8410d5bd313575f50a5d0e37
+oid sha256:2a78ef119481658c414a947de14f40708289e31cdfb03c2cf470c5a41241a214
 size 4347
vocab.json ADDED
The diff for this file is too large to render. See raw diff