Experiment ID: 3cmghqks
Files changed:
- merges.txt (+1 −1)
- tokenizer.json (+0 −0)
- tokenizer_config.json (+2 −1)
- vocab.json (+0 −0)
merges.txt
CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 Ġ (
 Ġ )
 Ġ .
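Only the "#version" comment header of merges.txt changes; the merge rules themselves are untouched. In this GPT-2/RoBERTa-style byte-level BPE format, every non-header line "A B" is a merge rule applied in order, with Ġ marking a leading space. A minimal parsing sketch (assumptions: the standard merges format and a local checkout of this repo):

    # Read merges.txt from a local checkout (assumed path).
    with open("merges.txt", encoding="utf-8") as f:
        lines = f.read().splitlines()

    # The first line is the "#version: ..." comment header edited by this
    # commit; it is skipped when loading, which is why a header-only change
    # leaves tokenization behavior identical.
    merges = [tuple(line.split()) for line in lines[1:] if line.strip()]

    # Each pair ("Ġ", "(") means: merge adjacent tokens "Ġ" and "(" into "Ġ(".
    print(merges[:3])  # [('Ġ', '('), ('Ġ', ')'), ('Ġ', '.')]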
tokenizer.json
ADDED
(Diff too large to render; see raw diff.)
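tokenizer.json is the serialized fast (Rust-backed) tokenizer produced by huggingface/tokenizers, which matches the "Trained by `huggingface/tokenizers`" note added to the merges.txt header. A sketch of loading it directly, assuming a local checkout of the repo:

    from tokenizers import Tokenizer

    # Load the fast tokenizer straight from the added file; it is
    # self-contained (vocab, merges, normalizer, post-processor), so no
    # transformers install is needed.
    tok = Tokenizer.from_file("tokenizer.json")
    enc = tok.encode("def add(a, b):")
    print(enc.tokens)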
tokenizer_config.json
CHANGED
@@ -34,7 +34,7 @@
     "single_word": false
   },
   "model_max_length": 512,
-  "name_or_path": "
+  "name_or_path": "mamiksik/T5-commit-message-generator",
   "pad_token": {
     "__type": "AddedToken",
     "content": "<pad>",
@@ -53,6 +53,7 @@
   },
   "special_tokens_map_file": "/home/s2498103/.cache/huggingface/hub/models--Salesforce--codet5-base-multi-sum/snapshots/37cf4c014106a34663da1008855a6d07affae042/special_tokens_map.json",
   "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
   "unk_token": {
     "__type": "AddedToken",
     "content": "<unk>",
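The config edit repoints "name_or_path" at the fine-tuned repo and adds "trim_offsets": true, an option the fast tokenizer uses to exclude the leading Ġ-space from returned character offsets. A usage sketch, assuming the repo id taken from the new "name_or_path" value is loadable from the Hub:

    from transformers import AutoTokenizer

    # AutoTokenizer selects RobertaTokenizer via "tokenizer_class" and, since
    # tokenizer.json is now present, loads the fast implementation.
    tok = AutoTokenizer.from_pretrained("mamiksik/T5-commit-message-generator")

    # With "trim_offsets": true, offset mappings should exclude the leading
    # space that byte-level BPE folds into tokens such as "Ġ(".
    enc = tok("def add (a, b):", return_offsets_mapping=True)
    print(list(zip(enc.tokens(), enc["offset_mapping"])))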
vocab.json
CHANGED
(Diff too large to render; see raw diff.)
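vocab.json maps each token string to its integer id; together with merges.txt it defines the slow, pure-Python tokenizer, while tokenizer.json bundles the same data for the fast one. A quick inspection sketch (assumption: file read from a local checkout):

    import json

    with open("vocab.json", encoding="utf-8") as f:
        vocab = json.load(f)

    # "Ġ(" is "(" preceded by a space in byte-level BPE; .get avoids a
    # KeyError in case the token is absent from this vocabulary.
    print(len(vocab), vocab.get("Ġ("))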