Training in progress, step 3
- adapter_config.json +1 -1
- adapter_model.safetensors +2 -2
- tokenizer.json +6 -16
- tokenizer_config.json +2 -1
- training_args.bin +2 -2

adapter_config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "NousResearch/Llama-2-
+  "base_model_name_or_path": "NousResearch/Llama-2-13b-chat-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
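
The updated adapter_config.json points the LoRA adapter at the NousResearch/Llama-2-13b-chat-hf base model. A minimal sketch of loading this checkpoint, assuming the transformers and peft libraries (the local adapter path below is illustrative):

    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    base_id = "NousResearch/Llama-2-13b-chat-hf"  # base_model_name_or_path from adapter_config.json
    adapter_dir = "./"                            # hypothetical: a local checkout of this repo

    # Load the frozen base model, then attach the LoRA weights described by
    # adapter_config.json and stored in adapter_model.safetensors.
    base = AutoModelForCausalLM.from_pretrained(base_id)
    model = PeftModel.from_pretrained(base, adapter_dir)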

adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4ef343b08ae560738d7786e5f3a7633eab327f22a2c71b5363fc39e511f35eee
+size 209736952

tokenizer.json
CHANGED
@@ -40,23 +40,13 @@
       "special": false
     }
   ],
-  "normalizer":
-
-    "
-
-
-
-      },
-      {
-        "type": "Replace",
-        "pattern": {
-          "String": " "
-        },
-        "content": "▁"
-      }
-    ]
+  "normalizer": null,
+  "pre_tokenizer": {
+    "type": "Metaspace",
+    "replacement": "▁",
+    "prepend_scheme": "first",
+    "split": false
   },
-  "pre_tokenizer": null,
   "post_processor": {
     "type": "TemplateProcessing",
     "single": [
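
The tokenizer.json change removes the old Replace-based normalizer block and moves whitespace handling into a Metaspace pre_tokenizer (replacement "▁", prepend_scheme "first", split false). A small sketch of the equivalent object, assuming a recent version of the tokenizers library that exposes these arguments:

    from tokenizers import pre_tokenizers

    # Mirrors the new "pre_tokenizer" block: spaces become "▁", a leading "▁"
    # is prepended only to the first piece, and the text is not split on "▁".
    pre_tok = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="first", split=False)
    print(pre_tok.pre_tokenize_str("Hello world"))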

tokenizer_config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space":
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -43,6 +43,7 @@
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
   "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
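
tokenizer_config.json now sets add_prefix_space to true and adds spaces_between_special_tokens: false for the slow LlamaTokenizer. A quick check that the values round-trip after loading, assuming the transformers library (the path below is a placeholder for a checkout containing these files):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/this-repo", use_fast=False)
    print(tok.init_kwargs.get("add_prefix_space"))               # expected: True
    print(tok.init_kwargs.get("spaces_between_special_tokens"))  # expected: False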

training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:44979ab56a9eb1b132c9d8ea5650a5869b9c35786dfaba1865104033d7a59aef
+size 6136