RikkiXu committed
Commit 433731c
1 Parent(s): 21c7eb4

Training in progress, step 100

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "HuggingFaceH4/mistral-7b-sft-beta",
+  "_name_or_path": "/mnt/bn/xuruijie-llm/checkpoints/ours_simpo",
   "architectures": [
     "MistralForCausalLM"
   ],
runs/Jul01_16-09-06_n136-129-074/events.out.tfevents.1719821529.n136-129-074.1755952.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9176e09271ca407e7fb96cb13cff2cee2cd955e058dfc3c7776b91877f88cdfa
+size 6265
runs/Jul01_16-16-47_n136-129-074/events.out.tfevents.1719821937.n136-129-074.1759951.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90e032fb39cb2ee70f44881f5030f21001a45a5141b53852b6a8994b2f0172f7
+size 6265
runs/Jul01_16-23-02_n136-129-074/events.out.tfevents.1719822311.n136-129-074.1762604.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ad9af28a889ae424ba034c80554c8d899785a37b635d5afef4a245b5f00f16
+size 12349
special_tokens_map.json CHANGED
@@ -1,9 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -27,11 +27,7 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
+  "additional_special_tokens": [],
   "bos_token": "<s>",
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": false,
@@ -42,7 +38,6 @@
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "truncation_side": "left",
   "unk_token": "<unk>",
-  "use_default_system_prompt": true
+  "use_default_system_prompt": false
 }
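The tokenizer diff above empties `additional_special_tokens`, drops `truncation_side`, disables `use_default_system_prompt`, and keeps the Zephyr-style `chat_template`. Below is a minimal sketch (not part of this commit) of how that template renders a conversation with `transformers`; `"path/to/checkpoint"` is a placeholder for wherever these tokenizer files are stored, not a real path from this repo.

```python
# Minimal sketch: render the chat_template defined in tokenizer_config.json.
# "path/to/checkpoint" is a placeholder, not part of this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# tokenize=False returns the formatted prompt string;
# add_generation_prompt=True appends the trailing '<|assistant|>' tag
# defined in the template.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```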
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64fad8436f91a3507adb004da2514213ac77effeffe91238d80a2d759c14d788
+oid sha256:22b219e231b6dffbba5b945608c2eb416b51d1538b3c1dad26f693dff31ea947
 size 6264