v0.2
- README.md +50 -11
- all_results.json +0 -22
- config.json +1 -1
- eval_results.json +0 -16
- model.safetensors +1 -1
- onnx/model.onnx +2 -2
- onnx/model_bnb4.onnx +2 -2
- onnx/model_fp16.onnx +2 -2
- onnx/model_int8.onnx +2 -2
- onnx/model_q4.onnx +2 -2
- onnx/model_q4f16.onnx +2 -2
- onnx/model_quantized.onnx +2 -2
- onnx/model_uint8.onnx +2 -2
- runs/Jul15_21-49-55_ip-26-0-167-245/events.out.tfevents.1721080291.ip-26-0-167-245.4053728.0 +0 -3
- runs/Jul15_21-49-55_ip-26-0-167-245/events.out.tfevents.1721080634.ip-26-0-167-245.4053728.1 +0 -3
- special_tokens_map.json +17 -23
- test_prompts.py +106 -0
- train_results.json +0 -9
- trainer_state.json +0 -132
- training_args.bin +0 -3
README.md
CHANGED
@@ -1,12 +1,22 @@
 ---
-library_name: transformers
 license: apache-2.0
+base_model: HuggingFaceTB/SmolLM-360M
+tags:
+- alignment-handbook
+- trl
+- sft
+datasets:
+- Magpie-Align/Magpie-Pro-300K-Filtered
+- bigcode/self-oss-instruct-sc2-exec-filter-50k
+- teknium/OpenHermes-2.5
+- HuggingFaceTB/everyday-conversations-llama3.1-2k
+library_name: transformers
 language:
 - en
 ---
 
 
-# SmolLM-Instruct
+# SmolLM-360M-Instruct
 
 <center>
 <img src="https://huggingface.co/datasets/HuggingFaceTB/images/resolve/main/banner_smol.png" alt="SmolLM" width="1100" height="600">
@@ -14,15 +24,29 @@ language:
 
 
 ## Model Summary
+Chat with the model at: https://huggingface.co/spaces/HuggingFaceTB/instant-smol
 
-
+SmolLM is a series of language models available in three sizes: 135M, 360M, and 1.7B parameters.
 
-
-[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+These models are trained on [SmolLM-Corpus](https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus), a curated collection of high-quality educational and synthetic data designed for training LLMs. For further details, we refer to our [blogpost](https://huggingface.co/blog/smollm).
 
-
+To build SmolLM-Instruct, we finetune the base models on publicly available datasets.
+
+## Changelog
 
-
+|Release|Description|
+|-|-|
+|v0.1| Initial release of SmolLM-Instruct. We finetune on the permissive subset of the [WebInstructSub](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub) dataset, combined with [StarCoder2-Self-OSS-Instruct](https://huggingface.co/datasets/bigcode/self-oss-instruct-sc2-exec-filter-50k). Then, we perform DPO (Direct Preference Optimization) for one epoch on [HelpSteer](https://huggingface.co/datasets/nvidia/HelpSteer) for the 135M and 1.7B models, and on [argilla/dpo-mix-7k](https://huggingface.co/datasets/argilla/dpo-mix-7k) for the 360M model.|
+|v0.2| We changed the finetuning mix to datasets more suitable for smol models: a new dataset of 2k simple everyday conversations generated with llama3.1-70B, [everyday-conversations-llama3.1-2k](https://huggingface.co/datasets/HuggingFaceTB/everyday-conversations-llama3.1-2k/), plus [Magpie-Pro-300K-Filtered](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered), [StarCoder2-Self-OSS-Instruct](https://huggingface.co/datasets/bigcode/self-oss-instruct-sc2-exec-filter-50k), and a small subset of [OpenHermes-2.5](https://huggingface.co/datasets/teknium/OpenHermes-2.5).|
+
+## Usage
+
+### Local Applications
+⚡ For local applications, you can find optimized implementations of the model in MLC, GGUF and Transformers.js formats, in addition to fast in-browser demos in this collection: https://huggingface.co/collections/HuggingFaceTB/local-smollms-66c0f3b2a15b4eed7fb198d0
+
+We noticed that 4-bit quantization degrades the quality of the 135M and 360M models, so we use `q0f16` for the MLC and ONNX/Transformers.js checkpoints used in the WebGPU demos. We also suggest using temperature 0.2 and top-p 0.9.
+
+### Transformers
 ```bash
 pip install transformers
 ```
@@ -37,17 +61,32 @@ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
 # for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
 model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
 
-messages = [{"role": "user", "content": "
+messages = [{"role": "user", "content": "What is the capital of France?"}]
 input_text=tokenizer.apply_chat_template(messages, tokenize=False)
 print(input_text)
 inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-outputs = model.generate(inputs, max_new_tokens=
+outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
 print(tokenizer.decode(outputs[0]))
 ```
 
-
+### Chat in TRL
+You can also use the TRL CLI to chat with the model from the terminal:
+```bash
+pip install trl
+trl chat --model_name_or_path HuggingFaceTB/SmolLM-360M-Instruct --device cpu
+```
 
-
+## Limitations
+
+SmolLM models are English-only, and their generated content may not always be factually accurate, logically consistent, or free from biases present in the training data; we invite users to leverage them as assistive tools rather than definitive sources of information. We find that they can handle general knowledge questions, creative writing and basic Python programming, but they may struggle with arithmetic, editing tasks and complex reasoning. For more details about the models' capabilities, please refer to our [blog post](https://huggingface.co/blog/smollm).
+
+## Training parameters
+We train the models using the [alignment-handbook](https://github.com/huggingface/alignment-handbook) with the datasets mentioned in the changelog, using these parameters for v0.2:
+- 1 epoch
+- lr 1e-3
+- cosine schedule
+- warmup ratio 0.1
+- global batch size 262k tokens
 
 # Citation
 ```bash
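The "Training parameters" list in the new README above maps directly onto an SFT run. As a reference, here is a minimal sketch of that setup using trl's `SFTTrainer` (v0.9-style API; the alignment-handbook drives the same trainer through YAML recipes). The sequence length, per-device/accumulation split, packing, and the single-dataset stand-in are assumptions, not values recorded in this commit.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

base = "HuggingFaceTB/SmolLM-360M"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# One of the four datasets from the card metadata, standing in for the full mix.
train_ds = load_dataset("HuggingFaceTB/everyday-conversations-llama3.1-2k", split="train_sft")

args = SFTConfig(
    output_dir="smollm-360m-instruct-v0.2",
    num_train_epochs=1,            # "1 epoch"
    learning_rate=1e-3,            # "lr 1e-3"
    lr_scheduler_type="cosine",    # "cosine schedule"
    warmup_ratio=0.1,              # "warmup ratio 0.1"
    # "global batch size 262k tokens": e.g. 128 sequences x 2048 tokens per
    # optimizer step; the per-device/accumulation split below is an assumption.
    per_device_train_batch_size=4,
    gradient_accumulation_steps=32,
    max_seq_length=2048,           # assumed
    packing=True,                  # assumed; the handbook packs SFT examples
    bf16=True,
)

trainer = SFTTrainer(model=model, args=args, train_dataset=train_ds, tokenizer=tokenizer)
trainer.train()
```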
all_results.json
DELETED
@@ -1,22 +0,0 @@
-{
-    "epoch": 0.985781990521327,
-    "eval_logits/chosen": -0.5211005210876465,
-    "eval_logits/rejected": -0.7105662226676941,
-    "eval_logps/chosen": -442.4708557128906,
-    "eval_logps/rejected": -418.5675354003906,
-    "eval_loss": 0.6833909749984741,
-    "eval_rewards/accuracies": 0.59375,
-    "eval_rewards/chosen": -0.008601006120443344,
-    "eval_rewards/margins": 0.021825773641467094,
-    "eval_rewards/rejected": -0.03042677976191044,
-    "eval_runtime": 8.5409,
-    "eval_samples": 750,
-    "eval_samples_per_second": 87.812,
-    "eval_steps_per_second": 2.81,
-    "total_flos": 0.0,
-    "train_loss": 0.6860739244864538,
-    "train_runtime": 316.322,
-    "train_samples": 6750,
-    "train_samples_per_second": 21.339,
-    "train_steps_per_second": 0.164
-}
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "HuggingFaceTB/
+  "_name_or_path": "HuggingFaceTB/SmolLM-360M",
   "architectures": [
     "LlamaForCausalLM"
   ],
eval_results.json
DELETED
@@ -1,16 +0,0 @@
-{
-    "epoch": 0.985781990521327,
-    "eval_logits/chosen": -0.5211005210876465,
-    "eval_logits/rejected": -0.7105662226676941,
-    "eval_logps/chosen": -442.4708557128906,
-    "eval_logps/rejected": -418.5675354003906,
-    "eval_loss": 0.6833909749984741,
-    "eval_rewards/accuracies": 0.59375,
-    "eval_rewards/chosen": -0.008601006120443344,
-    "eval_rewards/margins": 0.021825773641467094,
-    "eval_rewards/rejected": -0.03042677976191044,
-    "eval_runtime": 8.5409,
-    "eval_samples": 750,
-    "eval_samples_per_second": 87.812,
-    "eval_steps_per_second": 2.81
-}
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7f841de9bf656ea5af3c8d89e5647ee0f39121b28adf4202d1eec02f69619c5e
 size 723674912
onnx/model.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:454394e1f92c1479bf71926b2cc845a3e29040c0844ba0d97ce693a390bca40c
+size 1448299818
onnx/model_bnb4.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:359ed0877d941aa2adf227aed8c209017dc0a7de6416ef322df6492356ba50e9
+size 366990971
onnx/model_fp16.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5e617d2597f06572ed87667f62a8a29e18bd5763fcb2606f7ad956b3f42d1cc6
+size 724678342
onnx/model_int8.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c42d79415353fe2d1470c2b3e27ad1dd84595cd2d31c93773d93e2df92ab5bcb
+size 363269280
onnx/model_q4.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d8001535cd703e7c9afe6f6cf8f48e41ec6e162eb677ac39632787ad380ac2d7
+size 386650043
onnx/model_q4f16.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e1a788453e1393e8642f43ca729b7f2301ba61cc1f8ac1f1904c809869fc1ffb
+size 272513495
onnx/model_quantized.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c42d79415353fe2d1470c2b3e27ad1dd84595cd2d31c93773d93e2df92ab5bcb
+size 363269280
onnx/model_uint8.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8f3309c58be313a8dce00de2f9d009e52c273d814ddc230e0a83e7cee8075c63
+size 363269392
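The ONNX files updated above are the checkpoints consumed by Transformers.js and onnxruntime. As a quick sanity check from Python, something like the following should load one of the quantized variants through optimum; the `subfolder`/`file_name` values reflect this repo's layout and should be treated as assumptions:

```python
from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer

ckpt = "HuggingFaceTB/SmolLM-360M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
# Pick one of the exported variants, e.g. the 8-bit quantized graph.
model = ORTModelForCausalLM.from_pretrained(
    ckpt, subfolder="onnx", file_name="model_quantized.onnx"
)

messages = [{"role": "user", "content": "Hi"}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
# Sampling settings suggested in the README above.
outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0]))
```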
runs/Jul15_21-49-55_ip-26-0-167-245/events.out.tfevents.1721080291.ip-26-0-167-245.4053728.0
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5f5dce01aa2e28781812105dc503156ef6f8285cc8c15e77a4b2f17cbfb8eb53
-size 9608
runs/Jul15_21-49-55_ip-26-0-167-245/events.out.tfevents.1721080634.ip-26-0-167-245.4053728.1
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:958e3bee85da7cc72b429819cff501ffb58ca95731aaf61bdefa71b132d36753
-size 815
special_tokens_map.json
CHANGED
@@ -1,29 +1,23 @@
 {
   "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
+    {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
   ],
-  "bos_token": {
-    "content": "<|im_start|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|im_end|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|im_end|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<|im_start|>",
+  "eos_token": "<|im_end|>",
+  "pad_token": "<|im_end|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
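The rewritten map above keeps `<|im_start|>` and `<|im_end|>` as the special tokens and reuses `<|im_end|>` as both EOS and padding, i.e. the ChatML turn format. A quick way to see the template these tokens frame; the commented output is the expected ChatML layout, assuming the tokenizer shipped in this commit:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-360M-Instruct")
messages = [{"role": "user", "content": "Hi"}]
# Render the prompt as text, ending with an open assistant turn.
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>user
# Hi<|im_end|>
# <|im_start|>assistant
```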
test_prompts.py
ADDED
@@ -0,0 +1,106 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+BASE_PATH = "/fsx/loubna/projects/alignment-handbook/recipes/cosmo2/sft/data"
+TEMPERATURE = 0.2
+TOP_P = 0.9
+
+CHECKPOINT = "loubnabnl/smollm-350M-instruct-add-basics"
+
+print(f"💾 Loading the model and tokenizer: {CHECKPOINT}...")
+device = "cuda"
+tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
+model_s = AutoModelForCausalLM.from_pretrained(CHECKPOINT).to(device)
+
+print("🧪 Testing single-turn conversations...")
+L = [
+    "Hi",
+    "Hello",
+    "Tell me a joke",
+    "Who are you?",
+    "What's your name?",
+    "How do I make pancakes?",
+    "Can you tell me what is gravity?",
+    "What is the capital of Morocco?",
+    "What's 2+2?",
+    "Hi, what is 2+1?",
+    "What's 3+5?",
+    "Write a poem about Helium",
+    "Hi, what are some popular dishes from Japan?",
+]
+
+
+for i in range(len(L)):
+    print(f"🔮 {L[i]}")
+    messages = [{"role": "user", "content": L[i]}]
+    input_text = tokenizer.apply_chat_template(messages, tokenize=False)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    outputs = model_s.generate(
+        inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
+    )
+    with open(
+        f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}.txt",
+        "a",
+    ) as f:
+        f.write("=" * 50 + "\n")
+        f.write(tokenizer.decode(outputs[0]))
+        f.write("\n")
+
+
+print("🧪 Now testing multi-turn conversations...")
+# Multi-turn conversations
+messages_1 = [
+    {"role": "user", "content": "Hi"},
+    {"role": "assistant", "content": "Hello! How can I help you today?"},
+    {"role": "user", "content": "What's 2+2?"},
+]
+messages_2 = [
+    {"role": "user", "content": "Hi"},
+    {"role": "assistant", "content": "Hello! How can I help you today?"},
+    {"role": "user", "content": "What's 2+2?"},
+    {"role": "assistant", "content": "4"},
+    {"role": "user", "content": "Why?"},
+]
+messages_3 = [
+    {"role": "user", "content": "Who are you?"},
+    {"role": "assistant", "content": "I am an AI assistant. How can I help you today?"},
+    {"role": "user", "content": "What's your name?"},
+]
+messages_4 = [
+    {"role": "user", "content": "Tell me a joke"},
+    {"role": "assistant", "content": "Sure! Why did the tomato turn red?"},
+    {"role": "user", "content": "Why?"},
+]
+messages_5 = [
+    {"role": "user", "content": "Can you tell me what is gravity?"},
+    {
+        "role": "assistant",
+        "content": "Sure! Gravity is a force that attracts objects toward each other. It is what keeps us on the ground and what makes things fall.",
+    },
+    {"role": "user", "content": "Who discovered it?"},
+]
+messages_6 = [
+    {"role": "user", "content": "How do I make pancakes?"},
+    {
+        "role": "assistant",
+        "content": "Sure! Here is a simple recipe for pancakes: Ingredients: 1 cup flour, 1 cup milk, 1 egg, 1 tbsp sugar, 1 tsp baking powder, 1/2 tsp salt. Instructions: 1. Mix all the dry ingredients together in a bowl. 2. Add the milk and egg and mix until smooth. 3. Heat a non-stick pan over medium heat. 4. Pour 1/4 cup of batter onto the pan. 5. Cook until bubbles form on the surface, then flip and cook for another minute. 6. Serve with your favorite toppings.",
+    },
+    {"role": "user", "content": "What are some popular toppings?"},
+]
+
+L = [messages_1, messages_2, messages_3, messages_4, messages_5, messages_6]
+
+for i in range(len(L)):
+    input_text = tokenizer.apply_chat_template(L[i], tokenize=False)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    outputs = model_s.generate(
+        inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
+    )
+    with open(
+        f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}_MT.txt",
+        "a",
+    ) as f:
+        f.write("=" * 50 + "\n")
+        f.write(tokenizer.decode(outputs[0]))
+        f.write("\n")
+
+print("🔥 Done!")
train_results.json
DELETED
@@ -1,9 +0,0 @@
-{
-    "epoch": 0.985781990521327,
-    "total_flos": 0.0,
-    "train_loss": 0.6860739244864538,
-    "train_runtime": 316.322,
-    "train_samples": 6750,
-    "train_samples_per_second": 21.339,
-    "train_steps_per_second": 0.164
-}
trainer_state.json
DELETED
@@ -1,132 +0,0 @@
-{
-  "best_metric": null,
-  "best_model_checkpoint": null,
-  "epoch": 0.985781990521327,
-  "eval_steps": 100,
-  "global_step": 52,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [
-    {
-      "epoch": 0.018957345971563982,
-      "grad_norm": 3.021558692298453,
-      "learning_rate": 8.333333333333333e-07,
-      "logits/chosen": -0.5616407990455627,
-      "logits/rejected": -0.42516714334487915,
-      "logps/chosen": -373.83282470703125,
-      "logps/rejected": -493.9554748535156,
-      "loss": 0.6931,
-      "rewards/accuracies": 0.0,
-      "rewards/chosen": 0.0,
-      "rewards/margins": 0.0,
-      "rewards/rejected": 0.0,
-      "step": 1
-    },
-    {
-      "epoch": 0.1895734597156398,
-      "grad_norm": 3.23447571449701,
-      "learning_rate": 4.907293218369499e-06,
-      "logits/chosen": -0.1685400903224945,
-      "logits/rejected": -0.19170813262462616,
-      "logps/chosen": -464.29693603515625,
-      "logps/rejected": -501.8587341308594,
-      "loss": 0.6936,
-      "rewards/accuracies": 0.4097222089767456,
-      "rewards/chosen": 0.0008683237829245627,
-      "rewards/margins": 0.0007554867188446224,
-      "rewards/rejected": 0.00011283738422207534,
-      "step": 10
-    },
-    {
-      "epoch": 0.3791469194312796,
-      "grad_norm": 3.9389946840001264,
-      "learning_rate": 3.941700805287169e-06,
-      "logits/chosen": -0.27595698833465576,
-      "logits/rejected": -0.21783480048179626,
-      "logps/chosen": -438.6907653808594,
-      "logps/rejected": -475.0658264160156,
-      "loss": 0.6894,
-      "rewards/accuracies": 0.4937500059604645,
-      "rewards/chosen": -0.003008360043168068,
-      "rewards/margins": 0.0011918289819732308,
-      "rewards/rejected": -0.00420018844306469,
-      "step": 20
-    },
-    {
-      "epoch": 0.5687203791469194,
-      "grad_norm": 3.2779169381175386,
-      "learning_rate": 2.3293939665883233e-06,
-      "logits/chosen": -0.012437907047569752,
-      "logits/rejected": -0.20707044005393982,
-      "logps/chosen": -476.6265563964844,
-      "logps/rejected": -474.9481506347656,
-      "loss": 0.6873,
-      "rewards/accuracies": 0.53125,
-      "rewards/chosen": -0.01446505170315504,
-      "rewards/margins": 0.007696834392845631,
-      "rewards/rejected": -0.02216188609600067,
-      "step": 30
-    },
-    {
-      "epoch": 0.7582938388625592,
-      "grad_norm": 2.8592366061141954,
-      "learning_rate": 7.936171419533653e-07,
-      "logits/chosen": -0.4586310386657715,
-      "logits/rejected": -0.5929983854293823,
-      "logps/chosen": -422.44293212890625,
-      "logps/rejected": -402.6429748535156,
-      "loss": 0.6827,
-      "rewards/accuracies": 0.574999988079071,
-      "rewards/chosen": -0.018473980948328972,
-      "rewards/margins": 0.014362658374011517,
-      "rewards/rejected": -0.03283664211630821,
-      "step": 40
-    },
-    {
-      "epoch": 0.9478672985781991,
-      "grad_norm": 3.296382116125289,
-      "learning_rate": 2.3285134909173113e-08,
-      "logits/chosen": 0.08067023754119873,
-      "logits/rejected": 0.19673587381839752,
-      "logps/chosen": -483.49212646484375,
-      "logps/rejected": -536.2080688476562,
-      "loss": 0.679,
-      "rewards/accuracies": 0.5874999761581421,
-      "rewards/chosen": -0.017066333442926407,
-      "rewards/margins": 0.01992700807750225,
-      "rewards/rejected": -0.03699334338307381,
-      "step": 50
-    },
-    {
-      "epoch": 0.985781990521327,
-      "step": 52,
-      "total_flos": 0.0,
-      "train_loss": 0.6860739244864538,
-      "train_runtime": 316.322,
-      "train_samples_per_second": 21.339,
-      "train_steps_per_second": 0.164
-    }
-  ],
-  "logging_steps": 10,
-  "max_steps": 52,
-  "num_input_tokens_seen": 0,
-  "num_train_epochs": 1,
-  "save_steps": 500,
-  "stateful_callbacks": {
-    "TrainerControl": {
-      "args": {
-        "should_epoch_stop": false,
-        "should_evaluate": false,
-        "should_log": false,
-        "should_save": false,
-        "should_training_stop": false
-      },
-      "attributes": {}
-    }
-  },
-  "total_flos": 0.0,
-  "train_batch_size": 2,
-  "trial_name": null,
-  "trial_params": null
-}
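The deleted trainer_state.json above comes from the earlier v0.1 DPO run, hence the `rewards/*` and `logps/*` columns. For reference, those columns follow DPO's implicit reward; a sketch of how TRL derives them from policy and reference log-probabilities (`beta = 0.1` is an assumed value, not recorded in this commit):

```python
def dpo_metrics(policy_logp_chosen, policy_logp_rejected,
                ref_logp_chosen, ref_logp_rejected, beta=0.1):
    """Implicit DPO rewards behind the rewards/* columns in trainer_state.json."""
    rewards_chosen = beta * (policy_logp_chosen - ref_logp_chosen)
    rewards_rejected = beta * (policy_logp_rejected - ref_logp_rejected)
    return {
        "rewards/chosen": rewards_chosen,
        "rewards/rejected": rewards_rejected,
        # margins = chosen reward minus rejected reward
        "rewards/margins": rewards_chosen - rewards_rejected,
        # accuracy = fraction of pairs where the chosen response is preferred
        "rewards/accuracies": float(rewards_chosen > rewards_rejected),
    }
```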
training_args.bin
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:753772c371abf783b3f96c5ddc2c75471f7281c7efc51b1f2c0c1b6ddd4988f7
-size 6520