tyzhu committed
Commit 43bd230
1 Parent(s): bca394c

Training in progress, epoch 1, checkpoint

checkpoint-250/adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "Qwen/Qwen1.5-4B",
+  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
checkpoint-250/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c61ba027c813960424b3eb40d778caf64086f758cff47d07b4470b08491b71
+size 143269386
checkpoint-250/added_tokens.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "<|endoftext|>": 151643,
-  "<|im_end|>": 151645,
-  "<|im_start|>": 151644
+  "</s>": 2,
+  "<s>": 1,
+  "<unk>": 0
 }
checkpoint-250/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1dec6aaedcac34ab196bffc366b02aefe2fb0659d46f6808e94783c25d1d6fcc
-size 224537202
+oid sha256:a062cbf92b3162ca6b2898e141b824ba94fce3c3fa5950b465a54009cd86eb08
+size 286585234
checkpoint-250/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3440e54a7d7eecf9e0ae6163af3a0a7285b32b98c95601357c32605c4f71b7ba
+oid sha256:4dc34686ffce674ceea4132dde9e2364deb3134693da9e54ffaabe419a6a9b09
 size 15024
checkpoint-250/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:968672697d7d95a0973828eeb5f2cb2f9604e89aa70158bec3419253ee735c8c
+oid sha256:bb03dc224530159a2021b8c7503173f06392798cc662ac104090b48769ac2d7e
 size 15024
checkpoint-250/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0f9cc627945a6783ee3adebdaacca7e956de0caa233520e84b0e328a71addcd
+oid sha256:0c3935ff01fd7944272c9e5647090f0b94a975358eaabcc0e6f2b36f9e5722dc
 size 15024
checkpoint-250/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ac51f2c0e8bd667dbc4617c30572e7a69f04fdc66af960dd112a0d3878624c33
+oid sha256:e16290622104234d57dfb4dfcc45eb682fe7f81ebbd5c13a542e166f7cbd17a8
 size 15024
checkpoint-250/special_tokens_map.json CHANGED
@@ -1,14 +1,6 @@
 {
-  "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
-  ],
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<|endoftext|>"
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "</s>",
+  "unk_token": "<unk>"
 }
checkpoint-250/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-250/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-250/tokenizer_config.json CHANGED
@@ -1,24 +1,23 @@
 {
-  "add_prefix_space": false,
   "added_tokens_decoder": {
-    "151643": {
-      "content": "<|endoftext|>",
+    "0": {
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "151644": {
-      "content": "<|im_start|>",
+    "1": {
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "151645": {
-      "content": "<|im_end|>",
+    "2": {
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -26,18 +25,16 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
-  ],
-  "bos_token": null,
-  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 32768,
-  "pad_token": "<|endoftext|>",
-  "split_special_tokens": false,
-  "tokenizer_class": "Qwen2Tokenizer",
-  "unk_token": null
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "padding_side": "left",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
 }
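
The switched tokenizer settings (LlamaTokenizer, left padding, </s> used as the pad token) can be confirmed by loading the checkpoint directory directly; a small sketch, with the local path again assumed:

```python
# Sketch: inspect the tokenizer saved with this checkpoint (path assumed).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-250")
print(type(tok).__name__)   # LlamaTokenizer (or its fast variant)
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
print(tok.padding_side)     # "left", per tokenizer_config.json above
```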
checkpoint-250/trainer_state.json CHANGED
@@ -10,53 +10,37 @@
   "log_history": [
     {
       "epoch": 0.4,
-      "grad_norm": 0.5897082090377808,
       "learning_rate": 0.0001,
-      "loss": 2.3566,
+      "loss": 2.0446,
       "step": 100
     },
     {
       "epoch": 0.8,
-      "grad_norm": 0.5390245914459229,
       "learning_rate": 0.0001,
-      "loss": 2.2503,
+      "loss": 1.7752,
       "step": 200
     },
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.5155873015873016,
-      "eval_loss": 2.3237216472625732,
-      "eval_runtime": 6.3164,
-      "eval_samples_per_second": 79.159,
-      "eval_steps_per_second": 9.974,
+      "eval_accuracy": 0.6076455696202532,
+      "eval_loss": 1.8067126274108887,
+      "eval_runtime": 5.0018,
+      "eval_samples_per_second": 99.965,
+      "eval_steps_per_second": 12.596,
       "step": 250
     },
     {
       "epoch": 1.0,
-      "eval_exact_match": 17.2,
-      "eval_f1": 25.586581359816655,
+      "eval_exact_match": 24.4,
+      "eval_f1": 35.27913510466144,
       "step": 250
     }
   ],
   "logging_steps": 100,
   "max_steps": 12500,
-  "num_input_tokens_seen": 0,
   "num_train_epochs": 50,
   "save_steps": 500,
-  "stateful_callbacks": {
-    "TrainerControl": {
-      "args": {
-        "should_epoch_stop": false,
-        "should_evaluate": false,
-        "should_log": false,
-        "should_save": true,
-        "should_training_stop": false
-      },
-      "attributes": {}
-    }
-  },
-  "total_flos": 7637387349852160.0,
-  "train_batch_size": 2,
+  "total_flos": 1.5945416663105536e+16,
   "trial_name": null,
   "trial_params": null
 }
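
The updated log_history above can be read back from trainer_state.json with nothing more than the standard library; a minimal sketch, with the checkpoint path assumed:

```python
# Sketch: print the metrics logged for this checkpoint (path assumed).
import json

with open("checkpoint-250/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(entry)  # e.g. {"epoch": 1.0, "eval_exact_match": 24.4, ...}
```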
checkpoint-250/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c3414153f69372b550d629c900d05ab7510b14d218f2d8546142e7b41d948d3e
-size 5304
+oid sha256:e32f95a567f2c80104b456ab796a180c8c7fa1c0685e00bbceb4995e8fa84ef1
+size 4728