ZeroUniqueness committed on
Commit
60ffaed
·
1 Parent(s): 0d39764

2023-08-02 07:12:41 Autosave for checkpoint additions

Browse files
checkpoint-2000/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- library_name: peft
3
- ---
4
- ## Training procedure
5
-
6
-
7
- The following `bitsandbytes` quantization config was used during training:
8
- - load_in_8bit: False
9
- - load_in_4bit: True
10
- - llm_int8_threshold: 6.0
11
- - llm_int8_skip_modules: None
12
- - llm_int8_enable_fp32_cpu_offload: False
13
- - llm_int8_has_fp16_weight: False
14
- - bnb_4bit_quant_type: nf4
15
- - bnb_4bit_use_double_quant: True
16
- - bnb_4bit_compute_dtype: bfloat16
17
- ### Framework versions
18
-
19
-
20
- - PEFT 0.5.0.dev0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-2000/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
1
- {
2
- "auto_mapping": null,
3
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
4
- "bias": "none",
5
- "fan_in_fan_out": null,
6
- "inference_mode": true,
7
- "init_lora_weights": true,
8
- "layers_pattern": null,
9
- "layers_to_transform": null,
10
- "lora_alpha": 16,
11
- "lora_dropout": 0.05,
12
- "modules_to_save": null,
13
- "peft_type": "LORA",
14
- "r": 32,
15
- "revision": null,
16
- "target_modules": [
17
- "v_proj",
18
- "k_proj",
19
- "up_proj",
20
- "down_proj",
21
- "gate_proj",
22
- "o_proj",
23
- "q_proj"
24
- ],
25
- "task_type": "CAUSAL_LM"
26
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-2000/adapter_model/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:76f114695729d2a9f7dba52315b9336fe2e7c16956aef49a72c90a66b0a231dd
3
- size 500897101
 
 
 
 
checkpoint-2000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:17d8a1d597503c92e761f7f88787b4442344c5a8296388e41960fe31dee5b75a
3
- size 1001723453
 
 
 
 
checkpoint-2000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c24b0ef3ea23bbf417fded93c368fd3bc20ed068889347a22b480d6b8effc9e9
3
- size 14575
 
 
 
 
checkpoint-2000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:fa0675f07b828997f5584ecd7da1334892f1ae498d3abb409441e2c84a989d13
3
- size 627
 
 
 
 
checkpoint-2000/trainer_state.json DELETED
@@ -1,152 +0,0 @@
1
- {
2
- "best_metric": 0.7452704310417175,
3
- "best_model_checkpoint": "./qlora-out/checkpoint-2000",
4
- "epoch": 0.07456843518138771,
5
- "global_step": 2000,
6
- "is_hyper_param_search": false,
7
- "is_local_process_zero": true,
8
- "is_world_process_zero": true,
9
- "log_history": [
10
- {
11
- "epoch": 0.0,
12
- "learning_rate": 0.00019999938245325715,
13
- "loss": 0.9023,
14
- "step": 100
15
- },
16
- {
17
- "epoch": 0.01,
18
- "learning_rate": 0.00019999724773356797,
19
- "loss": 0.8027,
20
- "step": 200
21
- },
22
- {
23
- "epoch": 0.01,
24
- "learning_rate": 0.0001999935882494411,
25
- "loss": 0.8041,
26
- "step": 300
27
- },
28
- {
29
- "epoch": 0.01,
30
- "learning_rate": 0.00019998840405667672,
31
- "loss": 0.7944,
32
- "step": 400
33
- },
34
- {
35
- "epoch": 0.02,
36
- "learning_rate": 0.00019998169523432365,
37
- "loss": 0.81,
38
- "step": 500
39
- },
40
- {
41
- "epoch": 0.02,
42
- "learning_rate": 0.0001999734618846785,
43
- "loss": 0.7855,
44
- "step": 600
45
- },
46
- {
47
- "epoch": 0.03,
48
- "learning_rate": 0.00019996370413328385,
49
- "loss": 0.7849,
50
- "step": 700
51
- },
52
- {
53
- "epoch": 0.03,
54
- "learning_rate": 0.00019995242212892653,
55
- "loss": 0.7564,
56
- "step": 800
57
- },
58
- {
59
- "epoch": 0.03,
60
- "learning_rate": 0.00019993961604363532,
61
- "loss": 0.7724,
62
- "step": 900
63
- },
64
- {
65
- "epoch": 0.04,
66
- "learning_rate": 0.00019992528607267815,
67
- "loss": 0.7308,
68
- "step": 1000
69
- },
70
- {
71
- "epoch": 0.04,
72
- "eval_loss": 0.7677998542785645,
73
- "eval_runtime": 1774.3517,
74
- "eval_samples_per_second": 0.305,
75
- "eval_steps_per_second": 0.305,
76
- "step": 1000
77
- },
78
- {
79
- "epoch": 0.04,
80
- "learning_rate": 0.0001999094324345594,
81
- "loss": 0.7844,
82
- "step": 1100
83
- },
84
- {
85
- "epoch": 0.04,
86
- "learning_rate": 0.00019989205537101633,
87
- "loss": 0.7668,
88
- "step": 1200
89
- },
90
- {
91
- "epoch": 0.05,
92
- "learning_rate": 0.00019987315514701553,
93
- "loss": 0.7727,
94
- "step": 1300
95
- },
96
- {
97
- "epoch": 0.05,
98
- "learning_rate": 0.00019985273205074878,
99
- "loss": 0.7467,
100
- "step": 1400
101
- },
102
- {
103
- "epoch": 0.06,
104
- "learning_rate": 0.00019983078639362883,
105
- "loss": 0.7516,
106
- "step": 1500
107
- },
108
- {
109
- "epoch": 0.06,
110
- "learning_rate": 0.00019980731851028445,
111
- "loss": 0.7267,
112
- "step": 1600
113
- },
114
- {
115
- "epoch": 0.06,
116
- "learning_rate": 0.0001997823287585554,
117
- "loss": 0.7632,
118
- "step": 1700
119
- },
120
- {
121
- "epoch": 0.07,
122
- "learning_rate": 0.000199755817519487,
123
- "loss": 0.7392,
124
- "step": 1800
125
- },
126
- {
127
- "epoch": 0.07,
128
- "learning_rate": 0.00019972778519732436,
129
- "loss": 0.7528,
130
- "step": 1900
131
- },
132
- {
133
- "epoch": 0.07,
134
- "learning_rate": 0.0001996982322195061,
135
- "loss": 0.725,
136
- "step": 2000
137
- },
138
- {
139
- "epoch": 0.07,
140
- "eval_loss": 0.7452704310417175,
141
- "eval_runtime": 1787.7554,
142
- "eval_samples_per_second": 0.303,
143
- "eval_steps_per_second": 0.303,
144
- "step": 2000
145
- }
146
- ],
147
- "max_steps": 80463,
148
- "num_train_epochs": 3,
149
- "total_flos": 5.618921541299405e+17,
150
- "trial_name": null,
151
- "trial_params": null
152
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-2000/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a281372a257be5e7ebbea8ac16ec51e707b224a5ed57c48e2f69549c5a031d8
3
- size 4027
 
 
 
 
{checkpoint-2000 → checkpoint-5000/adapter_model}/README.md RENAMED
File without changes
{checkpoint-2000 → checkpoint-5000/adapter_model}/adapter_config.json RENAMED
File without changes
{checkpoint-2000 → checkpoint-5000/adapter_model}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:76f114695729d2a9f7dba52315b9336fe2e7c16956aef49a72c90a66b0a231dd
3
  size 500897101
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a99a6fd84a005d613308e543278f3b8e5211714bb53b78a4198e5a97c2754079
3
  size 500897101