wangrongsheng committed
Commit 28e10e1
1 Parent(s): 6b80f54

commit from root

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. README.md +3 -0
  2. adapter_config.json +19 -0
  3. adapter_model.bin +3 -0
  4. checkpoint-100/README.md +3 -0
  5. checkpoint-100/adapter_config.json +19 -0
  6. checkpoint-100/adapter_model.bin +3 -0
  7. checkpoint-100/finetuning_args.json +13 -0
  8. checkpoint-100/reward/adapter_config.json +19 -0
  9. checkpoint-100/reward/adapter_model.bin +3 -0
  10. checkpoint-100/training_args.bin +3 -0
  11. checkpoint-100/value_head.bin +3 -0
  12. checkpoint-1000/README.md +3 -0
  13. checkpoint-1000/adapter_config.json +19 -0
  14. checkpoint-1000/adapter_model.bin +3 -0
  15. checkpoint-1000/finetuning_args.json +13 -0
  16. checkpoint-1000/reward/adapter_config.json +19 -0
  17. checkpoint-1000/reward/adapter_model.bin +3 -0
  18. checkpoint-1000/training_args.bin +3 -0
  19. checkpoint-1000/value_head.bin +3 -0
  20. checkpoint-1100/README.md +3 -0
  21. checkpoint-1100/adapter_config.json +19 -0
  22. checkpoint-1100/adapter_model.bin +3 -0
  23. checkpoint-1100/finetuning_args.json +13 -0
  24. checkpoint-1100/reward/adapter_config.json +19 -0
  25. checkpoint-1100/reward/adapter_model.bin +3 -0
  26. checkpoint-1100/training_args.bin +3 -0
  27. checkpoint-1100/value_head.bin +3 -0
  28. checkpoint-1200/README.md +3 -0
  29. checkpoint-1200/adapter_config.json +19 -0
  30. checkpoint-1200/adapter_model.bin +3 -0
  31. checkpoint-1200/finetuning_args.json +13 -0
  32. checkpoint-1200/reward/adapter_config.json +19 -0
  33. checkpoint-1200/reward/adapter_model.bin +3 -0
  34. checkpoint-1200/training_args.bin +3 -0
  35. checkpoint-1200/value_head.bin +3 -0
  36. checkpoint-1300/README.md +3 -0
  37. checkpoint-1300/adapter_config.json +19 -0
  38. checkpoint-1300/adapter_model.bin +3 -0
  39. checkpoint-1300/finetuning_args.json +13 -0
  40. checkpoint-1300/reward/adapter_config.json +19 -0
  41. checkpoint-1300/reward/adapter_model.bin +3 -0
  42. checkpoint-1300/training_args.bin +3 -0
  43. checkpoint-1300/value_head.bin +3 -0
  44. checkpoint-1400/README.md +3 -0
  45. checkpoint-1400/adapter_config.json +19 -0
  46. checkpoint-1400/adapter_model.bin +3 -0
  47. checkpoint-1400/finetuning_args.json +13 -0
  48. checkpoint-1400/reward/adapter_config.json +19 -0
  49. checkpoint-1400/reward/adapter_model.bin +3 -0
  50. checkpoint-1400/training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
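The adapter_config.json above is a standard PEFT LoRA configuration: rank 8, alpha 32, dropout 0.1, applied to the query_key_value projection of THUDM/chatglm-6b. A minimal loading sketch, assuming transformers and peft are installed and that ADAPTER_DIR is a hypothetical local path to this repository (the directory holding adapter_config.json and adapter_model.bin):

from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

ADAPTER_DIR = "./chatglm-6b-lora"  # hypothetical local checkout of this repo

# ChatGLM-6B ships custom modeling code, so trust_remote_code is required.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
base_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half()

# Wrap the base model with the LoRA weights described by adapter_config.json.
model = PeftModel.from_pretrained(base_model, ADAPTER_DIR)
model.eval()

Since the saved config has inference_mode set to true, the adapter loads with its weights frozen; model.merge_and_unload() can fold the LoRA deltas into the base weights if a standalone model is wanted.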
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:108a62208cac75b04191ec406c7c72ee9abb09b6895583d1e54fa9f117c33906
+ size 14700057
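The .bin entries in this commit are Git LFS pointer files: the repository tracks only the spec version, the sha256 object id, and the byte size, while the actual weights live in LFS storage. A small sketch for checking that a downloaded file matches its pointer (file name and helper are illustrative):

import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size recorded in an LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values copied from the pointer above.
print(verify_lfs_object(
    "adapter_model.bin",
    "108a62208cac75b04191ec406c7c72ee9abb09b6895583d1e54fa9f117c33906",
    14700057,
))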
checkpoint-100/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-100/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:147fb19d469d5c6f53a7ba1bcb909a7ed2128c22f59a7e480bea913c1260074d
+ size 14700057
checkpoint-100/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
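finetuning_args.json records the trainer-side hyperparameters and mirrors the PEFT config saved next to it: lora_rank maps to r, lora_alpha and lora_dropout carry over directly, and lora_target becomes target_modules; the remaining fields (name_module_trainable, num_layer_trainable, pre_seq_len, prefix_projection) presumably belong to the freeze and P-tuning modes and are inert when finetuning_type is "lora". A rough illustration of that correspondence, assuming a local checkout of this repository and the peft package:

import json
from peft import LoraConfig

with open("checkpoint-100/finetuning_args.json") as f:
    args = json.load(f)

# Rebuild an equivalent LoRA config from the recorded training arguments.
lora_config = LoraConfig(
    r=args["lora_rank"],                 # 8
    lora_alpha=args["lora_alpha"],       # 32.0
    lora_dropout=args["lora_dropout"],   # 0.1
    target_modules=args["lora_target"],  # ["query_key_value"]
    task_type="CAUSAL_LM",
)
print(lora_config)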
checkpoint-100/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-100/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272
checkpoint-100/value_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:395abf9eaacf5e0044ef7e7197a8bdbff58623f174fac4c27d1f82112707653d
+ size 17395
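Each checkpoint directory bundles the policy LoRA adapter, a reward/ adapter, and a value_head.bin, a layout consistent with PPO-style RLHF training on top of the LoRA model. The value head file is tiny (about 17 KB here); a quick inspection sketch, assuming it is a plain torch-saved state dict of tensors and using the path from this checkpoint:

import torch

# Load on CPU and list what the value head actually contains (assumed state dict).
state = torch.load("checkpoint-100/value_head.bin", map_location="cpu")
for name, tensor in state.items():
    print(name, tuple(tensor.shape), tensor.dtype)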
checkpoint-1000/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55c073adeac9b9a45b79c54bcd1dc914711d21fe24cbb2409d2437d7a911924a
+ size 14700057
checkpoint-1000/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
checkpoint-1000/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1000/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272
checkpoint-1000/value_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:733ef79dccc23227056bf9906facb14cdc257c45a14edf48c6c015e3175bf15a
+ size 17395
checkpoint-1100/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-1100/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1100/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f113366635ea7d8603f3f700df4f8a93ce616fc172d603358cb047801276cb6b
+ size 14700057
checkpoint-1100/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
checkpoint-1100/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1100/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272
checkpoint-1100/value_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ced8059c47d165b4dd4ca5b7437c1e9a9811ab711f89cc7cfdae874717e0cb3c
+ size 17395
checkpoint-1200/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d49c9db7b2dc6e86d3ba9228acbd0f316d36f7526220d99e03780281409c9a26
+ size 14700057
checkpoint-1200/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
checkpoint-1200/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1200/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272
checkpoint-1200/value_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56df50a35c1ac0f719d7edf4cd843f5ed7d578cdeab92b8738cb64154f7ed84
+ size 17395
checkpoint-1300/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-1300/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1300/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceb75e3a1cc017e3c6abd3e31c5a6a4de91ccdd6411a7b430d787239b7a334af
+ size 14700057
checkpoint-1300/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
checkpoint-1300/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1300/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272
checkpoint-1300/value_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11171e582456b91af5ee37b87fde770590492ed07e9a013769141f233cf37448
+ size 17395
checkpoint-1400/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ library_name: peft
+ ---
checkpoint-1400/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:635293e34c81a24270015784797554594f1ac2947dfbcd33612610d1ce794852
+ size 14700057
checkpoint-1400/finetuning_args.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "query_key_value"
+ ],
+ "name_module_trainable": "mlp",
+ "num_layer_trainable": 3,
+ "pre_seq_len": 16,
+ "prefix_projection": false
+ }
checkpoint-1400/reward/adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "base_model_name_or_path": "THUDM/chatglm-6b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "query_key_value"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1400/reward/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0601825e97bfd3f110a95de205bee2e904e126713799c8a249c6751f1ca4d299
+ size 3272