DatPySci committed
Commit e516cd9 · verified · Parent: f097123

upload lora

Files changed (32)
  1. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  2. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
  3. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
  4. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
  5. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
  6. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
  7. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
  8. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
  9. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
  10. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
  11. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
  12. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
  13. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
  14. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
  15. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
  16. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
  17. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  18. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
  19. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
  20. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
  21. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
  22. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
  23. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
  24. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
  25. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
  26. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
  27. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
  28. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
  29. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
  30. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
  31. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
  32. qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
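
Each checkpoint directory holds a standard PEFT adapter config plus its weights, so it can be loaded on top of the base model with the `peft` library. A minimal sketch, assuming the public `Qwen/Qwen2.5-3B-Instruct` checkpoint is substituted for the local `/dev/shm/verl-cache` path recorded in `base_model_name_or_path` at training time, and that the adapter directory has been downloaded locally:

```python
# Minimal loading sketch (assumptions: local download of the adapter dir,
# Qwen/Qwen2.5-3B-Instruct standing in for the verl cache path).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-3B-Instruct", torch_dtype="auto", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B-Instruct")

adapter_dir = (
    "qwen-3b-lora-fixed-alpha/Qwen/"
    "Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/"
    "global_step_128/actor/lora_adapter"
)
model = PeftModel.from_pretrained(base, adapter_dir)
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```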
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:336324d082a72fa09880e9b4fb100c9b0fd6e9f68dc2b588fee7a15854fb99d2
+ size 239536248
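
The `.safetensors` entries are Git LFS pointers (an oid and a byte size), not the weights themselves. The sizes are consistent with fp32 LoRA weights for this model: a back-of-the-envelope sketch below, assuming the dimensions from the public Qwen2.5-3B-Instruct config (36 layers, hidden size 2048, intermediate size 11008, KV projection width 256); the few tens of KB left over is the safetensors header.

```python
# Sanity-check the LFS byte sizes against rank-32 LoRA weights stored in fp32.
# Model dimensions are assumptions taken from the public Qwen2.5-3B config.
hidden, inter, kv, layers, r = 2048, 11008, 256, 36, 32

per_layer = (
    2 * r * hidden                   # q_proj: 2048 -> 2048
    + (r * hidden + r * kv)          # k_proj: 2048 -> 256
    + (r * hidden + r * kv)          # v_proj: 2048 -> 256
    + 2 * r * hidden                 # o_proj: 2048 -> 2048
    + 2 * (r * hidden + r * inter)   # gate_proj, up_proj: 2048 -> 11008
    + (r * inter + r * hidden)       # down_proj: 11008 -> 2048
)
params = layers * per_layer
print(params, 4 * params)  # 59867136 params -> 239468544 bytes (~239536248 with header)
# Doubling r to 64 doubles this: ~478937088 bytes, matching the 479005032-byte files.
```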
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_192/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_192/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10a69a2790acb3278279c2bdf080318cf5a4e1889eee0c8e54aec6021febfc01
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_256/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_256/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5efa0747f84432f8269301a3e961a63c840b1caad0d7b75e869baa68c70c0ea6
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_320/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_320/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:254361d38c908ca5267f0a825b2bf093db0dfd7debfb6ad5bafa716b5c1b2167
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_384/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_384/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ca6c02eebe5cbec828cede7c637e9c55a5a5dd7847a73f08d4dc181f9e31886
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_448/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_448/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae9393a5d64153e4d797e2abfe7a207028d68c51414a17db16743b4f92d127c2
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_512/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_512/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b86b9dff49fcdf5c08eb6d9b36436f02cc76507a7a39c67c4379699272006cc
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_64/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32_fixed_alpha/global_step_64/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:351f7144facccda2c7133a43a155dd094130bc3725a808f07056733808d320d0
+ size 239536248
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
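
Note the "_fixed_alpha" naming: both runs keep "lora_alpha" at 32 while "r" changes, so the effective LoRA scaling alpha/r drops from 1.0 at rank 32 to 0.5 at rank 64 ("use_rslora" is false, so the scaling is plain alpha/r rather than alpha/sqrt(r)). A one-line check:

```python
# Effective LoRA scaling with fixed alpha (use_rslora = false => scaling = alpha / r).
for r in (32, 64):
    print(r, 32 / r)  # rank 32 -> 1.0, rank 64 -> 0.5
```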
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f189b3f3e48edef35f34b28fc6ecfc3a3074496315f645270e25aa059c101a27
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_192/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_192/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31e28d0a1cf69a78ef86fe6d788b35308ff66764708e36a17c86e19a97e4b0da
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_256/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_256/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcb6601f0a60d3be058cc8e15f5b8a88a08206daf2b624265ccbe7439f0d6a2d
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_320/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_320/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:453a21aa916055222e956eed6956c6cbeb5e5e894e0aa0528b4e81cde98fc11c
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_384/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_384/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dd8525a0876b99e2636f3cdebb94a78185eeafdd2a2249614d5d3557fed8582
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_448/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_448/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e75be03c61e37871fd6294e0aa0b605e7e52910f7eda730f18e140eae53b79d2
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_512/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_512/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bc7d52e6185653c0dbb453f767e78a77c27b2b883fb44ab9de773c600d26ac2
+ size 479005032
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_64/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "v_proj",
+     "q_proj",
+     "k_proj",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora-fixed-alpha/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64_fixed_alpha/global_step_64/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23f546a3670362db1ea867f313c6d8387960cdff34ad0d7924d74db226d5f581
+ size 479005032