ishangarg183 committed
Commit 62330fb · verified · 1 Parent(s): a9ddeba

Upload v1 aligned-only activations

Files changed (36)
  1. v1/aligned_activations/llama32-3b-dpo/activations/aligned_activations.pt +3 -0
  2. v1/aligned_activations/llama32-3b-dpo/run_meta.json +21 -0
  3. v1/aligned_activations/llama32-3b-grpo/activations/aligned_activations.pt +3 -0
  4. v1/aligned_activations/llama32-3b-grpo/run_meta.json +21 -0
  5. v1/aligned_activations/llama32-3b-kto/activations/aligned_activations.pt +3 -0
  6. v1/aligned_activations/llama32-3b-kto/run_meta.json +21 -0
  7. v1/aligned_activations/llama32-3b-orpo/activations/aligned_activations.pt +3 -0
  8. v1/aligned_activations/llama32-3b-orpo/run_meta.json +21 -0
  9. v1/aligned_activations/llama32-3b-ppo/activations/aligned_activations.pt +3 -0
  10. v1/aligned_activations/llama32-3b-ppo/run_meta.json +21 -0
  11. v1/aligned_activations/llama32-3b-simpo/activations/aligned_activations.pt +3 -0
  12. v1/aligned_activations/llama32-3b-simpo/run_meta.json +21 -0
  13. v1/aligned_activations/qwen3-4b-dpo/activations/aligned_activations.pt +3 -0
  14. v1/aligned_activations/qwen3-4b-dpo/run_meta.json +21 -0
  15. v1/aligned_activations/qwen3-4b-grpo/activations/aligned_activations.pt +3 -0
  16. v1/aligned_activations/qwen3-4b-grpo/run_meta.json +21 -0
  17. v1/aligned_activations/qwen3-4b-kto/activations/aligned_activations.pt +3 -0
  18. v1/aligned_activations/qwen3-4b-kto/run_meta.json +21 -0
  19. v1/aligned_activations/qwen3-4b-orpo/activations/aligned_activations.pt +3 -0
  20. v1/aligned_activations/qwen3-4b-orpo/run_meta.json +21 -0
  21. v1/aligned_activations/qwen3-4b-ppo/activations/aligned_activations.pt +3 -0
  22. v1/aligned_activations/qwen3-4b-ppo/run_meta.json +21 -0
  23. v1/aligned_activations/qwen3-4b-simpo/activations/aligned_activations.pt +3 -0
  24. v1/aligned_activations/qwen3-4b-simpo/run_meta.json +21 -0
  25. v1/aligned_activations/smollm3-dpo/activations/aligned_activations.pt +3 -0
  26. v1/aligned_activations/smollm3-dpo/run_meta.json +21 -0
  27. v1/aligned_activations/smollm3-grpo/activations/aligned_activations.pt +3 -0
  28. v1/aligned_activations/smollm3-grpo/run_meta.json +21 -0
  29. v1/aligned_activations/smollm3-kto/activations/aligned_activations.pt +3 -0
  30. v1/aligned_activations/smollm3-kto/run_meta.json +21 -0
  31. v1/aligned_activations/smollm3-orpo/activations/aligned_activations.pt +3 -0
  32. v1/aligned_activations/smollm3-orpo/run_meta.json +21 -0
  33. v1/aligned_activations/smollm3-ppo/activations/aligned_activations.pt +3 -0
  34. v1/aligned_activations/smollm3-ppo/run_meta.json +21 -0
  35. v1/aligned_activations/smollm3-simpo/activations/aligned_activations.pt +3 -0
  36. v1/aligned_activations/smollm3-simpo/run_meta.json +21 -0
v1/aligned_activations/llama32-3b-dpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b23a18f6f39cdb53f0c23ed63ddf25279e94fbfd80960f1b2a6abcfa8eacb469
+ size 2250488957
v1/aligned_activations/llama32-3b-dpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/llama3-dpo-merged",
+   "aligned_run_id": "llama32-3b-dpo",
+   "layers": [
+     12,
+     13,
+     14
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-dpo/activations/aligned_activations.pt"
+ }
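
Each run directory pairs a Git LFS–tracked `aligned_activations.pt` dump with a `run_meta.json` describing how it was extracted. Below is a minimal sketch of how one might fetch and inspect a single run; the dataset repository ID is a placeholder, and the internal layout of the saved `.pt` object (single tensor vs. dict of per-layer tensors) is an assumption, not something this commit specifies.

```python
import json
import torch
from huggingface_hub import hf_hub_download  # pip install huggingface_hub

# NOTE: placeholder repo_id -- substitute the actual dataset repository.
REPO_ID = "ishangarg183/<this-dataset>"
RUN = "v1/aligned_activations/llama32-3b-dpo"

# Download the metadata and the LFS-backed activation file.
meta_path = hf_hub_download(REPO_ID, f"{RUN}/run_meta.json", repo_type="dataset")
acts_path = hf_hub_download(REPO_ID, f"{RUN}/activations/aligned_activations.pt", repo_type="dataset")

with open(meta_path) as f:
    meta = json.load(f)
print(meta["aligned_model"], meta["layers"], meta["position"])

# Assumption: the payload may be a single tensor or a dict keyed by layer,
# depending on how the extraction script saved it.
obj = torch.load(acts_path, map_location="cpu")
if isinstance(obj, dict):
    for key, val in obj.items():
        print(key, getattr(val, "shape", type(val)))
else:
    print(type(obj), getattr(obj, "shape", None))
```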
v1/aligned_activations/llama32-3b-grpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84b272d6977e7a693bd0b53ed3146cf8d98522b090dc52d8fde47f66b7bf5eb0
+ size 2250488957
v1/aligned_activations/llama32-3b-grpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/Llama-3.2-3B-Instruct-GRPO-merged",
+   "aligned_run_id": "llama32-3b-grpo",
+   "layers": [
+     12,
+     13,
+     14
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-grpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/llama32-3b-kto/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:120aa82fabe924b959669a8c5875b0fbe7381daf738187f78d3b4127e079c136
+ size 2250488957
v1/aligned_activations/llama32-3b-kto/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/Llama-3.2-3B-Instruct-KTO-merged",
+   "aligned_run_id": "llama32-3b-kto",
+   "layers": [
+     23,
+     24,
+     25
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-kto/activations/aligned_activations.pt"
+ }
v1/aligned_activations/llama32-3b-orpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f383e535fea3c9c3301cbf7902faddd7b276f93b8c6fc0e97453e00b68b41154
+ size 2250488957
v1/aligned_activations/llama32-3b-orpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/Llama-3.2-3B-ORPO-merged",
+   "aligned_run_id": "llama32-3b-orpo",
+   "layers": [
+     24,
+     25,
+     26
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-orpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/llama32-3b-ppo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0f0798093a3f863438b31be36469eee85e1d6e80ee2bacaa105357480a57c4d
+ size 2250488957
v1/aligned_activations/llama32-3b-ppo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/Llama-3.2-3B-Instruct-PPO-merged",
+   "aligned_run_id": "llama32-3b-ppo",
+   "layers": [
+     10,
+     11,
+     12
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-ppo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/llama32-3b-simpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b42da21232e7f606a3a1cdf9fe732fc67926d283be00793de6c6d6c3ed5fda0b
+ size 2250488957
v1/aligned_activations/llama32-3b-simpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "meta-llama/Llama-3.2-3B-Instruct",
+   "aligned_model": "MInAlA/Llama-3.2-3B-SimPO-merged",
+   "aligned_run_id": "llama32-3b-simpo",
+   "layers": [
+     10,
+     11,
+     12
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/llama32-3b-simpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-dpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a96eb8fcf2b73306973d7db8463a011952e597bae82bbed42568819582537ca5
+ size 1876214909
v1/aligned_activations/qwen3-4b-dpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-Instruct-2507-DPO-merged",
+   "aligned_run_id": "qwen3-4b-dpo",
+   "layers": [
+     21,
+     22,
+     23
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-dpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-grpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:302eaef6b75ff7e1bf62764abeadd3127d4182be0b53103ee3a2e609720bc8f1
+ size 1876214909
v1/aligned_activations/qwen3-4b-grpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-Instruct-2507-GRPO-merged",
+   "aligned_run_id": "qwen3-4b-grpo",
+   "layers": [
+     19,
+     20,
+     21
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-grpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-kto/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f907e62d61c427ed5ccfbca84cbdd84f048d915e3542cfae19868ed6694c7762
+ size 1876214909
v1/aligned_activations/qwen3-4b-kto/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-Instruct-2507-KTO-merged",
+   "aligned_run_id": "qwen3-4b-kto",
+   "layers": [
+     23,
+     24,
+     25
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-kto/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-orpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce3ca3698eab2d2ffb870788dbd915df35ce0153f41893bfb7437d2dcac28b5a
+ size 1876214909
v1/aligned_activations/qwen3-4b-orpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-ORPO-merged",
+   "aligned_run_id": "qwen3-4b-orpo",
+   "layers": [
+     21,
+     22,
+     23
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-orpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-ppo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1029828f9fcd94c27291b08a9e6387f1b387081925d17463c3840c4f00bd88bf
+ size 1876214909
v1/aligned_activations/qwen3-4b-ppo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-Instruct-2507-PPO-merged",
+   "aligned_run_id": "qwen3-4b-ppo",
+   "layers": [
+     20,
+     21,
+     22
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-ppo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/qwen3-4b-simpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9af58d8e7c624c51183bb5df055244bf10a3840128e1e6d423e51a1a596dca3
+ size 1876214909
v1/aligned_activations/qwen3-4b-simpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "Qwen/Qwen3-4B-Instruct-2507",
+   "aligned_model": "MInAlA/Qwen3-4B-Instruct-2507-SimPO-merged",
+   "aligned_run_id": "qwen3-4b-simpo",
+   "layers": [
+     20,
+     21,
+     22
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/qwen3-4b-simpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-dpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb8c1c41ead6020be5ed11ebc99732273db2ac887ee6116c75e84b07500e5eea
+ size 1501940797
v1/aligned_activations/smollm3-dpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-DPO-merged",
+   "aligned_run_id": "smollm3-dpo",
+   "layers": [
+     17,
+     18,
+     19
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-dpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-grpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa4e7bb4913118f4b33346fdba2c954d6dc295f66ec1344001c8af8195e8f4d4
+ size 1501940861
v1/aligned_activations/smollm3-grpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-GRPO-merged",
+   "aligned_run_id": "smollm3-grpo",
+   "layers": [
+     16,
+     17,
+     18
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-grpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-kto/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31d8c5693c3a55e2daac0f4068fdc2c72dad9aae619690e1285435921e7ad2d0
+ size 1501940797
v1/aligned_activations/smollm3-kto/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-KTO-merged",
+   "aligned_run_id": "smollm3-kto",
+   "layers": [
+     18,
+     19,
+     20
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-kto/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-orpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23e874ce49a6651f0a9fe52a06776440ba7fc286fdfcb4b2fd3f72659d8af885
+ size 1501940861
v1/aligned_activations/smollm3-orpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-ORPO-merged",
+   "aligned_run_id": "smollm3-orpo",
+   "layers": [
+     17,
+     18,
+     19
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-orpo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-ppo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7959a77eade0cbb527ea4649caecfaf37491df8d4fd15f07037dffda1ab3b88
+ size 1501940797
v1/aligned_activations/smollm3-ppo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-PPO-merged",
+   "aligned_run_id": "smollm3-ppo",
+   "layers": [
+     17,
+     18,
+     19
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-ppo/activations/aligned_activations.pt"
+ }
v1/aligned_activations/smollm3-simpo/activations/aligned_activations.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e9d0abbd866e1896d2849d5f9b11228bbc3d4350eeaf5fd43750aeb125c6d1e
+ size 1501940861
v1/aligned_activations/smollm3-simpo/run_meta.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "crosscoder_kind": "multilayer_sparc",
+   "extract_side": "aligned",
+   "base_model": "HuggingFaceTB/SmolLM3-3B",
+   "aligned_model": "MInAlA/SmolLM3-3B-SimPO-merged",
+   "aligned_run_id": "smollm3-simpo",
+   "layers": [
+     17,
+     18,
+     19
+   ],
+   "center_layer": null,
+   "layer_window": 1,
+   "layer_policy": "matched_aligned_window",
+   "position": "last_prompt",
+   "dataset_name": "argilla/ultrafeedback-binarized-preferences-cleaned",
+   "max_prompt_tokens": 512,
+   "peft": false,
+   "topk_mode": "model_balanced_layer_agg",
+   "activation_artifact": "output/aligned_activations/smollm3-simpo/activations/aligned_activations.pt"
+ }