atticusg committed
Commit 7d1a6ff · verified · 1 Parent(s): 7d038ad

Upload 13 files
.gitattributes CHANGED
@@ -45,3 +45,11 @@ mock_submission/4_answer_MCQA_Gemma2ForCausalLM_answer_pointer/ResidualStream(La
 mock_submission/4_answer_MCQA_Gemma2ForCausalLM_answer_pointer/ResidualStream(Layer-0,Token-correct_symbol)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
 mock_submission/4_answer_MCQA_Gemma2ForCausalLM_answer_pointer/ResidualStream(Layer-0,Token-last_token)_featurizer filter=lfs diff=lfs merge=lfs -text
 mock_submission/4_answer_MCQA_Gemma2ForCausalLM_answer_pointer/ResidualStream(Layer-0,Token-last_token)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-8,Head-1,Token-all)_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-8,Head-1,Token-all)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-7,Head-6,Token-all)_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-7,Head-6,Token-all)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_featurizer filter=lfs diff=lfs merge=lfs -text
+ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_inverse_featurizer filter=lfs diff=lfs merge=lfs -text
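All eight new rules share the `.gitattributes` pattern `<path> filter=lfs diff=lfs merge=lfs -text`, which routes each serialized featurizer through Git LFS and marks it as binary. In practice `git lfs track <path>` appends exactly this rule; the Python sketch below generates the same lines for a list of artifact paths (the generator itself and the shortened path list are illustrative, not part of the commit):

# Sketch: emit the Git LFS tracking rules shown in the diff above.
# The rule format comes from the diff; this helper is illustrative
# and not part of the repository.
LFS_RULE = "{path} filter=lfs diff=lfs merge=lfs -text"

artifact_paths = [
    "ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_featurizer",
    "ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_inverse_featurizer",
]

with open(".gitattributes", "a", encoding="utf-8") as f:
    for path in artifact_paths:
        f.write(LFS_RULE.format(path=path) + "\n")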
ioi_submission/ioi_linear_params.json ADDED
@@ -0,0 +1,8 @@
+{
+  "gemma": {
+    "bias": 0.04835902899503708,
+    "token_coeff": 0.767971899360421,
+    "position_coeff": 2.004627879709005
+  },
+  "model_class": "Gemma2ForCausalLM"
+}
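ioi_linear_params.json stores a small linear model keyed by a model nickname ("gemma"): a bias plus one coefficient each for a token signal and a position signal, with model_class recording the Transformers class the submission targets. A plausible reading is that the two IOI signals are combined linearly; the function below is a sketch under that assumption (the input names token_score and position_score are not defined by the file):

import json

with open("ioi_submission/ioi_linear_params.json", encoding="utf-8") as f:
    params = json.load(f)

coeffs = params["gemma"]

def linear_readout(token_score: float, position_score: float) -> float:
    # Weighted combination of the two IOI signals. The meaning of the
    # inputs is an assumption; only the coefficient names come from
    # the file itself.
    return (
        coeffs["bias"]
        + coeffs["token_coeff"] * token_score
        + coeffs["position_coeff"] * position_score
    )

assert params["model_class"] == "Gemma2ForCausalLM"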
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da9921dbe379aed68b2674280910d2a87df985c25d8d6418b78e265075b3403b
+size 330305
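Each `_featurizer` and `_inverse_featurizer` file is committed as a Git LFS pointer rather than raw bytes: three `key value` lines giving the spec version, a sha256 object id, and the blob size in bytes. A small parser for this standard pointer format (the parser is illustrative, not part of this repository):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict[str, str]:
    # Pointer files contain lines like:
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<64 hex chars>
    #   size <bytes>
    fields = {}
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = parse_lfs_pointer(
    "ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/"
    "AttentionHead(Layer-7,Head-6,Token-all)_featurizer"
)
assert ptr["oid"].startswith("sha256:")
print(int(ptr["size"]))  # 330305 for the pointer above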
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null
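Each `_indices` file holds the single JSON literal `null`, which deserializes to None. A reasonable reading (an assumption, since the consuming loader is not shown in this commit) is that no feature-dimension subset is selected, i.e. the featurizer acts on all dimensions:

import json

with open(
    "ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/"
    "AttentionHead(Layer-7,Head-6,Token-all)_indices",
    encoding="utf-8",
) as f:
    indices = json.load(f)

# `null` round-trips to None; treating None as "use every dimension"
# is an assumption about the consuming code, not stated in the commit.
use_all_dims = indices is None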
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-7,Head-6,Token-all)_inverse_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82bcc00d4fd483430b7defa28b46965f4b85d313db587d10adc14bd937e34ddc
+size 330425
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-8,Head-1,Token-all)_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86c9d2d3b3924868d93ea66fa72f7ecd232014bb370137d3a7095930ae506605
+size 330305
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-8,Head-1,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null
ioi_submission/ioi_task_Gemma2ForCausalLM_output_position/AttentionHead(Layer-8,Head-1,Token-all)_inverse_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:121329e6a6e66f1b4b8c9c6f8a200e040eb64725cde377965fcb5e1c368f6ea5
+size 330425
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-7,Head-6,Token-all)_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86db1c499a0fa5d00ae455eb23f25cee16df064e2658d7c0eb2670de6b931a06
+size 330305
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-7,Head-6,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-7,Head-6,Token-all)_inverse_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12f3f8f7c2ac5ad5d598846babe51bb114fc067c3061ee62fa63e3516fcdb7f6
+size 330425
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5f2b67a9b41855fd1e5db6f215dbe058e8bb9774c4b2d3e20be85e38d666ce0
+size 330305
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_indices ADDED
@@ -0,0 +1 @@
+null
ioi_submission/ioi_task_Gemma2ForCausalLM_output_token/AttentionHead(Layer-8,Head-1,Token-all)_inverse_featurizer ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7febbe5d2a0b1d0de5a01c72916aac38055ae5642600c1c5d98b2bfd32c5477d
+size 330425