taka-yamakoshi committed · Commit 4400304 · Parent: b4bb2e9
test
app.py CHANGED
@@ -240,8 +240,8 @@ if __name__=='__main__':
     #st.write(option_1_locs)
     #st.write(option_2_locs)
     #st.write(pron_locs)
-    for token_ids in [masked_ids_option_1['sent_1'],masked_ids_option_1['sent_2'],masked_ids_option_2['sent_1'],masked_ids_option_2['sent_2']]:
-        st.write(' '.join([tokenizer.decode([token]) for token in token_ids]))
+    #for token_ids in [masked_ids_option_1['sent_1'],masked_ids_option_1['sent_2'],masked_ids_option_2['sent_1'],masked_ids_option_2['sent_2']]:
+    #    st.write(' '.join([tokenizer.decode([token]) for token in token_ids]))
 
     option_1_tokens_1 = np.array(input_ids_dict['sent_1'])[np.array(option_1_locs['sent_1'])+1]
     option_1_tokens_2 = np.array(input_ids_dict['sent_2'])[np.array(option_1_locs['sent_2'])+1]
@@ -250,8 +250,6 @@ if __name__=='__main__':
     assert np.all(option_1_tokens_1==option_1_tokens_2) and np.all(option_2_tokens_1==option_2_tokens_2)
     option_1_tokens = option_1_tokens_1
     option_2_tokens = option_2_tokens_1
-    st.write(option_1_tokens)
-    st.write(option_2_tokens)
 
     interventions = [{'lay':[],'qry':[],'key':[],'val':[]} for i in range(num_layers)]
     probs_original = run_intervention(interventions,1,model,masked_ids_option_1,masked_ids_option_2,option_1_tokens,option_2_tokens,pron_locs)
@@ -262,11 +260,10 @@ if __name__=='__main__':
     st.dataframe(df.style.highlight_max(axis=1))
 
     multihead = True
-    for layer_id in range(num_layers)
+    for layer_id in range(num_layers):
         interventions = [create_interventions(16,['lay','qry','key','val'],num_heads,multihead) if i==layer_id else {'lay':[],'qry':[],'key':[],'val':[]} for i in range(num_layers)]
         if multihead:
             probs = run_intervention(interventions,1,model,masked_ids_option_1,masked_ids_option_2,option_1_tokens,option_2_tokens,pron_locs)
         else:
             probs = run_intervention(interventions,num_heads,model,masked_ids_option_1,masked_ids_option_2,option_1_tokens,option_2_tokens,pron_locs)
-
         st.write(probs_original-probs)
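The commit comments out the token-decoding debug loop, drops the two st.write debug dumps, and adds the missing colon to `for layer_id in range(num_layers):`, the syntax error that was breaking the Space. Below is a minimal sketch of the per-layer sweep that loop performs: measure baseline probabilities for two candidate fillers at a masked position, then re-run with one layer intervened on at a time and report the difference. The model, sentence, candidate words, and the hook-based zero-ablation are assumptions for illustration only; the app's own create_interventions / run_intervention helpers (which intervene on 'lay'/'qry'/'key'/'val' per head) are not reproduced here.

import torch
from transformers import BertForMaskedLM, BertTokenizer

# Illustrative choices, not taken from the commit.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
model.eval()

sent = 'the trophy does not fit in the suitcase because [MASK] is too big .'
options = ['trophy', 'suitcase']
inputs = tokenizer(sent, return_tensors='pt')
mask_pos = (inputs['input_ids'][0] == tokenizer.mask_token_id).nonzero().item()
option_ids = tokenizer.convert_tokens_to_ids(options)

def option_probs():
    # probability of each candidate filler at the masked position
    with torch.no_grad():
        logits = model(**inputs).logits[0, mask_pos]
    return torch.softmax(logits, dim=-1)[option_ids]

def zero_attention(module, hook_inputs, output):
    # BertSelfAttention returns a tuple; replace its context vectors with zeros
    return (torch.zeros_like(output[0]),) + output[1:]

baseline = option_probs()  # analogue of probs_original (empty interventions)
for layer_id in range(model.config.num_hidden_layers):
    handle = model.bert.encoder.layer[layer_id].attention.self.register_forward_hook(zero_attention)
    ablated = option_probs()
    handle.remove()
    # analogue of st.write(probs_original - probs): how much this layer matters
    print(layer_id, (baseline - ablated).tolist())

Zeroing a layer's attention output is a much blunter intervention than the app's head-level query/key/value swaps, but the sweep-and-compare structure is the same as in the diff.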