dar-tau committed
Commit
5e8b4c1
1 Parent(s): 24be39f

Update app.py

Files changed (1)
  1. app.py +21 -20
app.py CHANGED
@@ -114,9 +114,8 @@ css = '''
     border: 2px solid #000;
     border-radius: 10px;
     padding: 10px;
-    margin-left: 20px;
-    margin-right: 20px;
-    margin-top: 10px;
+    margin-left: 5%;
+    width: 90%;
     background: pink;
 }
 .bubble textarea {
@@ -194,10 +193,22 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
         pass
     with gr.Tab('LLM Attacks'):
         pass
+
     with gr.Group():
         original_prompt_raw = gr.Textbox(value='Should I eat cake or vegetables?', container=True, label='Original Prompt')
         original_prompt_btn = gr.Button('Compute', variant='primary')
-
+
+    tokens_container = []
+    with gr.Row():
+        for i in range(MAX_PROMPT_TOKENS):
+            btn = gr.Button('', visible=False, elem_classes=['token_btn'])
+            tokens_container.append(btn)
+
+    progress_dummy = gr.Markdown('', elem_id='progress_dummy')
+
+    with gr.Group('Interpretation'):
+        interpretation_prompt = gr.Text(suggested_interpretation_prompts[0], label='Interpretation Prompt')
+
     with gr.Accordion(open=False, label='Settings'):
         with gr.Row():
             num_tokens = gr.Slider(1, 100, step=1, value=20, label='Max. # of Tokens')
@@ -211,26 +222,16 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
             top_k = gr.Slider(1, 1000, value=50, step=1, label='top k')
             top_p = gr.Slider(0., 1., value=0.95, label='top p')
 
-    with gr.Group('Interpretation'):
-        interpretation_prompt = gr.Text(suggested_interpretation_prompts[0], label='Interpretation Prompt')
-
     with gr.Group('Output'):
-        tokens_container = []
-        with gr.Row():
-            for i in range(MAX_PROMPT_TOKENS):
-                btn = gr.Button('', visible=False, elem_classes=['token_btn'])
-                tokens_container.append(btn)
-
-        progress_dummy = gr.Markdown('', elem_id='progress_dummy')
         interpretation_bubbles = [gr.Textbox('', container=False, visible=False, elem_classes=['bubble'])
                                   for i in range(model.config.num_hidden_layers)]
 
-    for i, btn in enumerate(tokens_container):
-        btn.click(partial(run_interpretation, i=i), [global_state, interpretation_prompt,
-                                                     num_tokens, do_sample, temperature,
-                                                     top_k, top_p, repetition_penalty, length_penalty
-                                                     ], [progress_dummy, *interpretation_bubbles])
-
+    for i, btn in enumerate(tokens_container):
+        btn.click(partial(run_interpretation, i=i), [global_state, interpretation_prompt,
+                                                     num_tokens, do_sample, temperature,
+                                                     top_k, top_p, repetition_penalty, length_penalty
+                                                     ], [progress_dummy, *interpretation_bubbles])
+
     original_prompt_btn.click(get_hidden_states,
                               [original_prompt_raw],
                               [progress_dummy, global_state, *tokens_container])
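
For context, this commit moves the hidden token buttons, the progress markdown, and the Interpretation prompt above the Settings accordion, and switches the .bubble styling from fixed pixel margins to percentage-based sizing. The sketch below is a minimal, self-contained approximation of the resulting layout order, not the app's actual code: the MAX_PROMPT_TOKENS value, the default interpretation prompt text, the bubble count, and the dropped Group labels are placeholders, and the event wiring (run_interpretation / get_hidden_states) is omitted.

```python
# Minimal layout sketch under the assumptions stated above (requires gradio).
import gradio as gr

MAX_PROMPT_TOKENS = 5  # placeholder; the real value is defined elsewhere in app.py

css = '''
.bubble {
    border: 2px solid #000;
    border-radius: 10px;
    padding: 10px;
    margin-left: 5%;   /* percentage-based sizing introduced by this commit */
    width: 90%;
    background: pink;
}
'''

with gr.Blocks(css=css) as demo:
    with gr.Group():
        original_prompt_raw = gr.Textbox(value='Should I eat cake or vegetables?',
                                         label='Original Prompt')
        original_prompt_btn = gr.Button('Compute', variant='primary')

    # Token buttons are created hidden up front and revealed once the prompt
    # is tokenized; their click wiring is omitted in this sketch.
    tokens_container = []
    with gr.Row():
        for i in range(MAX_PROMPT_TOKENS):
            tokens_container.append(gr.Button('', visible=False,
                                              elem_classes=['token_btn']))

    progress_dummy = gr.Markdown('', elem_id='progress_dummy')

    # The Interpretation prompt now sits above the Settings accordion.
    with gr.Group():
        interpretation_prompt = gr.Text('Explain what this hidden state means:',  # placeholder prompt
                                        label='Interpretation Prompt')

    with gr.Accordion(open=False, label='Settings'):
        with gr.Row():
            num_tokens = gr.Slider(1, 100, step=1, value=20, label='Max. # of Tokens')

    with gr.Group():
        interpretation_bubbles = [gr.Textbox('', container=False, visible=False,
                                             elem_classes=['bubble'])
                                  for _ in range(4)]  # placeholder layer count

if __name__ == '__main__':
    demo.launch()
```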