dar-tau committed on
Commit
b30e55a
1 Parent(s): 9a3579b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -11
app.py CHANGED
@@ -11,6 +11,10 @@ from interpret import InterpretationPrompt
11
  MAX_PROMPT_TOKENS = 30
12
 
13
  ## info
 
 
 
 
14
  model_info = {
15
  'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', device_map='cpu', token=os.environ['hf_token'],
16
  original_prompt_template='<s>[INST] {prompt} [/INST]',
@@ -161,6 +165,8 @@ css = '''
161
 
162
  # '''
163
 
 
 
164
  with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
165
  global_state = gr.State([])
166
  with gr.Row():
@@ -190,21 +196,21 @@ with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
190
  with gr.Group('Interpretation'):
191
  interpretation_prompt = gr.Text(suggested_interpretation_prompts[0], label='Interpretation Prompt')
192
 
193
- with gr.Blocks():
194
  gr.Markdown('''
195
  Here are some examples of prompts we can analyze their internal representations
196
  ''')
197
- with gr.Tab('Memory Recall'):
198
- pass
199
- with gr.Tab('Physics Understanding'):
200
- pass
201
- with gr.Tab('Common Sense'):
202
- pass
203
- with gr.Tab('LLM Attacks'):
204
- pass
205
-
206
  with gr.Group():
207
- original_prompt_raw = gr.Textbox(value='Should I eat cake or vegetables?', container=True, label='Original Prompt')
208
  original_prompt_btn = gr.Button('Compute', variant='primary')
209
 
210
  tokens_container = []
 
11
  MAX_PROMPT_TOKENS = 30
12
 
13
  ## info
14
+ dataset_info = [{'name': 'Commonsense', 'hf_repo': 'tau/commonsense_qa', 'text_col': 'question'}]
15
+
16
+
17
+
18
  model_info = {
19
  'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', device_map='cpu', token=os.environ['hf_token'],
20
  original_prompt_template='<s>[INST] {prompt} [/INST]',
 
165
 
166
  # '''
167
 
168
+ original_prompt_raw = gr.Textbox(value='Should I eat cake or vegetables?', container=True, label='Original Prompt')
169
+
170
  with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:
171
  global_state = gr.State([])
172
  with gr.Row():
 
196
  with gr.Group('Interpretation'):
197
  interpretation_prompt = gr.Text(suggested_interpretation_prompts[0], label='Interpretation Prompt')
198
 
199
+ with gr.Group():
200
  gr.Markdown('''
201
  Here are some examples of prompts we can analyze their internal representations
202
  ''')
203
+
204
+ for info in dataset_info:
205
+ with gr.Tab(info['name']):
206
+ num_examples = 10
207
+ dataset = load_dataset(info['hf_repo'], split='train', streaming=True)
208
+ dataset = dataset.shuffle(buffer_size=2000).take(num_examples)
209
+ dataset = [[row[info['text_col']]] for row in dataset]
210
+ gr.Examples(dataset, [original_prompt_raw])
211
+
212
  with gr.Group():
213
+ original_prompt_raw.render()
214
  original_prompt_btn = gr.Button('Compute', variant='primary')
215
 
216
  tokens_container = []