山越貴耀 committed on
Commit: 282bf19
1 parent: b103573

included more instructions

Files changed (1)
  1. app.py +15 -13
app.py CHANGED
@@ -94,14 +94,14 @@ def plot_fig(df,sent_id,xlims,ylims,color_list):
     ax.axis('off')
     ax.set_title(df.cleaned_sentence.to_list()[sent_id])
     #fig.savefig(f'figures/{sent_id}.png')
-    #plt.clf()
-    #plt.close()
     buf = io.BytesIO()
     fig.savefig(buf, format="png", dpi=200)
     buf.seek(0)
     img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
     buf.close()
     img = cv2.imdecode(img_arr, 1)
+    plt.clf()
+    plt.close()
     return img
 
 def pre_render_images(df,input_sent_id):
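
This hunk drops the previously commented-out cleanup calls and adds active plt.clf()/plt.close() after the image is decoded, so repeated calls to plot_fig do not accumulate open matplotlib figures. A minimal, self-contained sketch of the same figure-to-image round trip (the helper name render_to_bgr and the example plot are illustrative, not part of app.py):

import io
import cv2
import numpy as np
import matplotlib.pyplot as plt

def render_to_bgr(dpi=200):
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])                    # stand-in for the t-SNE scatter in app.py
    ax.axis('off')
    buf = io.BytesIO()
    fig.savefig(buf, format="png", dpi=dpi)    # rasterize the figure into memory
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()
    img = cv2.imdecode(img_arr, 1)             # decode the PNG bytes into a BGR array
    plt.clf()                                  # clear and close afterwards, as in this commit,
    plt.close(fig)                             # so a long-running Streamlit app does not leak figures
    return img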
@@ -153,15 +153,17 @@ if __name__=='__main__':
 
     # Title
     st.header("Demo: Probing BERT's priors with serial reproduction chains")
-
+    st.text("Explore sentences in the serial reproduction chains generated by BERT!")
+    st.text("Visit different positions in the chain using the widgets on the left.")
+    st.text("Check 'Show candidates' to see what words are proposed when each word is masked out.")
     # Load BERT
     tokenizer,model = load_model('bert-base-uncased')
     mask_id = tokenizer.encode("[MASK]")[1:-1][0]
 
     # First step: load the dataframe containing sentences
-    input_type = st.sidebar.radio(label='1. Choose the input type',options=('Use one of our example sentences','Use your own initial sentence'))
+    input_type = st.sidebar.radio(label='1. Choose the input type',options=('Use one of the example sentences','Use your own initial sentence'))
 
-    if input_type=='Use one of our example sentences':
+    if input_type=='Use one of the example sentences':
         sentence = st.sidebar.selectbox("Select the inital sentence",
                                         ('About 170 campers attend the camps each week.',
                                          'She grew up with three brothers and ten sisters.'))
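
The mask_id line above (unchanged context) recovers BERT's mask token id by encoding "[MASK]" and stripping the special tokens that encode() adds. A minimal check of that expression (assumes the transformers package; tokenizer.mask_token_id exposes the same value directly):

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
mask_id = tokenizer.encode("[MASK]")[1:-1][0]   # encode() wraps the input as [CLS] [MASK] [SEP]
assert mask_id == tokenizer.mask_token_id       # 103 for bert-base-uncased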
@@ -173,7 +175,7 @@ if __name__=='__main__':
         st.session_state.df = load_data(sentence_num)
 
     else:
-        sentence = st.sidebar.text_input('Type down your own sentence here',on_change=clear_df)
+        sentence = st.sidebar.text_input('Type down your own sentence here.',on_change=clear_df)
         num_steps = st.sidebar.number_input(label='How many steps do you want to run?',value=1000)
         if st.sidebar.button('Run chains'):
            chain = run_chains(tokenizer,model,mask_id,sentence,num_steps=num_steps)
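
The text_input/clear_df pattern in this hunk relies on Streamlit's session state: the generated chain is stored in st.session_state so it survives reruns, and the on_change callback drops it when the sentence changes. A minimal sketch of that pattern under assumed names (make_df stands in for load_data/run_chains, and this clear_df is a plausible reconstruction, not the one in app.py):

import pandas as pd
import streamlit as st

def clear_df():
    # forget the cached chain when the input sentence changes
    if 'df' in st.session_state:
        del st.session_state['df']

def make_df(sentence):
    # placeholder for load_data / run_chains
    return pd.DataFrame({'cleaned_sentence': [sentence]})

sentence = st.sidebar.text_input('Type your own sentence here', on_change=clear_df)
if st.sidebar.button('Run chains'):
    st.session_state.df = make_df(sentence)

if 'df' in st.session_state:
    st.write(st.session_state.df)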
@@ -182,15 +184,15 @@ if __name__=='__main__':
 
    if 'df' in st.session_state:
        df = st.session_state.df
-        sent_id = st.sidebar.slider(label='2. Select the position in a chain to start exploring',
+        sent_id = st.sidebar.slider(label='2. Select a position in the chain to start exploring',
                                    min_value=0,max_value=len(df)-1,value=0)
 
-        if input_type=='Use one of our example sentences':
+        if input_type=='Use one of the example sentences':
            explore_type = st.sidebar.radio('3. Choose the way to explore',options=['In fixed increments','Click through each step','Autoplay'])
        else:
            explore_type = st.sidebar.radio('3. Choose the way to explore',options=['In fixed increments','Click through each step'])
        if explore_type=='Autoplay':
-            if st.button('Create the video (this may take a few minutes)'):
+            #if st.button('Create the video (this may take a few minutes)'):
                #st.write('Creating the video...')
                #x_tsne, y_tsne = df.x_tsne, df.y_tsne
                #xscale_unit = (max(x_tsne)-min(x_tsne))/10
@@ -211,10 +213,10 @@ if __name__=='__main__':
                #img = cv2.imread(f'figures/{sent_id}.png')
                # out.write(img)
                #out.release()
-                cols = st.columns([1,2,1])
-                with cols[1]:
-                    with open(f'sampling_video_{sentence_num}.mp4', 'rb') as f:
-                        st.video(f)
+            cols = st.columns([1,2,1])
+            with cols[1]:
+                with open(f'sampling_video_{sentence_num}.mp4', 'rb') as f:
+                    st.video(f)
        else:
            if explore_type=='In fixed increments':
                button_labels = ['-500','-100','-10','-1','0','+1','+10','+100','+500']
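
The re-indented block above now runs unconditionally in the Autoplay branch, centering the pre-rendered video by splitting the page into 1:2:1 columns and placing the player in the middle one. A minimal sketch of that layout trick (demo.mp4 is a placeholder filename, not a file in this repo):

import streamlit as st

cols = st.columns([1, 2, 1])   # narrow gutters on either side of a wide middle column
with cols[1]:
    with open('demo.mp4', 'rb') as f:
        st.video(f)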
 