山越貴耀 committed
Commit: 568fadb
Parent: ef480b7

stop caching

Files changed (1): app.py (+4 -4)

app.py
@@ -30,7 +30,7 @@ def load_data(sentence_num):
     df = df.loc[lambda d: (d['sentence_num']==sentence_num)&(d['iter_num']<1000)]
     return df
 
-@st.cache(show_spinner=False)
+#@st.cache(show_spinner=False)
 def mask_prob(model,mask_id,sentences,position,temp=1):
     masked_sentences = sentences.clone()
     masked_sentences[:, position] = mask_id
@@ -38,7 +38,7 @@ def mask_prob(model,mask_id,sentences,position,temp=1):
     logits = model(masked_sentences)[0]
     return F.log_softmax(logits[:, position] / temp, dim = -1)
 
-@st.cache(show_spinner=False)
+#@st.cache(show_spinner=False)
 def sample_words(probs,pos,sentences):
     candidates = [[tokenizer.decode([candidate]),torch.exp(probs)[0,candidate].item()]
                   for candidate in torch.argsort(probs[0],descending=True)[:10]]
@@ -64,7 +64,7 @@ def run_chains(tokenizer,model,mask_id,input_text,num_steps):
         sentence,_ = sample_words(probs,pos,sentence)
     return pd.DataFrame(data=data_list,columns=['step','sentence','next_sample_loc'])
 
-@st.cache(suppress_st_warning=True,show_spinner=False)
+#@st.cache(suppress_st_warning=True,show_spinner=False)
 def run_tsne(chain):
     st.sidebar.write('Running t-SNE...')
     st.sidebar.write('This takes ~1 min for 1000 steps with ~10 token sentences')
@@ -81,7 +81,7 @@ def run_tsne(chain):
 def clear_df():
     del st.session_state['df']
 
-@st.cache(show_spinner=False)
+#@st.cache(show_spinner=False)
 def plot_fig(df,sent_id,xlims,ylims,color_list):
     x_tsne, y_tsne = df.x_tsne, df.y_tsne
     fig = plt.figure(figsize=(5,5),dpi=200)
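
Note (not part of this commit): the change only comments out the deprecated @st.cache decorators rather than deleting them. If caching were wanted again on a current Streamlit release, the replacement APIs are st.cache_data / st.cache_resource. A minimal sketch for mask_prob is shown below, assuming the unhashable model argument is excluded from the cache key via Streamlit's leading-underscore convention; this is an illustration, not the app's actual code.

# Minimal sketch only: caching mask_prob with the newer Streamlit API
# instead of the deprecated st.cache used in app.py.
import streamlit as st
import torch
import torch.nn.functional as F

@st.cache_data(show_spinner=False)
def mask_prob(_model, mask_id, sentences, position, temp=1):
    # Arguments prefixed with "_" are not hashed, so the model object does not
    # break the cache key; the tensor, ints, and temperature still are.
    masked_sentences = sentences.clone()
    masked_sentences[:, position] = mask_id
    logits = _model(masked_sentences)[0]
    return F.log_softmax(logits[:, position] / temp, dim=-1)

run_tsne and plot_fig are less obvious caching candidates: the old suppress_st_warning=True flag suggests they write to the Streamlit UI from inside a cached function, which may be why caching was dropped here altogether.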