bhavitvyamalik committed on
Commit
47121bf
1 Parent(s): 54dc7b4

parameters beta_expander

Browse files
Files changed (1) hide show
  1. apps/mic.py +6 -5
apps/mic.py CHANGED
@@ -27,17 +27,18 @@ def app(state):
27
  st.write("\n")
28
  st.write(read_markdown("intro.md"))
29
 
30
- with st.beta_expander("Generation Parameters"):
 
 
 
31
  do_sample = st.sidebar.checkbox("Sample", value=False, help="Sample from the model instead of using beam search.")
32
  top_k = st.sidebar.number_input("Top K", min_value=10, max_value=200, value=50, step=1, help="The number of highest probability vocabulary tokens to keep for top-k-filtering.")
33
  num_beams = st.sidebar.number_input(label="Number of Beams", min_value=2, max_value=10, value=4, step=1, help="Number of beams to be used in beam search.")
34
  temperature = st.sidebar.select_slider(label="Temperature", options = list(np.arange(0.0,1.1, step=0.1)), value=1.0, help ="The value used to module the next token probabilities.", format_func=lambda x: f"{x:.2f}")
35
  top_p = st.sidebar.select_slider(label = "Top-P", options = list(np.arange(0.0,1.1, step=0.1)),value=1.0, help="Nucleus Sampling : If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are kept for generation.", format_func=lambda x: f"{x:.2f}")
 
 
36
 
37
- if st.sidebar.button("Clear All Cache"):
38
- caching.clear_cache()
39
-
40
- max_length = 64
41
 
42
  @st.cache
43
  def load_model(ckpt):
 
27
  st.write("\n")
28
  st.write(read_markdown("intro.md"))
29
 
30
+ # st.sidebar.title("Generation Parameters")
31
+ max_length = 64
32
+
33
+ with st.sidebar.beta_expander('Generation Parameters'):
34
  do_sample = st.sidebar.checkbox("Sample", value=False, help="Sample from the model instead of using beam search.")
35
  top_k = st.sidebar.number_input("Top K", min_value=10, max_value=200, value=50, step=1, help="The number of highest probability vocabulary tokens to keep for top-k-filtering.")
36
  num_beams = st.sidebar.number_input(label="Number of Beams", min_value=2, max_value=10, value=4, step=1, help="Number of beams to be used in beam search.")
37
  temperature = st.sidebar.select_slider(label="Temperature", options = list(np.arange(0.0,1.1, step=0.1)), value=1.0, help ="The value used to module the next token probabilities.", format_func=lambda x: f"{x:.2f}")
38
  top_p = st.sidebar.select_slider(label = "Top-P", options = list(np.arange(0.0,1.1, step=0.1)),value=1.0, help="Nucleus Sampling : If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are kept for generation.", format_func=lambda x: f"{x:.2f}")
39
+ if st.sidebar.button("Clear All Cache"):
40
+ caching.clear_cache()
41
 
 
 
 
 
42
 
43
  @st.cache
44
  def load_model(ckpt):