KennethTM committed on
Commit
6ee4621
1 Parent(s): d35712c

Changed default generation settings

Files changed (1)
  1. app.py +2 -6
app.py CHANGED
@@ -1,12 +1,8 @@
- #streamlit run app.py
-
  import streamlit as st
  import galai as gal
- #import torch
 
  #https://github.com/paperswithcode/galai/blob/main/notebooks/Introduction%20to%20Galactica%20Models.ipynb
 
- #@st.cache(suppress_st_warning=True, allow_output_mutation=True)
  @st.cache_resource
  def load_model(model_name):
      model = gal.load_model(model_name, num_gpus=0) #, dtype=torch.float16
@@ -32,9 +28,9 @@ model = load_model(choose_model)
 
  st.sidebar.markdown("### Text generation settings")
 
- max_new_tokens = st.sidebar.slider("Max new tokens", value=60, min_value = 10, max_value = 200, step=10)
+ max_new_tokens = st.sidebar.slider("Max new tokens", value=10, min_value = 10, max_value = 100, step=10)
  penalty_alpha = st.sidebar.slider("Alpha penalty", value = 0.6, min_value = 0.0, max_value=2.0, step=0.1)
- top_k = st.sidebar.slider("Top-k", min_value = 0, max_value=10, value = 4)
+ top_k = st.sidebar.slider("Top-k", min_value = 0, max_value=10, value = 2)
  new_doc = st.sidebar.checkbox("New document", value=True)
 
  #Main
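
The commit only touches the sidebar defaults; the generation call that consumes them sits further down in app.py and is not part of this diff. As a rough sketch of how those values would plausibly be used, assuming galai's model.generate accepts keywords matching the sidebar variable names (the prompt widget and the exact keyword names below are assumptions, not confirmed by this commit), the main section might look like:

# Hypothetical main section, not part of this commit: sketch of how the
# sidebar values could feed a generation call. Keyword names mirror the
# sidebar variables and are assumed, not taken from this diff.
prompt = st.text_area("Prompt", value="The Transformer architecture")
if st.button("Generate"):
    output = model.generate(
        prompt,
        new_doc=new_doc,                # treat the prompt as the start of a new document
        max_new_tokens=max_new_tokens,  # default lowered from 60 to 10 in this commit
        penalty_alpha=penalty_alpha,    # contrastive-search degeneration penalty
        top_k=top_k,                    # default lowered from 4 to 2 in this commit
    )
    st.write(output)

Lowering the max-new-tokens and top-k defaults makes the CPU-only demo (num_gpus=0) respond faster out of the box, at the cost of shorter and less varied completions.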