Willder committed on
Commit
a747677
1 Parent(s): cce3acd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -22,6 +22,8 @@ def init_openai_settings():
22
 
23
 
24
  def init_session():
 
 
25
  if not st.session_state.get("chats"):
26
  st.session_state["chats"] = {}
27
 
@@ -76,13 +78,15 @@ def init_sidebar():
76
  format="%0.2f",
77
  help="""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.""",
78
  )
79
- st.session_state["params"]["max_tokens"] = chat_config_expander.number_input(
80
  "MAX_TOKENS",
81
  value=2000,
82
  step=1,
 
83
  max_value=4000,
84
  help="The maximum number of tokens to generate in the completion",
85
  )
 
86
  st.session_state["params"]["prompt"] = chat_config_expander.text_area(
87
  "Prompts",
88
  "You are a helpful assistant that answer questions as possible as you can.",
 
22
 
23
 
24
  def init_session():
25
+ if not st.session_state.get("params"):
26
+ st.session_state["params"] = dict()
27
  if not st.session_state.get("chats"):
28
  st.session_state["chats"] = {}
29
 
 
78
  format="%0.2f",
79
  help="""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.""",
80
  )
81
+ st.session_state["params"]["max_tokens"] = chat_config_expander.slider(
82
  "MAX_TOKENS",
83
  value=2000,
84
  step=1,
85
+ min_value=100,
86
  max_value=4000,
87
  help="The maximum number of tokens to generate in the completion",
88
  )
89
+
90
  st.session_state["params"]["prompt"] = chat_config_expander.text_area(
91
  "Prompts",
92
  "You are a helpful assistant that answer questions as possible as you can.",