Ben Burtenshaw committed
Commit 1aeff9e
1 Parent(s): 2271f96

add max new tokens to pipeline params

Files changed (1)
  1. pages/3_🌱 Generate Dataset.py +29 -8
pages/3_🌱 Generate Dataset.py CHANGED
@@ -116,14 +116,33 @@ domain_expert_num_generations = st.slider(
     "Number of generations for domain expert response", 1, 10, 2
 )
 
-st.markdown(
-    "Temperature is a hyperparameter that controls the randomness of the generated text. \
-    Lower temperatures will generate more deterministic text, while higher temperatures \
-    will add more variation to generations."
-)
+with st.expander("🔥 Advanced parameters"):
+    st.markdown(
+        "Temperature is a hyperparameter that controls the randomness of the generated text. \
+        Lower temperatures will generate more deterministic text, while higher temperatures \
+        will add more variation to generations."
+    )
+
+    self_instruct_temperature = st.slider(
+        "Temperature for self-instruction", 0.1, 1.0, 0.9
+    )
+    domain_expert_temperature = st.slider(
+        "Temperature for domain expert", 0.1, 1.0, 0.9
+    )
+
+    st.markdown(
+        "`max_new_tokens` is the maximum number of tokens (word-like things) that can be generated by each model call. \
+        This is a way to control the length of the generated text. In some cases, you may want to increase this to \
+        generate longer responses. You should adapt this value to your model choice, but the default of 2096 works \
+        in most cases."
+    )
 
-self_instruct_temperature = st.slider("Temperature for self-instruction", 0.1, 1.0, 0.9)
-domain_expert_temperature = st.slider("Temperature for domain expert", 0.1, 1.0, 0.9)
+    self_instruct_max_new_tokens = st.number_input(
+        "Max new tokens for self-instruction", value=2096
+    )
+    domain_expert_max_new_tokens = st.number_input(
+        "Max new tokens for domain expert", value=2096
+    )
 
 ###############################################################
 # ARGILLA API
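The expander above adds two kinds of controls: temperature (randomness) and `max_new_tokens` (a cap on generation length). The diff itself does not show where these values are consumed; as a minimal sketch, assuming the pipeline ultimately forwards them to a Hugging Face text-generation endpoint (the model id and prompt below are illustrative placeholders, not taken from this repo):

from huggingface_hub import InferenceClient

# Placeholder model id and prompt; only the two parameters mirror the UI defaults above.
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")

response = client.text_generation(
    "Write an instruction that a domain expert could answer.",
    temperature=0.9,      # matches the self_instruct_temperature slider default
    max_new_tokens=2096,  # matches the self_instruct_max_new_tokens default
)
print(response)

The second hunk, below, threads the two new values into the pipeline parameters that get pushed to the Hub.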
 
@@ -178,11 +197,13 @@ if all(
         "domain_expert_temperature": domain_expert_temperature,
         "self_intruct_num_generations": self_intruct_num_generations,
         "domain_expert_num_generations": domain_expert_num_generations,
+        "self_instruct_max_new_tokens": self_instruct_max_new_tokens,
+        "domain_expert_max_new_tokens": domain_expert_max_new_tokens,
     },
     hub_username=hub_username,
     hub_token=hub_token,
     project_name=project_name,
-)
+)
 
 st.success(
     f"Pipeline configuration pushed to the dataset repo {hub_username}/{project_name} on the Hub."