✨ add ability to change models
Signed-off-by: peter szemraj <peterszemraj@gmail.com>
app.py
CHANGED
@@ -72,6 +72,24 @@ def generate_text(
     formatted_email = postprocess(response)
     return formatted_email, make_email_link(body=formatted_email)
 
+def load_emailgen_model(model_tag:str):
+    """
+    load_emailgen_model - load a text generation pipeline for email generation
+
+    Args:
+        model_tag (str): the huggingface model tag to load
+
+    Returns:
+        transformers.pipelines.TextGenerationPipeline: the text generation pipeline
+    """
+
+    generator = pipeline(
+        "text-generation",
+        model_tag,
+        device=0 if use_gpu else -1,
+    )
+
+    return generator
 
 def get_parser():
     """
@@ -105,6 +123,8 @@ Hello,
 
 Following up on last week's bubblegum shipment, I"""
 
+available_models = ['postbot/distilgpt2-emailgen', 'postbot/distilgpt2-emailgen-V2', 'postbot/gpt2-medium-emailgen']
+
 if __name__ == "__main__":
     logging.info("\n\n\nStarting new instance of app.py")
     args = get_parser().parse_args()
@@ -166,12 +186,21 @@ if __name__ == "__main__":
         gr.Markdown(
             "This demo generates text via beam search. See details about these parameters [here](https://huggingface.co/blog/how-to-generate), otherwise they should be fine as-is."
         )
-
-
-
-
-
-
+        with gr.Row():
+            model_name = gr.Dropdown(
+                choices=available_models,
+                label="Choose a model",
+                value=model_tag,
+            )
+            load_model_button = gr.Button(
+                'Load Model',
+                variant='secondary',
+            )
+            num_beams = gr.Radio(
+                choices=[4, 8, 16],
+                label="Number of Beams",
+                value=4,
+            )
         with gr.Row():
 
             no_repeat_ngram_size = gr.Radio(
@@ -198,6 +227,7 @@ if __name__ == "__main__":
             "The intended use of this model is to provide suggestions to _auto-complete_ the rest of your email. Said another way, it should serve as a **tool to write predictable emails faster**. It is not intended to write entire emails from scratch; at least **some input** is required to guide the direction of the model.\n\nPlease verify any suggestions by the model for A) False claims and B) negation statements **before** accepting/sending something."
         )
         gr.Markdown("---")
+
         clear_button.click(
             fn=clear,
             inputs=[prompt_text],
@@ -215,6 +245,11 @@ if __name__ == "__main__":
             outputs=[generated_email, email_link],
         )
 
+        load_model_button.click(
+            fn=load_emailgen_model,
+            inputs=[model_name],
+            outputs=[generator],
+        )
     demo.launch(
         enable_queue=True,
         share=True, # for local testing
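For context on what the new code does outside of Gradio: the loader is a thin wrapper around transformers' pipeline factory, and the dropdown just changes which checkpoint tag that factory pulls. Below is a minimal standalone sketch of the same idea, assuming transformers (and a backend such as torch) is installed; the explicit device argument stands in for the app's use_gpu flag, and the prompt and generation parameters are illustrative, not copied from app.py.

    from transformers import pipeline

    def load_emailgen_model(model_tag: str, device: int = -1):
        # -1 runs on CPU, 0 on the first GPU (mirrors `device=0 if use_gpu else -1` in the diff)
        return pipeline("text-generation", model_tag, device=device)

    # any tag from available_models should work here
    generator = load_emailgen_model("postbot/distilgpt2-emailgen")

    result = generator(
        "Hello,\n\nFollowing up on last week's bubblegum shipment, I",
        max_new_tokens=64,
        num_beams=4,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    print(result[0]["generated_text"])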