import argparse
import logging
import time
import gradio as gr
import torch
from transformers import pipeline
from utils import postprocess, clear, make_email_link
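
# NB: postprocess, clear, and make_email_link are local helpers in utils.py;
# from their usage below: postprocess cleans up the generated text, clear
# resets the prompt box, and make_email_link builds a mailto: link.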
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

use_gpu = torch.cuda.is_available()


def generate_text(
    prompt: str,
    gen_length=64,
    num_beams=4,
    no_repeat_ngram_size=2,
    length_penalty=1.0,
    # perma params (not set by user)
    repetition_penalty=3.5,
    abs_max_length=512,
    verbose=False,
):
"""
generate_text - generate text from a prompt using a text generation pipeline
Args:
prompt (str): the prompt to generate text from
model_input (_type_): the text generation pipeline
max_length (int, optional): the maximum length of the generated text. Defaults to 128.
method (str, optional): the generation method. Defaults to "Sampling".
verbose (bool, optional): the verbosity of the output. Defaults to False.
Returns:
str: the generated text
"""
    global generator
    if verbose:
        logging.info(f"Generating text from prompt:\n\n{prompt}")
        logging.info(
            f"params:\tmax_length={gen_length}, num_beams={num_beams}, no_repeat_ngram_size={no_repeat_ngram_size}, length_penalty={length_penalty}, repetition_penalty={repetition_penalty}, abs_max_length={abs_max_length}"
        )
    st = time.perf_counter()

    input_tokens = generator.tokenizer(prompt)
    input_len = len(input_tokens["input_ids"])
    if input_len > abs_max_length:
        logging.info(f"Input too long {input_len} > {abs_max_length}, may cause errors")

    result = generator(
        prompt,
        max_length=gen_length + input_len,
        min_length=input_len + 4,
        num_beams=num_beams,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=no_repeat_ngram_size,
        length_penalty=length_penalty,
        do_sample=False,
        early_stopping=True,
    )  # generate
    response = result[0]["generated_text"]
    rt = time.perf_counter() - st

    if verbose:
        logging.info(f"Generated text: {response}")
        logging.info(f"Generation time: {rt:.2f}s")

    formatted_email = postprocess(response)
    return formatted_email, make_email_link(body=formatted_email)
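
# Example call (prompt text is illustrative; assumes a pipeline has been
# loaded into `generator` first, see load_emailgen_model below):
#   email_text, mailto_link = generate_text("Hello,\nFollowing up on the shipment, I")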


def load_emailgen_model(model_tag: str):
    """
    load_emailgen_model - load a text generation pipeline for email generation

    Args:
        model_tag (str): the huggingface model tag to load

    Note:
        replaces the global `generator` pipeline in place; returns nothing
    """
    global generator
    generator = pipeline(
        "text-generation",
        model_tag,
        device=0 if use_gpu else -1,
    )
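
# Example: swap in another checkpoint at runtime (tags as listed in
# available_models below):
#   load_emailgen_model("postbot/gpt2-medium-emailgen")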


def get_parser():
    """
    get_parser - a helper function for the argparse module
    """
    parser = argparse.ArgumentParser(
        description="Text Generation demo for postbot",
    )
    parser.add_argument(
        "-m",
        "--model",
        required=False,
        type=str,
        default="postbot/distilgpt2-emailgen-V2",
        help="Pass a different huggingface model tag to use a custom model",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        required=False,
        action="store_true",
        help="Verbose output",
    )
    return parser
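
# Example CLI invocation using the flags defined in get_parser:
#   python app.py --model postbot/gpt2-medium-emailgen --verbose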

default_prompt = """
Hello,
Following up on last week's bubblegum shipment, I"""

available_models = [
    "postbot/distilgpt2-emailgen-V2",
    "postbot/distilgpt2-emailgen",
    "postbot/gpt2-medium-emailgen",
]


if __name__ == "__main__":
    logging.info("\n\n\nStarting new instance of app.py")
    args = get_parser().parse_args()
    logging.info(f"received args:\t{args}")
    model_tag = args.model
    verbose = args.verbose

    logging.info(f"Loading model: {model_tag}, use GPU = {use_gpu}")
    generator = pipeline(
        "text-generation",
        model_tag,
        device=0 if use_gpu else -1,
    )

    demo = gr.Blocks()

    logging.info("launching interface...")
    with demo:
gr.Markdown("# Auto-Complete Emails - Demo")
gr.Markdown(
"Enter part of an email, and a text-gen model will complete it! See details below. "
)
gr.Markdown("---")
with gr.Column():
gr.Markdown("## Generate Text")
gr.Markdown("Edit the prompt and parameters and press **Generate**!")
prompt_text = gr.Textbox(
lines=4,
label="Email Prompt",
value=default_prompt,
)
with gr.Row():
clear_button = gr.Button(
value="Clear Prompt",
)
num_gen_tokens = gr.Slider(
label="Generation Tokens",
value=64,
maximum=128,
minimum=32,
step=16,
)
generated_email = gr.Textbox(
label="Generated Result",
placeholder="The completed email will appear here",
)
email_link = gr.HTML("<p><em>A mailto: link will appear here</em></p>")
generate_button = gr.Button(
value="Generate!",
variant="primary",
)
gr.Markdown("## Advanced Options")
gr.Markdown(
"This demo generates text via beam search. See details about these parameters [here](https://huggingface.co/blog/how-to-generate), otherwise they should be fine as-is."
)
            with gr.Row():
                model_name = gr.Dropdown(
                    choices=available_models,
                    label="Choose a model",
                    value=model_tag,
                )
                load_model_button = gr.Button(
                    "Load Model",
                    variant="secondary",
                )
                num_beams = gr.Radio(
                    choices=[4, 8, 12, 16],
                    label="Number of Beams",
                    value=8,
                )
            with gr.Row():
                no_repeat_ngram_size = gr.Radio(
                    choices=[1, 2, 3, 4],
                    label="no repeat ngram size",
                    value=2,
                )
                length_penalty = gr.Slider(
                    minimum=0.5,
                    maximum=1.0,
                    label="length penalty",
                    value=0.8,
                    step=0.1,
                )
gr.Markdown("---")
with gr.Column():
gr.Markdown("## About")
gr.Markdown(
"[This model](https://huggingface.co/postbot/distilgpt2-emailgen) is a fine-tuned version of distilgpt2 on a dataset of 50k emails sourced from the internet, including the classic `aeslc` dataset.\n\nCheck out the model card for details on notebook & command line usage."
)
gr.Markdown(
"The intended use of this model is to provide suggestions to _auto-complete_ the rest of your email. Said another way, it should serve as a **tool to write predictable emails faster**. It is not intended to write entire emails from scratch; at least **some input** is required to guide the direction of the model.\n\nPlease verify any suggestions by the model for A) False claims and B) negation statements **before** accepting/sending something."
)
gr.Markdown("---")

        clear_button.click(
            fn=clear,
            inputs=[prompt_text],
            outputs=[prompt_text],
        )
        generate_button.click(
            fn=generate_text,
            inputs=[
                prompt_text,
                num_gen_tokens,
                num_beams,
                no_repeat_ngram_size,
                length_penalty,
            ],
            outputs=[generated_email, email_link],
        )
        load_model_button.click(
            fn=load_emailgen_model,
            inputs=[model_name],
            outputs=[],
        )

    demo.launch(
        enable_queue=True,
        share=True,  # for local testing
    )