File size: 1,737 Bytes
e0bc5de
 
 
 
 
 
13bbeb4
e0bc5de
 
13bbeb4
e0bc5de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
  
def get_pipe(name):
    """Build a PyTorch summarization pipeline for the given HF model name.

    Args:
        name: Hugging Face model hub identifier (e.g. 'bigscience/T0_3B').

    Returns:
        A transformers `pipeline` configured for the "summarization" task,
        using the model's own tokenizer and the PyTorch backend.
    """
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSeq2SeqLM.from_pretrained(name)
    # NOTE: original had a literal tab on this call's continuation line;
    # normalized to 4-space indentation per PEP 8.
    pipe = pipeline(
        "summarization", model=model, tokenizer=tokenizer, framework="pt"
    )
    return pipe
# Checkpoints served by this demo. The larger variants
# ('bigscience/T0', 'bigscience/T0p', 'bigscience/T0pp') can be appended
# here when enough memory is available.
model_names = ['bigscience/T0_3B']
# Eagerly load one pipeline per checkpoint at startup.
pipes = list(map(get_pipe, model_names))
def _fn(text, do_sample, min_length, max_length, temperature, top_p, pipe):
    out = pipe(
        text,
        do_sample=do_sample,
        min_length=min_length,
        max_length=max_length,
        temperature=temperature,
        top_p=top_p,
        truncation=True,
    )
    return out[0]["summary_text"]
def fn(*args):
    """Run every loaded pipeline on the same inputs; one summary per model."""
    outputs = []
    for current_pipe in pipes:
        outputs.append(_fn(*args, pipe=current_pipe))
    return outputs
import gradio as gr

# Shared input widgets: the text to summarize plus the generation knobs
# (these map positionally onto _fn's parameters via fn).
# NOTE(review): the gr.inputs/gr.outputs namespaces are the legacy gradio
# 2.x API — migrating to gr.Textbox/gr.Slider would require gradio 3+.
input_widgets = [
    gr.inputs.Textbox(lines=10, label="input text"),
    gr.inputs.Checkbox(label="do_sample", default=True),
    gr.inputs.Slider(1, 128, step=1, default=64, label="min_length"),
    gr.inputs.Slider(1, 128, step=1, default=64, label="max_length"),
    gr.inputs.Slider(0.0, 1.0, step=0.1, default=1, label="temperature"),
    gr.inputs.Slider(0.0, 1.0, step=0.1, default=1, label="top_p"),
]

# One output text box per loaded model, labeled with its checkpoint name.
output_widgets = [
    gr.outputs.Textbox(label=f"output by {name}") for name in model_names
]

interface = gr.Interface(
    fn,
    inputs=input_widgets,
    outputs=output_widgets,
    title="T0 playground",
    description="""
    This is a playground for playing around with T0 models.
    See https://huggingface.co/bigscience/T0 for more details
""",
)
interface.launch()