JenkinsGage committed on
Commit
c9b941a
1 Parent(s): da53129

Delete GradioApp.py

Browse files
Files changed (1) hide show
  1. GradioApp.py +0 -36
GradioApp.py DELETED
@@ -1,36 +0,0 @@
1
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


# Load the T5-based paraphraser and its tokenizer, caching the downloaded
# weights under ./Models so later launches skip the network fetch.
tokenizer = AutoTokenizer.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')
model = AutoModelForSeq2SeqLM.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')
# In-place dynamic int8 quantization of the Linear layers — shrinks the model
# and speeds up CPU inference (presumably at a small quality cost — TODO confirm).
torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
9
-
10
def paraphrase(model, text, max_length=128, num_return_sequences=5, num_beams=25, temperature=0.7):
    """Generate paraphrases of ``text`` with the given seq2seq model.

    Prefixes the input with ``'paraphrase: '`` (the prompt format the model
    was trained on), runs beam search, and returns a list of
    ``num_return_sequences`` decoded candidate strings.

    NOTE(review): ``do_sample`` is not enabled, so with pure beam search
    transformers ignores ``temperature`` — confirm this is intended.
    """
    prompt = f'paraphrase: {text}'
    encoded = tokenizer(
        prompt,
        return_tensors="pt",
        padding="longest",
        max_length=max_length,
        truncation=True,
    )

    generated = model.generate(
        encoded.input_ids,
        temperature=temperature,
        repetition_penalty=1.5,
        num_return_sequences=num_return_sequences,
        no_repeat_ngram_size=5,
        num_beams=num_beams,
        max_length=max_length,
    )

    return tokenizer.batch_decode(generated, skip_special_tokens=True)
26
-
27
def fn(text, results_num=5, beams_num=25, temperature=0.7):
    """Gradio callback: return the paraphrases of ``text`` as one newline-joined string."""
    variants = paraphrase(
        model,
        text,
        num_return_sequences=results_num,
        num_beams=beams_num,
        temperature=temperature,
    )
    return '\n'.join(variants)
29
-
30
# Wire the callback into a simple Gradio form: a text box plus sliders for
# number of results, number of beams, and temperature; output is plain text.
demo = gr.Interface(
    fn=fn,
    inputs=[
        gr.Textbox(lines=3, placeholder='Enter Text To Paraphrase'),
        gr.Slider(minimum=1, maximum=10, step=1, value=5),
        gr.Slider(minimum=1, maximum=50, step=1, value=25),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.7),
    ],
    outputs=['text'],
)

# share=True exposes a public tunnel URL in addition to the local server.
demo.launch(share=True)