ColeGuion committed on
Commit
d10fd10
1 Parent(s): fa07e23

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -7
app.py CHANGED
@@ -1,11 +1,45 @@
1
  import gradio as gr
 
2
 
3
- def respond(system_message, max_length, min_length, max_new_tokens, num_beams, temperature, top_p):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  # Your response generation logic here
5
  #response = correct_text(message, max_length, max_new_tokens, min_length, num_beams, temperature, top_p)
6
  #yield response
7
  #return f"System message: {system_message}, Max Length: {max_length}, Min Length: {min_length}, Max new tokens: {max_new_tokens}, Num Beams: {num_beams}, Temperature: {temperature}, Top-p: {top_p}"
8
- return message
 
9
 
10
 
11
  # Create the Gradio interface
@@ -17,7 +51,6 @@ with gr.Blocks() as demo:
17
  prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
18
  output_box = gr.Textbox()
19
  submitBtn = gr.Button("Submit")
20
-
21
 
22
  with gr.Accordion("Generation Parameters:", open=False):
23
  max_length = gr.Slider(minimum=1, maximum=256, value=80, step=1, label="Max Length")
@@ -27,11 +60,11 @@ with gr.Blocks() as demo:
27
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
28
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
29
 
30
- #show_top_p = gr.Checkbox(value=True, label="Show Top-p Slider")
31
- #top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", visible=True)
32
- #show_top_p.change(lambda show: gr.update(visible=show), show_top_p, top_p_slider)
33
 
34
- submitBtn.click(respond, [prompt_box, max_length, min_length, max_tokens, num_beams, temperature, top_p], output_box)
35
 
36
 
37
 
 
1
  import gradio as gr
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
 
4
+ # Load the model and tokenizer
5
+ model = AutoModelForSeq2SeqLM.from_pretrained("vennify/t5-base-grammar-correction")
6
+ tokenizer = AutoTokenizer.from_pretrained("vennify/t5-base-grammar-correction")
7
+
8
def correct_text(text, max_length, min_length, max_new_tokens, num_beams, temperature, top_p):
    """Run the grammar-correction model on *text* and return the fixed sentence.

    Args:
        text: Input sentence to correct.
        max_length: Upper bound on total output length; only used when
            ``max_new_tokens`` is not positive.
        min_length: Lower bound on output length.
        max_new_tokens: If > 0, bounds only the newly generated tokens and
            replaces ``max_length``.
        num_beams: Beam-search width.
        temperature: Sampling temperature. NOTE(review): temperature/top_p
            only take effect when ``do_sample=True`` is passed — confirm
            whether sampling was intended here.
        top_p: Nucleus-sampling threshold (same caveat as temperature).

    Returns:
        The decoded, corrected text (special tokens stripped).
    """
    # The t5-base-grammar-correction checkpoint expects a task prefix.
    inputs = tokenizer.encode("grammar: " + text, return_tensors="pt")

    # Shared generation settings for both length-control modes.
    gen_kwargs = {
        "min_length": min_length,
        "num_beams": num_beams,
        "temperature": temperature,
        "top_p": top_p,
        "early_stopping": True,
    }

    # Fix: pass exactly ONE of max_length / max_new_tokens. The original code
    # passed both when max_new_tokens > 0, which makes transformers ignore
    # max_length and emit a warning.
    if max_new_tokens > 0:
        gen_kwargs["max_new_tokens"] = max_new_tokens
    else:
        gen_kwargs["max_length"] = max_length

    outputs = model.generate(inputs, **gen_kwargs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
35
+
36
def respond(prompt, max_length, min_length, max_new_tokens, num_beams, temperature, top_p):
    """Gradio callback: currently a placeholder that echoes the prompt.

    All generation parameters are accepted so the UI wiring stays valid, but
    they are not used yet.

    NOTE(review): the previously commented-out call passed arguments as
    (prompt, max_length, max_new_tokens, min_length, ...), which swaps
    min_length and max_new_tokens relative to correct_text's signature.
    When enabling real generation, use the signature order:
        return correct_text(prompt, max_length, min_length, max_new_tokens,
                            num_beams, temperature, top_p)
    """
    return prompt
43
 
44
 
45
  # Create the Gradio interface
 
51
  prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
52
  output_box = gr.Textbox()
53
  submitBtn = gr.Button("Submit")
 
54
 
55
  with gr.Accordion("Generation Parameters:", open=False):
56
  max_length = gr.Slider(minimum=1, maximum=256, value=80, step=1, label="Max Length")
 
60
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
61
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
62
 
63
+ show_top_p = gr.Checkbox(value=True, label="Show Top-p Slider")
64
+ top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", visible=True)
65
+ show_top_p.change(lambda show: gr.update(visible=show), show_top_p, top_p_slider)
66
 
67
+ submitBtn.click(correct_text, [prompt_box, max_length, min_length, max_tokens, num_beams, temperature, top_p], output_box)
68
 
69
 
70