Update app.py
app.py CHANGED
@@ -1,48 +1,29 @@
-from huggingface_hub import InferenceClient
 import gradio as gr
 
-
 
-def generate(prompt, history, temperature, max_new_tokens, top_p, top_k, repetition_penalty=1.0):
-
-
-
-
-
-
-    #temperature = float(temperature)
-    temperature = float(temperature[0]) if isinstance(temperature, list) else float(temperature)
-    if temperature < 1e-2: temperature = 1e-2
-    top_p = float(top_p)
-    top_k = int(top_k)  # Ensure top_k is an integer, as it was being treated like a float
-
-    generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty)  # seed=42,)
 
-    formatted_prompt = "Fix grammatical errors in this sentence: " + prompt
-    print("\nPROMPT: \n\t" + formatted_prompt)
-
-    # Generate text from the HF inference
-    output = client.text_generation(formatted_prompt, **generate_kwargs, details=True, return_full_text=True)
-    #output = ""
-
-    #for response in stream:
-    #    output += response.token.text
-    #    yield output
-    return output
 
 
 
-additional_inputs=[
-    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
-    gr.Slider(label="Max new tokens", value=150, minimum=0, maximum=250, step=64, interactive=True, info="The maximum number of new tokens"),
-    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
-    gr.Slider(label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
-]
-
-gr.ChatInterface(
-    fn=generate,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    additional_inputs=additional_inputs,
-    title="My Grammarly Space",
-    concurrency_limit=20,
-).launch(show_api=False)
+#from huggingface_hub import InferenceClient
 import gradio as gr
+from transformers import pipeline
 
+# Load the model and tokenizer using the pipeline API
+model_pipeline = pipeline("text-generation", model="grammarly/coedit-large")
 
+def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
+    # Generate text using the model
+    output = model_pipeline(input_text, temperature=temperature, max_length=max_new_tokens + len(input_text.split()), top_p=top_p, top_k=top_k, return_full_text=False)
+    # Extract and return the generated text
+    return output[0]['generated_text']
 
 
 
+# Define your Gradio interface
+iface = gr.Interface(
+    fn=generate_text,
+    inputs=[
+        gr.inputs.Textbox(lines=2, label="Input Text"),
+        gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.9, label="Temperature"),
+        gr.inputs.Slider(minimum=1, maximum=100, step=1, default=50, label="Max New Tokens"),
+        gr.inputs.Slider(minimum=0, maximum=1, step=0.01, default=0.95, label="Top-p"),
+        gr.inputs.Slider(minimum=0, maximum=100, step=1, default=50, label="Top-k")
+    ],
+    outputs=[gr.outputs.Textbox(label="Generated Text")],
+    title="Text Generation with Grammarly Model"
+)
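
Note on the new version: grammarly/coedit-large is a Flan-T5 based sequence-to-sequence model, so loading it under the causal "text-generation" pipeline task is a mismatch; "text2text-generation" is the task that loads it with a seq2seq model class. The committed code also drops the instruction prefix ("Fix grammatical errors in this sentence: ") that the removed version prepended and that CoEdIT is trained to expect, approximates token counts with len(input_text.split()) even though split() counts whitespace words rather than tokens, and passes sampling parameters without do_sample=True, which leaves them inert. A minimal sketch of the generation function with those points addressed, keeping the commit's parameter defaults:

from transformers import pipeline

# CoEdIT is a seq2seq (Flan-T5) model, so load it under the matching task.
model_pipeline = pipeline("text2text-generation", model="grammarly/coedit-large")

def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
    # CoEdIT is trained on instruction-prefixed inputs; reuse the prefix
    # from the removed version of this file.
    prompt = "Fix grammatical errors in this sentence: " + input_text
    output = model_pipeline(
        prompt,
        do_sample=True,                 # without this, temperature/top_p/top_k are ignored
        temperature=temperature,
        max_new_tokens=max_new_tokens,  # counts generated tokens; no word-count arithmetic
        top_p=top_p,
        top_k=int(top_k),
    )
    return output[0]["generated_text"]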
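
A separate note on the interface block: gr.inputs / gr.outputs and the default= keyword are the legacy Gradio API (deprecated through 3.x and removed in 4.x), and the new file never calls launch(), so nothing starts the server. A sketch of the same interface with current Gradio components, keeping the commit's labels and defaults, with generate_text as sketched above:

import gradio as gr

# Same interface with current components: gr.Textbox / gr.Slider at the top
# level, and value= where the legacy gr.inputs API took default=.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Text"),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.9, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, step=1, value=50, label="Max New Tokens"),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.95, label="Top-p"),
        gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with Grammarly Model",
)

iface.launch(show_api=False)  # launch() starts the server; show_api=False is carried over from the removed version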