import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import re
# Initialize the model
model_id = "Detsutut/Igea-350M-v0.0.1"
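# Load the model with 8-bit quantization to fit the limited free-tier hardware;
# this assumes bitsandbytes and accelerate are available in the Space environment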
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_8bit=True, device_map='auto')
tokenizer = AutoTokenizer.from_pretrained(model_id)
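# Wrap the quantized model and its tokenizer in a text-generation pipeline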
gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)
# Define the function to generate text
def generate_text(input_text, max_new_tokens, temperature, top_p, split_output):
    # Quick mode: cap the generation length and fix the nucleus-sampling threshold
    if split_output:
        max_new_tokens = 30
        top_p = 0.95
    output = gen_pipeline(
        input_text,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        return_full_text=False
    )
    generated_text = output[0]['generated_text']
    if split_output:
        # Keep only the first sentence of the generated continuation
        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', generated_text)
        if sentences:
            generated_text = sentences[0]
    # Echo the prompt and highlight the newly generated text in blue
    return f"<span>{input_text}</span><b style='color: blue;'>{generated_text}</b>"
# Create the Gradio interface
input_text = gr.Textbox(lines=2, placeholder="Enter your text here...", label="Input Text")
max_new_tokens = gr.Slider(minimum=1, maximum=200, value=30, step=1, label="Max New Tokens")
temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p")
split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
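# Assemble the page layout; sampling controls live in a collapsible "Advanced Options" accordion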
with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
    gr.Markdown("# Igea Text Generation Interface ⚕️🩺")
    gr.Markdown("⚠️ 🐢💬 This model runs on a **hardware-limited**, free-tier HuggingFace space, resulting in a **low output token throughput** (approx. 1 token/s)")
    input_text.render()
    with gr.Accordion("Advanced Options", open=False):
        max_new_tokens.render()
        temperature.render()
        top_p.render()
        split_output.render()
    output = gr.HTML(label="Generated Text", elem_id="outbox")
    btn = gr.Button("Generate")
    btn.click(generate_text, [input_text, max_new_tokens, temperature, top_p, split_output], output)
# Launch the interface
if __name__ == "__main__":
    iface.launch(inline=True)