import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# ---------- MODEL SETUP ----------
model_name = "ibm-granite/granite-3.3-2b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
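# Optional (sketch, not used above): load in half precision on a GPU to cut memory
# use and latency. This assumes a CUDA device and the `accelerate` package are
# available, which a CPU-only Space will not have; the default call above loads
# the model on CPU in full precision.
# import torch
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, torch_dtype=torch.float16, device_map="auto"
# )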
# ---------- CLEAN OUTPUT HELPER ----------
def clean_output(full_output, prompt):
    cleaned = full_output.replace(prompt, "")  # Drop the echoed prompt from the decoded text
    return cleaned.strip()  # Trim leading/trailing whitespace
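# Optional alternative (sketch, not wired into the UI below): strip the prompt by
# token count instead of string replacement, which can miss if decoding changes
# whitespace. `output_ids` is one row of model.generate()'s output and
# `prompt_len` is inputs["input_ids"].shape[-1] from the matching tokenizer call.
def clean_output_by_tokens(output_ids, prompt_len):
    return tokenizer.decode(output_ids[prompt_len:], skip_special_tokens=True).strip()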
# ---------- CLASSIFICATION ----------
def classify_text(text):
    prompt = f"Classify this text as SPAM or HAM:\n\n{text}\n\nlabel:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=30)
    raw = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return clean_output(raw, prompt)
# ---------- SUMMARIZATION ----------
def summarize_text(text):
    prompt = f"Summarize the following text:\n\n{text}\n\nSummary:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=200)
    raw = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return clean_output(raw, prompt)
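# Optional alternative (sketch, not wired into the UI below): instruct-tuned
# checkpoints are usually prompted through the tokenizer's chat template rather
# than a raw string. This assumes the Granite tokenizer ships a chat template;
# if it does not, apply_chat_template will raise.
def generate_with_chat_template(user_message, max_new_tokens=200):
    messages = [{"role": "user", "content": user_message}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
    # Decode only the newly generated tokens, skipping the prompt portion
    return tokenizer.decode(
        output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    ).strip()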
# ---------- UI ----------
with gr.Blocks(title="Granite Classification & Summarization") as demo:
gr.Markdown("## 🧠 Text Classification & Summarization using IBM Granite Models")
with gr.Tab("πŸ“Œ Classification"):
inp = gr.Textbox(
label="Enter text for classification",
placeholder="Type your message here...",
lines=5,
)
out = gr.Textbox(
label="Classification Output",
lines=6,
max_lines=20,
interactive=False
)
gr.Button("Classify").click(classify_text, inp, out)
with gr.Tab("πŸ“ Summarization"):
inp2 = gr.Textbox(
label="Enter text to summarize",
placeholder="Paste long text here...",
lines=10,
)
out2 = gr.Textbox(
label="Summary Output",
lines=10,
max_lines=30,
interactive=False
)
gr.Button("Summarize").click(summarize_text, inp2, out2)
demo.launch()