from fastapi import FastAPI, Form
from fastapi.responses import HTMLResponse
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import html
import torch

app = FastAPI()

MODEL_ID = "ibm-granite/granite-4.0-tiny-preview"

# Load tokenizer and model. float16 is used on GPU; "auto" lets
# transformers pick an appropriate dtype on CPU.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else "auto",
    device_map="auto",
)

# Use a pipeline for easier text generation. Do not pass a device
# argument here: placement is already handled by device_map="auto".
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


@app.get("/", response_class=HTMLResponse)
def index():
    # The textarea must be named "text" so the POST body matches the
    # Form(...) parameter of the /summarize endpoint below.
    return """
    <html>
    <head><title>Granite Tiny Summarizer</title></head>
    <body>
    <h1>Granite 4.0 Tiny Summarization Demo</h1>
    <form action="/summarize" method="post">
    <textarea name="text" rows="10" cols="80" placeholder="Paste text to summarize..."></textarea><br>
    <button type="submit">Summarize</button>
    </form>
    </body>
    </html>
    """
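# Optional: Granite 4.0 Tiny is instruction-tuned, so instead of the raw
# "Text: ... Summary:" prompt used below you could build the prompt with the
# tokenizer's chat template. A minimal sketch, assuming the tokenizer bundles
# a chat template (uncomment to try):
#
# def build_chat_prompt(text: str) -> str:
#     messages = [
#         {"role": "user",
#          "content": f"Summarize the following text in 2-4 sentences:\n\n{text}"}
#     ]
#     return tokenizer.apply_chat_template(
#         messages, tokenize=False, add_generation_prompt=True
#     )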
""" @app.post("/summarize", response_class=HTMLResponse) def summarize(text: str = Form(...)): prompt = ( "Below is a passage of text. Please provide a concise summary in 2-4 sentences.\n\n" f"Text:\n{text.strip()}\n\nSummary:" ) outputs = pipe( prompt, max_new_tokens=150, do_sample=True, temperature=0.7, top_p=0.95, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id ) output_text = outputs[0]['generated_text'] summary = output_text.split("Summary:")[-1].strip() return f"

Summary

{summary}
Back"