"""
app.py
"""
import gradio as gr
import spaces
import torch
from transformers import (
LlavaNextProcessor,
LlavaNextForConditionalGeneration
)
# Load model and processor
model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
model.to("cuda:0")
processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
@spaces.GPU(duration=240)
def generate_caption(image):
    """Generate a poem inspired by *image*.

    Args:
        image: PIL image supplied by the Gradio ``Image`` component.

    Returns:
        str: the generated poem, stripped of the prompt and of special
        tokens such as the trailing ``</s>``.
    """
    prompt = "[INST] <image>\nWrite a poem about this picture [/INST]"
    # Pass text/images as keyword arguments: the positional parameter order
    # differs between transformers versions for LLaVA-Next processors, so
    # positional calls can silently swap the two inputs.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")

    # Autoregressively complete the prompt.
    output = model.generate(**inputs, max_new_tokens=200)

    # Slice off the prompt tokens and skip special tokens so that only the
    # poem text reaches the UI (otherwise "</s>" appears in the output).
    prompt_len = inputs["input_ids"].shape[1]
    decoded_text = processor.batch_decode(
        output[:, prompt_len:], skip_special_tokens=True
    )[0]
    return decoded_text.strip()
# ---------------------------------------------------------------------------
# Gradio UI: one image in, one poem out.
# ---------------------------------------------------------------------------
description = (
    "Enter an image, and receive an inspired poem.\n"
    "This is a demo of [`llava-hf/llava-v1.6-mistral-7b-hf`]"
    "(https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) "
    "hosted with ZeroGPU."
)

# Build the components up front so the Interface call stays flat.
image_input = gr.Image(type="pil", label="Upload Image")
poem_output = gr.Textbox(label="Generated Poem")

iface = gr.Interface(
    fn=generate_caption,
    inputs=image_input,
    outputs=poem_output,
    title="The Poet",
    description=description,
)

# Start serving the app.
iface.launch()