#!/usr/bin/env python

import gradio as gr
import PIL.Image
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

DESCRIPTION = "# Capabara - Image Captioning with GIT"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_id = "microsoft/git-large-coco"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(device)


@spaces.GPU
def run(image: PIL.Image.Image) -> str:
    # Preprocess the image and generate a caption with beam search.
    inputs = processor(images=image, return_tensors="pt").to(device)
    generated_ids = model.generate(
        pixel_values=inputs.pixel_values,
        num_beams=3,
        max_length=20,
        min_length=5,
    )
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]


with gr.Blocks(
    title="Capabara - Image Captioning with GIT",
    theme=gr.themes.Default(font=[gr.themes.GoogleFont("Work Sans"), "sans-serif"]),
    css_paths="static/css/style.css",
) as demo:
    # Static header: logo image and title HTML.
    gr.Image(
        elem_id="capa-logo",
        value="./static/assets/TM_Capabara_210622.png",
        container=False,
        height=139,
        width=176,
        show_download_button=False,
        show_fullscreen_button=False,
    )
    gr.HTML(
        elem_id="capa-title",
        value="""
        This project uses code from Hysts. Thank you for your contributions!
        """,
    )
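
    # The section above only builds the header; it ends without wiring the
    # captioning UI or launching the app. The block below is a minimal sketch of
    # that wiring, assuming a single PIL image input, a caption textbox, and a
    # button bound to `run`. Component names and layout are assumptions, not the
    # original design.
    with gr.Row():
        input_image = gr.Image(label="Input image", type="pil")
        output_caption = gr.Textbox(label="Caption")
    caption_button = gr.Button("Generate caption")
    caption_button.click(fn=run, inputs=input_image, outputs=output_caption)

if __name__ == "__main__":
    demo.launch()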