import gradio as gr
import spaces
import torch
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

# Load the fine-tuned PaliGemma captioning model and its processor once at startup.
model = PaliGemmaForConditionalGeneration.from_pretrained(
    "gokaygokay/sd3-long-captioner"
).to("cuda").eval()
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")


@spaces.GPU
def create_captions_rich(image):
    # PaliGemma uses short task prefixes; "caption en" requests an English caption.
    prompt = "caption en"
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
        # Drop the prompt tokens so only the newly generated caption is decoded.
        generation = generation[0][input_len:]
        decoded = processor.decode(generation, skip_special_tokens=True)
    return decoded


css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css) as demo:
    # The original snippet breaks off after gr.HTML("; the UI below is a minimal
    # completion so the app runs end to end: image input, button, caption output.
    gr.HTML("<h1>PaliGemma Long Captioner (sd3-long-captioner)</h1>")
    with gr.Row():
        input_image = gr.Image(type="pil", label="Input Image")
        output_caption = gr.Textbox(label="Caption")
    caption_button = gr.Button("Generate Caption")
    caption_button.click(fn=create_captions_rich, inputs=input_image, outputs=output_caption)

demo.launch()
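
# A minimal usage sketch (not part of the original app): call the captioning
# function directly with a PIL image, assuming Pillow is installed and a CUDA
# GPU is available. Run it before demo.launch() or in a separate session.
#
#   from PIL import Image
#   caption = create_captions_rich(Image.open("example.jpg"))
#   print(caption)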