# RADIOCAP13 – HuggingFace Space

Below is a complete multi-file project layout for deploying your image-captioning model as a HuggingFace Space. You can copy/paste these files into your repository.
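For orientation, the repository layout implied by the files below would look roughly like this (`example.jpg` is the optional sample image mentioned later):

```text
RADIOCAP13/
├── app.py               # Gradio UI + model code
├── requirements.txt     # Python dependencies
├── README.md            # Space metadata + documentation
├── vocab.json           # tokenizer vocabulary (word → index)
├── pytorch_model.bin    # trained BiasDecoder weights
└── example.jpg          # optional sample image
```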
**app.py**

```python
import gradio as gr
import torch
from transformers import ViTModel
from PIL import Image
from torchvision import transforms
import json

IMG_SIZE = 224
SEQ_LEN = 32
VOCAB_SIZE = 75460

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# NOTE: no normalization is applied here; keep this identical to the
# preprocessing used at training time.
transform = transforms.Compose([
    transforms.Resize((IMG_SIZE, IMG_SIZE)),
    transforms.ToTensor(),
])


def preprocess_image(img):
    if img is None:
        raise ValueError("Image is None")
    if not isinstance(img, Image.Image):
        img = Image.fromarray(img)
    if img.mode != "RGB":
        img = img.convert("RGB")
    return transform(img)


class SimpleTokenizer:
    def __init__(self, word2idx=None):
        self.word2idx = word2idx or {}
        self.idx2word = {v: k for k, v in self.word2idx.items()}

    @classmethod
    def load(cls, path):
        with open(f"{path}/vocab.json", "r") as f:
            word2idx = json.load(f)
        return cls(word2idx)


class BiasDecoder(torch.nn.Module):
    def __init__(self, feature_dim=768, vocab_size=VOCAB_SIZE):
        super().__init__()
        self.token_emb = torch.nn.Embedding(vocab_size, feature_dim)
        self.pos_emb = torch.nn.Embedding(SEQ_LEN - 1, feature_dim)
        self.final_layer = torch.nn.Linear(feature_dim, vocab_size)

    def forward(self, img_feat, target_seq):
        x = self.token_emb(target_seq)
        # Clamp so positions beyond the embedding table reuse the last index.
        pos = torch.arange(x.size(1), device=x.device).clamp(max=self.pos_emb.num_embeddings - 1)
        x = x + self.pos_emb(pos)
        # Condition every position on the pooled ViT image feature.
        x = x + img_feat.unsqueeze(1)
        return self.final_layer(x)


# Load models
decoder = BiasDecoder().to(device)
decoder.load_state_dict(torch.load("pytorch_model.bin", map_location=device))
decoder.eval()

vit = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k").to(device)
vit.eval()

tokenizer = SimpleTokenizer.load("./")
pad_idx = tokenizer.word2idx["<PAD>"]
eos_idx = tokenizer.word2idx["<EOS>"]


@torch.no_grad()
def generate_caption(img):
    img_tensor = preprocess_image(img).unsqueeze(0).to(device)
    img_feat = vit(pixel_values=img_tensor).pooler_output

    # Beam search over the decoder's next-token distribution.
    beams = [([tokenizer.word2idx["<SOS>"]], 0.0)]
    beam_size = 3
    for _ in range(SEQ_LEN - 1):
        candidates = []
        for seq, score in beams:
            inp = torch.tensor(seq + [pad_idx] * (SEQ_LEN - len(seq)), device=device).unsqueeze(0)
            logits = decoder(img_feat, inp)
            probs = torch.nn.functional.log_softmax(logits[0, len(seq) - 1], dim=-1)
            top_p, top_i = torch.topk(probs, beam_size)
            for i in range(beam_size):
                candidates.append((seq + [top_i[i].item()], score + top_p[i].item()))
        beams = sorted(candidates, key=lambda x: x[1], reverse=True)[:beam_size]
        if all(s[-1] == eos_idx for s, _ in beams):
            break

    # Drop the <SOS> prefix and any padding/end-of-sequence tokens.
    words = [
        tokenizer.idx2word.get(i, "<UNK>")
        for i in beams[0][0][1:]
        if i not in (pad_idx, eos_idx)
    ]
    return " ".join(words)


with gr.Blocks() as demo:
    gr.Markdown("# RADIOCAP13 – Image Captioning Demo")
    img_in = gr.Image(type="pil", label="Upload an Image")
    out = gr.Textbox(label="Generated Caption")
    btn = gr.Button("Generate Caption")
    btn.click(generate_caption, inputs=img_in, outputs=out)

if __name__ == "__main__":
    demo.launch()
```

**requirements.txt**

```text
gradio
transformers
torch
torchvision
Pillow
```

**README.md**

# RADIOCAP13 – Visual Captioning Model

This Space runs a custom image captioning pipeline using:

- ViT Base (Google)
- Custom BiasDecoder
- Custom 75k vocabulary

## Running Locally
```bash
pip install -r requirements.txt
python app.py
```
## Usage

Upload an image and the model will generate a caption using beam search.

## Files

- `app.py` – Gradio interface + model code
- `vocab.json` – tokenizer vocabulary
- `pytorch_model.bin` – trained decoder weights
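`SimpleTokenizer.load` in `app.py` treats `vocab.json` as a flat word-to-index mapping and looks up `<PAD>`, `<SOS>`, and `<EOS>` directly, so those special tokens must be present. A purely illustrative fragment (the real file has roughly 75k entries with whatever indices were used in training):

```json
{
  "<PAD>": 0,
  "<SOS>": 1,
  "<EOS>": 2,
  "<UNK>": 3,
  "the": 4,
  "lungs": 5,
  "are": 6,
  "clear": 7
}
```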
If you'd like, I can add:
- example.jpg
- automatic CPU/GPU selector badge
- HuggingFace Space metadata
- progress bars / image preview UI

**Space Metadata (README.md front matter)**

Hugging Face Spaces read their configuration from a YAML front-matter block at the very top of `README.md` (above the content shown earlier), not from a separate `.huggingface.yaml` file, and GPU hardware is assigned in the Space settings (optionally hinted via `suggested_hardware`) rather than granted through metadata:

```yaml
---
title: RADIOCAP13
sdk: gradio
sdk_version: "4.0"        # pin to the exact Gradio release you target
python_version: "3.10"
app_file: app.py
short_description: Image captioning with ViT + BiasDecoder
tags:
  - image-captioning
  - vision
  - vit
  - pytorch
  - gradio
---
```

**Example Image (example.jpg)**
Include a placeholder for now; `example.jpg` can be any sample image in your repo, and you can swap in a real one later. A sketch of surfacing it as a clickable example in the Gradio UI follows.
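A minimal sketch, assuming `example.jpg` sits next to `app.py`; `caption_stub` stands in for the real `generate_caption` from `app.py`:

```python
import gradio as gr


def caption_stub(img):
    # Stand-in for generate_caption from app.py.
    return "a caption would appear here"


with gr.Blocks() as demo:
    img_in = gr.Image(type="pil", label="Upload an Image")
    out = gr.Textbox(label="Generated Caption")
    btn = gr.Button("Generate Caption")
    btn.click(caption_stub, inputs=img_in, outputs=out)

    # Clicking the thumbnail loads example.jpg into the image input.
    gr.Examples(examples=["example.jpg"], inputs=img_in)

if __name__ == "__main__":
    demo.launch()
```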
**Enhanced UI with GPU Status + Progress Bar**
Below is an improved Gradio UI you can swap into `app.py`.
```python
with gr.Blocks() as demo:
    gr.Markdown("# RADIOCAP13 – Image Captioning Demo")
    gr.Markdown(f"**Device:** {'GPU' if torch.cuda.is_available() else 'CPU'}")

    with gr.Row():
        img_in = gr.Image(type="pil", label="Upload an Image")
        out = gr.Textbox(label="Generated Caption")

    btn = gr.Button("Generate Caption")
    progress = gr.Markdown("Ready.")

    def wrapped_generate(img):
        # Yielding intermediate results lets Gradio stream a status update
        # to the progress Markdown before the caption is ready.
        yield "", "Generating… please wait"
        caption = generate_caption(img)
        yield caption, "Done."

    btn.click(wrapped_generate, inputs=img_in, outputs=[out, progress])
```

All enhancements added:

- Space metadata (README.md front matter)
- GPU badge / indicator
- Progress/status messages
- Support for an example image
- Fully structured Space project
If you want, I can also add:

- Auto-download model weights from the HuggingFace Hub (see the sketch below)
- Separate tokenizer/model modules
- Image thumbnails / gallery samples
- A beam size slider, temperature control, etc.
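For the first item, a minimal sketch of fetching the decoder weights from the Hub at startup instead of committing `pytorch_model.bin` to the Space repo; `your-username/RADIOCAP13` is a placeholder for wherever the weights are actually hosted (`huggingface_hub` is already pulled in as a dependency of `transformers`):

```python
import torch
from huggingface_hub import hf_hub_download

# Download (and cache) the decoder weights on first run.
# "your-username/RADIOCAP13" is a placeholder repo id.
weights_path = hf_hub_download(
    repo_id="your-username/RADIOCAP13",
    filename="pytorch_model.bin",
)

state_dict = torch.load(weights_path, map_location="cpu")
# decoder.load_state_dict(state_dict)  # then load into BiasDecoder as in app.py
```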
Just say "add more" or tell me specific features!