Harper - Final Assignment submission
Files changed:
- .gitattributes +15 -0
- Photos/20240325_133025.jpg +3 -0
- Photos/20250106_075208.jpg +3 -0
- Photos/20250805_161445.jpg +3 -0
- Photos/20250808_131957.jpg +3 -0
- Photos/20250808_164442.jpg +3 -0
- Photos/20250808_183114.jpg +3 -0
- Photos/20250813_113228.jpg +3 -0
- Photos/20250918_170635.jpg +3 -0
- Photos/20250920_114728.jpg +3 -0
- Photos/20251101_155708.jpg +3 -0
- Photos/20251106_170359.jpg +3 -0
- Photos/20251106_192036.jpg +3 -0
- Photos/20251107_100830.jpg +3 -0
- Photos/20251107_101822.jpg +3 -0
- Photos/20251107_150015.jpg +3 -0
- app.py +123 -33
- requirements.txt +7 -3
.gitattributes
CHANGED
@@ -56,3 +56,18 @@ images/6.png filter=lfs diff=lfs merge=lfs -text
 images/7.png filter=lfs diff=lfs merge=lfs -text
 images/8.png filter=lfs diff=lfs merge=lfs -text
 images/9.png filter=lfs diff=lfs merge=lfs -text
+Photos/20240325_133025.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250106_075208.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250805_161445.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250808_131957.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250808_164442.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250808_183114.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250813_113228.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250918_170635.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20250920_114728.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251101_155708.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251106_170359.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251106_192036.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251107_100830.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251107_101822.jpg filter=lfs diff=lfs merge=lfs -text
+Photos/20251107_150015.jpg filter=lfs diff=lfs merge=lfs -text
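(For reference: attribute lines like these are normally produced by a tracking command such as git lfs track "Photos/*.jpg", which would add a single wildcard pattern covering the whole folder; per-file entries like the fifteen above are what an upload through the Hugging Face web interface typically writes.)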
Photos/20240325_133025.jpg ADDED (Git LFS)
Photos/20250106_075208.jpg ADDED (Git LFS)
Photos/20250805_161445.jpg ADDED (Git LFS)
Photos/20250808_131957.jpg ADDED (Git LFS)
Photos/20250808_164442.jpg ADDED (Git LFS)
Photos/20250808_183114.jpg ADDED (Git LFS)
Photos/20250813_113228.jpg ADDED (Git LFS)
Photos/20250918_170635.jpg ADDED (Git LFS)
Photos/20250920_114728.jpg ADDED (Git LFS)
Photos/20251101_155708.jpg ADDED (Git LFS)
Photos/20251106_170359.jpg ADDED (Git LFS)
Photos/20251106_192036.jpg ADDED (Git LFS)
Photos/20251107_100830.jpg ADDED (Git LFS)
Photos/20251107_101822.jpg ADDED (Git LFS)
Photos/20251107_150015.jpg ADDED (Git LFS)
app.py
CHANGED
(Deleted lines in this hunk are mostly truncated in the source rendering; elided runs appear as "-…".)

@@ -1,41 +1,131 @@
+# Caption Generator w/ English-to-Spanish Translation
+# A. Harper | ARIN 460 | December 2025
+
+# Load into Hugging Face Space (using the Gradio framework)
+# Include requirements.txt file (list: gradio, pandas, torch, sentencepiece, datasets, Pillow, transformers)
+
+# To run, navigate to the App tab. Click the red Generate button.
+# The app will randomly select an image, generate an (English) caption,
+# then generate the Spanish translation.
+
+
+# Import gradio - the app framework
 import gradio as gr
-from transformers import Blip2Processor, Blip2ForConditionalGeneration
-import torch
 
-…
+
+# Two image datasources are available.
+# Minor adjustments (add/remove # to deactivate/activate) switch between datasources.
+# AA comments refer to images in the DataFrame, from the COCO database.
+# BB comments refer to images stored in the local Gradio app folder.
+
+
+# Import os and random to support random selection of an image (from the folder)
+import os
+import random
+
+
+# Import pandas, datasets, transformers, torch
+import pandas as pd
+
+from datasets import load_dataset
+
+from transformers import (
+    BlipProcessor,
+    BlipForConditionalGeneration,
+    AutoTokenizer,
+    AutoModelForSeq2SeqLM,
+    MarianMTModel,
+    MarianTokenizer
 )
 
-…
+from PIL import Image
+import torch
+
+
+# AA: Load dataset (henryscheible/coco_val2014_tiny). Initial image source.
+dataset = load_dataset("henryscheible/coco_val2014_tiny", split="validation")
+
+# Reduce the dataset to 20 rows, i.e., get a sample
+samples = dataset.select(range(20))
+
+# Convert to a DataFrame
+df = pd.DataFrame(samples)
+
+
+# BB: Point to the Photos folder
+IMAGE_FOLDER = "Photos"
+
+image_paths = [
+    os.path.join(IMAGE_FOLDER, f)
+    for f in os.listdir(IMAGE_FOLDER)
+    if f.lower().endswith((".jpg", ".jpeg", ".png"))
+]
+
+
+# Load the image captioning model (Salesforce/blip-image-captioning-large)
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+
+# Load the transformer for translating captions from English to Spanish
+model_name = "Helsinki-NLP/opus-mt-en-es"
+trans_tokenizer = MarianTokenizer.from_pretrained(model_name)
+trans_model = MarianMTModel.from_pretrained(model_name)
+
+
+# Define the captioning function
+def caption_random_image():
+
+    # AA: pick a random row from the DataFrame
+    ##sample = df.sample(1).iloc[0]
+
+    # BB: pick a random image path from the folder
+    img_path = random.choice(image_paths)
+
+    # BB: load the folder image into PIL
+    image = Image.open(img_path).convert("RGB")
 
-…
+    # AA: image from the DataFrame row
+    ##image = sample["image"]
+
+    # Unconditional image captioning
+    inputs = processor(image, return_tensors="pt")
+    out = model.generate(**inputs)
+    caption_eng = processor.decode(out[0], skip_special_tokens=True)
+
+    # Translate the caption from English to Spanish
+    trans_inputs = trans_tokenizer.encode(caption_eng, return_tensors="pt")
+    trans_out = trans_model.generate(trans_inputs)
+    caption_es = trans_tokenizer.decode(trans_out[0], skip_special_tokens=True)
+
+    return image, caption_eng, caption_es
+
+
+demo = gr.Interface(
+    fn=caption_random_image,
+    inputs=None,
+    outputs=[
+        gr.Image(type="pil", label="Random Image"),
+        gr.Textbox(label="Caption (English)"),
+        gr.Textbox(label="Caption (Spanish)")
     ],
-…
-    description="Upload an image and get a caption. Optionally, ask a question about the image."
+    title="Image Captioning (with English-to-Spanish translation)",
+    description="Selects a random image (from either the local folder or the henryscheible/coco data subset); generates a BLIP caption; then translates the (English) caption to Spanish."
 )
 
-…
+
+demo.launch()
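A note on the AA/BB switch in app.py: the two datasources are toggled by commenting lines in and out. A single boolean flag would make the active source explicit. A minimal sketch reusing the names from app.py (USE_FOLDER and pick_image are hypothetical, not part of this commit):

USE_FOLDER = True  # hypothetical flag: True = BB (local Photos folder), False = AA (COCO subset)

def pick_image():
    # Return a PIL image from whichever datasource is active.
    if USE_FOLDER:
        # BB: random file from the Photos folder
        img_path = random.choice(image_paths)
        return Image.open(img_path).convert("RGB")
    # AA: random row from the COCO DataFrame (same access pattern as the commented AA lines)
    sample = df.sample(1).iloc[0]
    return sample["image"]

caption_random_image() could then open with image = pick_image() and drop the commented-out AA lines entirely.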
requirements.txt
CHANGED
@@ -1,4 +1,8 @@
-gradio
-
+gradio
+pandas
 torch
-
+sentencepiece
+datasets
+Pillow
+transformers
+
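With these requirements installed, the caption-then-translate path can be sanity-checked outside the Space. A minimal sketch reusing the checkpoint names from app.py; the photo path is just one of the files committed above, and the torch.no_grad() wrapper is an optional inference-time economy rather than something the committed app uses:

import torch
from PIL import Image
from transformers import (
    BlipProcessor,
    BlipForConditionalGeneration,
    MarianMTModel,
    MarianTokenizer,
)

# Same checkpoints as app.py
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
trans_tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
trans_model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-es")

image = Image.open("Photos/20240325_133025.jpg").convert("RGB")  # any of the committed photos

with torch.no_grad():
    # English caption
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    caption_eng = processor.decode(out[0], skip_special_tokens=True)

    # Spanish translation of the caption
    trans_inputs = trans_tokenizer.encode(caption_eng, return_tensors="pt")
    trans_out = trans_model.generate(trans_inputs)
    caption_es = trans_tokenizer.decode(trans_out[0], skip_special_tokens=True)

print("EN:", caption_eng)
print("ES:", caption_es)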