import jax
import matplotlib.pyplot as plt
import numpy as np
import os
import streamlit as st

from tempfile import NamedTemporaryFile
from torchvision.transforms import Compose, Resize, ToPILImage

import utils
BASELINE_MODEL = "openai/clip-vit-base-patch32"
# MODEL_PATH = "/home/shared/models/clip-rsicd/bs128x8-lr5e-6-adam/ckpt-1"
MODEL_PATH = "flax-community/clip-rsicd-v2"
# IMAGE_VECTOR_FILE = "/home/shared/data/vectors/test-baseline.tsv"
# IMAGE_VECTOR_FILE = "/home/shared/data/vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
# IMAGES_DIR = "/home/shared/data/rsicd_images"
IMAGES_DIR = "./images"
DEMO_IMAGES_DIR = "./demo-images"

def split_image(X):
    num_rows = X.shape[0] // 224
    num_cols = X.shape[1] // 224
    Xc = X[0 : num_rows * 224, 0 : num_cols * 224, :]
    patches = []
    for j in range(num_rows):
        for i in range(num_cols):
            patches.append(Xc[j * 224 : (j + 1) * 224,
                              i * 224 : (i + 1) * 224,
                              :])
    return num_rows, num_cols, patches
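
# Example (illustrative, derived from the code above): a 500 x 700 RGB input
# yields num_rows=2, num_cols=3 and six 224 x 224 patches; the 52-pixel
# remainder at the bottom and the 28-pixel remainder on the right are cropped.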

def get_patch_probabilities(patches, searched_feature,
                            image_preprocessor,
                            model, processor):
    images = [image_preprocessor(patch) for patch in patches]
    text = "An aerial image of {:s}".format(searched_feature)
    inputs = processor(images=images,
                       text=text,
                       return_tensors="jax",
                       padding=True)
    outputs = model(**inputs)
    probs = jax.nn.softmax(outputs.logits_per_text, axis=-1)
    probs_np = np.asarray(probs)[0]
    return probs_np
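
# Note: logits_per_text from the CLIP model has shape (num_texts, num_images),
# so with a single query the softmax over axis -1 distributes probability mass
# across the patches; probs_np[i] is the relative likelihood that patch i
# matches the query.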

def get_image_ranks(probs):
    temp = np.argsort(-probs)
    ranks = np.empty_like(temp)
    ranks[temp] = np.arange(len(probs))
    return ranks
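
# Worked example: probs = np.array([0.1, 0.7, 0.2]) gives ranks = [2, 0, 1],
# i.e. rank 0 (displayed 1-based in the UI below) marks the most probable patch.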

def app():
    model, processor = utils.load_model(MODEL_PATH, BASELINE_MODEL)
    st.title("Find Features in Images")
st.markdown("""
The CLIP model from OpenAI is trained in a self-supervised manner using
contrastive learning to project images and caption text onto a common
embedding space. We have fine-tuned the model (see [Model card](https://huggingface.co/flax-community/clip-rsicd-v2))
using the RSICD dataset (10k images and ~50k captions from the remote
sensing domain). Click here for [more information about our project](https://github.com/arampacha/CLIP-rsicd).
This demo shows the ability of the model to find specific features
(specified as text queries) in the image. As an example, say you wish to
find the parts of the following image that contain a `beach`, `houses`,
or `ships`. We partition the image into tiles of (224, 224) and report
how likely each of them are to contain each text features.
""")
st.image("demo-images/st_tropez_1.png")
st.image("demo-images/st_tropez_2.png")
st.markdown("""
For this image and the queries listed above, our model reports that the
two left tiles are most likely to contain a `beach`, the two top right
tiles are most likely to contain `houses`, and the two bottom right tiles
are likely to contain `boats`.
We have provided a few representative images from [Unsplash](https://unsplash.com/s/photos/aerial-view)
that you can experiment with. Use the image name to put in an initial feature
to look for, this will show the original image, and you will get more ideas
for features that you can ask the model to identify.
""")
    # buf = st.file_uploader("Upload Image for Analysis", type=["png", "jpg"])
    image_file = st.selectbox("Image File", index=0,
                              options=[
                                  "St-Tropez-Port.jpg",
                                  "Acopulco-Bay.jpg",
                                  "Highway-through-Forest.jpg",
                                  "Forest-with-River.jpg",
                                  "Eagle-Bay-Coastline.jpg",
                                  "Multistoreyed-Buildings.jpg",
                                  "Street-View-Malayasia.jpg",
                              ])
    searched_feature = st.text_input("Feature to find")
if st.button("Find"):
# ftmp = NamedTemporaryFile()
# ftmp.write(buf.getvalue())
# image = plt.imread(ftmp.name)
image = plt.imread(os.path.join("demo-images", image_file))
if len(image.shape) != 3 and image.shape[2] != 3:
st.error("Image should be an RGB image")
if image.shape[0] < 224 or image.shape[1] < 224:
st.error("Image should be at least (224 x 224")
st.image(image, caption="Input Image")
st.markdown("---")
        image_preprocessor = Compose([
            ToPILImage(),
            Resize(224)
        ])
        num_rows, num_cols, patches = split_image(image)
        patch_probs = get_patch_probabilities(
            patches,
            searched_feature,
            image_preprocessor,
            model,
            processor)
        patch_ranks = get_image_ranks(patch_probs)
        for i in range(num_rows):
            row_patches = patches[i * num_cols : (i + 1) * num_cols]
            row_probs = patch_probs[i * num_cols : (i + 1) * num_cols]
            row_ranks = patch_ranks[i * num_cols : (i + 1) * num_cols]
            captions = ["p({:s})={:.3f}, rank={:d}".format(searched_feature, p, r + 1)
                        for p, r in zip(row_probs, row_ranks)]
            st.image(row_patches, caption=captions)
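
# Entry point sketch, assuming this page is run on its own; the original repo
# may instead register app() from a multi-page wrapper. With `streamlit run`
# pointed at this file, app() renders the page directly.
if __name__ == "__main__":
    app()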