from typing import Tuple
import gradio as gr
import numpy as np
import cv2
import SaRa.saraRC1 as sara
import warnings
warnings.filterwarnings("ignore")
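# Overlay opacity for the saliency map, and the saliency generators supported
# by SaRa ('deepgaze' is listed for reference; only 'itti' is wired up below).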
ALPHA = 0.4
GENERATORS = ['itti', 'deepgaze']
MARKDOWN = """
# Saliency Ranking 📚

Saliency Ranking is a fundamental 🌟 **Computer Vision** 🌟 task that identifies the most visually significant features within an image 🖼️.

🌟 This demo showcases the **SaRa (Saliency-Driven Object Ranking)** model 🎯, which ranks the visual saliency of an image efficiently and without requiring any training. 🖼️

The technique is built on Itti's saliency map generator, which is modelled on the primate visual cortex 🧠, and can work with or without depth information 🔄.
"""
IMAGE_EXAMPLES = [
['https://media.roboflow.com/supervision/image-examples/people-walking.png', 32],
['https://media.roboflow.com/supervision/image-examples/vehicles.png', 32],
['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 32],
]
def detect_and_annotate(
        image,
        grid_size,
        generator,
        alpha=ALPHA,
        mode=1) -> Tuple[np.ndarray, np.ndarray]:
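    """Run SaRa on a single image and build its display visualisations.

    Returns a tuple of (saliency-map overlay, SaRa ranking heatmap), both as
    RGB NumPy arrays ready for display in Gradio.
    """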
    # Convert the PIL input to a NumPy array (RGB channel order)
    image = np.array(image)
    # Swap the channel order from RGB (PIL) to BGR for OpenCV processing
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Copy the image for SaRa processing
    sara_image = image.copy()
    # Reset SaRa's internal state between runs
    sara.reset()
    # Run SaRa (original implementation built on Itti's saliency model)
    sara_info = sara.return_sara(sara_image, grid_size, generator, mode=mode)
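    # sara_info is assumed to hold the rank-annotated heatmap at index 0;
    # it is extracted and converted for display below.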
# Generate saliency map
saliency_map = sara.return_saliency(image, generator=generator)
# Resize saliency map to match the image size
saliency_map = cv2.resize(saliency_map, (image.shape[1], image.shape[0]))
    # Apply the JET colour map (OpenCV returns the result in BGR)
    saliency_map = cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET)
    # Overlay the saliency map on the original (BGR) image
    saliency_map = cv2.addWeighted(saliency_map, alpha, image, 1 - alpha, 0)
    # Convert the blended overlay to RGB for display
    saliency_map = cv2.cvtColor(saliency_map, cv2.COLOR_BGR2RGB)
# Extract and convert heatmap to RGB
heatmap = sara_info[0]
heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
return saliency_map, heatmap
def process_image(
    input_image,
    grid_size: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
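    """Run SaRa (Itti generator) on the input image in both ranking modes.

    Returns the saliency-map overlay and the two ranking heatmaps, in the
    order expected by the Gradio output components below.
    """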
    # Fall back to a sensible default if the slider value is missing or too small
    if grid_size is None or grid_size < 3:
        grid_size = 9
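    # Run Itti's generator twice: mode=1 and mode=2 appear to select two
    # different ranking/heatmap variants exposed by saraRC1.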
    itti_saliency_map, itti_heatmap = detect_and_annotate(
        input_image, grid_size, 'itti')
    _, itti_heatmap2 = detect_and_annotate(
        input_image, grid_size, 'itti', mode=2)
    # deepgaze_saliency_map, deepgaze_heatmap = detect_and_annotate(
    #     input_image, grid_size, 'deepgaze')
return (
itti_saliency_map,
itti_heatmap,
itti_heatmap2,
# deepgaze_saliency_map,
# deepgaze_heatmap,
)
grid_size_component = gr.Slider(
    minimum=3,
    maximum=100,
    value=32,
    step=1,
    label="Grid Size",
    info=(
        "The grid size for the Saliency Ranking (SaRa) model. It determines how "
        "many regions the image is divided into: a higher value produces more, "
        "finer regions, while a lower value produces fewer, coarser regions. "
        "The default grid size is 32."
    ))
with gr.Blocks() as demo:
gr.Markdown(MARKDOWN)
with gr.Accordion("Configuration", open=False):
with gr.Row():
            grid_size_component.render()
with gr.Row():
input_image_component = gr.Image(
type='pil',
label='Input'
)
itti_saliency_map = gr.Image(
type='pil',
label='Itti Saliency Map'
)
with gr.Row():
itti_heatmap = gr.Image(
type='pil',
label='Saliency Ranking Heatmap 1'
)
itti_heatmap2 = gr.Image(
type='pil',
label='Saliency Ranking Heatmap 2'
)
# with gr.Row():
# deepgaze_saliency_map = gr.Image(
# type='pil',
# label='DeepGaze Saliency Map'
# )
# deepgaze_heatmap = gr.Image(
# type='pil',
# label='DeepGaze Saliency Ranking Heatmap'
# )
submit_button_component = gr.Button(
value='Submit',
scale=1,
variant='primary'
)
gr.Examples(
fn=process_image,
examples=IMAGE_EXAMPLES,
inputs=[
input_image_component,
            grid_size_component,
],
outputs=[
itti_saliency_map,
itti_heatmap,
itti_heatmap2,
# deepgaze_saliency_map,
# deepgaze_heatmap,
]
)
submit_button_component.click(
fn=process_image,
inputs=[
input_image_component,
            grid_size_component,
],
outputs=[
itti_saliency_map,
itti_heatmap,
itti_heatmap2,
# deepgaze_saliency_map,
# deepgaze_heatmap,
]
)
demo.launch(debug=False, show_error=True, max_threads=1)