Upload 70 files
This view is limited to 50 files because it contains too many changes.
- .gitattributes +9 -0
- app.py +138 -0
- home.py +41 -0
- image_augmentation.py +296 -0
- image_mask_gen.py +285 -0
- images/background.mp4 +3 -0
- images/genai shaolin.mp4 +3 -0
- images/image_annote.mp4 +3 -0
- images/image_aug.mp4 +3 -0
- images/pix_output_video (1).mp4 +3 -0
- images/redhulk.mp4 +3 -0
- images/with_replacement_output_video.mp4 +3 -0
- images/zoe.mp4 +3 -0
- requirements.txt +10 -0
- sam-2-meta-video-augmentation-with-yolo-and-genai.ipynb +1 -0
- sam2/__init__.py +9 -0
- sam2/__pycache__/__init__.cpython-312.pyc +0 -0
- sam2/__pycache__/build_sam.cpython-312.pyc +0 -0
- sam2/__pycache__/sam2_image_predictor.cpython-312.pyc +0 -0
- sam2/__pycache__/sam2_video_predictor.cpython-312.pyc +0 -0
- sam2/automatic_mask_generator.py +434 -0
- sam2/build_sam.py +89 -0
- sam2/csrc/connected_components.cu +289 -0
- sam2/modeling/__init__.py +5 -0
- sam2/modeling/__pycache__/__init__.cpython-312.pyc +0 -0
- sam2/modeling/__pycache__/memory_attention.cpython-312.pyc +0 -0
- sam2/modeling/__pycache__/memory_encoder.cpython-312.pyc +0 -0
- sam2/modeling/__pycache__/position_encoding.cpython-312.pyc +0 -0
- sam2/modeling/__pycache__/sam2_base.cpython-312.pyc +0 -0
- sam2/modeling/__pycache__/sam2_utils.cpython-312.pyc +0 -0
- sam2/modeling/backbones/__init__.py +5 -0
- sam2/modeling/backbones/__pycache__/__init__.cpython-312.pyc +0 -0
- sam2/modeling/backbones/__pycache__/hieradet.cpython-312.pyc +0 -0
- sam2/modeling/backbones/__pycache__/image_encoder.cpython-312.pyc +0 -0
- sam2/modeling/backbones/__pycache__/utils.cpython-312.pyc +0 -0
- sam2/modeling/backbones/hieradet.py +295 -0
- sam2/modeling/backbones/image_encoder.py +133 -0
- sam2/modeling/backbones/utils.py +95 -0
- sam2/modeling/memory_attention.py +169 -0
- sam2/modeling/memory_encoder.py +181 -0
- sam2/modeling/position_encoding.py +216 -0
- sam2/modeling/sam/__init__.py +5 -0
- sam2/modeling/sam/__pycache__/__init__.cpython-312.pyc +0 -0
- sam2/modeling/sam/__pycache__/mask_decoder.cpython-312.pyc +0 -0
- sam2/modeling/sam/__pycache__/prompt_encoder.cpython-312.pyc +0 -0
- sam2/modeling/sam/__pycache__/transformer.cpython-312.pyc +0 -0
- sam2/modeling/sam/mask_decoder.py +295 -0
- sam2/modeling/sam/prompt_encoder.py +182 -0
- sam2/modeling/sam/transformer.py +327 -0
- sam2/modeling/sam2_base.py +829 -0
.gitattributes
CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images/background.mp4 filter=lfs diff=lfs merge=lfs -text
+images/genai[[:space:]]shaolin.mp4 filter=lfs diff=lfs merge=lfs -text
+images/image_annote.mp4 filter=lfs diff=lfs merge=lfs -text
+images/image_aug.mp4 filter=lfs diff=lfs merge=lfs -text
+images/pix_output_video[[:space:]](1).mp4 filter=lfs diff=lfs merge=lfs -text
+images/redhulk.mp4 filter=lfs diff=lfs merge=lfs -text
+images/with_replacement_output_video.mp4 filter=lfs diff=lfs merge=lfs -text
+images/zoe.mp4 filter=lfs diff=lfs merge=lfs -text
+sam_2_image_generation.ipynb filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,138 @@
import streamlit as st
import base64

# Set the page configuration
st.set_page_config(
    page_title="MetaMorph AI",
    page_icon="🌉",
    initial_sidebar_state="expanded",
    layout="wide",
    menu_items={
        'Get help': 'https://www.linkedin.com/in/gaurav-verma-4696bb106/',
        'About': "MetaMorph: Revolutionize your media with cutting-edge image and video augmentation using the META Sam-2 model for stunning visual transformations!"
    }
)

# Function to load video as base64
def get_base64_video(video_path):
    with open(video_path, 'rb') as video_file:
        video_bytes = video_file.read()
    return base64.b64encode(video_bytes).decode('utf-8')

# Video file path
video_path = 'images/background.mp4'

# Get the base64 video
video_base64 = get_base64_video(video_path)

# Add video as background
background_video = f"""
<style>
.stApp {{
    background: transparent;
}}
.video-container {{
    position: fixed;
    top: 0;
    left: 0;
    min-width: 100%;
    min-height: 100%;
    z-index: -1;
    overflow: hidden;
}}
.video-container video {{
    position: absolute;
    top: 50%;
    left: 50%;
    width: auto;
    height: auto;
    min-width: 100%;
    min-height: 100%;
    transform: translate(-50%, -50%);
    opacity: 0.5;
}}
.content {{
    position: relative;
    z-index: 1;
    padding-top: 50px;
}}
</style>
<div class="video-container">
    <video autoplay loop muted>
        <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
    </video>
</div>
"""
st.markdown(background_video, unsafe_allow_html=True)

# Content goes here
with st.container():

    # Title
    html_code = """
    <div class="content">
        <div class="title-container">
            <h1 class="neon-text">
                MetaMorphix AI 🐦🔥
            </h1>
        </div>
    </div>

    <style>
    @keyframes rainbow-text-animation {
        0% { color: white; }
        16.67% { color: grey; }
        33.33% { color: grey; }
        50% { color: black; }
        66.67% { color: grey; }
        83.33% { color: white; }
        100% { color: black; }
    }

    .title-container {
        text-align: center;
        margin: 1em 0;
        padding-bottom: 10px;
        border-bottom: 4px solid #fcdee9;
    }

    .neon-text {
        font-family: 'Trebuchet MS', sans-serif;
        font-size: 4em;
        margin: 0;
        animation: rainbow-text-animation 5s infinite linear;
        text-shadow: 0 0 5px rgba(0, 255, 0, 0.8),
                     0 0 10px rgba(0, 255, 255, 0.7),
                     0 0 20px rgba(0, 255, 255, 0.6),
                     0 0 40px rgba(0, 0, 0, 0.6),
                     0 0 80px rgba(0, 0, 0, 0.6),
                     0 0 90px rgba(0, 0, 0, 0.6),
                     0 0 100px rgba(0, 0, 255, 0.6),
                     0 0 150px rgba(0, 0, 255, 0.6);
    }
    </style>
    """
    st.markdown(html_code, unsafe_allow_html=True)

# Additional content

# Functionality for pages
from home import home_page
from image_augmentation import image_augmentation_page
from video_augmentation import image_annoter
from use_cases import use_case

def main():
    st.sidebar.title("Navigation")
    page = st.sidebar.selectbox("Go to", ("Home", "Use Cases", "Image Augmentation", "Video Augmentation"))

    if page == "Home":
        home_page()
    elif page == "Use Cases":
        use_case()
    elif page == "Image Augmentation":
        image_augmentation_page()
    elif page == "Video Augmentation":
        image_annoter()

if __name__ == "__main__":
    main()
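Note on the background video above: base64 encoding inflates the payload by roughly 4/3, so inlining images/background.mp4 (28,245,879 bytes according to its LFS pointer further down) puts roughly 38 MB of text into the page markup on every render. A quick back-of-the-envelope check:

raw_bytes = 28_245_879                          # size of images/background.mp4 from its LFS pointer
approx_b64_chars = 4 * ((raw_bytes + 2) // 3)   # base64 emits 4 output characters per 3 input bytes
print(approx_b64_chars)                         # 37661172 characters, ~37.7 MB of inline text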
home.py
ADDED
@@ -0,0 +1,41 @@
import streamlit as st


def home_page():
    st.title("Welcome to MetaMorphix AI")
    st.write("""
    This application uses the **META Sam-2 model** to perform advanced augmentation on images and videos,
    \n**YOLO** trained and pretrained models for object detection,
    \n**Stability AI API** for generative image-to-image generation on the mask, and
    \n**Image Annotator** for annotating a folder of YOLO training images, replicating the Roboflow workflow.

    Navigate to the desired section using the sidebar.

    \nScroll down to see the tutorial.
    """)
    st.divider()
    st.header("For Image Augmentation")
    st.write("""1. Navigate to the Image Augmentation page & upload an image.
    \n2. Mark coordinates on the canvas **(green for inclusive points & red for exclusive points).**
    \n3. Select an augmentation method [Pixelate, Hue Change, Mask Replacement, Generative Img2Img] and proceed.""")
    st.video("images/image_aug.mp4")

    st.divider()
    st.header("For Image Annotation on an Image Directory")
    st.write("""1. Navigate to the Video Augmentation page & paste the local directory path containing the training images to be annotated.
    \n2. Create a bounding box on the canvas.
    \n3. Click on save annotation and navigate with the next button.""")
    st.video("images/image_annote.mp4")

    st.warning("As of now, video augmentation can only be done in a Jupyter notebook due to certain limitations.")
    st.write("Go to the following link to access the notebook and use a Kaggle GPU.")
    # Define the profile link
    profile_url = "https://www.kaggle.com/code/gauravverma069/sam-2-meta-video-augmentation-with-yolo-and-genai"
    st.markdown(f"[Visit my Kaggle Notebook link]({profile_url})")
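For reference, the inclusive/exclusive points described above map directly onto SAM-2 point prompts; a minimal sketch of the expected arrays (the coordinate values here are only illustrative, not taken from the repository):

import numpy as np

# (x, y) pixel coordinates collected from the canvas
point_coords = np.array([[250.0, 220.0], [400.0, 310.0]])
# One label per point: 1 = inclusive (green), 0 = exclusive (red)
point_labels = np.array([1, 0])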
image_augmentation.py
ADDED
@@ -0,0 +1,296 @@
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import image_mask_gen
import torch
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor
import os
import io
import base64
import warnings
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation


# Function to display points on the image using matplotlib
def show_points(coords, labels, ax, marker_size=375):
    pos_points = coords[labels == 1]
    neg_points = coords[labels == 0]
    ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
    ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)

def remove_duplicates(coords, labels):
    unique_coords = []
    unique_labels = []
    seen = set()

    for coord, label in zip(coords, labels):
        coord_tuple = tuple(coord)
        if coord_tuple not in seen:
            seen.add(coord_tuple)
            unique_coords.append(coord)
            unique_labels.append(label)

    return unique_coords, unique_labels


def image_augmentation_page():
    st.title("Image Augmentation")
    st.write("Upload an image to apply augmentation techniques.")

    # Initialize session state variables
    if "inclusive_points" not in st.session_state:
        st.session_state.inclusive_points = []
    if "exclusive_points" not in st.session_state:
        st.session_state.exclusive_points = []

    # Upload an image
    uploaded_file = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

    if uploaded_file is not None:
        # Open the uploaded image
        image = Image.open(uploaded_file)

        # Set the maximum width for display
        max_display_width = 700  # You can adjust this value

        # Calculate the scaling factor
        scale_factor = min(max_display_width / image.size[0], 1)

        # Resize the image for display
        display_width = int(image.size[0] * scale_factor)
        display_height = int(image.size[1] * scale_factor)
        resized_image = image.resize((display_width, display_height))

        # Inclusive Points Phase
        st.subheader("Select Inclusive Points (Green)")
        canvas_inclusive = st_canvas(
            fill_color="rgba(0, 0, 0, 0)",  # Transparent fill
            stroke_width=1,  # Stroke width for drawing
            stroke_color="blue",  # Color for the outline of clicks
            background_image=resized_image,
            update_streamlit=True,
            height=display_height,
            width=display_width,
            drawing_mode="circle",  # Drawing mode to capture clicks as circles
            point_display_radius=3,  # Radius of the circle that represents a click
            key="canvas_inclusive"
        )

        # Process inclusive clicks
        if canvas_inclusive.json_data is not None:
            objects = canvas_inclusive.json_data["objects"]
            new_clicks = [[(obj["left"] + obj["radius"]) / scale_factor, (obj["top"] + obj["radius"]) / scale_factor] for obj in objects]
            st.session_state.inclusive_points.extend(new_clicks)

        # Plot the inclusive points on the original image using Matplotlib
        fig_inclusive, ax = plt.subplots()
        ax.imshow(image)
        ax.axis('off')  # Hide the axes

        # Prepare data for plotting
        inclusive_points = np.array(st.session_state.inclusive_points)
        labels_inclusive = np.array([1] * len(st.session_state.inclusive_points))

        # Call the function to show inclusive points
        if len(inclusive_points) > 0:
            show_points(inclusive_points, labels_inclusive, ax)

        st.pyplot(fig_inclusive)

        # Divider
        st.divider()

        # Exclusive Points Phase
        st.subheader("Select Exclusive Points (Red)")
        canvas_exclusive = st_canvas(
            fill_color="rgba(0, 0, 0, 0)",  # Transparent fill
            stroke_width=1,  # Stroke width for drawing
            stroke_color="blue",  # Color for the outline of clicks
            background_image=resized_image,
            update_streamlit=True,
            height=display_height,
            width=display_width,
            drawing_mode="circle",  # Drawing mode to capture clicks as circles
            point_display_radius=3,  # Radius of the circle that represents a click
            key="canvas_exclusive"
        )

        # Process exclusive clicks
        if canvas_exclusive.json_data is not None:
            objects = canvas_exclusive.json_data["objects"]
            new_clicks = [[(obj["left"] + obj["radius"]) / scale_factor, (obj["top"] + obj["radius"]) / scale_factor] for obj in objects]
            st.session_state.exclusive_points.extend(new_clicks)

        # Plot the exclusive points on the original image using Matplotlib
        fig_exclusive, ax = plt.subplots()
        ax.imshow(image)
        ax.axis('off')  # Hide the axes

        # Prepare data for plotting
        exclusive_points = np.array(st.session_state.exclusive_points)
        labels_exclusive = np.array([0] * len(st.session_state.exclusive_points))

        # Call the function to show exclusive points
        if len(exclusive_points) > 0:
            show_points(exclusive_points, labels_exclusive, ax)

        st.pyplot(fig_exclusive)

        # Grouping coordinates and labels
        coordinates = st.session_state.inclusive_points + st.session_state.exclusive_points
        labels = [1] * len(st.session_state.inclusive_points) + [0] * len(st.session_state.exclusive_points)

        # # Display grouped coordinates and labels
        # st.subheader("Coordinates and Labels")
        # st.write("Coordinates: ", tuple(coordinates))
        # st.write("Labels: ", labels)

        # Provide an option to clear the coordinates
        if st.button("Clear All Points"):
            st.session_state.inclusive_points = []
            st.session_state.exclusive_points = []

        unique_coordinates, unique_labels = remove_duplicates(coordinates, labels)

        st.write("Unique Coordinates:", tuple(unique_coordinates))
        st.write("Unique Labels:", tuple(unique_labels))

        # Build the SAM-2 model and image predictor
        sam2_checkpoint = "sam2_hiera_base_plus.pt"
        model_cfg = "sam2_hiera_b+.yaml"

        sam2_model = build_sam2(model_cfg, sam2_checkpoint, device="cpu")

        predictor = SAM2ImagePredictor(sam2_model)

        predictor.set_image(image)

        input_point = np.array(unique_coordinates)
        input_label = np.array(unique_labels)

        # First pass: ask for multiple mask candidates
        masks, scores, logits = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            multimask_output=True,
        )
        sorted_ind = np.argsort(scores)[::-1]
        masks = masks[sorted_ind]
        scores = scores[sorted_ind]
        logits = logits[sorted_ind]

        # Second pass: refine using the best candidate's logits as a mask prompt
        mask_input = logits[np.argmax(scores), :, :]

        masks, scores, _ = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            mask_input=mask_input[None, :, :],
            multimask_output=False,
        )
        image_mask_gen.show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label)

        # Get masked images
        original_image = Image.open(uploaded_file)
        # st.image(original_image, caption='Original Image', use_column_width=True)

        with st.container(border=True):  # Display masked images
            col1, col2 = st.columns(2)
            with col1:
                mask_images = image_mask_gen.show_masks_1(original_image, masks, scores)
                for idx, (img, score) in enumerate(mask_images):
                    st.image(img, caption=f'Mask {idx+1}, Score: {score:.3f}', use_column_width=True)
            with col2:
                inverse_mask_images = image_mask_gen.show_inverse_masks(original_image, masks, scores)
                for idx, (img, score) in enumerate(inverse_mask_images):
                    st.image(img, caption=f'Inverse Mask {idx+1}, Score: {score:.3f}', use_column_width=True)

        if st.checkbox("Proceed to Image Augmentation"):

            image_aug_select = st.sidebar.selectbox("Select Augmentation for Mask", ["Pixelate", "Hue Change", "Mask Replacement", "Generative Img2Img"])
            if image_aug_select == "Pixelate":

                if st.sidebar.toggle("Proceed to Pixelate Mask"):
                    pixelation_level = st.slider("Select Pixelation Level", min_value=5, max_value=50, value=10)
                    combined_image = image_mask_gen.combine_pixelated_mask(original_image, masks[0], pixelation_level)
                    st.image(combined_image, caption="Combined Pixelated Image", use_column_width=True)
            elif image_aug_select == "Hue Change":

                if st.sidebar.toggle("Proceed to Hue Change"):
                    # Hue shift slider
                    hue_shift = st.slider("Select Hue Shift", min_value=-180, max_value=180, value=0)
                    # Apply hue change and show the result
                    combined_image = image_mask_gen.combine_hue_changed_mask(original_image, masks[0], hue_shift)  # Assuming single mask
                    st.image(combined_image, caption="Combined Hue Changed Image", use_column_width=True)
            elif image_aug_select == "Mask Replacement":

                if st.sidebar.toggle("Proceed to replace Mask"):
                    replacement_file = st.file_uploader("Upload the replacement image", type=["png", "jpg", "jpeg"])
                    if replacement_file is not None:
                        replacement_image = Image.open(replacement_file)  # .convert("RGBA")
                        combined_image = image_mask_gen.combine_mask_replaced_image(original_image, replacement_image, masks[0])  # Assuming single mask
                        st.image(combined_image, caption="Masked Area Replaced Image", use_column_width=True)
            elif image_aug_select == "Generative Img2Img":

                msk_img = None
                mask_images_x = image_mask_gen.show_masks_1(original_image, masks, scores)
                for idx, (img, score) in enumerate(mask_images_x):
                    msk_img = img
                    # st.image(img, caption=f'Mask {idx+1}, Score: {score:.3f}', use_column_width=True)

                rgb_image = msk_img.convert("RGB")
                # st.image(rgb_image)
                resized_image = image_mask_gen.resize_image(rgb_image)
                # st.image(resized_image, caption=f"Resized size: {resized_image.size[0]}x{resized_image.size[1]}", use_column_width=True)
                width, height = resized_image.size

                # User input for the prompt and API key
                prompt = st.text_input("Enter your prompt:", "A Beautiful day, in the style reference of starry night by vincent van gogh")
                api_key = st.text_input("Enter your Stability AI API key:")

                if prompt and api_key:
                    # Set up our connection to the API.
                    os.environ['STABILITY_KEY'] = api_key
                    stability_api = client.StabilityInference(
                        key=os.environ['STABILITY_KEY'],  # API Key reference.
                        verbose=True,  # Print debug messages.
                        engine="stable-diffusion-xl-1024-v1-0",  # Set the engine to use for generation.
                    )
                    style_preset_selector = st.sidebar.selectbox(
                        "Select Style Preset",
                        ["3d-model", "analog-film", "anime", "cinematic", "comic-book", "digital-art", "enhance", "fantasy-art", "isometric", "line-art", "low-poly", "modeling-compound", "neon-punk",
                         "origami", "photographic", "pixel-art", "tile-texture"], index=5)
                    if st.sidebar.toggle("Proceed to Generate Image"):
                        # Set up our initial generation parameters.
                        answers2 = stability_api.generate(
                            prompt=prompt,
                            init_image=resized_image,  # Assign our uploaded image as our Initial Image for transformation.
                            start_schedule=0.6,
                            steps=250,
                            cfg_scale=10.0,
                            width=width,
                            height=height,
                            sampler=generation.SAMPLER_K_DPMPP_SDE,
                            style_preset=style_preset_selector
                        )

                        # Process the response from the API
                        for resp in answers2:
                            for artifact in resp.artifacts:
                                if artifact.finish_reason == generation.FILTER:
                                    warnings.warn(
                                        "Your request activated the API's safety filters and could not be processed. "
                                        "Please modify the prompt and try again.")
                                if artifact.type == generation.ARTIFACT_IMAGE:
                                    img2 = Image.open(io.BytesIO(artifact.binary))
                                    # Display the generated image
                                    st.image(img2, caption="Generated Image", use_column_width=True)

                                    # Combine the generated image with the original image using the mask
                                    combined_img = image_mask_gen.combine_mask_and_inverse_gen(original_image, img2, masks[0])
                                    st.image(combined_img, caption="Combined Image", use_column_width=True)
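One design note on the page above: build_sam2 reloads the checkpoint on every Streamlit rerun, i.e. on every canvas interaction. A minimal sketch (not part of the uploaded code) of caching the predictor with st.cache_resource, assuming the same config and checkpoint paths used above:

import streamlit as st
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

@st.cache_resource
def load_predictor(model_cfg="sam2_hiera_b+.yaml", checkpoint="sam2_hiera_base_plus.pt"):
    # Build the SAM-2 model once and reuse it across reruns of the page
    sam2_model = build_sam2(model_cfg, checkpoint, device="cpu")
    return SAM2ImagePredictor(sam2_model)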
image_mask_gen.py
ADDED
@@ -0,0 +1,285 @@
import streamlit as st
import cv2
import numpy as np
from PIL import Image

def apply_mask(image_cv, mask, color=(0, 255, 0), alpha=0.5):
    """ Apply a mask to an image with given color and alpha blend """
    mask_bgr = np.zeros_like(image_cv)
    mask_bgr[mask > 0] = color
    return cv2.addWeighted(image_cv, 1 - alpha, mask_bgr, alpha, 0)

def draw_points(image_cv, points, labels):
    """ Draw points on the image with different colors based on labels """
    for coord, label in zip(points, labels):
        color = (0, 255, 0) if label == 1 else (255, 0, 0)  # Green for inclusive, red for exclusive
        cv2.circle(image_cv, tuple(map(int, coord)), 5, color, -1)
    return image_cv

def draw_boxes(image_cv, boxes):
    """ Draw boxes on the image """
    for box in boxes:
        x, y, w, h = map(int, box)
        cv2.rectangle(image_cv, (x, y), (x + w, y + h), (255, 0, 0), 2)  # Red boxes
    return image_cv

def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True):
    image_cv = np.array(image.convert("RGB"))[..., ::-1]  # Convert PIL image to BGR format for OpenCV

    for i, (mask, score) in enumerate(zip(masks, scores)):
        image_with_mask = apply_mask(image_cv, mask)

        if point_coords is not None:
            assert input_labels is not None
            image_with_mask = draw_points(image_with_mask, point_coords, input_labels)

        if box_coords is not None:
            image_with_mask = draw_boxes(image_with_mask, box_coords)

        # Convert back to RGB and then to PIL for Streamlit
        image_with_mask = cv2.cvtColor(image_with_mask, cv2.COLOR_BGR2RGB)
        image_pil = Image.fromarray(image_with_mask)

        # Display the final image with all overlays
        st.image(image_pil, caption=f"Mask {i+1}, Score: {score:.3f}", use_column_width=True)


def apply_mask_to_image(image, mask):
    # Ensure the image is a NumPy array in BGR format
    if isinstance(image, Image.Image):
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Create an alpha channel based on the mask
    alpha_channel = (mask * 255).astype(np.uint8)

    # Create an image with the mask applied only on masked areas
    masked_image = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    for c in range(3):  # Apply the mask only to the RGB channels
        masked_image[..., c] = image[..., c] * mask

    # Add the alpha channel to make areas outside the mask transparent
    masked_image[..., 3] = alpha_channel

    return masked_image

def show_masks_1(image, masks, scores):
    mask_images = []
    for i, (mask, score) in enumerate(zip(masks, scores)):
        # Apply the mask to the image
        masked_image = apply_mask_to_image(image, mask)

        # Convert the masked image to PIL format for Streamlit
        pil_image = Image.fromarray(cv2.cvtColor(masked_image, cv2.COLOR_BGRA2RGBA))
        mask_images.append((pil_image, score))

    return mask_images


def apply_inverse_mask_to_image(image, mask):
    # Ensure the image is a NumPy array in BGR format
    if isinstance(image, Image.Image):
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Create an alpha channel that is transparent inside the mask and opaque outside
    alpha_channel = (1 - mask) * 255

    # Create an image with the mask applied to the inverse areas
    inverse_masked_image = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    for c in range(3):  # Apply the inverse mask to RGB channels
        inverse_masked_image[..., c] = image[..., c] * (1 - mask)

    # Add the alpha channel to make areas inside the mask transparent
    inverse_masked_image[..., 3] = alpha_channel.astype(np.uint8)

    return inverse_masked_image

def show_inverse_masks(image, masks, scores):
    mask_images = []
    for i, (mask, score) in enumerate(zip(masks, scores)):
        # Apply the inverse mask to the image
        inverse_masked_image = apply_inverse_mask_to_image(image, mask)

        # Convert the masked image to PIL format for Streamlit
        pil_image = Image.fromarray(cv2.cvtColor(inverse_masked_image, cv2.COLOR_BGRA2RGBA))
        mask_images.append((pil_image, score))

    return mask_images


def combine_mask_and_inverse(image, mask):
    # Ensure the image is a NumPy array in BGR format
    if isinstance(image, Image.Image):
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)

    # Apply the mask to get the masked region (in original color)
    masked_region = cv2.bitwise_and(image, image, mask=mask.astype(np.uint8))

    # Apply the inverse mask to get the inverse-masked region (in original color)
    inverse_mask = 1 - mask
    inverse_masked_region = cv2.bitwise_and(image, image, mask=inverse_mask.astype(np.uint8))

    # Combine both masked and inverse-masked regions
    combined_image = cv2.add(masked_region, inverse_masked_region)

    # Convert to RGBA format for transparency
    combined_image_rgba = cv2.cvtColor(combined_image, cv2.COLOR_BGR2RGBA)

    return combined_image_rgba

def show_combined_masks(image, masks, scores):
    mask_images = []
    for i, (mask, score) in enumerate(zip(masks, scores)):
        # Combine masked and inverse masked areas
        combined_image = combine_mask_and_inverse(image, mask)

        # Convert the combined image to PIL format for Streamlit
        pil_image = Image.fromarray(combined_image)
        mask_images.append((pil_image, score))

    return mask_images


def pixelate_area(image, mask, pixelation_level):
    """
    Apply pixelation to the masked area of an image.
    """
    pixelated_image = image.copy()
    h, w, _ = image.shape

    for y in range(0, h, pixelation_level):
        for x in range(0, w, pixelation_level):
            block = (slice(y, min(y + pixelation_level, h)), slice(x, min(x + pixelation_level, w)))
            if np.any(mask[block]):
                mean_color = image[block].mean(axis=(0, 1)).astype(int)
                pixelated_image[block] = mean_color

    return pixelated_image

def combine_pixelated_mask(image, mask, pixelation_level=10):
    """
    Combine the pixelated masked areas with the original image.
    """
    image_np = np.array(image)
    mask_np = np.array(mask)

    pixelated_mask = pixelate_area(image_np, mask_np, pixelation_level)
    combined_image = Image.fromarray(pixelated_mask)
    return combined_image


def change_hue(image, mask, hue_shift):
    # Convert the image to HSV (dropping any alpha channel first)
    hsv_image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    hsv_image = cv2.cvtColor(hsv_image, cv2.COLOR_RGB2HSV)

    # Apply the hue shift to the masked area
    hsv_image[..., 0] = (hsv_image[..., 0] + hue_shift) % 180

    # Convert back to RGB format
    rgb_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)

    # Combine the hue-changed area with the original image using the mask
    hue_changed_image = np.array(image).copy()
    hue_changed_image[mask] = np.concatenate((rgb_image[mask], hue_changed_image[mask][..., 3:]), axis=-1)

    return hue_changed_image

def combine_hue_changed_mask(image, mask, hue_shift):
    image_np = np.array(image)
    mask_np = np.array(mask).astype(bool)

    hue_changed_area = change_hue(image_np, mask_np, hue_shift)
    combined_image = Image.fromarray(hue_changed_area)

    return combined_image

def replace_masked_area(original_image, replacement_image, mask):
    # Ensure the replacement image is the same size as the original image
    replacement_image = cv2.resize(replacement_image, (original_image.shape[1], original_image.shape[0]))

    # Create a copy of the original image
    replaced_image = original_image.copy()

    # Replace the masked area with the corresponding area from the replacement image
    replaced_image[mask] = replacement_image[mask]

    return replaced_image

def combine_mask_replaced_image(original_image, replacement_image, mask):
    # Convert images to NumPy arrays
    original_np = np.array(original_image)
    replacement_np = np.array(replacement_image)
    mask_np = np.array(mask).astype(bool)

    # Replace the masked area
    replaced_area = replace_masked_area(original_np, replacement_np, mask_np)
    combined_image = Image.fromarray(replaced_area)

    return combined_image


def resize_image(image, max_size=1024):
    # Get the current width and height of the image
    width, height = image.size

    # Calculate the scaling factor
    if width > height:
        scaling_factor = max_size / width
    else:
        scaling_factor = max_size / height

    # Only resize if the image is larger than the max_size
    if scaling_factor < 1:
        # Calculate new dimensions
        new_width = int(width * scaling_factor)
        new_height = int(height * scaling_factor)

        # Resize the image
        image_resized = image.resize((new_width, new_height))
        return image_resized
    else:
        # Return the original image if it's already within the size limits
        return image


def combine_mask_and_inverse_gen(original_img, generated_img, mask):
    # Ensure images are in RGBA mode
    original_img = original_img.convert("RGBA")
    generated_img = generated_img.convert("RGBA")

    # Resize the generated image to match the original image size
    generated_img = generated_img.resize(original_img.size)

    # Convert images to arrays
    orig_array = np.array(original_img)
    gen_array = np.array(generated_img)

    # Resize the mask to match the original image size
    mask = Image.fromarray((mask * 255).astype(np.uint8))  # Convert mask to image for resizing
    mask = mask.resize(original_img.size, Image.NEAREST)  # Resize the mask
    bool_mask = np.array(mask).astype(bool)

    # Ensure the mask has the correct shape (H, W, 1)
    if bool_mask.ndim == 2:
        bool_mask = bool_mask[:, :, np.newaxis]

    # Combine images using the mask
    combined_array = np.where(bool_mask, gen_array, orig_array)

    # Convert combined array back to image
    combined_img = Image.fromarray(combined_array, "RGBA")
    return combined_img
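A small usage sketch for the helpers above, run outside Streamlit with synthetic inputs (a toy 64x64 image and a circular mask; nothing here comes from the repository's data):

import numpy as np
from PIL import Image
import image_mask_gen

# Toy inputs: a solid red 64x64 image and a circular boolean mask
arr = np.zeros((64, 64, 3), dtype=np.uint8)
arr[..., 0] = 255
img = Image.fromarray(arr)
yy, xx = np.mgrid[:64, :64]
mask = ((yy - 32) ** 2 + (xx - 32) ** 2) < 20 ** 2

# Pixelate only the masked area and save the result
pixelated = image_mask_gen.combine_pixelated_mask(img, mask, pixelation_level=8)
pixelated.save("pixelated_demo.png")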
images/background.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92ca11934ec6540cf3fb0d5225aff2742683ce986f6269852ed18a751fb76a54
size 28245879
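The three lines above are a Git LFS pointer, not the video itself; the actual bytes are fetched by LFS via the sha256 oid. A minimal, hypothetical parser for this pointer format:

def parse_lfs_pointer(path):
    # Each pointer line is "<key> <value>", e.g. "size 28245879"
    fields = {}
    with open(path, "r") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# For a pointer file like the one above, this returns something like:
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:92ca...', 'size': '28245879'}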
images/genai shaolin.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:699d86e4be50ca525808198a816a14fdb584bfc3bcaff61afa755c368ed8fb82
size 1060558
images/image_annote.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6acf56d66dbb2fa3fc2f4f0ba9e4591282f41cf0829c4a81686e89010963a66f
size 30740936
images/image_aug.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02edb90435a0024388ec09a6c6a28cf7e670e42de0da5792e29592460c4f44dd
size 70042465
images/pix_output_video (1).mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d68ee725423da70f72c91ee747f566364c1298303e3e0c2d2c863f0a0b4e01a
size 2042041
images/redhulk.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d98e9dae8d17acfec3baad33cc7f6445309e9eaf270ced2284b93d17eb42666f
size 2452133
images/with_replacement_output_video.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70884dd88bb7935ff2d492df82a1940487dc8f2bb0194547b7236f43b009faa9
size 8324371
images/zoe.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e24090d2db21ddd34892666e9b1eb907bd1bf3cfe5516c268b8b522180a6eb16
size 2368843
requirements.txt
ADDED
@@ -0,0 +1,10 @@
torch>=2.3.1
torchvision>=0.18.1
numpy>=1.24.4
tqdm>=4.66.1
hydra-core>=1.3.2
iopath>=0.1.10
pillow>=9.4.0
streamlit-drawable-canvas>=0.9.3
opencv-python>=4.10.0.84
stability-sdk>=0.8.6
sam-2-meta-video-augmentation-with-yolo-and-genai.ipynb
ADDED
@@ -0,0 +1 @@
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.14","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[],"dockerImageVersionId":30762,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# Video Augmentation using META SAM-2 Model with YOLO model and Stability AI","metadata":{}},{"cell_type":"markdown","source":"### Importing Images with Annoted text file for Yolov8n Model Training","metadata":{}},{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### upload your image directory with .txt annoted file in the format required by yolo model for training, with video on which model has to predict.\n\n### incase if wants to use pre_trained YOLO model, jump to section of pretrained model., or incase want to manually put coordinates on a frame jump to section of video segmenting.","metadata":{}},{"cell_type":"markdown","source":"### Installing Required Libraries","metadata":{}},{"cell_type":"code","source":"!pip install ultralytics opencv-python\n!pip install -U ipywidgets","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Yolov8n Model training ","metadata":{}},{"cell_type":"markdown","source":"## Yaml file creation and model training\n","metadata":{}},{"cell_type":"code","source":"from ultralytics import YOLO\nimport cv2\nimport matplotlib.pyplot as plt\n\n# Load YOLOv8 model configuration (e.g., YOLOv8 nano model)\nmodel = YOLO('yolov8n.yaml')\n\n# Create a dataset.yaml file for YOLOv8 training\ndataset_yaml_content = \"\"\"\ntrain: \"/kaggle/input/yolov-train-data/Bottle\"\nval: \"/kaggle/input/yolov-train-data/Bottle\"\nnc: 1 # Number of classes (1 in this case)\nnames: ['bottle']\n\"\"\"\n\n# Save the dataset.yaml file\nwith open('dataset.yaml', 'w') as f:\n f.write(dataset_yaml_content)\n\n \n\n# Train the model with the specified dataset and parameters\nmodel.train(\n data='dataset.yaml', # Path to the dataset.yaml file\n epochs=100, # Increase epochs for better results with small datasets\n imgsz=1024, # Use the resized image dimensions\n batch=1, # Set batch size to 4 due to 
limited data\n patience=50, # Early stopping if no improvement\n lr0=0.0001, # Start with a lower learning rate\n augment=True, # Enable data augmentation\n# weights='yolov8n.pt' # Start training with pre-trained weights (optional)\n)\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### Note: You may have to enter wandb.ai api if using Kaggle","metadata":{}},{"cell_type":"markdown","source":"## prediction on an Image","metadata":{}},{"cell_type":"code","source":"# Load a test image\nimg = cv2.imread('/kaggle/input/yolov-train-data/Bottle/IMG202408142240012.jpg')\n\n# Predict\nresults = model.predict(img)\n\n# Alternatively, you can use matplotlib to display the results\nplt.imshow(results[0].plot()) # `plot` returns an image with bounding boxes drawn\nplt.axis('off')\nplt.show()","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## Predicting on Video & detecting the First Frame, and its center coordinates","metadata":{}},{"cell_type":"code","source":"# Process the video\nvideo_path = '/kaggle/input/yolov-train-data/VID202408142242002.mp4'\ncap = cv2.VideoCapture(video_path)\n\nx_center=0\ny_center=0\nframe_number = 0\nobject_detected = False\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n frame_number += 1\n\n # Run YOLOv8 detection\n results = model(frame)\n\n for r in results:\n if r.boxes: # Check if any object is detected\n for box in r.boxes:\n # Get the bounding box coordinates\n x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()\n\n # Calculate the center coordinates\n x_center = int((x1 + x2) / 2)\n y_center = int((y1 + y2) / 2)\n \n # Print the first frame number and center coordinates\n print(f\"First detection at frame: {frame_number}\")\n print(f\"Center coordinates: (x={x_center}, y={y_center})\")\n\n object_detected = True\n break\n\n if object_detected:\n break\n\ncap.release()\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"print(\"x_center:\",x_center)\nprint(\"y_center:\",y_center)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Using Yolov8s pretrained model for direct detection and getting the frame","metadata":{}},{"cell_type":"markdown","source":"#### just mention class name and it will return frame no. 
and coordinates","metadata":{}},{"cell_type":"code","source":"# Load the YOLOv8s model\nmodel = YOLO('yolov8s.pt') # Make sure the model is trained on the \"bottle\" class\n\n# Process the video\nvideo_path = '/kaggle/input/yolov-train-data/VID202408142242002.mp4'\ncap = cv2.VideoCapture(video_path)\n\nx_center = 0\ny_center = 0\nframe_number = 0\nobject_detected = False\nconfidence_threshold = 0.8 # Set the confidence threshold\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n frame_number += 1\n\n # Run YOLOv8 detection\n results = model(frame)\n\n for r in results:\n for box in r.boxes:\n # Get the class label for the detected object\n cls = int(box.cls[0].cpu().numpy())\n class_name = model.names[cls]\n\n # Check if the detected object is a \"bottle\" and has confidence > 0.8\n confidence = box.conf[0].cpu().numpy()\n if class_name == 'bottle' and confidence > confidence_threshold:\n # Get the bounding box coordinates\n x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()\n\n # Calculate the center coordinates\n x_center = int((x1 + x2) / 2)\n y_center = int((y1 + y2) / 2)\n \n # Print the first frame number and center coordinates\n print(f\"First bottle detection at frame: {frame_number}\")\n print(f\"Center coordinates: (x={x_center}, y={y_center}) with confidence {confidence:.2f}\")\n\n object_detected = True\n break # Exit the loop after the first detection\n\n if object_detected:\n break # Exit the main loop after the first detection\n\ncap.release()\n\n# If no bottle was detected with confidence > 0.8\nif not object_detected:\n print(\"No requested Object detected in the video with confidence greater than 0.8.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"print(\"x_center:\",x_center)\nprint(\"y_center:\",y_center)\nprint(\"Frame No.:\",frame_number)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"#### clearing GPU cache","metadata":{}},{"cell_type":"code","source":"import torch\ntorch.cuda.empty_cache()\nprint(\"Done\")","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Video segmenting","metadata":{}},{"cell_type":"markdown","source":"### importing SAM-2 model (may take a while to download)","metadata":{}},{"cell_type":"code","source":"!git clone https://github.com/facebookresearch/segment-anything-2.git\n%cd /kaggle/working/segment-anything-2\n%pip install -e .\n%cd /kaggle/working/segment-anything-2/checkpoints\n!bash /kaggle/working/segment-anything-2/checkpoints/download_ckpts.sh\n%cd /kaggle/working/segment-anything-2","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nfrom PIL import Image","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# use bfloat16 for the entire notebook\ntorch.autocast(device_type=\"cuda\", dtype=torch.float16).__enter__()\n\nif torch.cuda.get_device_properties(0).major >= 8:\n # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)\n torch.backends.cuda.matmul.allow_tf32 = True\n torch.backends.cudnn.allow_tf32 = True","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## video to frames","metadata":{}},{"cell_type":"code","source":"import cv2\nimport os\nimport shutil\n\ndef video_to_frames(video_path, 
output_folder):\n # Ensure the output folder is clean\n if os.path.exists(output_folder):\n shutil.rmtree(output_folder)\n os.makedirs(output_folder)\n \n # Open the video file\n video_capture = cv2.VideoCapture(video_path)\n \n frame_count = 0\n success = True\n\n while success:\n success, frame = video_capture.read()\n if success:\n # Save the frame with a consistent naming convention\n frame_filename = os.path.join(output_folder, f\"{frame_count:05d}.jpg\")\n cv2.imwrite(frame_filename, frame)\n frame_count += 1\n\n video_capture.release()\n print(f\"Extracted {frame_count} frames to {output_folder}\")\n return frame_count\n\n# Example usage\nvideo_path = \"/kaggle/input/shaolin-soccer/Untitled video - Made with Clipchamp.mp4\"\noutput_folder = \"/kaggle/working/output_frames\"\ntotal_frames = video_to_frames(video_path, output_folder)\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## reordering Frames to video propagation\n","metadata":{}},{"cell_type":"code","source":"frame_number =0 ","metadata":{"execution":{"iopub.status.busy":"2024-08-23T05:45:01.624801Z","iopub.execute_input":"2024-08-23T05:45:01.625582Z","iopub.status.idle":"2024-08-23T05:45:01.636025Z","shell.execute_reply.started":"2024-08-23T05:45:01.625533Z","shell.execute_reply":"2024-08-23T05:45:01.634951Z"},"trusted":true},"execution_count":1,"outputs":[]},{"cell_type":"markdown","source":"### (replace it with **frame_number** if using YOLO model)\n\n#### frame_number = frame_number","metadata":{}},{"cell_type":"code","source":"import os\nimport shutil\n\ndef reorder_frames(video_dir, ann_frame_idx, output_dir):\n # Ensure the output directory is clean\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n \n # Get and sort the list of frame filenames\n frame_names = [\n p for p in os.listdir(video_dir)\n if os.path.splitext(p)[-1] in [\".jpg\", \".jpeg\", \".JPG\", \".JPEG\"]\n ]\n frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))\n \n total_frames = len(frame_names)\n \n # Copy and reorder the frames to the new directory\n for i in range(total_frames):\n if i >= ann_frame_idx:\n new_idx = i - ann_frame_idx\n else:\n new_idx = total_frames - ann_frame_idx + i\n old_path = os.path.join(video_dir, frame_names[i])\n new_path = os.path.join(output_dir, f\"{new_idx:05d}.jpg\")\n shutil.copy2(old_path, new_path)\n \n print(f\"Frames reordered and copied to {output_dir} successfully.\")\n return len(os.listdir(output_dir))\n\n# Example usage\nreordered_dir = \"/kaggle/working/reordered_frames\"\nann_frame_idx = frame_number # Frame index to start as 0\nreordered_count = reorder_frames(output_folder, ann_frame_idx, reordered_dir)\n\n# Verify total frame consistency\nif total_frames == reordered_count:\n print(\"Frame count matches after reordering.\")\nelse:\n print(f\"Frame count mismatch! 
Extracted: {total_frames}, Reordered: {reordered_count}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## Importing Model and creating predictor","metadata":{}},{"cell_type":"code","source":"from sam2.build_sam import build_sam2_video_predictor\n\nsam2_checkpoint = \"/kaggle/working/segment-anything-2/checkpoints/sam2_hiera_base_plus.pt\"\nmodel_cfg = \"sam2_hiera_b+.yaml\"\n\npredictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## checking image where object is detected","metadata":{}},{"cell_type":"code","source":"frame_no = frame_number\n\ndef show_mask(mask, ax, obj_id=None, random_color=False):\n if random_color:\n color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n else:\n cmap = plt.get_cmap(\"tab10\")\n cmap_idx = 0 if obj_id is None else obj_id\n color = np.array([*cmap(cmap_idx)[:3], 0.6])\n h, w = mask.shape[-2:]\n mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n ax.imshow(mask_image)\n\n\ndef show_points(coords, labels, ax, marker_size=200):\n pos_points = coords[labels==1]\n neg_points = coords[labels==0]\n ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n \n# `video_dir` a directory of JPEG frames with filenames like `<frame_index>.jpg`\nvideo_dir = \"/kaggle/working/reordered_frames\"\n\n# scan all the JPEG frame names in this directory\nframe_names = [\n p for p in os.listdir(video_dir)\n if os.path.splitext(p)[-1] in [\".jpg\", \".jpeg\", \".JPG\", \".JPEG\"]\n]\nframe_names.sort(key=lambda p: int(os.path.splitext(p)[0]))\n\n# take a look the first video frame\nframe_idx = frame_no\nplt.figure(figsize=(12, 8))\nplt.title(f\"frame {frame_idx}\")\nplt.imshow(Image.open(os.path.join(video_dir, frame_names[frame_idx])))","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"inference_state = predictor.init_state(video_path=video_dir)\npredictor.reset_state(inference_state)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### Masking the image object where object is detected in frame with coordinates","metadata":{}},{"cell_type":"code","source":"x_center= 1050\ny_center = 650","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### in case using Yolo model replace,\n\n### x_center =x_center\n### y_center =y_center","metadata":{}},{"cell_type":"code","source":"ann_frame_idx = 0 # the frame index we interact with\nann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)\nx = x_center\ny = y_center\n\npoints = np.array([[x,y]], dtype=np.float32)\nlabels = np.array([1], np.int32)\n_, out_obj_ids, out_mask_logits = predictor.add_new_points(\n inference_state=inference_state,\n frame_idx=ann_frame_idx,\n obj_id=ann_obj_id,\n points=points,\n labels=labels,\n)\n\nplt.figure(figsize=(12, 8))\nplt.title(f\"frame {ann_frame_idx}\")\nplt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx])))\nshow_points(points, labels, plt.gca())\nshow_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), 
obj_id=out_obj_ids[0])","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### Note: provide additional points if object not detected properly\n\n### in the format\n#### points = np.array([[x,y],[x1,y1],[x2,y2]], dtype=np.float32)\n#### labels = np.array([1,1,1], np.int32)\n\n#### in labels 1 indicate inclusive and 0 excluding point","metadata":{}},{"cell_type":"code","source":"def count_files_in_folder(folder_path):\n \"\"\"\n Count the number of files in a given folder.\n \n Args:\n - folder_path (str): Path to the folder.\n \n Returns:\n - int: Number of files in the folder.\n \"\"\"\n return len([f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))])\n\n# Example usage\nfolder_path = \"/kaggle/working/reordered_frames\" # Replace with your actual folder path\nnum_files = count_files_in_folder(folder_path)\nprint(f\"Number of files in the folder: {num_files}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## Mask generation\n### Propagating into Video with reordered Frames","metadata":{}},{"cell_type":"markdown","source":"### if Addition points are provided also change them in below code","metadata":{}},{"cell_type":"code","source":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport shutil # Importing shutil to remove directories\n\ndef apply_mask_to_image(frame, mask):\n \"\"\"\n Apply a mask to an image frame, setting non-mask areas to zero.\n \"\"\"\n h, w, _ = frame.shape\n mask_resized = np.resize(mask, (h, w)) # Resize mask to match frame dimensions\n mask_3d = np.repeat(mask_resized[:, :, np.newaxis], 3, axis=2) # Expand mask dimensions for RGB channels\n masked_frame = frame * mask_3d # Apply the mask to the frame\n return masked_frame\n\ndef show_mask(mask, ax, obj_id=None, random_color=False):\n if random_color:\n color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n else:\n cmap = plt.get_cmap(\"tab10\")\n cmap_idx = 0 if obj_id is None else obj_id\n color = np.array([*cmap(cmap_idx)[:3], 0.6])\n h, w = mask.shape[-2:]\n mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n ax.imshow(mask_image)\n\ndef show_points(coords, labels, ax, marker_size=200):\n pos_points = coords[labels == 1]\n neg_points = coords[labels == 0]\n ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n\n# `video_dir` a directory of JPEG frames with filenames like `<frame_index>.jpg`\nvideo_dir = \"/kaggle/working/reordered_frames\"\n\n# Scan all the JPEG frame names in this directory\nframe_names = [\n p for p in os.listdir(video_dir)\n if os.path.splitext(p)[-1] in [\".jpg\", \".jpeg\", \".JPG\", \".JPEG\"]\n]\nframe_names.sort(key=lambda p: int(os.path.splitext(p)[0]))\n\n# Initialize predictor and inference state\ninference_state = predictor.init_state(video_path=video_dir)\n\n# Reset the predictor state\npredictor.reset_state(inference_state)\n\n# Frame and object IDs\nann_frame_idx = 0 # frames are reordered\nann_obj_id = 1 # Give a unique ID to each object we interact with (can be any integer)\n\n# Add a 2nd positive click at (x, y) = (250, 220) to refine the mask\npoints = np.array([[x,y]], dtype=np.float32)\nlabels = np.array([1], np.int32) # 1 means positive click, 0 means negative click\n_, 
out_obj_ids, out_mask_logits = predictor.add_new_points(\n inference_state=inference_state,\n frame_idx=ann_frame_idx,\n obj_id=ann_obj_id,\n points=points,\n labels=labels,\n)\n\n# Run propagation throughout the video and collect the results in a dict\nvideo_segments = {} # video_segments contains the per-frame segmentation results\nfor out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state):\n video_segments[out_frame_idx] = {\n out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy()\n for i, out_obj_id in enumerate(out_obj_ids)\n }\n\n# Create an output directory for images\noutput_dir = '/kaggle/working/mask_segmentation_images'\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\nelse:\n # If the directory exists, clear its kaggle/workings\n for filename in os.listdir(output_dir):\n file_path = os.path.join(output_dir, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(f\"Failed to delete {file_path}. Reason: {e}\")\n\n# Render and save masked images every few frames\nvis_frame_stride = 1\nplt.close(\"all\")\nfor out_frame_idx in range(0, len(frame_names), vis_frame_stride):\n frame = np.array(Image.open(os.path.join(video_dir, frame_names[out_frame_idx])))\n masked_frame = frame.copy() # Create a copy of the frame for modification\n for out_obj_id, out_mask in video_segments[out_frame_idx].items():\n masked_frame = apply_mask_to_image(masked_frame, out_mask)\n\n # Convert masked frame to Image object for saving\n masked_image = Image.fromarray(masked_frame.astype('uint8'))\n masked_image.save(os.path.join(output_dir, f'frame_{out_frame_idx}.png'))\n\n # Optional: Display the masked frame\n# plt.figure(figsize=(6, 4))\n# plt.title(f\"frame {out_frame_idx}\")\n# plt.imshow(masked_frame)\n# plt.show()\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### we can also display the masked frame(s) by un-commenting the last 4 rows","metadata":{}},{"cell_type":"markdown","source":"## restore Original order of the video frames\n\n### this will restore the original order of the frames","metadata":{}},{"cell_type":"code","source":"import os\nimport shutil\n\ndef restore_original_order(video_dir, ann_frame_idx, output_dir):\n \"\"\"\n Restore the original order of frames from a directory and save them into a new directory.\n \n Args:\n - video_dir (str): Directory containing the reordered frames.\n - ann_frame_idx (int): The frame index used to start the reordering.\n - output_dir (str): Directory to save the restored frames.\n \"\"\"\n # Ensure the output directory is clean\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n \n # Get a list of all frame filenames in the original directory\n frame_names = [\n p for p in os.listdir(video_dir)\n if p.endswith(\".png\") and p.startswith(\"frame_\")\n ]\n \n # Ensure frames are sorted numerically by extracting the number from the filename\n frame_names.sort(key=lambda p: int(p.split('_')[-1].split('.')[0]))\n\n # Calculate total number of frames\n total_frames = len(frame_names)\n\n # Calculate the original frame indices\n original_indices = {}\n for i in range(total_frames):\n if i < (total_frames - ann_frame_idx):\n original_idx = i + ann_frame_idx\n else:\n original_idx = i - (total_frames - ann_frame_idx)\n original_indices[frame_names[i]] = 
f\"frame_{original_idx:03d}.png\"\n \n # Copy and rename the files into the new directory\n for old_name, new_name in original_indices.items():\n old_path = os.path.join(video_dir, old_name)\n new_path = os.path.join(output_dir, new_name)\n shutil.copy2(old_path, new_path)\n \n print(f\"Frames restored to original order and saved to {output_dir} successfully.\")\n\n# Example usage\nvideo_dir = \"/kaggle/working/mask_segmentation_images\" # Replace with your original frames directory\nann_frame_idx = 0 # The frame index used to start the reordering\noutput_dir = \"/kaggle/working/restored_frames\" # Replace with your desired output folder path\nrestore_original_order(video_dir, ann_frame_idx, output_dir)\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## converting mask Frames back to video","metadata":{}},{"cell_type":"code","source":"import cv2\nimport os\n\ndef frames_to_video(frames_folder, output_video_path, fps=30):\n # Check if the output video file already exists and delete it\n if os.path.exists(output_video_path):\n try:\n os.remove(output_video_path)\n print(f\"Existing file {output_video_path} removed.\")\n except Exception as e:\n print(f\"Failed to remove {output_video_path}. Reason: {e}\")\n return\n\n # Get a list of frame files and sort them by name\n frame_files = [f for f in os.listdir(frames_folder) if f.endswith('.png')]\n frame_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0])) # Sort by frame number\n\n # Check if there are any frames to process\n if not frame_files:\n print(\"No frames found in the specified folder.\")\n return\n\n # Read the first frame to get the dimensions\n first_frame_path = os.path.join(frames_folder, frame_files[0])\n first_frame = cv2.imread(first_frame_path)\n if first_frame is None:\n print(f\"Failed to read the first frame at {first_frame_path}\")\n return\n height, width, _ = first_frame.shape\n\n # Initialize the video writer\n fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Codec for mp4 format\n video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))\n\n # Write each frame to the video\n for frame_file in frame_files:\n frame_path = os.path.join(frames_folder, frame_file)\n frame = cv2.imread(frame_path)\n if frame is None:\n print(f\"Failed to read frame at {frame_path}\")\n continue\n video_writer.write(frame)\n\n # Release the video writer\n video_writer.release()\n print(f\"Video saved to {output_video_path}\")\n\n# Example usage\nframes_folder = r'/kaggle/working/restored_frames' # Replace with the folder containing your frames\noutput_video_path = r\"/kaggle/working/mask_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## Inverse Mask Generation","metadata":{}},{"cell_type":"markdown","source":"### similarly in case of additional points make changes here also","metadata":{}},{"cell_type":"code","source":"def clear_output_directory(directory):\n \"\"\"\n Remove all files in the given directory.\n \"\"\"\n if os.path.exists(directory):\n for file in os.listdir(directory):\n file_path = os.path.join(directory, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(f\"Failed to delete {file_path}. 
Reason: {e}\")\n\ndef apply_inverse_mask_to_image(frame, mask):\n \"\"\"\n Apply the inverse of a mask to an image frame, setting mask areas to zero.\n \"\"\"\n h, w, _ = frame.shape\n mask_resized = np.resize(mask, (h, w)) # Resize mask to match frame dimensions\n inverse_mask = 1 - mask_resized # Invert the mask\n mask_3d = np.repeat(inverse_mask[:, :, np.newaxis], 3, axis=2) # Expand mask dimensions for RGB channels\n masked_frame = frame * mask_3d # Apply the inverse mask to the frame\n return masked_frame\n\ndef save_masked_image(masked_frame, out_frame_idx, output_dir):\n \"\"\"\n Save the masked image to the output directory.\n \"\"\"\n # Convert masked frame to Image object for saving\n masked_image = Image.fromarray(masked_frame.astype('uint8'))\n masked_image.save(os.path.join(output_dir, f'frame_{out_frame_idx}.png'))\n\ndef show_mask(mask, ax, obj_id=None, random_color=False):\n if random_color:\n color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n else:\n cmap = plt.get_cmap(\"tab10\")\n cmap_idx = 0 if obj_id is None else obj_id\n color = np.array([*cmap(cmap_idx)[:3], 0.6])\n h, w = mask.shape[-2:]\n mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n ax.imshow(mask_image)\n\ndef show_points(coords, labels, ax, marker_size=200):\n pos_points = coords[labels == 1]\n neg_points = coords[labels == 0]\n ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n\n# `video_dir` a directory of JPEG frames with filenames like `<frame_index>.jpg`\nvideo_dir = \"/kaggle/working/reordered_frames\"\n\n# Scan all the JPEG frame names in this directory\nframe_names = [\n p for p in os.listdir(video_dir)\n if os.path.splitext(p)[-1] in [\".jpg\", \".jpeg\", \".JPG\", \".JPEG\"]\n]\nframe_names.sort(key=lambda p: int(os.path.splitext(p)[0]))\n\n# Initialize predictor and inference state\ninference_state = predictor.init_state(video_path=video_dir)\n\n# Reset the predictor state\npredictor.reset_state(inference_state)\n\n# Frame and object IDs\nann_frame_idx = 0 # The frame index we interact with\nann_obj_id = 1 # Give a unique ID to each object we interact with (can be any integer)\n\n# Add a 2nd positive click at (x, y) = (250, 220) to refine the mask\npoints = np.array([[x,y]], dtype=np.float32)\nlabels = np.array([1], np.int32) # 1 means positive click, 0 means negative click\n_, out_obj_ids, out_mask_logits = predictor.add_new_points(\n inference_state=inference_state,\n frame_idx=ann_frame_idx,\n obj_id=ann_obj_id,\n points=points,\n labels=labels,\n)\n\n# Run propagation throughout the video and collect the results in a dict\nvideo_segments = {} # video_segments contains the per-frame segmentation results\nfor out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state):\n video_segments[out_frame_idx] = {\n out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy()\n for i, out_obj_id in enumerate(out_obj_ids)\n }\n\n# Create an output directory for images\noutput_dir = '/kaggle/working/inverse_segmentation_images'\nos.makedirs(output_dir, exist_ok=True)\n\n# Clear the output directory\nclear_output_directory(output_dir)\n\n# Render and save inverse masked images every few frames\nvis_frame_stride = 1\nplt.close(\"all\")\nfor out_frame_idx in range(0, len(frame_names), vis_frame_stride):\n frame = 
np.array(Image.open(os.path.join(video_dir, frame_names[out_frame_idx])))\n masked_frame = frame.copy() # Create a copy of the frame for modification\n for out_obj_id, out_mask in video_segments[out_frame_idx].items():\n masked_frame = apply_inverse_mask_to_image(masked_frame, out_mask)\n\n # Save the inverse masked frame\n save_masked_image(masked_frame, out_frame_idx, output_dir)\n\n # Optional: Display the inverse masked frame\n # plt.figure(figsize=(6, 4))\n # plt.title(f\"frame {out_frame_idx}\")\n # plt.imshow(masked_frame)\n # plt.show()\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## restoring to original frames of inverse mask","metadata":{}},{"cell_type":"code","source":"video_dir = \"/kaggle/working/inverse_segmentation_images\" # Replace with your original frames directory\nann_frame_idx = 0 # The frame index used to start the reordering\noutput_dir = \"/kaggle/working/inverse_restored_frames\" # Replace with your desired output folder path\nrestore_original_order(video_dir, ann_frame_idx, output_dir)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## converting inverse mask frames to video","metadata":{}},{"cell_type":"code","source":"frames_folder = r'/kaggle/working/inverse_restored_frames' # Replace with the folder containing your frames\noutput_video_path = r\"/kaggle/working/inverse_mask_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Video mask Pixelation","metadata":{}},{"cell_type":"code","source":"def pixelate_area(image, mask, pixelation_level):\n \"\"\"\n Apply pixelation to the masked area of an image.\n\n Parameters:\n - image: NumPy array of the image to be pixelated.\n - mask: Boolean NumPy array indicating the masked area.\n - pixelation_level: Int, the size of the blocks used for pixelation.\n \"\"\"\n # Create a copy of the image to modify\n pixelated_image = image.copy()\n\n # Get image dimensions\n h, w, _ = image.shape\n\n # Loop through the masked area and apply pixelation\n for y in range(0, h, pixelation_level):\n for x in range(0, w, pixelation_level):\n # Define the block area\n block = (slice(y, min(y + pixelation_level, h)), slice(x, min(x + pixelation_level, w)))\n\n # Check if the block is within the masked area\n if np.any(mask[block]):\n # Compute the mean color of the block\n mean_color = image[block].mean(axis=(0, 1)).astype(int)\n\n # Apply the mean color to the block\n pixelated_image[block] = mean_color\n\n return pixelated_image\n\ndef combine_pixelated_mask(masked_image_path, inverse_masked_image_path, save_path, pixelation_level=10):\n \"\"\"\n Combine the pixelated masked areas from the masked image with the inverse-masked image.\n\n Parameters:\n - masked_image_path: String, path to the masked image.\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - save_path: String, path where the combined image will be saved.\n - pixelation_level: Int, the size of the blocks used for pixelation.\n \"\"\"\n # Open images\n masked_image = Image.open(masked_image_path).convert(\"RGBA\")\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Ensure images are the same size by resizing the inverse image\n if masked_image.size != inverse_masked_image.size:\n inverse_masked_image = 
inverse_masked_image.resize(masked_image.size)\n\n # Convert images to numpy arrays\n masked_array = np.array(masked_image)\n inverse_masked_array = np.array(inverse_masked_image)\n\n # Create a mask where the original mask was applied (non-zero areas in any color channel)\n mask = np.any(masked_array[..., :3] > 0, axis=-1)\n\n # Pixelate the masked area\n pixelated_mask = pixelate_area(masked_array, mask, pixelation_level)\n\n # Replace inverse-masked image values with pixelated masked image values where mask is true\n combined_array = inverse_masked_array.copy()\n combined_array[mask] = pixelated_mask[mask]\n\n # Convert back to image\n combined_image = Image.fromarray(combined_array)\n\n # Save the combined image\n combined_image.save(save_path)\n print(f\"Combined image saved as {save_path}\")\n\n# # Display the combined image\n# plt.imshow(combined_image)\n# plt.axis('off')\n# plt.show()\n\n# Directory paths\nmasked_images_dir = \"/kaggle/working/restored_frames\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/pixelated_combined_images\"\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Get and sort the list of image files\nimage_files = [f for f in os.listdir(masked_images_dir) if f.startswith(\"frame_\") and f.endswith(\".png\")]\nimage_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0]))\n\n# Iterate over the sorted files\nfor image_name in image_files:\n masked_image_path = os.path.join(masked_images_dir, image_name)\n inverse_image_path = os.path.join(inverse_images_dir, image_name)\n save_path = os.path.join(output_dir, f\"pixelated_combined_{image_name}\")\n\n # Check if the corresponding inverse image exists before combining\n if os.path.exists(inverse_image_path):\n combine_pixelated_mask(masked_image_path, inverse_image_path, save_path, pixelation_level=20)\n else:\n print(f\"Warning: Missing inverse file for {image_name}. 
Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## converting frames of pixels to video","metadata":{}},{"cell_type":"code","source":"def frames_to_video(frames_folder, output_video_path, fps=30):\n # Get a list of frame files and sort them by name\n frame_files = [f for f in os.listdir(frames_folder) if f.endswith('.png')]\n\n # Sort by frame number, assuming the filename format is \"frame_<number>.png\"\n frame_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0]))\n\n if not frame_files:\n print(\"No frame files found in the specified directory.\")\n return\n\n # Read the first frame to get the dimensions\n first_frame_path = os.path.join(frames_folder, frame_files[0])\n first_frame = cv2.imread(first_frame_path)\n if first_frame is None:\n print(f\"Error reading the first frame: {first_frame_path}\")\n return\n\n height, width, _ = first_frame.shape\n\n # Initialize the video writer\n fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Codec for mp4 format\n video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))\n\n # Write each frame to the video\n for frame_file in frame_files:\n frame_path = os.path.join(frames_folder, frame_file)\n frame = cv2.imread(frame_path)\n if frame is not None:\n video_writer.write(frame)\n else:\n print(f\"Error reading frame: {frame_path}\")\n\n # Release the video writer\n video_writer.release()\n print(f\"Video saved to {output_video_path}\")\n\n# Example usage\nframes_folder = '/kaggle/working/pixelated_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/pixelated_combined_images_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## side by side video of original with pixelated video.","metadata":{}},{"cell_type":"code","source":"from PIL import Image\nimport os\nimport subprocess\nimport shutil\n\n# Directories for the input frames and output combined frames (switched)\ndir1 = '/kaggle/working/output_frames' # Formerly dir2https://accounts.google.com/b/0/AddMailService\ndir2 = '/kaggle/working/pixelated_combined_images' # Formerly dir1\noutput_dir = '/kaggle/working/combined_frames_pix'\nvideo_output = '/kaggle/working/pix_output_video.mp4'\n\n# Ensure the output directory exists and is empty\nif os.path.exists(output_dir):\n shutil.rmtree(output_dir) # Remove the directory and its contents\nos.makedirs(output_dir) # Recreate the empty directory\n\n# Remove the previous video if it exists\nif os.path.exists(video_output):\n os.remove(video_output)\n\n# Get sorted lists of the frames\nframes1 = sorted([f for f in os.listdir(dir1) if f.endswith('.jpg')])\nframes2 = sorted([f for f in os.listdir(dir2) if f.endswith('.png')])\n\n# Iterate over both directories and combine images\nfor idx, (f1, f2) in enumerate(zip(frames1, frames2), start=1):\n img1 = Image.open(os.path.join(dir1, f1))\n img2 = Image.open(os.path.join(dir2, f2))\n \n # Assuming both images have the same height, concatenate side by side\n combined_img = Image.new('RGB', (img1.width + img2.width, img1.height))\n combined_img.paste(img1, (0, 0))\n combined_img.paste(img2, (img1.width, 0))\n \n # Save combined image with a sequential name like combined_frame_001.png\n combined_img.save(os.path.join(output_dir, f\"combined_frame_{idx:03d}.png\"))\n\nprint(f\"Frames combined and 
saved in {output_dir}\")\n\n# List the files in the output directory to verify they exist\nprint(\"Files in output directory:\", os.listdir(output_dir))\n\n# Convert the combined frames into a video using ffmpeg\nsubprocess.run([\n 'ffmpeg', '-framerate', '30', '-i', \n f'{output_dir}/combined_frame_%03d.png', '-c:v', \n 'libx264', '-pix_fmt', 'yuv420p', video_output\n])\n\nprint(f\"Video saved as {video_output}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Masked area Hue change in video","metadata":{}},{"cell_type":"code","source":"import matplotlib.colors as mcolors\n\ndef change_hue(image, mask, hue_shift):\n \"\"\"\n Change the hue of the masked area in an image.\n\n Parameters:\n - image: NumPy array of the image to be modified (in RGB).\n - mask: Boolean NumPy array indicating the masked area.\n - hue_shift: Float, amount to shift the hue (0 to 1 for a complete cycle).\n \"\"\"\n # Convert the image to float in the range [0, 1]\n float_image = image.astype('float32') / 255.0\n\n # Convert to HSV\n hsv_image = mcolors.rgb_to_hsv(float_image)\n\n # Change the hue in the masked area\n hsv_image[..., 0][mask] = (hsv_image[..., 0][mask] + hue_shift) % 1.0\n\n # Convert back to RGB\n modified_float_image = mcolors.hsv_to_rgb(hsv_image)\n\n # Scale back to [0, 255]\n modified_image = (modified_float_image * 255).astype('uint8')\n\n return modified_image\n\ndef combine_hue_modified_mask(masked_image_path, inverse_masked_image_path, save_path, hue_shift=0.1):\n \"\"\"\n Combine the hue-modified masked areas from the masked image with the inverse-masked image.\n\n Parameters:\n - masked_image_path: String, path to the masked image.\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - save_path: String, path where the combined image will be saved.\n - hue_shift: Float, amount to shift the hue (0 to 1 for a complete cycle).\n \"\"\"\n # Open images\n masked_image = Image.open(masked_image_path).convert(\"RGBA\")\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Ensure images are the same size by resizing the inverse image\n if masked_image.size != inverse_masked_image.size:\n inverse_masked_image = inverse_masked_image.resize(masked_image.size)\n\n # Convert images to numpy arrays\n masked_array = np.array(masked_image)\n inverse_masked_array = np.array(inverse_masked_image)\n\n # Create a mask where the original mask was applied (non-zero areas in any color channel)\n mask = np.any(masked_array[..., :3] > 0, axis=-1)\n\n # Change the hue of the masked area\n hue_modified_mask = change_hue(masked_array[..., :3], mask, hue_shift)\n\n # Replace inverse-masked image values with hue-modified masked image values where mask is true\n combined_array = inverse_masked_array.copy()\n combined_array[mask] = np.dstack((hue_modified_mask, masked_array[..., 3]))[mask] # Preserve alpha channel\n\n # Convert back to image\n combined_image = Image.fromarray(combined_array)\n\n # Save the combined image\n combined_image.save(save_path)\n print(f\"Combined image saved as {save_path}\")\n\n# # Display the combined image\n# plt.imshow(combined_image)\n# plt.axis('off')\n# plt.show()\n\n# Directory paths\nmasked_images_dir = \"/kaggle/working/restored_frames\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/hue_combined_images\"\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Get and sort the list of image 
files\nimage_files = [f for f in os.listdir(masked_images_dir) if f.startswith(\"frame_\") and f.endswith(\".png\")]\nimage_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0]))\n\n# Iterate over the sorted files\nfor image_name in image_files:\n masked_image_path = os.path.join(masked_images_dir, image_name)\n inverse_image_path = os.path.join(inverse_images_dir, image_name)\n save_path = os.path.join(output_dir, f\"hue_modified_combined_{image_name}\")\n\n # Check if the corresponding inverse image exists before combining\n if os.path.exists(inverse_image_path):\n combine_hue_modified_mask(masked_image_path, inverse_image_path, save_path, hue_shift=0.25)\n else:\n print(f\"Warning: Missing inverse file for {image_name}. Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## converting back hue change to video","metadata":{}},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/hue_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/hue_combined_images_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## side by side video of original with Hue video.","metadata":{}},{"cell_type":"code","source":"from PIL import Image\nimport os\nimport subprocess\nimport shutil\n\n# Directories for the input frames and output combined frames (switched)\ndir1 = '/kaggle/working/output_frames' # Formerly dir2https://accounts.google.com/b/0/AddMailService\ndir2 = '/kaggle/working/hue_combined_images' # Formerly dir1\noutput_dir = '/kaggle/working/hue_with_og_combined_frames'\nvideo_output = '/kaggle/working/hue_with_og_output_video.mp4'\n\n# Ensure the output directory exists and is empty\nif os.path.exists(output_dir):\n shutil.rmtree(output_dir) # Remove the directory and its contents\nos.makedirs(output_dir) # Recreate the empty directory\n\n# Remove the previous video if it exists\nif os.path.exists(video_output):\n os.remove(video_output)\n\n# Get sorted lists of the frames\nframes1 = sorted([f for f in os.listdir(dir1) if f.endswith('.jpg')])\nframes2 = sorted([f for f in os.listdir(dir2) if f.endswith('.png')])\n\n# Iterate over both directories and combine images\nfor idx, (f1, f2) in enumerate(zip(frames1, frames2), start=1):\n img1 = Image.open(os.path.join(dir1, f1))\n img2 = Image.open(os.path.join(dir2, f2))\n \n # Assuming both images have the same height, concatenate side by side\n combined_img = Image.new('RGB', (img1.width + img2.width, img1.height))\n combined_img.paste(img1, (0, 0))\n combined_img.paste(img2, (img1.width, 0))\n \n # Save combined image with a sequential name like combined_frame_001.png\n combined_img.save(os.path.join(output_dir, f\"combined_frame_{idx:03d}.png\"))\n\nprint(f\"Frames combined and saved in {output_dir}\")\n\n# List the files in the output directory to verify they exist\nprint(\"Files in output directory:\", os.listdir(output_dir))\n\n# Convert the combined frames into a video using ffmpeg\nsubprocess.run([\n 'ffmpeg', '-framerate', '30', '-i', \n f'{output_dir}/combined_frame_%03d.png', '-c:v', \n 'libx264', '-pix_fmt', 'yuv420p', video_output\n])\n\nprint(f\"Video saved as 
{video_output}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Mask replacement with another video","metadata":{}},{"cell_type":"markdown","source":"### replacement video Link required","metadata":{}},{"cell_type":"code","source":"import os\nimport numpy as np\nfrom PIL import Image\nimport cv2\n\ndef replace_area_with_frames(image, mask, replacement_frames, frame_idx):\n \"\"\"\n Replace the masked area of an image with a different video frame.\n\n Parameters:\n - image: NumPy array of the image to modify.\n - mask: Boolean NumPy array indicating the masked area.\n - replacement_frames: List of NumPy arrays, each representing a video frame to use as a replacement.\n - frame_idx: Int, the index of the current frame in the replacement sequence.\n \"\"\"\n # Create a copy of the image to modify\n modified_image = image.copy()\n\n # Get the replacement frame, use the last one if index exceeds available frames\n replacement_frame = replacement_frames[min(frame_idx, len(replacement_frames) - 1)]\n\n # Resize the replacement frame to match the image size\n replacement_frame_resized = cv2.resize(replacement_frame, (image.shape[1], image.shape[0]))\n\n # Replace the masked area with the replacement frame\n modified_image[mask] = replacement_frame_resized[mask]\n\n return modified_image\n\ndef combine_mask_with_frames(masked_image_path, inverse_masked_image_path, replacement_frames, save_path, frame_idx):\n \"\"\"\n Combine the masked areas from the masked image with the inverse-masked image, using video frames to fill the masked area.\n\n Parameters:\n - masked_image_path: String, path to the masked image.\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - replacement_frames: List of NumPy arrays, each representing a video frame to use as a replacement.\n - save_path: String, path where the combined image will be saved.\n - frame_idx: Int, the index of the current frame in the replacement sequence.\n \"\"\"\n # Open images\n masked_image = Image.open(masked_image_path).convert(\"RGBA\")\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Ensure images are the same size by resizing the inverse image\n if masked_image.size != inverse_masked_image.size:\n inverse_masked_image = inverse_masked_image.resize(masked_image.size)\n\n # Convert images to numpy arrays\n masked_array = np.array(masked_image)\n inverse_masked_array = np.array(inverse_masked_image)\n\n # Create a mask where the original mask was applied (non-zero areas in any color channel)\n mask = np.any(masked_array[..., :3] > 0, axis=-1)\n\n # Replace the masked area with frames from the video\n replaced_area = replace_area_with_frames(masked_array, mask, replacement_frames, frame_idx)\n\n # Replace inverse-masked image values with the replaced area image values where mask is true\n combined_array = inverse_masked_array.copy()\n combined_array[mask] = replaced_area[mask]\n\n # Convert back to image\n combined_image = Image.fromarray(combined_array)\n\n # Save the combined image\n combined_image.save(save_path)\n print(f\"Combined image saved as {save_path}\")\n\n# Directory paths\nmasked_images_dir = \"/kaggle/working/restored_frames\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/mask_replaced_combined_images\"\nreplacement_video_path = \"/kaggle/input/viedo-with-replacementy/Untitled 
video - Made with Clipchamp (1).mp4\" # input replacement video link\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Load the replacement video frames\nreplacement_frames = []\ncap = cv2.VideoCapture(replacement_video_path)\nwhile cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n replacement_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA))\ncap.release()\n\n# Get and sort the list of image files\nimage_files = [f for f in os.listdir(masked_images_dir) if f.startswith(\"frame_\") and f.endswith(\".png\")]\nimage_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0]))\n\n# Iterate over the sorted files\nfor frame_idx, image_name in enumerate(image_files):\n masked_image_path = os.path.join(masked_images_dir, image_name)\n inverse_image_path = os.path.join(inverse_images_dir, image_name)\n save_path = os.path.join(output_dir, f\"frame_combined_{image_name}\")\n\n # Check if the corresponding inverse image exists before combining\n if os.path.exists(inverse_image_path):\n combine_mask_with_frames(masked_image_path, inverse_image_path, replacement_frames, save_path, frame_idx)\n else:\n print(f\"Warning: Missing inverse file for {image_name}. Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### replaced mask to video ","metadata":{}},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/mask_replaced_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/mask_replaced_combined_images_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## side by side video of original with mask replaced video.","metadata":{}},{"cell_type":"code","source":"from PIL import Image\nimport os\nimport subprocess\nimport shutil\n\n# Directories for the input frames and output combined frames (switched)\ndir1 = '/kaggle/working/output_frames' \ndir2 = '/kaggle/working/mask_replaced_combined_images' \noutput_dir = '/kaggle/working/mask_replacement_with_orginal_combined_frames'\nvideo_output = '/kaggle/working/mask_replacement_with_orginal_output_video.mp4'\n\n# Ensure the output directory exists and is empty\nif os.path.exists(output_dir):\n shutil.rmtree(output_dir) # Remove the directory and its contents\nos.makedirs(output_dir) # Recreate the empty directory\n\n# Remove the previous video if it exists\nif os.path.exists(video_output):\n os.remove(video_output)\n\n# Get sorted lists of the frames\nframes1 = sorted([f for f in os.listdir(dir1) if f.endswith('.jpg')])\nframes2 = sorted([f for f in os.listdir(dir2) if f.endswith('.png')])\n\n# Iterate over both directories and combine images\nfor idx, (f1, f2) in enumerate(zip(frames1, frames2), start=1):\n img1 = Image.open(os.path.join(dir1, f1))\n img2 = Image.open(os.path.join(dir2, f2))\n \n # Assuming both images have the same height, concatenate side by side\n combined_img = Image.new('RGB', (img1.width + img2.width, img1.height))\n combined_img.paste(img1, (0, 0))\n combined_img.paste(img2, (img1.width, 0))\n \n # Save combined image with a sequential name like combined_frame_001.png\n combined_img.save(os.path.join(output_dir, f\"combined_frame_{idx:03d}.png\"))\n\nprint(f\"Frames combined and saved in {output_dir}\")\n\n# List the files in the output directory to verify they 
exist\nprint(\"Files in output directory:\", os.listdir(output_dir))\n\n# Convert the combined frames into a video using ffmpeg\nsubprocess.run([\n 'ffmpeg', '-framerate', '30', '-i', \n f'{output_dir}/combined_frame_%03d.png', '-c:v', \n 'libx264', '-pix_fmt', 'yuv420p', video_output\n])\n\nprint(f\"Video saved as {video_output}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Masked area glow effect in video","metadata":{}},{"cell_type":"code","source":"from PIL import Image, ImageFilter\n\ndef apply_blur_to_masked_area(image, mask, blur_radius=10):\n \"\"\"\n Apply a blur effect to the masked area of an image.\n\n Parameters:\n - image: PIL Image object of the original image.\n - mask: Boolean NumPy array indicating the masked area.\n - blur_radius: Integer, the radius of the Gaussian blur for the blur effect.\n \"\"\"\n # Convert image to numpy array\n image_array = np.array(image)\n\n # Create a mask image\n mask_image = Image.fromarray((mask * 255).astype('uint8'), mode='L')\n\n # Apply a Gaussian blur to the mask image\n blurred_mask_image = mask_image.filter(ImageFilter.GaussianBlur(radius=blur_radius))\n\n # Convert the blurred mask to RGB\n blurred_mask_image = blurred_mask_image.convert('RGB')\n blurred_mask_array = np.array(blurred_mask_image)\n\n # Create an image with the same dimensions as the original image\n blurred_area = np.zeros_like(image_array[..., :3])\n blurred_area[mask] = blurred_mask_array[mask]\n\n # Combine the blurred area with the original image\n combined_array = np.where(blurred_area > 0, blurred_area, image_array[..., :3])\n combined_image = Image.fromarray(np.uint8(combined_array))\n\n # Preserve the alpha channel from the original image\n alpha_channel = image_array[..., 3]\n combined_image = Image.fromarray(np.dstack((combined_array, alpha_channel)))\n\n return combined_image\n\ndef combine_and_apply_blur(masked_image_path, inverse_masked_image_path, save_path, blur_radius):\n \"\"\"\n Apply a blur effect to the masked image and save the result.\n\n Parameters:\n - masked_image_path: String, path to the masked image (used to extract the mask).\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - save_path: String, path where the final image will be saved.\n - blur_radius: Integer, the radius of the Gaussian blur for the blur effect.\n \"\"\"\n # Open inverse-masked image\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Extract the mask from the masked image\n masked_image = Image.open(masked_image_path).convert(\"L\")\n mask = np.array(masked_image) > 0\n\n # Apply blur effect to the masked area\n blurred_image = apply_blur_to_masked_area(inverse_masked_image, mask, blur_radius)\n\n # Save the final image\n blurred_image.save(save_path)\n print(f\"Final image with blur effect saved as {save_path}\")\n\n# # Display the final image\n# plt.imshow(blurred_image)\n# plt.axis('off')\n# plt.show()\n\n# Directory paths\nmasked_images_dir = \"/kaggle/working/restored_frames\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/blur_combined_images\"\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Get and sort the list of image files\nimage_files = [f for f in os.listdir(masked_images_dir) if f.startswith(\"frame_\") and f.endswith(\".png\")]\nimage_files.sort(key=lambda f: int(f.split('_')[-1].split('.')[0]))\n\n# Define blur radius\nblur_radius = 10\n\n# Iterate 
over the sorted files\nfor image_name in image_files:\n masked_image_path = os.path.join(masked_images_dir, image_name)\n inverse_image_path = os.path.join(inverse_images_dir, image_name)\n save_path = os.path.join(output_dir, f\"blur_combined_{image_name}\")\n\n # Check if the corresponding inverse image exists before combining\n if os.path.exists(inverse_image_path):\n combine_and_apply_blur(masked_image_path, inverse_image_path, save_path, blur_radius)\n else:\n print(f\"Warning: Missing inverse file for {image_name}. Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### converting glow effect frames into video ","metadata":{}},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/blur_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/blur_combined_images_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Generative AI propagation in video","metadata":{}},{"cell_type":"code","source":"!pip install stability-sdk","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## Single API mask video generation","metadata":{}},{"cell_type":"markdown","source":"### single API key code uses less stability AI credits can generate upto ~110 frames using 25 credits at below given configuration in code.\n\n### to generate API key from stability AI , signup on statbility ai platform (gives 25 $ free credit on new account) , copy API key and paste in the below code","metadata":{}},{"cell_type":"markdown","source":"#### Note: Due to generate high no. 
of frames quality is significantly poor for single API key","metadata":{}},{"cell_type":"code","source":"import os\nimport io\nimport warnings\nfrom PIL import Image\nfrom stability_sdk import client\nimport stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation\n\n# Our Host URL should not be prepended with \"https\" nor should it have a trailing slash.\nos.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'\n\n# Sign up for an account at the following link to get an API Key.\n# https://platform.stability.ai/\n\n# Click on the following link once you have created an account to be taken to your API Key.\n# https://platform.stability.ai/account/keys\n\n# Paste your API Key below.\n\nos.environ['STABILITY_KEY'] = 'sk-23mieeVXXXXXXXXXAegcZW3DZpGIz0M5'","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Set up our connection to the API.\nstability_api = client.StabilityInference(\n key=os.environ['STABILITY_KEY'], # API Key reference.\n verbose=True, # Print debug messages.\n engine=\"stable-diffusion-xl-1024-v1-0\", # Set the engine to use for generation.\n # Check out the following link for a list of available engines: https://platform.stability.ai/docs/features/api-parameters#engine\n)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"import os\nimport io\nimport warnings\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndef clear_output_directory(directory):\n \"\"\"\n Remove all files in the given directory.\n \"\"\"\n if os.path.exists(directory):\n for file in os.listdir(directory):\n file_path = os.path.join(directory, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(f\"Failed to delete {file_path}. 
Reason: {e}\")\n\ndef resize_image(image_path, output_path, max_size=1024):\n \"\"\"\n Resize an image if it exceeds the max_size dimension.\n \"\"\"\n # Open the image\n image = Image.open(image_path)\n\n # Get the current width and height of the image\n width, height = image.size\n\n # Calculate the scaling factor\n if width > height:\n scaling_factor = max_size / width\n else:\n scaling_factor = max_size / height\n\n # Only resize if the image is larger than the max_size\n if scaling_factor < 1:\n # Calculate new dimensions\n new_width = int(width * scaling_factor)\n new_height = int(height * scaling_factor)\n\n # Resize the image\n image_resized = image.resize((new_width, new_height))\n\n # Save the resized image\n image_resized.save(output_path)\n print(f\"Image resized to {new_width}x{new_height} and saved as {output_path}\")\n else:\n # Save the original image without resizing\n image.save(output_path)\n print(f\"Image is already within the size limits and saved as {output_path}\")\n\ndef generate_image_from_masked(input_image_path, output_image_path):\n \"\"\"\n Generate a new image from a masked image using an image-to-image model.\n \"\"\"\n # Open and possibly resize the image\n resized_image_path = '/kaggle/working/temp_resized_image.jpg'\n resize_image(input_image_path, resized_image_path)\n\n # Open the resized image\n img = Image.open(resized_image_path)\n\n # Get the dimensions of the image\n width, height = img.size\n\n # Set up our initial generation parameters.\n answers = stability_api.generate(\n prompt=\"bottle with glowing effect holding magical potion, alphonse mucha and simon stalenhag style\",\n seed = 69696969,\n init_image=img, # Assign our previously generated img as our Initial Image for transformation.\n start_schedule=0.6, # Set the strength of our prompt in relation to our initial image.\n steps=30, # Amount of inference steps performed on image generation. 
Defaults to 30.\n cfg_scale=10.0, # Influences how strongly your generation is guided to match your prompt.\n width=width, # Generation width\n height=height, # Generation height\n sampler=generation.SAMPLER_DDIM, # Sampler type\n style_preset=\"comic-book\" # Style preset\n )\n\n # Process the response and save the image\n for resp in answers:\n for artifact in resp.artifacts:\n if artifact.finish_reason == generation.FILTER:\n warnings.warn(\n \"Your request activated the API's safety filters and could not be processed.\"\n \"Please modify the prompt and try again.\")\n if artifact.type == generation.ARTIFACT_IMAGE:\n img2 = Image.open(io.BytesIO(artifact.binary))\n img2.save(output_image_path)\n print(f\"Generated image saved as {output_image_path}\")\n\n# Directory paths\nmasked_images_dir = '/kaggle/working/restored_frames'\noutput_gen_dir = '/kaggle/working/mask_gen'\nos.makedirs(output_gen_dir, exist_ok=True)\n\n# Clear the output directory\nclear_output_directory(output_gen_dir)\n\n# Iterate over each masked image and apply image-to-image generation\nfor masked_image_name in os.listdir(masked_images_dir):\n masked_image_path = os.path.join(masked_images_dir, masked_image_name)\n output_image_path = os.path.join(output_gen_dir, f\"gen_{masked_image_name}\")\n\n # Generate new image from the masked image\n generate_image_from_masked(masked_image_path, output_image_path)\n\n # Optional: Display the generated image\n out_img = Image.open(output_image_path)\n plt.imshow(out_img)\n plt.title(f\"Generated from {masked_image_name}\")\n plt.show()\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/mask_gen' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/mask_gen_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"from PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\ndef combine_masked_regions(masked_image_path, inverse_masked_image_path, save_path):\n \"\"\"\n Combine the original mask areas from the masked image with the inverse-masked image.\n\n Parameters:\n - masked_image_path: String, path to the masked image.\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - save_path: String, path where the combined image will be saved.\n \"\"\"\n # Open images\n masked_image = Image.open(masked_image_path).convert(\"RGBA\")\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Ensure images are the same size by resizing the inverse image\n if masked_image.size != inverse_masked_image.size:\n inverse_masked_image = inverse_masked_image.resize(masked_image.size)\n\n # Convert images to numpy arrays\n masked_array = np.array(masked_image)\n inverse_masked_array = np.array(inverse_masked_image)\n\n # Create a mask where the original mask was applied (non-zero areas in any color channel)\n mask = np.any(masked_array[..., :3] > 30, axis=-1)\n\n # Replace inverse-masked image values with masked image values where mask is true\n combined_array = inverse_masked_array.copy()\n combined_array[mask] = masked_array[mask]\n\n # Convert back to image\n combined_image = Image.fromarray(combined_array)\n\n # Save the combined image\n combined_image.save(save_path)\n print(f\"Combined image saved as {save_path}\")\n\n# # Display the 
combined image\n# plt.imshow(combined_image)\n# plt.axis('off')\n# plt.show()\n\n# Define directory paths\nmasked_images_dir = \"/kaggle/working/mask_gen\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/Generative_combined_images\"\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Get lists of files in the masked directory\nmasked_images = sorted(os.listdir(masked_images_dir))\n\n# Process files with matching names based on pattern\nfor masked_image_name in masked_images:\n if masked_image_name.startswith(\"gen_frame_\") and masked_image_name.endswith(\".png\"):\n # Extract the index number from the masked image name\n index = masked_image_name[len(\"gen_frame_\"):-len(\".png\")]\n\n # Generate the corresponding inverse image name\n inverse_image_name = f\"frame_{index}.png\"\n\n masked_image_path = os.path.join(masked_images_dir, masked_image_name)\n inverse_image_path = os.path.join(inverse_images_dir, inverse_image_name)\n save_path = os.path.join(output_dir, f\"combined_frame_{index}.png\")\n\n # Check if both files exist before combining\n if os.path.exists(masked_image_path) and os.path.exists(inverse_image_path):\n combine_masked_regions(masked_image_path, inverse_image_path, save_path)\n else:\n print(f\"Warning: Missing files for frame {index}. Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"### frames to video ","metadata":{}},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/Generative_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/Generative_combined_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## generating using multiple APIs","metadata":{}},{"cell_type":"markdown","source":"### using Multiple keys with better output of image to image generation, the below code can generate ~ 50 frames per 25 credits or 1 free new signup. 
","metadata":{}},{"cell_type":"code","source":"import os\nimport io\nimport warnings\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom stability_sdk import client\nimport stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation\n\n# List of API keys\napi_keys = [\n 'sk-3GPp1EOphrXXXXXXXXXX3dmwrbji1iPK3',\n 'sk-6TygJFuBfiQWc7XXXXXXXXXXXqj8aMncmLYrYqpwE1Lv'\n # Add more API keys here\n]\n\n# Directory paths\nmasked_images_dir = '/kaggle/working/restored_frames'\noutput_gen_dir = '/kaggle/working/HD_mask_gen'\n\nos.makedirs(output_gen_dir, exist_ok=True)\n\ndef initialize_stability_api(api_key):\n \"\"\"\n Initialize the Stability API client with the given API key.\n \"\"\"\n return client.StabilityInference(\n key=api_key, # API Key reference.\n verbose=True, # Print debug messages.\n engine=\"stable-diffusion-xl-1024-v1-0\", # Set the engine to use for generation.\n )\n\ndef resize_image(image_path, output_path, max_size=1024):\n \"\"\"\n Resize an image if it exceeds the max_size dimension.\n \"\"\"\n # Open the image\n image = Image.open(image_path)\n\n # Get the current width and height of the image\n width, height = image.size\n\n # Calculate the scaling factor\n if width > height:\n scaling_factor = max_size / width\n else:\n scaling_factor = max_size / height\n\n # Only resize if the image is larger than the max_size\n if scaling_factor < 1:\n # Calculate new dimensions\n new_width = int(width * scaling_factor)\n new_height = int(height * scaling_factor)\n\n # Resize the image\n image_resized = image.resize((new_width, new_height))\n\n # Save the resized image\n image_resized.save(output_path)\n print(f\"Image resized to {new_width}x{new_height} and saved as {output_path}\")\n else:\n # Save the original image without resizing\n image.save(output_path)\n print(f\"Image is already within the size limits and saved as {output_path}\")\n\ndef generate_image_from_masked(api, input_image_path, output_image_path):\n \"\"\"\n Generate a new image from a masked image using an image-to-image model.\n \"\"\"\n # Open and possibly resize the image\n resized_image_path = '/kaggle/working/temp_resized_image.jpg'\n resize_image(input_image_path, resized_image_path)\n\n # Open the resized image\n img = Image.open(resized_image_path)\n\n # Get the dimensions of the image\n width, height = img.size\n\n # Set up our initial generation parameters.\n answers = api.generate(\n prompt=\"soccer ball covered in flames,blazing fireball,eldenring fireball,flames, shiny golden\",\n init_image=img, # Assign our previously generated img as our Initial Image for transformation.\n seed = 69696969,\n start_schedule=0.6, # Set the strength of our prompt in relation to our initial image.\n steps=65, # Amount of inference steps performed on image generation. 
Defaults to 30.\n cfg_scale=10.0, # Influences how strongly your generation is guided to match your prompt.\n width=width, # Generation width\n height=height, # Generation height\n sampler=generation.SAMPLER_K_DPMPP_SDE, # Sampler type\n style_preset=\"fantasy-art\" # Style preset\n )\n\n # Process the response and save the image\n for resp in answers:\n for artifact in resp.artifacts:\n if artifact.finish_reason == generation.FILTER:\n warnings.warn(\n \"Your request activated the API's safety filters and could not be processed.\"\n \"Please modify the prompt and try again.\")\n if artifact.type == generation.ARTIFACT_IMAGE:\n img2 = Image.open(io.BytesIO(artifact.binary))\n img2.save(output_image_path)\n print(f\"Generated image saved as {output_image_path}\")\n\n# Initialize the first Stability API client\nstability_api = initialize_stability_api(api_keys[0])\n\n# Iterate over each masked image and apply image-to-image generation\nfor i, masked_image_name in enumerate(os.listdir(masked_images_dir)):\n # Change API key every 50 frames\n if i > 0 and i % 50 == 0:\n api_index = (i // 50) % len(api_keys) # Calculate the API key index\n stability_api = initialize_stability_api(api_keys[api_index])\n\n masked_image_path = os.path.join(masked_images_dir, masked_image_name)\n output_image_path = os.path.join(output_gen_dir, f\"gen_{masked_image_name}\")\n\n # Generate new image from the masked image\n generate_image_from_masked(stability_api, masked_image_path, output_image_path)\n\n # Optional: Display the generated image\n out_img = Image.open(output_image_path)\n plt.imshow(out_img)\n plt.title(f\"Generated from {masked_image_name}\")\n plt.show()\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/HD_mask_gen' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/HD_mask_gen_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"from PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\ndef combine_masked_regions(masked_image_path, inverse_masked_image_path, save_path):\n \"\"\"\n Combine the original mask areas from the masked image with the inverse-masked image.\n\n Parameters:\n - masked_image_path: String, path to the masked image.\n - inverse_masked_image_path: String, path to the inverse-masked image.\n - save_path: String, path where the combined image will be saved.\n \"\"\"\n # Open images\n masked_image = Image.open(masked_image_path).convert(\"RGBA\")\n inverse_masked_image = Image.open(inverse_masked_image_path).convert(\"RGBA\")\n\n # Ensure images are the same size by resizing the inverse image\n if masked_image.size != inverse_masked_image.size:\n inverse_masked_image = inverse_masked_image.resize(masked_image.size)\n\n # Convert images to numpy arrays\n masked_array = np.array(masked_image)\n inverse_masked_array = np.array(inverse_masked_image)\n\n # Create a mask where the original mask was applied (non-zero areas in any color channel)\n mask = np.any(masked_array[..., :3] > 30, axis=-1)\n\n # Replace inverse-masked image values with masked image values where mask is true\n combined_array = inverse_masked_array.copy()\n combined_array[mask] = masked_array[mask]\n\n # Convert back to image\n combined_image = Image.fromarray(combined_array)\n\n # Save the combined 
image\n combined_image.save(save_path)\n print(f\"Combined image saved as {save_path}\")\n\n# # Display the combined image\n# plt.imshow(combined_image)\n# plt.axis('off')\n# plt.show()\n\n# Define directory paths\nmasked_images_dir = \"/kaggle/working/HD_mask_gen\"\ninverse_images_dir = \"/kaggle/working/inverse_restored_frames\"\noutput_dir = \"/kaggle/working/HD_Generative_combined_images\"\n\n# Ensure the output directory exists\nos.makedirs(output_dir, exist_ok=True)\n\n# Get lists of files in the masked directory\nmasked_images = sorted(os.listdir(masked_images_dir))\n\n# Process files with matching names based on pattern\nfor masked_image_name in masked_images:\n if masked_image_name.startswith(\"gen_frame_\") and masked_image_name.endswith(\".png\"):\n # Extract the index number from the masked image name\n index = masked_image_name[len(\"gen_frame_\"):-len(\".png\")]\n\n # Generate the corresponding inverse image name\n inverse_image_name = f\"frame_{index}.png\"\n\n masked_image_path = os.path.join(masked_images_dir, masked_image_name)\n inverse_image_path = os.path.join(inverse_images_dir, inverse_image_name)\n save_path = os.path.join(output_dir, f\"combined_frame_{index}.png\")\n\n # Check if both files exist before combining\n if os.path.exists(masked_image_path) and os.path.exists(inverse_image_path):\n combine_masked_regions(masked_image_path, inverse_image_path, save_path)\n else:\n print(f\"Warning: Missing files for frame {index}. Skipping combination.\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Example usage\nframes_folder = '/kaggle/working/HD_Generative_combined_images' # Replace with the folder containing your frames\noutput_video_path = \"/kaggle/working/HD_Generative_combined_output_video.mp4\" # Desired output video file path\n\nframes_to_video(frames_folder, output_video_path, fps=30)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"## side by side video of original with Img2Img generated video.","metadata":{}},{"cell_type":"code","source":"from PIL import Image\nimport os\nimport subprocess\nimport shutil\n\n# Directories for the input frames and output combined frames (switched)\ndir1 = '/kaggle/working/output_frames' # Formerly dir2\ndir2 = '/kaggle/working/HD_Generative_combined_images' # Formerly dir1\noutput_dir = '/kaggle/working/genai_with_replacement_combined_frames'\nvideo_output = '/kaggle/working/genai_with_replacement_output_video.mp4'\n\n# Ensure the output directory exists and is empty\nif os.path.exists(output_dir):\n shutil.rmtree(output_dir) # Remove the directory and its contents\nos.makedirs(output_dir) # Recreate the empty directory\n\n# Remove the previous video if it exists\nif os.path.exists(video_output):\n os.remove(video_output)\n\n# Get sorted lists of the frames\nframes1 = sorted([f for f in os.listdir(dir1) if f.endswith('.jpg')])\nframes2 = sorted([f for f in os.listdir(dir2) if f.endswith('.png')])\n\n# Iterate over both directories and combine images\nfor idx, (f1, f2) in enumerate(zip(frames1, frames2), start=1):\n img1 = Image.open(os.path.join(dir1, f1))\n img2 = Image.open(os.path.join(dir2, f2))\n \n # Resize the larger image to match the height of the smaller one while maintaining the aspect ratio\n if img1.height > img2.height:\n img1 = img1.resize((int(img1.width * (img2.height / img1.height)), img2.height), Image.LANCZOS)\n elif img2.height > img1.height:\n img2 = img2.resize((int(img2.width * (img1.height / 
img2.height)), img1.height), Image.LANCZOS)\n \n # Combine images side by side\n combined_img = Image.new('RGB', (img1.width + img2.width, img1.height))\n combined_img.paste(img1, (0, 0))\n combined_img.paste(img2, (img1.width, 0))\n \n # Save combined image with a sequential name like combined_frame_001.png\n combined_img.save(os.path.join(output_dir, f\"combined_frame_{idx:03d}.png\"))\n\nprint(f\"Frames combined and saved in {output_dir}\")\n\n# List the files in the output directory to verify they exist\nprint(\"Files in output directory:\", os.listdir(output_dir))\n\n# Convert the combined frames into a video using ffmpeg\nsubprocess.run([\n 'ffmpeg', '-framerate', '30', '-i', \n f'{output_dir}/combined_frame_%03d.png', '-c:v', \n 'libx264', '-pix_fmt', 'yuv420p', video_output\n])\n\nprint(f\"Video saved as {video_output}\")\n","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"# Thank you!!!","metadata":{}},{"cell_type":"code","source":"","metadata":{},"execution_count":null,"outputs":[]}]}
sam2/__init__.py
ADDED
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from hydra import initialize_config_module
+
+initialize_config_module("sam2_configs", version_base="1.2")
sam2/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (298 Bytes).

sam2/__pycache__/build_sam.cpython-312.pyc
ADDED
Binary file (2.85 kB).

sam2/__pycache__/sam2_image_predictor.cpython-312.pyc
ADDED
Binary file (21.8 kB).

sam2/__pycache__/sam2_video_predictor.cpython-312.pyc
ADDED
Binary file (28.5 kB).
sam2/automatic_mask_generator.py
ADDED
@@ -0,0 +1,434 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
+
+from sam2.modeling.sam2_base import SAM2Base
+from sam2.sam2_image_predictor import SAM2ImagePredictor
+from sam2.utils.amg import (
+    area_from_rle,
+    batch_iterator,
+    batched_mask_to_box,
+    box_xyxy_to_xywh,
+    build_all_layer_point_grids,
+    calculate_stability_score,
+    coco_encode_rle,
+    generate_crop_boxes,
+    is_box_near_crop_edge,
+    mask_to_rle_pytorch,
+    MaskData,
+    remove_small_regions,
+    rle_to_mask,
+    uncrop_boxes_xyxy,
+    uncrop_masks,
+    uncrop_points,
+)
+
+
+class SAM2AutomaticMaskGenerator:
+    def __init__(
+        self,
+        model: SAM2Base,
+        points_per_side: Optional[int] = 32,
+        points_per_batch: int = 64,
+        pred_iou_thresh: float = 0.8,
+        stability_score_thresh: float = 0.95,
+        stability_score_offset: float = 1.0,
+        mask_threshold: float = 0.0,
+        box_nms_thresh: float = 0.7,
+        crop_n_layers: int = 0,
+        crop_nms_thresh: float = 0.7,
+        crop_overlap_ratio: float = 512 / 1500,
+        crop_n_points_downscale_factor: int = 1,
+        point_grids: Optional[List[np.ndarray]] = None,
+        min_mask_region_area: int = 0,
+        output_mode: str = "binary_mask",
+        use_m2m: bool = False,
+        multimask_output: bool = True,
+    ) -> None:
+        """
+        Using a SAM 2 model, generates masks for the entire image.
+        Generates a grid of point prompts over the image, then filters
+        low quality and duplicate masks. The default settings are chosen
+        for SAM 2 with a HieraL backbone.
+
+        Arguments:
+          model (Sam): The SAM 2 model to use for mask prediction.
+          points_per_side (int or None): The number of points to be sampled
+            along one side of the image. The total number of points is
+            points_per_side**2. If None, 'point_grids' must provide explicit
+            point sampling.
+          points_per_batch (int): Sets the number of points run simultaneously
+            by the model. Higher numbers may be faster but use more GPU memory.
+          pred_iou_thresh (float): A filtering threshold in [0,1], using the
+            model's predicted mask quality.
+          stability_score_thresh (float): A filtering threshold in [0,1], using
+            the stability of the mask under changes to the cutoff used to binarize
+            the model's mask predictions.
+          stability_score_offset (float): The amount to shift the cutoff when
+            calculating the stability score.
+          mask_threshold (float): Threshold for binarizing the mask logits
+          box_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks.
+          crop_n_layers (int): If >0, mask prediction will be run again on
+            crops of the image. Sets the number of layers to run, where each
+            layer has 2**i_layer number of image crops.
+          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks between different crops.
+          crop_overlap_ratio (float): Sets the degree to which crops overlap.
+            In the first crop layer, crops will overlap by this fraction of
+            the image length. Later layers with more crops scale down this overlap.
+          crop_n_points_downscale_factor (int): The number of points-per-side
+            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+          point_grids (list(np.ndarray) or None): A list over explicit grids
+            of points used for sampling, normalized to [0,1]. The nth grid in the
+            list is used in the nth crop layer. Exclusive with points_per_side.
+          min_mask_region_area (int): If >0, postprocessing will be applied
+            to remove disconnected regions and holes in masks with area smaller
+            than min_mask_region_area. Requires opencv.
+          output_mode (str): The form masks are returned in. Can be 'binary_mask',
+            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+            For large resolutions, 'binary_mask' may consume large amounts of
+            memory.
+          use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
+          multimask_output (bool): Whether to output multimask at each point of the grid.
+        """
+
+        assert (points_per_side is None) != (
+            point_grids is None
+        ), "Exactly one of points_per_side or point_grid must be provided."
+        if points_per_side is not None:
+            self.point_grids = build_all_layer_point_grids(
+                points_per_side,
+                crop_n_layers,
+                crop_n_points_downscale_factor,
+            )
+        elif point_grids is not None:
+            self.point_grids = point_grids
+        else:
+            raise ValueError("Can't have both points_per_side and point_grid be None.")
+
+        assert output_mode in [
+            "binary_mask",
+            "uncompressed_rle",
+            "coco_rle",
+        ], f"Unknown output_mode {output_mode}."
+        if output_mode == "coco_rle":
+            try:
+                from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
+            except ImportError as e:
+                print("Please install pycocotools")
+                raise e
+
+        self.predictor = SAM2ImagePredictor(
+            model,
+            max_hole_area=min_mask_region_area,
+            max_sprinkle_area=min_mask_region_area,
+        )
+        self.points_per_batch = points_per_batch
+        self.pred_iou_thresh = pred_iou_thresh
+        self.stability_score_thresh = stability_score_thresh
+        self.stability_score_offset = stability_score_offset
+        self.mask_threshold = mask_threshold
+        self.box_nms_thresh = box_nms_thresh
+        self.crop_n_layers = crop_n_layers
+        self.crop_nms_thresh = crop_nms_thresh
+        self.crop_overlap_ratio = crop_overlap_ratio
+        self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
+        self.min_mask_region_area = min_mask_region_area
+        self.output_mode = output_mode
+        self.use_m2m = use_m2m
+        self.multimask_output = multimask_output
+
+    @torch.no_grad()
+    def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
+        """
+        Generates masks for the given image.
+
+        Arguments:
+          image (np.ndarray): The image to generate masks for, in HWC uint8 format.
+
+        Returns:
+           list(dict(str, any)): A list over records for masks. Each record is
+             a dict containing the following keys:
+               segmentation (dict(str, any) or np.ndarray): The mask. If
+                 output_mode='binary_mask', is an array of shape HW. Otherwise,
+                 is a dictionary containing the RLE.
+               bbox (list(float)): The box around the mask, in XYWH format.
+               area (int): The area in pixels of the mask.
+               predicted_iou (float): The model's own prediction of the mask's
+                 quality. This is filtered by the pred_iou_thresh parameter.
+               point_coords (list(list(float))): The point coordinates input
+                 to the model to generate this mask.
+               stability_score (float): A measure of the mask's quality. This
+                 is filtered on using the stability_score_thresh parameter.
+               crop_box (list(float)): The crop of the image used to generate
+                 the mask, given in XYWH format.
+        """
+
+        # Generate masks
+        mask_data = self._generate_masks(image)
+
+        # Encode masks
+        if self.output_mode == "coco_rle":
+            mask_data["segmentations"] = [
+                coco_encode_rle(rle) for rle in mask_data["rles"]
+            ]
+        elif self.output_mode == "binary_mask":
+            mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
+        else:
+            mask_data["segmentations"] = mask_data["rles"]
+
+        # Write mask records
+        curr_anns = []
+        for idx in range(len(mask_data["segmentations"])):
+            ann = {
+                "segmentation": mask_data["segmentations"][idx],
+                "area": area_from_rle(mask_data["rles"][idx]),
+                "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
+                "predicted_iou": mask_data["iou_preds"][idx].item(),
+                "point_coords": [mask_data["points"][idx].tolist()],
+                "stability_score": mask_data["stability_score"][idx].item(),
+                "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
+            }
+            curr_anns.append(ann)
+
+        return curr_anns
+
+    def _generate_masks(self, image: np.ndarray) -> MaskData:
+        orig_size = image.shape[:2]
+        crop_boxes, layer_idxs = generate_crop_boxes(
+            orig_size, self.crop_n_layers, self.crop_overlap_ratio
+        )
+
+        # Iterate over image crops
+        data = MaskData()
+        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
+            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
+            data.cat(crop_data)
+
+        # Remove duplicate masks between crops
+        if len(crop_boxes) > 1:
+            # Prefer masks from smaller crops
+            scores = 1 / box_area(data["crop_boxes"])
+            scores = scores.to(data["boxes"].device)
+            keep_by_nms = batched_nms(
+                data["boxes"].float(),
+                scores,
+                torch.zeros_like(data["boxes"][:, 0]),  # categories
+                iou_threshold=self.crop_nms_thresh,
+            )
+            data.filter(keep_by_nms)
+        data.to_numpy()
+        return data
+
+    def _process_crop(
+        self,
+        image: np.ndarray,
+        crop_box: List[int],
+        crop_layer_idx: int,
+        orig_size: Tuple[int, ...],
+    ) -> MaskData:
+        # Crop the image and calculate embeddings
+        x0, y0, x1, y1 = crop_box
+        cropped_im = image[y0:y1, x0:x1, :]
+        cropped_im_size = cropped_im.shape[:2]
+        self.predictor.set_image(cropped_im)
+
+        # Get points for this crop
+        points_scale = np.array(cropped_im_size)[None, ::-1]
+        points_for_image = self.point_grids[crop_layer_idx] * points_scale
+
+        # Generate masks for this crop in batches
+        data = MaskData()
+        for (points,) in batch_iterator(self.points_per_batch, points_for_image):
+            batch_data = self._process_batch(
+                points, cropped_im_size, crop_box, orig_size, normalize=True
+            )
+            data.cat(batch_data)
+            del batch_data
+        self.predictor.reset_predictor()
+
+        # Remove duplicates within this crop.
+        keep_by_nms = batched_nms(
+            data["boxes"].float(),
+            data["iou_preds"],
+            torch.zeros_like(data["boxes"][:, 0]),  # categories
+            iou_threshold=self.box_nms_thresh,
+        )
+        data.filter(keep_by_nms)
+
+        # Return to the original image frame
+        data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
+        data["points"] = uncrop_points(data["points"], crop_box)
+        data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
+
+        return data
+
+    def _process_batch(
+        self,
+        points: np.ndarray,
+        im_size: Tuple[int, ...],
+        crop_box: List[int],
+        orig_size: Tuple[int, ...],
+        normalize=False,
+    ) -> MaskData:
+        orig_h, orig_w = orig_size
+
+        # Run model on this batch
+        points = torch.as_tensor(points, device=self.predictor.device)
+        in_points = self.predictor._transforms.transform_coords(
+            points, normalize=normalize, orig_hw=im_size
+        )
+        in_labels = torch.ones(
+            in_points.shape[0], dtype=torch.int, device=in_points.device
+        )
+        masks, iou_preds, low_res_masks = self.predictor._predict(
+            in_points[:, None, :],
+            in_labels[:, None],
+            multimask_output=self.multimask_output,
+            return_logits=True,
+        )
+
+        # Serialize predictions and store in MaskData
+        data = MaskData(
+            masks=masks.flatten(0, 1),
+            iou_preds=iou_preds.flatten(0, 1),
+            points=points.repeat_interleave(masks.shape[1], dim=0),
+            low_res_masks=low_res_masks.flatten(0, 1),
+        )
+        del masks
+
+        if not self.use_m2m:
+            # Filter by predicted IoU
+            if self.pred_iou_thresh > 0.0:
+                keep_mask = data["iou_preds"] > self.pred_iou_thresh
+                data.filter(keep_mask)
+
+            # Calculate and filter by stability score
+            data["stability_score"] = calculate_stability_score(
+                data["masks"], self.mask_threshold, self.stability_score_offset
+            )
+            if self.stability_score_thresh > 0.0:
+                keep_mask = data["stability_score"] >= self.stability_score_thresh
+                data.filter(keep_mask)
+        else:
+            # One step refinement using previous mask predictions
+            in_points = self.predictor._transforms.transform_coords(
+                data["points"], normalize=normalize, orig_hw=im_size
+            )
+            labels = torch.ones(
+                in_points.shape[0], dtype=torch.int, device=in_points.device
+            )
+            masks, ious = self.refine_with_m2m(
+                in_points, labels, data["low_res_masks"], self.points_per_batch
+            )
+            data["masks"] = masks.squeeze(1)
+            data["iou_preds"] = ious.squeeze(1)
+
+            if self.pred_iou_thresh > 0.0:
+                keep_mask = data["iou_preds"] > self.pred_iou_thresh
+                data.filter(keep_mask)
+
+            data["stability_score"] = calculate_stability_score(
+                data["masks"], self.mask_threshold, self.stability_score_offset
+            )
+            if self.stability_score_thresh > 0.0:
+                keep_mask = data["stability_score"] >= self.stability_score_thresh
+                data.filter(keep_mask)
+
+        # Threshold masks and calculate boxes
+        data["masks"] = data["masks"] > self.mask_threshold
+        data["boxes"] = batched_mask_to_box(data["masks"])
+
+        # Filter boxes that touch crop boundaries
+        keep_mask = ~is_box_near_crop_edge(
+            data["boxes"], crop_box, [0, 0, orig_w, orig_h]
+        )
+        if not torch.all(keep_mask):
+            data.filter(keep_mask)
+
+        # Compress to RLE
+        data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
+        data["rles"] = mask_to_rle_pytorch(data["masks"])
+        del data["masks"]
+
+        return data
+
+    @staticmethod
+    def postprocess_small_regions(
+        mask_data: MaskData, min_area: int, nms_thresh: float
+    ) -> MaskData:
+        """
+        Removes small disconnected regions and holes in masks, then reruns
+        box NMS to remove any new duplicates.
+
+        Edits mask_data in place.
+
+        Requires open-cv as a dependency.
+        """
+        if len(mask_data["rles"]) == 0:
+            return mask_data
+
+        # Filter small disconnected regions and holes
+        new_masks = []
+        scores = []
+        for rle in mask_data["rles"]:
+            mask = rle_to_mask(rle)
+
+            mask, changed = remove_small_regions(mask, min_area, mode="holes")
+            unchanged = not changed
+            mask, changed = remove_small_regions(mask, min_area, mode="islands")
+            unchanged = unchanged and not changed
+
+            new_masks.append(torch.as_tensor(mask).unsqueeze(0))
+            # Give score=0 to changed masks and score=1 to unchanged masks
+            # so NMS will prefer ones that didn't need postprocessing
+            scores.append(float(unchanged))
+
+        # Recalculate boxes and remove any new duplicates
+        masks = torch.cat(new_masks, dim=0)
+        boxes = batched_mask_to_box(masks)
+        keep_by_nms = batched_nms(
+            boxes.float(),
+            torch.as_tensor(scores),
+            torch.zeros_like(boxes[:, 0]),  # categories
+            iou_threshold=nms_thresh,
+        )
+
+        # Only recalculate RLEs for masks that have changed
+        for i_mask in keep_by_nms:
+            if scores[i_mask] == 0.0:
+                mask_torch = masks[i_mask].unsqueeze(0)
+                mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
+                mask_data["boxes"][i_mask] = boxes[i_mask]  # update res directly
+        mask_data.filter(keep_by_nms)
+
+        return mask_data
+
+    def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
+        new_masks = []
+        new_iou_preds = []
+
+        for cur_points, cur_point_labels, low_res_mask in batch_iterator(
+            points_per_batch, points, point_labels, low_res_masks
+        ):
+            best_masks, best_iou_preds, _ = self.predictor._predict(
+                cur_points[:, None, :],
+                cur_point_labels[:, None],
+                mask_input=low_res_mask[:, None, :],
+                multimask_output=False,
+                return_logits=True,
+            )
+            new_masks.append(best_masks)
+            new_iou_preds.append(best_iou_preds)
+        masks = torch.cat(new_masks, dim=0)
+        return masks, torch.cat(new_iou_preds, dim=0)
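
A minimal usage sketch for the mask generator above (the config and checkpoint file names are illustrative assumptions, not files from this upload):

import numpy as np
from PIL import Image
from sam2.build_sam import build_sam2
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

# Assumed config/checkpoint names; substitute whatever is available locally.
sam2_model = build_sam2("sam2_hiera_l.yaml", ckpt_path="sam2_hiera_large.pt", device="cuda")
mask_generator = SAM2AutomaticMaskGenerator(sam2_model, points_per_side=32, pred_iou_thresh=0.8)

image = np.array(Image.open("frame_0001.png").convert("RGB"))  # HWC uint8, as generate() expects
masks = mask_generator.generate(image)
print(len(masks), masks[0]["bbox"], masks[0]["stability_score"])
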
sam2/build_sam.py
ADDED
@@ -0,0 +1,89 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+
+import torch
+from hydra import compose
+from hydra.utils import instantiate
+from omegaconf import OmegaConf
+
+
+def build_sam2(
+    config_file,
+    ckpt_path=None,
+    device="cuda",
+    mode="eval",
+    hydra_overrides_extra=[],
+    apply_postprocessing=True,
+):
+
+    if apply_postprocessing:
+        hydra_overrides_extra = hydra_overrides_extra.copy()
+        hydra_overrides_extra += [
+            # dynamically fall back to multi-mask if the single mask is not stable
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+        ]
+    # Read config and init model
+    cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
+    OmegaConf.resolve(cfg)
+    model = instantiate(cfg.model, _recursive_=True)
+    _load_checkpoint(model, ckpt_path)
+    model = model.to(device)
+    if mode == "eval":
+        model.eval()
+    return model
+
+
+def build_sam2_video_predictor(
+    config_file,
+    ckpt_path=None,
+    device="cuda",
+    mode="eval",
+    hydra_overrides_extra=[],
+    apply_postprocessing=True,
+):
+    hydra_overrides = [
+        "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
+    ]
+    if apply_postprocessing:
+        hydra_overrides_extra = hydra_overrides_extra.copy()
+        hydra_overrides_extra += [
+            # dynamically fall back to multi-mask if the single mask is not stable
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+            "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+            # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
+            "++model.binarize_mask_from_pts_for_mem_enc=true",
+            # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
+            "++model.fill_hole_area=8",
+        ]
+    hydra_overrides.extend(hydra_overrides_extra)
+
+    # Read config and init model
+    cfg = compose(config_name=config_file, overrides=hydra_overrides)
+    OmegaConf.resolve(cfg)
+    model = instantiate(cfg.model, _recursive_=True)
+    _load_checkpoint(model, ckpt_path)
+    model = model.to(device)
+    if mode == "eval":
+        model.eval()
+    return model
+
+
+def _load_checkpoint(model, ckpt_path):
+    if ckpt_path is not None:
+        sd = torch.load(ckpt_path, map_location="cpu")["model"]
+        missing_keys, unexpected_keys = model.load_state_dict(sd)
+        if missing_keys:
+            logging.error(missing_keys)
+            raise RuntimeError()
+        if unexpected_keys:
+            logging.error(unexpected_keys)
+            raise RuntimeError()
+        logging.info("Loaded checkpoint successfully")
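
For the video pipeline used in the notebook above, the corresponding entry point is build_sam2_video_predictor; a short hedged sketch (config and checkpoint names are placeholders):

import torch
from sam2.build_sam import build_sam2_video_predictor

# Placeholder config/checkpoint names for illustration only.
device = "cuda" if torch.cuda.is_available() else "cpu"
predictor = build_sam2_video_predictor("sam2_hiera_l.yaml", ckpt_path="sam2_hiera_large.pt", device=device)
# With apply_postprocessing=True (the default), the hole-filling and dynamic
# multimask-fallback overrides listed above are appended before instantiation.
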
sam2/csrc/connected_components.cu
ADDED
@@ -0,0 +1,289 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+
+// This source code is licensed under the license found in the
+// LICENSE file in the root directory of this source tree.
+
+// adapted from https://github.com/zsef123/Connected_components_PyTorch
+// with license found in the LICENSE_cctorch file in the root directory.
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <torch/extension.h>
+#include <torch/script.h>
+#include <vector>
+
+// 2d
+#define BLOCK_ROWS 16
+#define BLOCK_COLS 16
+
+namespace cc2d {
+
+template <typename T>
+__device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
+  return (bitmap >> pos) & 1;
+}
+
+__device__ int32_t find(const int32_t* s_buf, int32_t n) {
+  while (s_buf[n] != n)
+    n = s_buf[n];
+  return n;
+}
+
+__device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
+  const int32_t id = n;
+  while (s_buf[n] != n) {
+    n = s_buf[n];
+    s_buf[id] = n;
+  }
+  return n;
+}
+
+__device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
+  bool done;
+  do {
+    a = find(s_buf, a);
+    b = find(s_buf, b);
+
+    if (a < b) {
+      int32_t old = atomicMin(s_buf + b, a);
+      done = (old == b);
+      b = old;
+    } else if (b < a) {
+      int32_t old = atomicMin(s_buf + a, b);
+      done = (old == a);
+      a = old;
+    } else
+      done = true;
+
+  } while (!done);
+}
+
+__global__ void
+init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row < H && col < W)
+    label[idx] = idx;
+}
+
+__global__ void
+merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  uint32_t P = 0;
+
+  if (img[idx])
+    P |= 0x777;
+  if (row + 1 < H && img[idx + W])
+    P |= 0x777 << 4;
+  if (col + 1 < W && img[idx + 1])
+    P |= 0x777 << 1;
+
+  if (col == 0)
+    P &= 0xEEEE;
+  if (col + 1 >= W)
+    P &= 0x3333;
+  else if (col + 2 >= W)
+    P &= 0x7777;
+
+  if (row == 0)
+    P &= 0xFFF0;
+  if (row + 1 >= H)
+    P &= 0xFF;
+
+  if (P > 0) {
+    // If need check about top-left pixel(if flag the first bit) and hit the
+    // top-left pixel
+    if (hasBit(P, 0) && img[idx - W - 1]) {
+      union_(label, idx, idx - 2 * W - 2); // top left block
+    }
+
+    if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
+      union_(label, idx, idx - 2 * W); // top bottom block
+
+    if (hasBit(P, 3) && img[idx + 2 - W])
+      union_(label, idx, idx - 2 * W + 2); // top right block
+
+    if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
+      union_(label, idx, idx - 2); // just left block
+  }
+}
+
+__global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row < H && col < W)
+    find_n_compress(label, idx);
+}
+
+__global__ void final_labeling(
+    const uint8_t* img,
+    int32_t* label,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx] + 1;
+
+  if (img[idx])
+    label[idx] = y;
+  else
+    label[idx] = 0;
+
+  if (col + 1 < W) {
+    if (img[idx + 1])
+      label[idx + 1] = y;
+    else
+      label[idx + 1] = 0;
+
+    if (row + 1 < H) {
+      if (img[idx + W + 1])
+        label[idx + W + 1] = y;
+      else
+        label[idx + W + 1] = 0;
+    }
+  }
+
+  if (row + 1 < H) {
+    if (img[idx + W])
+      label[idx + W] = y;
+    else
+      label[idx + W] = 0;
+  }
+}
+
+__global__ void init_counting(
+    const int32_t* label,
+    int32_t* count_init,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx];
+  if (y > 0) {
+    int32_t count_idx = y - 1;
+    atomicAdd(count_init + count_idx, 1);
+  }
+}
+
+__global__ void final_counting(
+    const int32_t* label,
+    const int32_t* count_init,
+    int32_t* count_final,
+    const int32_t W,
+    const int32_t H) {
+  const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
+  const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
+  const uint32_t idx = row * W + col;
+
+  if (row >= H || col >= W)
+    return;
+
+  int32_t y = label[idx];
+  if (y > 0) {
+    int32_t count_idx = y - 1;
+    count_final[idx] = count_init[count_idx];
+  } else {
+    count_final[idx] = 0;
+  }
+}
+
+} // namespace cc2d
+
+std::vector<torch::Tensor> get_connected_componnets(
+    const torch::Tensor& inputs) {
+  AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
+  AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
+  AT_ASSERTM(
+      inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
+
+  const uint32_t N = inputs.size(0);
+  const uint32_t C = inputs.size(1);
+  const uint32_t H = inputs.size(2);
+  const uint32_t W = inputs.size(3);
+
+  AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
+  AT_ASSERTM((H % 2) == 0, "height must be an even number");
+  AT_ASSERTM((W % 2) == 0, "width must be an even number");
+
+  // label must be uint32_t
+  auto label_options =
+      torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
+  torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
+  torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
+  torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
+
+  dim3 grid = dim3(
+      ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
+      ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
+  dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
+  dim3 grid_count =
+      dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
+  dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  for (int n = 0; n < N; n++) {
+    uint32_t offset = n * H * W;
+
+    cc2d::init_labeling<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::merge<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::compression<<<grid, block, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset, W, H);
+    cc2d::final_labeling<<<grid, block, 0, stream>>>(
+        inputs.data_ptr<uint8_t>() + offset,
+        labels.data_ptr<int32_t>() + offset,
+        W,
+        H);
+
+    // get the counting of each pixel
+    cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        W,
+        H);
+    cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
+        labels.data_ptr<int32_t>() + offset,
+        counts_init.data_ptr<int32_t>() + offset,
+        counts_final.data_ptr<int32_t>() + offset,
+        W,
+        H);
+  }
+
+  // returned values are [labels, counts]
+  std::vector<torch::Tensor> outputs;
+  outputs.push_back(labels);
+  outputs.push_back(counts_final);
+  return outputs;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def(
+      "get_connected_componnets",
+      &get_connected_componnets,
+      "get_connected_componnets");
+}
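
The kernel above is exposed through pybind11; one way to exercise it (an assumption, not part of this upload) is an ad-hoc JIT build with torch.utils.cpp_extension, which requires a CUDA toolchain:

import torch
from torch.utils.cpp_extension import load

# JIT-compile the .cu file into a loadable extension (the name is arbitrary).
cc = load(name="sam2_cc", sources=["sam2/csrc/connected_components.cu"])

# [N, 1, H, W] uint8 CUDA tensor with even H and W, as the asserts require.
masks = (torch.rand(2, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8)
labels, counts = cc.get_connected_componnets(masks)  # note the upstream spelling
print(labels.shape, int(counts.max()))
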
sam2/modeling/__init__.py
ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
sam2/modeling/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (183 Bytes).

sam2/modeling/__pycache__/memory_attention.cpython-312.pyc
ADDED
Binary file (6.82 kB).

sam2/modeling/__pycache__/memory_encoder.cpython-312.pyc
ADDED
Binary file (7.85 kB).

sam2/modeling/__pycache__/position_encoding.cpython-312.pyc
ADDED
Binary file (14.4 kB).

sam2/modeling/__pycache__/sam2_base.cpython-312.pyc
ADDED
Binary file (29.2 kB).

sam2/modeling/__pycache__/sam2_utils.cpython-312.pyc
ADDED
Binary file (9.01 kB).
sam2/modeling/backbones/__init__.py
ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
sam2/modeling/backbones/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (193 Bytes).

sam2/modeling/backbones/__pycache__/hieradet.cpython-312.pyc
ADDED
Binary file (12 kB).

sam2/modeling/backbones/__pycache__/image_encoder.cpython-312.pyc
ADDED
Binary file (5.48 kB).

sam2/modeling/backbones/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (4.34 kB).
sam2/modeling/backbones/hieradet.py
ADDED
@@ -0,0 +1,295 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from functools import partial
+from typing import List, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from sam2.modeling.backbones.utils import (
+    PatchEmbed,
+    window_partition,
+    window_unpartition,
+)
+
+from sam2.modeling.sam2_utils import DropPath, MLP
+
+
+def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
+    if pool is None:
+        return x
+    # (B, H, W, C) -> (B, C, H, W)
+    x = x.permute(0, 3, 1, 2)
+    x = pool(x)
+    # (B, C, H', W') -> (B, H', W', C)
+    x = x.permute(0, 2, 3, 1)
+    if norm:
+        x = norm(x)
+
+    return x
+
+
+class MultiScaleAttention(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        dim_out: int,
+        num_heads: int,
+        q_pool: nn.Module = None,
+    ):
+        super().__init__()
+
+        self.dim = dim
+        self.dim_out = dim_out
+
+        self.num_heads = num_heads
+        head_dim = dim_out // num_heads
+        self.scale = head_dim**-0.5
+
+        self.q_pool = q_pool
+        self.qkv = nn.Linear(dim, dim_out * 3)
+        self.proj = nn.Linear(dim_out, dim_out)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        B, H, W, _ = x.shape
+        # qkv with shape (B, H * W, 3, nHead, C)
+        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
+        # q, k, v with shape (B, H * W, nheads, C)
+        q, k, v = torch.unbind(qkv, 2)
+
+        # Q pooling (for downsample at stage changes)
+        if self.q_pool:
+            q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
+            H, W = q.shape[1:3]  # downsampled shape
+            q = q.reshape(B, H * W, self.num_heads, -1)
+
+        # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
+        x = F.scaled_dot_product_attention(
+            q.transpose(1, 2),
+            k.transpose(1, 2),
+            v.transpose(1, 2),
+        )
+        # Transpose back
+        x = x.transpose(1, 2)
+        x = x.reshape(B, H, W, -1)
+
+        x = self.proj(x)
+
+        return x
+
+
+class MultiScaleBlock(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        dim_out: int,
+        num_heads: int,
+        mlp_ratio: float = 4.0,
+        drop_path: float = 0.0,
+        norm_layer: Union[nn.Module, str] = "LayerNorm",
+        q_stride: Tuple[int, int] = None,
+        act_layer: nn.Module = nn.GELU,
+        window_size: int = 0,
+    ):
+        super().__init__()
+
+        if isinstance(norm_layer, str):
+            norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
+
+        self.dim = dim
+        self.dim_out = dim_out
+        self.norm1 = norm_layer(dim)
+
+        self.window_size = window_size
+
+        self.pool, self.q_stride = None, q_stride
+        if self.q_stride:
+            self.pool = nn.MaxPool2d(
+                kernel_size=q_stride, stride=q_stride, ceil_mode=False
+            )
+
+        self.attn = MultiScaleAttention(
+            dim,
+            dim_out,
+            num_heads=num_heads,
+            q_pool=self.pool,
+        )
+        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+        self.norm2 = norm_layer(dim_out)
+        self.mlp = MLP(
+            dim_out,
+            int(dim_out * mlp_ratio),
+            dim_out,
+            num_layers=2,
+            activation=act_layer,
+        )
+
+        if dim != dim_out:
+            self.proj = nn.Linear(dim, dim_out)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        shortcut = x  # B, H, W, C
+        x = self.norm1(x)
+
+        # Skip connection
+        if self.dim != self.dim_out:
+            shortcut = do_pool(self.proj(x), self.pool)
+
+        # Window partition
+        window_size = self.window_size
+        if window_size > 0:
+            H, W = x.shape[1], x.shape[2]
+            x, pad_hw = window_partition(x, window_size)
+
+        # Window Attention + Q Pooling (if stage change)
+        x = self.attn(x)
+        if self.q_stride:
+            # Shapes have changed due to Q pooling
+            window_size = self.window_size // self.q_stride[0]
+            H, W = shortcut.shape[1:3]
+
+            pad_h = (window_size - H % window_size) % window_size
+            pad_w = (window_size - W % window_size) % window_size
+            pad_hw = (H + pad_h, W + pad_w)
+
+        # Reverse window partition
+        if self.window_size > 0:
+            x = window_unpartition(x, window_size, pad_hw, (H, W))
+
+        x = shortcut + self.drop_path(x)
+        # MLP
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class Hiera(nn.Module):
+    """
+    Reference: https://arxiv.org/abs/2306.00989
+    """
+
+    def __init__(
+        self,
+        embed_dim: int = 96,  # initial embed dim
+        num_heads: int = 1,  # initial number of heads
+        drop_path_rate: float = 0.0,  # stochastic depth
+        q_pool: int = 3,  # number of q_pool stages
+        q_stride: Tuple[int, int] = (2, 2),  # downsample stride bet. stages
+        stages: Tuple[int, ...] = (2, 3, 16, 3),  # blocks per stage
+        dim_mul: float = 2.0,  # dim_mul factor at stage shift
+        head_mul: float = 2.0,  # head_mul factor at stage shift
+        window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
+        # window size per stage, when not using global att.
+        window_spec: Tuple[int, ...] = (
+            8,
+            4,
+            14,
+            7,
+        ),
+        # global attn in these blocks
+        global_att_blocks: Tuple[int, ...] = (
+            12,
+            16,
+            20,
+        ),
+        return_interm_layers=True,  # return feats from every stage
+    ):
+        super().__init__()
+
+        assert len(stages) == len(window_spec)
+        self.window_spec = window_spec
+
+        depth = sum(stages)
+        self.q_stride = q_stride
+        self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
+        assert 0 <= q_pool <= len(self.stage_ends[:-1])
+        self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
+        self.return_interm_layers = return_interm_layers
+
+        self.patch_embed = PatchEmbed(
+            embed_dim=embed_dim,
+        )
+        # Which blocks have global att?
+        self.global_att_blocks = global_att_blocks
+
+        # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
+        self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
+        self.pos_embed = nn.Parameter(
+            torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size)
+        )
+        self.pos_embed_window = nn.Parameter(
+            torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])
+        )
+
+        dpr = [
+            x.item() for x in torch.linspace(0, drop_path_rate, depth)
+        ]  # stochastic depth decay rule
+
+        cur_stage = 1
+        self.blocks = nn.ModuleList()
+
+        for i in range(depth):
+            dim_out = embed_dim
+            # lags by a block, so first block of
+            # next stage uses an initial window size
+            # of previous stage and final window size of current stage
+            window_size = self.window_spec[cur_stage - 1]
+
+            if self.global_att_blocks is not None:
+                window_size = 0 if i in self.global_att_blocks else window_size
+
+            if i - 1 in self.stage_ends:
+                dim_out = int(embed_dim * dim_mul)
+                num_heads = int(num_heads * head_mul)
+                cur_stage += 1
+
+            block = MultiScaleBlock(
+                dim=embed_dim,
+                dim_out=dim_out,
+                num_heads=num_heads,
+                drop_path=dpr[i],
+                q_stride=self.q_stride if i in self.q_pool_blocks else None,
+                window_size=window_size,
+            )
+
+            embed_dim = dim_out
+            self.blocks.append(block)
+
+        self.channel_list = (
+            [self.blocks[i].dim_out for i in self.stage_ends[::-1]]
+            if return_interm_layers
+            else [self.blocks[-1].dim_out]
+        )
+
+    def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
+        h, w = hw
+        window_embed = self.pos_embed_window
+        pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
+        pos_embed = pos_embed + window_embed.tile(
+            [x // y for x, y in zip(pos_embed.shape, window_embed.shape)]
+        )
+        pos_embed = pos_embed.permute(0, 2, 3, 1)
+        return pos_embed
+
+    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
+        x = self.patch_embed(x)
+        # x: (B, H, W, C)
+
+        # Add pos embed
+        x = x + self._get_pos_embed(x.shape[1:3])
+
+        outputs = []
+        for i, blk in enumerate(self.blocks):
+            x = blk(x)
+            if (i == self.stage_ends[-1]) or (
+                i in self.stage_ends and self.return_interm_layers
+            ):
+                feats = x.permute(0, 3, 1, 2)
+                outputs.append(feats)
+
+        return outputs
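
A quick shape check of the trunk defined above (default Hiera settings; exact shapes depend on the input resolution):

import torch
from sam2.modeling.backbones.hieradet import Hiera

trunk = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
feats = trunk(torch.randn(1, 3, 1024, 1024))
for f in feats:
    print(f.shape)  # (1, 96, 256, 256), (1, 192, 128, 128), (1, 384, 64, 64), (1, 768, 32, 32)
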
sam2/modeling/backbones/image_encoder.py
ADDED
@@ -0,0 +1,133 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class ImageEncoder(nn.Module):
+    def __init__(
+        self,
+        trunk: nn.Module,
+        neck: nn.Module,
+        scalp: int = 0,
+    ):
+        super().__init__()
+        self.trunk = trunk
+        self.neck = neck
+        self.scalp = scalp
+        assert (
+            self.trunk.channel_list == self.neck.backbone_channel_list
+        ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
+
+    def forward(self, sample: torch.Tensor):
+        # Forward through backbone
+        features, pos = self.neck(self.trunk(sample))
+        if self.scalp > 0:
+            # Discard the lowest resolution features
+            features, pos = features[: -self.scalp], pos[: -self.scalp]
+
+        src = features[-1]
+        output = {
+            "vision_features": src,
+            "vision_pos_enc": pos,
+            "backbone_fpn": features,
+        }
+        return output
+
+
+class FpnNeck(nn.Module):
+    """
+    A modified variant of Feature Pyramid Network (FPN) neck
+    (we remove output conv and also do bicubic interpolation similar to ViT
+    pos embed interpolation)
+    """
+
+    def __init__(
+        self,
+        position_encoding: nn.Module,
+        d_model: int,
+        backbone_channel_list: List[int],
+        kernel_size: int = 1,
+        stride: int = 1,
+        padding: int = 0,
+        fpn_interp_model: str = "bilinear",
+        fuse_type: str = "sum",
+        fpn_top_down_levels: Optional[List[int]] = None,
+    ):
+        """Initialize the neck
+        :param trunk: the backbone
+        :param position_encoding: the positional encoding to use
+        :param d_model: the dimension of the model
+        :param neck_norm: the normalization to use
+        """
+        super().__init__()
+        self.position_encoding = position_encoding
+        self.convs = nn.ModuleList()
+        self.backbone_channel_list = backbone_channel_list
+        for dim in backbone_channel_list:
+            current = nn.Sequential()
+            current.add_module(
+                "conv",
+                nn.Conv2d(
+                    in_channels=dim,
+                    out_channels=d_model,
+                    kernel_size=kernel_size,
+                    stride=stride,
+                    padding=padding,
+                ),
+            )
+
+            self.convs.append(current)
+        self.fpn_interp_model = fpn_interp_model
+        assert fuse_type in ["sum", "avg"]
+        self.fuse_type = fuse_type
+
+        # levels to have top-down features in its outputs
+        # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
+        # have top-down propagation, while outputs of level 0 and level 1 have only
+        # lateral features from the same backbone level.
+        if fpn_top_down_levels is None:
+            # default is to have top-down features on all levels
+            fpn_top_down_levels = range(len(self.convs))
+        self.fpn_top_down_levels = list(fpn_top_down_levels)
+
+    def forward(self, xs: List[torch.Tensor]):
+
+        out = [None] * len(self.convs)
+        pos = [None] * len(self.convs)
+        assert len(xs) == len(self.convs)
+        # fpn forward pass
+        # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
+        prev_features = None
+        # forward in top-down order (from low to high resolution)
+        n = len(self.convs) - 1
+        for i in range(n, -1, -1):
+            x = xs[i]
+            lateral_features = self.convs[n - i](x)
+            if i in self.fpn_top_down_levels and prev_features is not None:
+                top_down_features = F.interpolate(
+                    prev_features.to(dtype=torch.float32),
+                    scale_factor=2.0,
+                    mode=self.fpn_interp_model,
+                    align_corners=(
+                        None if self.fpn_interp_model == "nearest" else False
+                    ),
+                    antialias=False,
+                )
+                prev_features = lateral_features + top_down_features
+                if self.fuse_type == "avg":
+                    prev_features /= 2
+            else:
+                prev_features = lateral_features
+            x_out = prev_features
+            out[i] = x_out
+            pos[i] = self.position_encoding(x_out).to(x_out.dtype)
+
+        return out, pos
ADDED
@@ -0,0 +1,95 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""Some utilities for backbones, in particular for windowing"""

from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


def window_partition(x, window_size):
    """
    Partition into non-overlapping windows with padding if needed.
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.
    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape

    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows, (Hp, Wp)


def window_unpartition(windows, window_size, pad_hw, hw):
    """
    Window unpartition into original sequences and removing padding.
    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.
    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
    x = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x


class PatchEmbed(nn.Module):
    """
    Image to Patch Embedding.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, ...] = (7, 7),
        stride: Tuple[int, ...] = (4, 4),
        padding: Tuple[int, ...] = (3, 3),
        in_chans: int = 3,
        embed_dim: int = 768,
    ):
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
        """
        super().__init__()
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        # B C H W -> B H W C
        x = x.permute(0, 2, 3, 1)
        return x
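
window_partition pads H and W up to a multiple of window_size before reshaping, and window_unpartition strips that padding again, so the two are exact inverses on the unpadded region. A small round-trip check with assumed shapes:

# Round-trip check for the windowing helpers (shapes are assumptions).
import torch

x = torch.randn(2, 50, 50, 96)                 # B, H, W, C; 50 is not divisible by 8
windows, (Hp, Wp) = window_partition(x, window_size=8)
print(windows.shape, Hp, Wp)                   # (98, 8, 8, 96), 56, 56
x_rec = window_unpartition(windows, 8, (Hp, Wp), (50, 50))
assert torch.allclose(x, x_rec)                # padding removed, values unchanged
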
sam2/modeling/memory_attention.py
ADDED
@@ -0,0 +1,169 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

import torch
from torch import nn, Tensor

from sam2.modeling.sam.transformer import RoPEAttention

from sam2.modeling.sam2_utils import get_activation_fn, get_clones


class MemoryAttentionLayer(nn.Module):

    def __init__(
        self,
        activation: str,
        cross_attention: nn.Module,
        d_model: int,
        dim_feedforward: int,
        dropout: float,
        pos_enc_at_attn: bool,
        pos_enc_at_cross_attn_keys: bool,
        pos_enc_at_cross_attn_queries: bool,
        self_attention: nn.Module,
    ):
        super().__init__()
        self.d_model = d_model
        self.dim_feedforward = dim_feedforward
        self.dropout_value = dropout
        self.self_attn = self_attention
        self.cross_attn_image = cross_attention

        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation_str = activation
        self.activation = get_activation_fn(activation)

        # Where to add pos enc
        self.pos_enc_at_attn = pos_enc_at_attn
        self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
        self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys

    def _forward_sa(self, tgt, query_pos):
        # Self-Attention
        tgt2 = self.norm1(tgt)
        q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
        tgt2 = self.self_attn(q, k, v=tgt2)
        tgt = tgt + self.dropout1(tgt2)
        return tgt

    def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
        kwds = {}
        if num_k_exclude_rope > 0:
            assert isinstance(self.cross_attn_image, RoPEAttention)
            kwds = {"num_k_exclude_rope": num_k_exclude_rope}

        # Cross-Attention
        tgt2 = self.norm2(tgt)
        tgt2 = self.cross_attn_image(
            q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
            k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
            v=memory,
            **kwds,
        )
        tgt = tgt + self.dropout2(tgt2)
        return tgt

    def forward(
        self,
        tgt,
        memory,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        num_k_exclude_rope: int = 0,
    ) -> torch.Tensor:

        # Self-Attn, Cross-Attn
        tgt = self._forward_sa(tgt, query_pos)
        tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
        # MLP
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt


class MemoryAttention(nn.Module):
    def __init__(
        self,
        d_model: int,
        pos_enc_at_input: bool,
        layer: nn.Module,
        num_layers: int,
        batch_first: bool = True,  # Do layers expect batch first input?
    ):
        super().__init__()
        self.d_model = d_model
        self.layers = get_clones(layer, num_layers)
        self.num_layers = num_layers
        self.norm = nn.LayerNorm(d_model)
        self.pos_enc_at_input = pos_enc_at_input
        self.batch_first = batch_first

    def forward(
        self,
        curr: torch.Tensor,  # self-attention inputs
        memory: torch.Tensor,  # cross-attention inputs
        curr_pos: Optional[Tensor] = None,  # pos_enc for self-attention inputs
        memory_pos: Optional[Tensor] = None,  # pos_enc for cross-attention inputs
        num_obj_ptr_tokens: int = 0,  # number of object pointer *tokens*
    ):
        if isinstance(curr, list):
            assert isinstance(curr_pos, list)
            assert len(curr) == len(curr_pos) == 1
            curr, curr_pos = (
                curr[0],
                curr_pos[0],
            )

        assert (
            curr.shape[1] == memory.shape[1]
        ), "Batch size must be the same for curr and memory"

        output = curr
        if self.pos_enc_at_input and curr_pos is not None:
            output = output + 0.1 * curr_pos

        if self.batch_first:
            # Convert to batch first
            output = output.transpose(0, 1)
            curr_pos = curr_pos.transpose(0, 1)
            memory = memory.transpose(0, 1)
            memory_pos = memory_pos.transpose(0, 1)

        for layer in self.layers:
            kwds = {}
            if isinstance(layer.cross_attn_image, RoPEAttention):
                kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}

            output = layer(
                tgt=output,
                memory=memory,
                pos=memory_pos,
                query_pos=curr_pos,
                **kwds,
            )
        normed_output = self.norm(output)

        if self.batch_first:
            # Convert back to seq first
            normed_output = normed_output.transpose(0, 1)
            curr_pos = curr_pos.transpose(0, 1)

        return normed_output
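
Each MemoryAttentionLayer is pre-norm with residual connections: self-attention over the current frame's tokens, cross-attention from those tokens into the memory bank, then a feed-forward block; MemoryAttention stacks num_layers clones and applies a final LayerNorm. Below is a hedged construction sketch with sequence-first inputs; the hyper-parameters and tensor sizes are illustrative assumptions, not the shipped SAM 2 configuration.

# Illustrative construction of the memory attention stack (assumed sizes/config).
import torch

layer = MemoryAttentionLayer(
    activation="relu",
    cross_attention=RoPEAttention(
        embedding_dim=256, num_heads=1, rope_k_repeat=True, kv_in_dim=64
    ),
    d_model=256,
    dim_feedforward=2048,
    dropout=0.1,
    pos_enc_at_attn=False,
    pos_enc_at_cross_attn_keys=True,
    pos_enc_at_cross_attn_queries=False,
    self_attention=RoPEAttention(embedding_dim=256, num_heads=1),
)
memory_attention = MemoryAttention(
    d_model=256, pos_enc_at_input=True, layer=layer, num_layers=4
)

curr = torch.randn(32 * 32, 1, 256)       # current-frame tokens: (seq, batch, dim)
memory = torch.randn(4 * 32 * 32, 1, 64)  # memory-bank tokens from past frames
out = memory_attention(
    curr, memory,
    curr_pos=torch.zeros_like(curr),
    memory_pos=torch.zeros_like(memory),
)
print(out.shape)  # torch.Size([1024, 1, 256])
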
sam2/modeling/memory_encoder.py
ADDED
@@ -0,0 +1,181 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d


class MaskDownSampler(nn.Module):
    """
    Progressively downsample a mask by total_stride, each time by stride.
    Note that LayerNorm is applied per *token*, like in ViT.

    With each downsample (by a factor stride**2), channel capacity increases by the same factor.
    In the end, we linearly project to embed_dim channels.
    """

    def __init__(
        self,
        embed_dim=256,
        kernel_size=4,
        stride=4,
        padding=0,
        total_stride=16,
        activation=nn.GELU,
    ):
        super().__init__()
        num_layers = int(math.log2(total_stride) // math.log2(stride))
        assert stride**num_layers == total_stride
        self.encoder = nn.Sequential()
        mask_in_chans, mask_out_chans = 1, 1
        for _ in range(num_layers):
            mask_out_chans = mask_in_chans * (stride**2)
            self.encoder.append(
                nn.Conv2d(
                    mask_in_chans,
                    mask_out_chans,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                )
            )
            self.encoder.append(LayerNorm2d(mask_out_chans))
            self.encoder.append(activation())
            mask_in_chans = mask_out_chans

        self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))

    def forward(self, x):
        return self.encoder(x)


# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
class CXBlock(nn.Module):
    r"""ConvNeXt Block. There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """

    def __init__(
        self,
        dim,
        kernel_size=7,
        padding=3,
        drop_path=0.0,
        layer_scale_init_value=1e-6,
        use_dwconv=True,
    ):
        super().__init__()
        self.dwconv = nn.Conv2d(
            dim,
            dim,
            kernel_size=kernel_size,
            padding=padding,
            groups=dim if use_dwconv else 1,
        )  # depthwise conv
        self.norm = LayerNorm2d(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(
            dim, 4 * dim
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = (
            nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = self.norm(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x


class Fuser(nn.Module):
    def __init__(self, layer, num_layers, dim=None, input_projection=False):
        super().__init__()
        self.proj = nn.Identity()
        self.layers = get_clones(layer, num_layers)

        if input_projection:
            assert dim is not None
            self.proj = nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x):
        # normally x: (N, C, H, W)
        x = self.proj(x)
        for layer in self.layers:
            x = layer(x)
        return x


class MemoryEncoder(nn.Module):
    def __init__(
        self,
        out_dim,
        mask_downsampler,
        fuser,
        position_encoding,
        in_dim=256,  # in_dim of pix_feats
    ):
        super().__init__()

        self.mask_downsampler = mask_downsampler

        self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
        self.fuser = fuser
        self.position_encoding = position_encoding
        self.out_proj = nn.Identity()
        if out_dim != in_dim:
            self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(
        self,
        pix_feat: torch.Tensor,
        masks: torch.Tensor,
        skip_mask_sigmoid: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        ## Process masks
        # sigmoid, so that less domain shift from gt masks which are bool
        if not skip_mask_sigmoid:
            masks = F.sigmoid(masks)
        masks = self.mask_downsampler(masks)

        ## Fuse pix_feats and downsampled masks
        # in case the visual features are on CPU, cast them to CUDA
        pix_feat = pix_feat.to(masks.device)

        x = self.pix_feat_proj(pix_feat)
        x = x + masks
        x = self.fuser(x)
        x = self.out_proj(x)

        pos = self.position_encoding(x).to(x.dtype)

        return {"vision_features": x, "vision_pos_enc": [pos]}
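
With the defaults above (stride 4, total_stride 16), MaskDownSampler stacks two strided convolutions that trade spatial resolution for channels (1 -> 16 -> 256) and ends with a 1x1 projection to embed_dim, so a full-resolution mask lands on the same 16x-strided grid as the image features. A quick shape check under assumed sizes:

# Shape check of the mask downsampler defaults (input resolution and embed_dim assumed).
import torch

downsampler = MaskDownSampler(embed_dim=64)     # defaults: kernel 4, stride 4, total_stride 16
masks = torch.randn(1, 1, 1024, 1024)           # raw mask logits at full resolution
print(downsampler(masks).shape)                 # torch.Size([1, 64, 64, 64])
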
sam2/modeling/position_encoding.py
ADDED
@@ -0,0 +1,216 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Any, Optional, Tuple

import numpy as np

import torch
from torch import nn


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(
        self,
        num_pos_feats,
        temperature: int = 10000,
        normalize: bool = True,
        scale: Optional[float] = None,
    ):
        super().__init__()
        assert num_pos_feats % 2 == 0, "Expecting even model width"
        self.num_pos_feats = num_pos_feats // 2
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

        self.cache = {}

    def _encode_xy(self, x, y):
        # The positions are expected to be normalized
        assert len(x) == len(y) and x.ndim == y.ndim == 1
        x_embed = x * self.scale
        y_embed = y * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, None] / dim_t
        pos_y = y_embed[:, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2
        ).flatten(1)
        pos_y = torch.stack(
            (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2
        ).flatten(1)
        return pos_x, pos_y

    @torch.no_grad()
    def encode_boxes(self, x, y, w, h):
        pos_x, pos_y = self._encode_xy(x, y)
        pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
        return pos

    encode = encode_boxes  # Backwards compatibility

    @torch.no_grad()
    def encode_points(self, x, y, labels):
        (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
        assert bx == by and nx == ny and bx == bl and nx == nl
        pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
        pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
        pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
        return pos

    @torch.no_grad()
    def forward(self, x: torch.Tensor):
        cache_key = (x.shape[-2], x.shape[-1])
        if cache_key in self.cache:
            return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
        y_embed = (
            torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
            .view(1, -1, 1)
            .repeat(x.shape[0], 1, x.shape[-1])
        )
        x_embed = (
            torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
            .view(1, 1, -1)
            .repeat(x.shape[0], x.shape[-2], 1)
        )

        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        self.cache[cache_key] = pos[0]
        return pos


class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding using random spatial frequencies.
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coords = 2 * coords - 1
        coords = coords @ self.positional_encoding_gaussian_matrix
        coords = 2 * np.pi * coords
        # outputs d_1 x ... x d_n x C shape
        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        grid = torch.ones((h, w), device=device, dtype=torch.float32)
        y_embed = grid.cumsum(dim=0) - 0.5
        x_embed = grid.cumsum(dim=1) - 0.5
        y_embed = y_embed / h
        x_embed = x_embed / w

        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
        return pe.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        coords = coords_input.clone()
        coords[:, :, 0] = coords[:, :, 0] / image_size[1]
        coords[:, :, 1] = coords[:, :, 1] / image_size[0]
        return self._pe_encoding(coords.to(torch.float))  # B x N x C


# Rotary Positional Encoding, adapted from:
# 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
# 2. https://github.com/naver-ai/rope-vit
# 3. https://github.com/lucidrains/rotary-embedding-torch


def init_t_xy(end_x: int, end_y: int):
    t = torch.arange(end_x * end_y, dtype=torch.float32)
    t_x = (t % end_x).float()
    t_y = torch.div(t, end_x, rounding_mode="floor").float()
    return t_x, t_y


def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
    freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
    freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))

    t_x, t_y = init_t_xy(end_x, end_y)
    freqs_x = torch.outer(t_x, freqs_x)
    freqs_y = torch.outer(t_y, freqs_y)
    freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
    freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
    return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)


def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
    shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
    return freqs_cis.view(*shape)


def apply_rotary_enc(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
    repeat_freqs_k: bool = False,
):
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = (
        torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
        if xk.shape[-2] != 0
        else None
    )
    freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
    if xk_ is None:
        # no keys to rotate, due to dropout
        return xq_out.type_as(xq).to(xq.device), xk
    # repeat freqs along seq_len dim to match k seq_len
    if repeat_freqs_k:
        r = xk_.shape[-2] // xq_.shape[-2]
        freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
    return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
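
compute_axial_cis builds complex rotations for a 2-D token grid: half of each head's channel pairs rotate with the x coordinate and half with the y coordinate, and apply_rotary_enc applies those rotations to q and k (optionally repeating the table along the key axis for longer memory sequences). A minimal sketch with assumed sizes:

# Axial RoPE on an 8x8 token grid with 4 heads of dim 32 (all sizes assumed).
import torch

B, n_heads, head_dim, side = 2, 4, 32, 8
freqs_cis = compute_axial_cis(dim=head_dim, end_x=side, end_y=side)  # (64, 16) complex
q = torch.randn(B, n_heads, side * side, head_dim)
k = torch.randn(B, n_heads, side * side, head_dim)
q_rot, k_rot = apply_rotary_enc(q, k, freqs_cis)
print(q_rot.shape, k_rot.shape)  # same shapes as q and k, positions now encoded
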
sam2/modeling/sam/__init__.py
ADDED
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
sam2/modeling/sam/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (187 Bytes)
sam2/modeling/sam/__pycache__/mask_decoder.cpython-312.pyc
ADDED
Binary file (12.7 kB)
sam2/modeling/sam/__pycache__/prompt_encoder.cpython-312.pyc
ADDED
Binary file (9.48 kB)
sam2/modeling/sam/__pycache__/transformer.cpython-312.pyc
ADDED
Binary file (14.2 kB)
sam2/modeling/sam/mask_decoder.py
ADDED
@@ -0,0 +1,295 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
from typing import List, Optional, Tuple, Type
|
8 |
+
|
9 |
+
import torch
|
10 |
+
from torch import nn
|
11 |
+
|
12 |
+
from sam2.modeling.sam2_utils import LayerNorm2d, MLP
|
13 |
+
|
14 |
+
|
15 |
+
class MaskDecoder(nn.Module):
|
16 |
+
def __init__(
|
17 |
+
self,
|
18 |
+
*,
|
19 |
+
transformer_dim: int,
|
20 |
+
transformer: nn.Module,
|
21 |
+
num_multimask_outputs: int = 3,
|
22 |
+
activation: Type[nn.Module] = nn.GELU,
|
23 |
+
iou_head_depth: int = 3,
|
24 |
+
iou_head_hidden_dim: int = 256,
|
25 |
+
use_high_res_features: bool = False,
|
26 |
+
iou_prediction_use_sigmoid=False,
|
27 |
+
dynamic_multimask_via_stability=False,
|
28 |
+
dynamic_multimask_stability_delta=0.05,
|
29 |
+
dynamic_multimask_stability_thresh=0.98,
|
30 |
+
pred_obj_scores: bool = False,
|
31 |
+
pred_obj_scores_mlp: bool = False,
|
32 |
+
use_multimask_token_for_obj_ptr: bool = False,
|
33 |
+
) -> None:
|
34 |
+
"""
|
35 |
+
Predicts masks given an image and prompt embeddings, using a
|
36 |
+
transformer architecture.
|
37 |
+
|
38 |
+
Arguments:
|
39 |
+
transformer_dim (int): the channel dimension of the transformer
|
40 |
+
transformer (nn.Module): the transformer used to predict masks
|
41 |
+
num_multimask_outputs (int): the number of masks to predict
|
42 |
+
when disambiguating masks
|
43 |
+
activation (nn.Module): the type of activation to use when
|
44 |
+
upscaling masks
|
45 |
+
iou_head_depth (int): the depth of the MLP used to predict
|
46 |
+
mask quality
|
47 |
+
iou_head_hidden_dim (int): the hidden dimension of the MLP
|
48 |
+
used to predict mask quality
|
49 |
+
"""
|
50 |
+
super().__init__()
|
51 |
+
self.transformer_dim = transformer_dim
|
52 |
+
self.transformer = transformer
|
53 |
+
|
54 |
+
self.num_multimask_outputs = num_multimask_outputs
|
55 |
+
|
56 |
+
self.iou_token = nn.Embedding(1, transformer_dim)
|
57 |
+
self.num_mask_tokens = num_multimask_outputs + 1
|
58 |
+
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
|
59 |
+
|
60 |
+
self.pred_obj_scores = pred_obj_scores
|
61 |
+
if self.pred_obj_scores:
|
62 |
+
self.obj_score_token = nn.Embedding(1, transformer_dim)
|
63 |
+
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
|
64 |
+
|
65 |
+
self.output_upscaling = nn.Sequential(
|
66 |
+
nn.ConvTranspose2d(
|
67 |
+
transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
|
68 |
+
),
|
69 |
+
LayerNorm2d(transformer_dim // 4),
|
70 |
+
activation(),
|
71 |
+
nn.ConvTranspose2d(
|
72 |
+
transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
|
73 |
+
),
|
74 |
+
activation(),
|
75 |
+
)
|
76 |
+
self.use_high_res_features = use_high_res_features
|
77 |
+
if use_high_res_features:
|
78 |
+
self.conv_s0 = nn.Conv2d(
|
79 |
+
transformer_dim, transformer_dim // 8, kernel_size=1, stride=1
|
80 |
+
)
|
81 |
+
self.conv_s1 = nn.Conv2d(
|
82 |
+
transformer_dim, transformer_dim // 4, kernel_size=1, stride=1
|
83 |
+
)
|
84 |
+
|
85 |
+
self.output_hypernetworks_mlps = nn.ModuleList(
|
86 |
+
[
|
87 |
+
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
|
88 |
+
for i in range(self.num_mask_tokens)
|
89 |
+
]
|
90 |
+
)
|
91 |
+
|
92 |
+
self.iou_prediction_head = MLP(
|
93 |
+
transformer_dim,
|
94 |
+
iou_head_hidden_dim,
|
95 |
+
self.num_mask_tokens,
|
96 |
+
iou_head_depth,
|
97 |
+
sigmoid_output=iou_prediction_use_sigmoid,
|
98 |
+
)
|
99 |
+
if self.pred_obj_scores:
|
100 |
+
self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
|
101 |
+
if pred_obj_scores_mlp:
|
102 |
+
self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
|
103 |
+
|
104 |
+
# When outputting a single mask, optionally we can dynamically fall back to the best
|
105 |
+
# multimask output token if the single mask output token gives low stability scores.
|
106 |
+
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
|
107 |
+
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
|
108 |
+
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
|
109 |
+
|
110 |
+
def forward(
|
111 |
+
self,
|
112 |
+
image_embeddings: torch.Tensor,
|
113 |
+
image_pe: torch.Tensor,
|
114 |
+
sparse_prompt_embeddings: torch.Tensor,
|
115 |
+
dense_prompt_embeddings: torch.Tensor,
|
116 |
+
multimask_output: bool,
|
117 |
+
repeat_image: bool,
|
118 |
+
high_res_features: Optional[List[torch.Tensor]] = None,
|
119 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
120 |
+
"""
|
121 |
+
Predict masks given image and prompt embeddings.
|
122 |
+
|
123 |
+
Arguments:
|
124 |
+
image_embeddings (torch.Tensor): the embeddings from the image encoder
|
125 |
+
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
|
126 |
+
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
|
127 |
+
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
|
128 |
+
multimask_output (bool): Whether to return multiple masks or a single
|
129 |
+
mask.
|
130 |
+
|
131 |
+
Returns:
|
132 |
+
torch.Tensor: batched predicted masks
|
133 |
+
torch.Tensor: batched predictions of mask quality
|
134 |
+
torch.Tensor: batched SAM token for mask output
|
135 |
+
"""
|
136 |
+
masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
|
137 |
+
image_embeddings=image_embeddings,
|
138 |
+
image_pe=image_pe,
|
139 |
+
sparse_prompt_embeddings=sparse_prompt_embeddings,
|
140 |
+
dense_prompt_embeddings=dense_prompt_embeddings,
|
141 |
+
repeat_image=repeat_image,
|
142 |
+
high_res_features=high_res_features,
|
143 |
+
)
|
144 |
+
|
145 |
+
# Select the correct mask or masks for output
|
146 |
+
if multimask_output:
|
147 |
+
masks = masks[:, 1:, :, :]
|
148 |
+
iou_pred = iou_pred[:, 1:]
|
149 |
+
elif self.dynamic_multimask_via_stability and not self.training:
|
150 |
+
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
|
151 |
+
else:
|
152 |
+
masks = masks[:, 0:1, :, :]
|
153 |
+
iou_pred = iou_pred[:, 0:1]
|
154 |
+
|
155 |
+
if multimask_output and self.use_multimask_token_for_obj_ptr:
|
156 |
+
sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
|
157 |
+
else:
|
158 |
+
# Take the mask output token. Here we *always* use the token for single mask output.
|
159 |
+
# At test time, even if we track after 1-click (and using multimask_output=True),
|
160 |
+
# we still take the single mask token here. The rationale is that we always track
|
161 |
+
# after multiple clicks during training, so the past tokens seen during training
|
162 |
+
# are always the single mask token (and we'll let it be the object-memory token).
|
163 |
+
sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
|
164 |
+
|
165 |
+
# Prepare output
|
166 |
+
return masks, iou_pred, sam_tokens_out, object_score_logits
|
167 |
+
|
168 |
+
def predict_masks(
|
169 |
+
self,
|
170 |
+
image_embeddings: torch.Tensor,
|
171 |
+
image_pe: torch.Tensor,
|
172 |
+
sparse_prompt_embeddings: torch.Tensor,
|
173 |
+
dense_prompt_embeddings: torch.Tensor,
|
174 |
+
repeat_image: bool,
|
175 |
+
high_res_features: Optional[List[torch.Tensor]] = None,
|
176 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
177 |
+
"""Predicts masks. See 'forward' for more details."""
|
178 |
+
# Concatenate output tokens
|
179 |
+
s = 0
|
180 |
+
if self.pred_obj_scores:
|
181 |
+
output_tokens = torch.cat(
|
182 |
+
[
|
183 |
+
self.obj_score_token.weight,
|
184 |
+
self.iou_token.weight,
|
185 |
+
self.mask_tokens.weight,
|
186 |
+
],
|
187 |
+
dim=0,
|
188 |
+
)
|
189 |
+
s = 1
|
190 |
+
else:
|
191 |
+
output_tokens = torch.cat(
|
192 |
+
[self.iou_token.weight, self.mask_tokens.weight], dim=0
|
193 |
+
)
|
194 |
+
output_tokens = output_tokens.unsqueeze(0).expand(
|
195 |
+
sparse_prompt_embeddings.size(0), -1, -1
|
196 |
+
)
|
197 |
+
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
|
198 |
+
|
199 |
+
# Expand per-image data in batch direction to be per-mask
|
200 |
+
if repeat_image:
|
201 |
+
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
|
202 |
+
else:
|
203 |
+
assert image_embeddings.shape[0] == tokens.shape[0]
|
204 |
+
src = image_embeddings
|
205 |
+
src = src + dense_prompt_embeddings
|
206 |
+
assert (
|
207 |
+
image_pe.size(0) == 1
|
208 |
+
), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
|
209 |
+
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
|
210 |
+
b, c, h, w = src.shape
|
211 |
+
|
212 |
+
# Run the transformer
|
213 |
+
hs, src = self.transformer(src, pos_src, tokens)
|
214 |
+
iou_token_out = hs[:, s, :]
|
215 |
+
mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
|
216 |
+
|
217 |
+
# Upscale mask embeddings and predict masks using the mask tokens
|
218 |
+
src = src.transpose(1, 2).view(b, c, h, w)
|
219 |
+
if not self.use_high_res_features:
|
220 |
+
upscaled_embedding = self.output_upscaling(src)
|
221 |
+
else:
|
222 |
+
dc1, ln1, act1, dc2, act2 = self.output_upscaling
|
223 |
+
feat_s0, feat_s1 = high_res_features
|
224 |
+
upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
|
225 |
+
upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
|
226 |
+
|
227 |
+
hyper_in_list: List[torch.Tensor] = []
|
228 |
+
for i in range(self.num_mask_tokens):
|
229 |
+
hyper_in_list.append(
|
230 |
+
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
|
231 |
+
)
|
232 |
+
hyper_in = torch.stack(hyper_in_list, dim=1)
|
233 |
+
b, c, h, w = upscaled_embedding.shape
|
234 |
+
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
|
235 |
+
|
236 |
+
# Generate mask quality predictions
|
237 |
+
iou_pred = self.iou_prediction_head(iou_token_out)
|
238 |
+
if self.pred_obj_scores:
|
239 |
+
assert s == 1
|
240 |
+
object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
|
241 |
+
else:
|
242 |
+
# Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
|
243 |
+
object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
|
244 |
+
|
245 |
+
return masks, iou_pred, mask_tokens_out, object_score_logits
|
246 |
+
|
247 |
+
def _get_stability_scores(self, mask_logits):
|
248 |
+
"""
|
249 |
+
Compute stability scores of the mask logits based on the IoU between upper and
|
250 |
+
lower thresholds, similar to https://github.com/fairinternal/onevision/pull/568.
|
251 |
+
"""
|
252 |
+
mask_logits = mask_logits.flatten(-2)
|
253 |
+
stability_delta = self.dynamic_multimask_stability_delta
|
254 |
+
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
|
255 |
+
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
|
256 |
+
stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
|
257 |
+
return stability_scores
|
258 |
+
|
259 |
+
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
|
260 |
+
"""
|
261 |
+
When outputting a single mask, if the stability score from the current single-mask
|
262 |
+
output (based on output token 0) falls below a threshold, we instead select from
|
263 |
+
multi-mask outputs (based on output token 1~3) the mask with the highest predicted
|
264 |
+
IoU score. This is intended to ensure a valid mask for both clicking and tracking.
|
265 |
+
"""
|
266 |
+
# The best mask from multimask output tokens (1~3)
|
267 |
+
multimask_logits = all_mask_logits[:, 1:, :, :]
|
268 |
+
multimask_iou_scores = all_iou_scores[:, 1:]
|
269 |
+
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
|
270 |
+
batch_inds = torch.arange(
|
271 |
+
multimask_iou_scores.size(0), device=all_iou_scores.device
|
272 |
+
)
|
273 |
+
best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
|
274 |
+
best_multimask_logits = best_multimask_logits.unsqueeze(1)
|
275 |
+
best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
|
276 |
+
best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
|
277 |
+
|
278 |
+
# The mask from singlemask output token 0 and its stability score
|
279 |
+
singlemask_logits = all_mask_logits[:, 0:1, :, :]
|
280 |
+
singlemask_iou_scores = all_iou_scores[:, 0:1]
|
281 |
+
stability_scores = self._get_stability_scores(singlemask_logits)
|
282 |
+
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
|
283 |
+
|
284 |
+
# Dynamically fall back to best multimask output upon low stability scores.
|
285 |
+
mask_logits_out = torch.where(
|
286 |
+
is_stable[..., None, None].expand_as(singlemask_logits),
|
287 |
+
singlemask_logits,
|
288 |
+
best_multimask_logits,
|
289 |
+
)
|
290 |
+
iou_scores_out = torch.where(
|
291 |
+
is_stable.expand_as(singlemask_iou_scores),
|
292 |
+
singlemask_iou_scores,
|
293 |
+
best_multimask_iou_scores,
|
294 |
+
)
|
295 |
+
return mask_logits_out, iou_scores_out
|
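
The decoder above can fall back from the single-mask token to the best multimask token when the single mask is unstable; stability is measured as the IoU between the mask binarized at +delta and at -delta logits. A standalone sketch of that heuristic (the delta value and input shapes are assumptions):

# Stability-score heuristic, written as a free function for illustration.
import torch

def stability_scores(mask_logits: torch.Tensor, delta: float = 0.05) -> torch.Tensor:
    flat = mask_logits.flatten(-2)
    area_i = torch.sum(flat > delta, dim=-1).float()   # area at the stricter threshold
    area_u = torch.sum(flat > -delta, dim=-1).float()  # area at the looser threshold
    return torch.where(area_u > 0, area_i / area_u, 1.0)

logits = torch.randn(1, 4, 256, 256)   # one low-res mask per output token
print(stability_scores(logits))        # values near 1.0 indicate a stable mask
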
sam2/modeling/sam/prompt_encoder.py
ADDED
@@ -0,0 +1,182 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
from typing import Optional, Tuple, Type
|
8 |
+
|
9 |
+
import torch
|
10 |
+
from torch import nn
|
11 |
+
|
12 |
+
from sam2.modeling.position_encoding import PositionEmbeddingRandom
|
13 |
+
|
14 |
+
from sam2.modeling.sam2_utils import LayerNorm2d
|
15 |
+
|
16 |
+
|
17 |
+
class PromptEncoder(nn.Module):
|
18 |
+
def __init__(
|
19 |
+
self,
|
20 |
+
embed_dim: int,
|
21 |
+
image_embedding_size: Tuple[int, int],
|
22 |
+
input_image_size: Tuple[int, int],
|
23 |
+
mask_in_chans: int,
|
24 |
+
activation: Type[nn.Module] = nn.GELU,
|
25 |
+
) -> None:
|
26 |
+
"""
|
27 |
+
Encodes prompts for input to SAM's mask decoder.
|
28 |
+
|
29 |
+
Arguments:
|
30 |
+
embed_dim (int): The prompts' embedding dimension
|
31 |
+
image_embedding_size (tuple(int, int)): The spatial size of the
|
32 |
+
image embedding, as (H, W).
|
33 |
+
input_image_size (int): The padded size of the image as input
|
34 |
+
to the image encoder, as (H, W).
|
35 |
+
mask_in_chans (int): The number of hidden channels used for
|
36 |
+
encoding input masks.
|
37 |
+
activation (nn.Module): The activation to use when encoding
|
38 |
+
input masks.
|
39 |
+
"""
|
40 |
+
super().__init__()
|
41 |
+
self.embed_dim = embed_dim
|
42 |
+
self.input_image_size = input_image_size
|
43 |
+
self.image_embedding_size = image_embedding_size
|
44 |
+
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
|
45 |
+
|
46 |
+
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
|
47 |
+
point_embeddings = [
|
48 |
+
nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
|
49 |
+
]
|
50 |
+
self.point_embeddings = nn.ModuleList(point_embeddings)
|
51 |
+
self.not_a_point_embed = nn.Embedding(1, embed_dim)
|
52 |
+
|
53 |
+
self.mask_input_size = (
|
54 |
+
4 * image_embedding_size[0],
|
55 |
+
4 * image_embedding_size[1],
|
56 |
+
)
|
57 |
+
self.mask_downscaling = nn.Sequential(
|
58 |
+
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
|
59 |
+
LayerNorm2d(mask_in_chans // 4),
|
60 |
+
activation(),
|
61 |
+
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
|
62 |
+
LayerNorm2d(mask_in_chans),
|
63 |
+
activation(),
|
64 |
+
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
|
65 |
+
)
|
66 |
+
self.no_mask_embed = nn.Embedding(1, embed_dim)
|
67 |
+
|
68 |
+
def get_dense_pe(self) -> torch.Tensor:
|
69 |
+
"""
|
70 |
+
Returns the positional encoding used to encode point prompts,
|
71 |
+
applied to a dense set of points the shape of the image encoding.
|
72 |
+
|
73 |
+
Returns:
|
74 |
+
torch.Tensor: Positional encoding with shape
|
75 |
+
1x(embed_dim)x(embedding_h)x(embedding_w)
|
76 |
+
"""
|
77 |
+
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
|
78 |
+
|
79 |
+
def _embed_points(
|
80 |
+
self,
|
81 |
+
points: torch.Tensor,
|
82 |
+
labels: torch.Tensor,
|
83 |
+
pad: bool,
|
84 |
+
) -> torch.Tensor:
|
85 |
+
"""Embeds point prompts."""
|
86 |
+
points = points + 0.5 # Shift to center of pixel
|
87 |
+
if pad:
|
88 |
+
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
|
89 |
+
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
|
90 |
+
points = torch.cat([points, padding_point], dim=1)
|
91 |
+
labels = torch.cat([labels, padding_label], dim=1)
|
92 |
+
point_embedding = self.pe_layer.forward_with_coords(
|
93 |
+
points, self.input_image_size
|
94 |
+
)
|
95 |
+
point_embedding[labels == -1] = 0.0
|
96 |
+
point_embedding[labels == -1] += self.not_a_point_embed.weight
|
97 |
+
point_embedding[labels == 0] += self.point_embeddings[0].weight
|
98 |
+
point_embedding[labels == 1] += self.point_embeddings[1].weight
|
99 |
+
point_embedding[labels == 2] += self.point_embeddings[2].weight
|
100 |
+
point_embedding[labels == 3] += self.point_embeddings[3].weight
|
101 |
+
return point_embedding
|
102 |
+
|
103 |
+
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
|
104 |
+
"""Embeds box prompts."""
|
105 |
+
boxes = boxes + 0.5 # Shift to center of pixel
|
106 |
+
coords = boxes.reshape(-1, 2, 2)
|
107 |
+
corner_embedding = self.pe_layer.forward_with_coords(
|
108 |
+
coords, self.input_image_size
|
109 |
+
)
|
110 |
+
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
|
111 |
+
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
|
112 |
+
return corner_embedding
|
113 |
+
|
114 |
+
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
|
115 |
+
"""Embeds mask inputs."""
|
116 |
+
mask_embedding = self.mask_downscaling(masks)
|
117 |
+
return mask_embedding
|
118 |
+
|
119 |
+
def _get_batch_size(
|
120 |
+
self,
|
121 |
+
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
|
122 |
+
boxes: Optional[torch.Tensor],
|
123 |
+
masks: Optional[torch.Tensor],
|
124 |
+
) -> int:
|
125 |
+
"""
|
126 |
+
Gets the batch size of the output given the batch size of the input prompts.
|
127 |
+
"""
|
128 |
+
if points is not None:
|
129 |
+
return points[0].shape[0]
|
130 |
+
elif boxes is not None:
|
131 |
+
return boxes.shape[0]
|
132 |
+
elif masks is not None:
|
133 |
+
return masks.shape[0]
|
134 |
+
else:
|
135 |
+
return 1
|
136 |
+
|
137 |
+
def _get_device(self) -> torch.device:
|
138 |
+
return self.point_embeddings[0].weight.device
|
139 |
+
|
140 |
+
def forward(
|
141 |
+
self,
|
142 |
+
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
|
143 |
+
boxes: Optional[torch.Tensor],
|
144 |
+
masks: Optional[torch.Tensor],
|
145 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
146 |
+
"""
|
147 |
+
Embeds different types of prompts, returning both sparse and dense
|
148 |
+
embeddings.
|
149 |
+
|
150 |
+
Arguments:
|
151 |
+
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
|
152 |
+
and labels to embed.
|
153 |
+
boxes (torch.Tensor or none): boxes to embed
|
154 |
+
masks (torch.Tensor or none): masks to embed
|
155 |
+
|
156 |
+
Returns:
|
157 |
+
torch.Tensor: sparse embeddings for the points and boxes, with shape
|
158 |
+
BxNx(embed_dim), where N is determined by the number of input points
|
159 |
+
and boxes.
|
160 |
+
torch.Tensor: dense embeddings for the masks, in the shape
|
161 |
+
Bx(embed_dim)x(embed_H)x(embed_W)
|
162 |
+
"""
|
163 |
+
bs = self._get_batch_size(points, boxes, masks)
|
164 |
+
sparse_embeddings = torch.empty(
|
165 |
+
(bs, 0, self.embed_dim), device=self._get_device()
|
166 |
+
)
|
167 |
+
if points is not None:
|
168 |
+
coords, labels = points
|
169 |
+
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
|
170 |
+
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
|
171 |
+
if boxes is not None:
|
172 |
+
box_embeddings = self._embed_boxes(boxes)
|
173 |
+
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
|
174 |
+
|
175 |
+
if masks is not None:
|
176 |
+
dense_embeddings = self._embed_masks(masks)
|
177 |
+
else:
|
178 |
+
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
|
179 |
+
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
|
180 |
+
)
|
181 |
+
|
182 |
+
return sparse_embeddings, dense_embeddings
|
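
The prompt encoder above returns sparse embeddings (one token per point or box corner) and a dense embedding grid (from an input mask, or the learned no-mask embedding when none is given). A hedged usage sketch; the embedding dimension, grid size, and input resolution below are assumptions:

# Illustrative prompt-encoder call for one foreground click (sizes assumed).
import torch

encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
points = torch.tensor([[[512.0, 512.0]]])   # (B, N, 2) click coordinates in pixels
labels = torch.tensor([[1.0]])              # 1 = foreground, 0 = background
sparse, dense = encoder(points=(points, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)  # (1, 2, 256) incl. padding point, (1, 256, 64, 64)
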
sam2/modeling/sam/transformer.py
ADDED
@@ -0,0 +1,327 @@
1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
|
4 |
+
# This source code is licensed under the license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
import math
|
8 |
+
import warnings
|
9 |
+
from functools import partial
|
10 |
+
from typing import Tuple, Type
|
11 |
+
|
12 |
+
import torch
|
13 |
+
import torch.nn.functional as F
|
14 |
+
from torch import nn, Tensor
|
15 |
+
|
16 |
+
from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
|
17 |
+
|
18 |
+
from sam2.modeling.sam2_utils import MLP
|
19 |
+
from sam2.utils.misc import get_sdpa_settings
|
20 |
+
|
21 |
+
warnings.simplefilter(action="ignore", category=FutureWarning)
|
22 |
+
OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
|
23 |
+
|
24 |
+
|
25 |
+
class TwoWayTransformer(nn.Module):
|
26 |
+
def __init__(
|
27 |
+
self,
|
28 |
+
depth: int,
|
29 |
+
embedding_dim: int,
|
30 |
+
num_heads: int,
|
31 |
+
mlp_dim: int,
|
32 |
+
activation: Type[nn.Module] = nn.ReLU,
|
33 |
+
attention_downsample_rate: int = 2,
|
34 |
+
) -> None:
|
35 |
+
"""
|
36 |
+
A transformer decoder that attends to an input image using
|
37 |
+
queries whose positional embedding is supplied.
|
38 |
+
|
39 |
+
Args:
|
40 |
+
depth (int): number of layers in the transformer
|
41 |
+
embedding_dim (int): the channel dimension for the input embeddings
|
42 |
+
num_heads (int): the number of heads for multihead attention. Must
|
43 |
+
divide embedding_dim
|
44 |
+
mlp_dim (int): the channel dimension internal to the MLP block
|
45 |
+
activation (nn.Module): the activation to use in the MLP block
|
46 |
+
"""
|
47 |
+
super().__init__()
|
48 |
+
self.depth = depth
|
49 |
+
self.embedding_dim = embedding_dim
|
50 |
+
self.num_heads = num_heads
|
51 |
+
self.mlp_dim = mlp_dim
|
52 |
+
self.layers = nn.ModuleList()
|
53 |
+
|
54 |
+
for i in range(depth):
|
55 |
+
self.layers.append(
|
56 |
+
TwoWayAttentionBlock(
|
57 |
+
embedding_dim=embedding_dim,
|
58 |
+
num_heads=num_heads,
|
59 |
+
mlp_dim=mlp_dim,
|
60 |
+
activation=activation,
|
61 |
+
attention_downsample_rate=attention_downsample_rate,
|
62 |
+
skip_first_layer_pe=(i == 0),
|
63 |
+
)
|
64 |
+
)
|
65 |
+
|
66 |
+
self.final_attn_token_to_image = Attention(
|
67 |
+
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
|
68 |
+
)
|
69 |
+
self.norm_final_attn = nn.LayerNorm(embedding_dim)
|
70 |
+
|
71 |
+
def forward(
|
72 |
+
self,
|
73 |
+
image_embedding: Tensor,
|
74 |
+
image_pe: Tensor,
|
75 |
+
point_embedding: Tensor,
|
76 |
+
) -> Tuple[Tensor, Tensor]:
|
77 |
+
"""
|
78 |
+
Args:
|
79 |
+
image_embedding (torch.Tensor): image to attend to. Should be shape
|
80 |
+
B x embedding_dim x h x w for any h and w.
|
81 |
+
image_pe (torch.Tensor): the positional encoding to add to the image. Must
|
82 |
+
have the same shape as image_embedding.
|
83 |
+
point_embedding (torch.Tensor): the embedding to add to the query points.
|
84 |
+
Must have shape B x N_points x embedding_dim for any N_points.
|
85 |
+
|
86 |
+
Returns:
|
87 |
+
torch.Tensor: the processed point_embedding
|
88 |
+
torch.Tensor: the processed image_embedding
|
89 |
+
"""
|
90 |
+
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)

        # Prepare queries
        queries = point_embedding
        keys = image_embedding

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )

        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys


class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.

        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLP(
            embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation
        )
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, image embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys


class Attention(nn.Module):
    """
    An attention layer that allows for downscaling the size of the embedding
    after projection to queries, keys, and values.
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
        dropout: float = 0.0,
        kv_in_dim: int = None,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert (
            self.internal_dim % num_heads == 0
        ), "num_heads must divide embedding_dim."

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

        self.dropout_p = dropout

    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    def _recombine_heads(self, x: Tensor) -> Tensor:
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        dropout_p = self.dropout_p if self.training else 0.0
        # Attention
        with torch.backends.cuda.sdp_kernel(
            enable_flash=USE_FLASH_ATTN,
            # if Flash attention kernel is off, then math kernel needs to be enabled
            enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
            enable_mem_efficient=OLD_GPU,
        ):
            out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)

        out = self._recombine_heads(out)
        out = self.out_proj(out)

        return out


class RoPEAttention(Attention):
    """Attention with rotary position encoding."""

    def __init__(
        self,
        *args,
        rope_theta=10000.0,
        # whether to repeat q rope to match k length
        # this is needed for cross-attention to memories
        rope_k_repeat=False,
        feat_sizes=(32, 32),  # [w, h] for stride 16 feats at 512 resolution
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.compute_cis = partial(
            compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta
        )
        freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
        self.freqs_cis = freqs_cis
        self.rope_k_repeat = rope_k_repeat

    def forward(
        self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0
    ) -> Tensor:
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Apply rotary position encoding
        w = h = math.sqrt(q.shape[-2])
        self.freqs_cis = self.freqs_cis.to(q.device)
        if self.freqs_cis.shape[0] != q.shape[-2]:
            self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
        if q.shape[-2] != k.shape[-2]:
            assert self.rope_k_repeat

        num_k_rope = k.size(-2) - num_k_exclude_rope
        q, k[:, :, :num_k_rope] = apply_rotary_enc(
            q,
            k[:, :, :num_k_rope],
            freqs_cis=self.freqs_cis,
            repeat_freqs_k=self.rope_k_repeat,
        )

        dropout_p = self.dropout_p if self.training else 0.0
        # Attention
        with torch.backends.cuda.sdp_kernel(
            enable_flash=USE_FLASH_ATTN,
            # if Flash attention kernel is off, then math kernel needs to be enabled
            enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
            enable_mem_efficient=OLD_GPU,
        ):
            out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)

        out = self._recombine_heads(out)
        out = self.out_proj(out)

        return out
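Note: the `Attention` and `RoPEAttention` modules above are self-contained enough to smoke-test on dummy tensors. The sketch below is illustrative only (it is not part of the uploaded files); it assumes the `sam2` package is importable and that the module-level kernel flags (`USE_FLASH_ATTN`, `OLD_GPU`, `MATH_KERNEL_ON`) defined earlier in this file resolve on your machine.

# Minimal usage sketch (assumed import path; shapes follow the comments above:
# queries are [B, N_tokens, C] and keys/values are [B, N_image_tokens, C]).
import torch
from sam2.modeling.sam.transformer import Attention, RoPEAttention

attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
q = torch.randn(1, 5, 256)          # e.g. 5 sparse prompt tokens
kv = torch.randn(1, 64 * 64, 256)   # e.g. a flattened 64x64 image embedding
print(attn(q=q, k=kv, v=kv).shape)  # -> torch.Size([1, 5, 256])

# RoPEAttention assumes the query length is a flattened H x W grid
# (here 32x32, matching the default feat_sizes).
rope_attn = RoPEAttention(embedding_dim=256, num_heads=8, feat_sizes=(32, 32))
x = torch.randn(1, 32 * 32, 256)
print(rope_attn(q=x, k=x, v=x).shape)  # -> torch.Size([1, 1024, 256])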
sam2/modeling/sam2_base.py
ADDED
@@ -0,0 +1,829 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.distributed
import torch.nn.functional as F

from torch.nn.init import trunc_normal_

from sam2.modeling.sam.mask_decoder import MaskDecoder
from sam2.modeling.sam.prompt_encoder import PromptEncoder
from sam2.modeling.sam.transformer import TwoWayTransformer
from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames

# a large negative value as a placeholder score for missing objects
NO_OBJ_SCORE = -1024.0


class SAM2Base(torch.nn.Module):
    def __init__(
        self,
        image_encoder,
        memory_attention,
        memory_encoder,
        num_maskmem=7,  # default 1 input frame + 6 previous frames
        image_size=512,
        backbone_stride=16,  # stride of the image backbone output
        sigmoid_scale_for_mem_enc=1.0,  # scale factor for mask sigmoid prob
        sigmoid_bias_for_mem_enc=0.0,  # bias factor for mask sigmoid prob
        # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
        binarize_mask_from_pts_for_mem_enc=False,
        use_mask_input_as_output_without_sam=False,  # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
        # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
        # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
        # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
        max_cond_frames_in_attn=-1,
        # on the first frame, whether to directly add the no-memory embedding to the image feature
        # (instead of using the transformer encoder)
        directly_add_no_mem_embed=False,
        # whether to use high-resolution feature maps in the SAM mask decoder
        use_high_res_features_in_sam=False,
        # whether to output multiple (3) masks for the first click on initial conditioning frames
        multimask_output_in_sam=False,
        # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
        # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
        multimask_min_pt_num=1,
        multimask_max_pt_num=1,
        # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
        multimask_output_for_tracking=False,
        # Whether to use multimask tokens for obj ptr; Only relevant when both
        # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
        use_multimask_token_for_obj_ptr: bool = False,
        # whether to use sigmoid to restrict ious prediction to [0-1]
        iou_prediction_use_sigmoid=False,
        # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
        # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
        # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
        memory_temporal_stride_for_eval=1,
        # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
        # if `add_all_frames_to_correct_as_cond` is False, we keep the conditioning frame list to only the initial conditioning frames
        add_all_frames_to_correct_as_cond=False,
        # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
        non_overlap_masks_for_mem_enc=False,
        # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
        use_obj_ptrs_in_encoder=False,
        # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
        max_obj_ptrs_in_encoder=16,
        # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
        add_tpos_enc_to_obj_ptrs=True,
        # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
        # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
        proj_tpos_enc_in_obj_ptrs=False,
        # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
        # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
        only_obj_ptrs_in_the_past_for_eval=False,
        # Whether to predict if there is an object in the frame
        pred_obj_scores: bool = False,
        # Whether to use an MLP to predict object scores
        pred_obj_scores_mlp: bool = False,
        # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
        # Whether to have a fixed no obj pointer when there is no object present
        # or to use it as an additive embedding with obj_ptr produced by decoder
        fixed_no_obj_ptr: bool = False,
        # Soft no object, i.e. mix in no_obj_ptr softly,
        # hope to make recovery easier if there is a mistake and mitigate accumulation of errors
        soft_no_obj_ptr: bool = False,
        use_mlp_for_obj_ptr_proj: bool = False,
        # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
        sam_mask_decoder_extra_args=None,
        compile_image_encoder: bool = False,
    ):
        super().__init__()

        # Part 1: the image backbone
        self.image_encoder = image_encoder
        # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
        self.use_high_res_features_in_sam = use_high_res_features_in_sam
        self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
        self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
        self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
        if use_obj_ptrs_in_encoder:
            # A conv layer to downsample the mask prompt to stride 4 (the same stride as
            # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
            # so that it can be fed into the SAM mask decoder to generate a pointer.
            self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
        self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
        if proj_tpos_enc_in_obj_ptrs:
            assert add_tpos_enc_to_obj_ptrs  # these options need to be used together
        self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
        self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval

        # Part 2: memory attention to condition current frame's visual features
        # with memories (and obj ptrs) from past frames
        self.memory_attention = memory_attention
        self.hidden_dim = memory_attention.d_model

        # Part 3: memory encoder for the previous frame's outputs
        self.memory_encoder = memory_encoder
        self.mem_dim = self.hidden_dim
        if hasattr(self.memory_encoder, "out_proj") and hasattr(
            self.memory_encoder.out_proj, "weight"
        ):
            # if there is compression of memories along channel dim
            self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
        self.num_maskmem = num_maskmem  # Number of memories accessible
        # Temporal encoding of the memories
        self.maskmem_tpos_enc = torch.nn.Parameter(
            torch.zeros(num_maskmem, 1, 1, self.mem_dim)
        )
        trunc_normal_(self.maskmem_tpos_enc, std=0.02)
        # a single token to indicate no memory embedding from previous frames
        self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
        self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
        trunc_normal_(self.no_mem_embed, std=0.02)
        trunc_normal_(self.no_mem_pos_enc, std=0.02)
        self.directly_add_no_mem_embed = directly_add_no_mem_embed
        # Apply sigmoid to the output raw mask logits (to turn them from
        # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
        self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
        self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
        self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
        self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
        self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
        # On frames with mask input, whether to directly output the input mask without
        # using a SAM prompt encoder + mask decoder
        self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
        self.multimask_output_in_sam = multimask_output_in_sam
        self.multimask_min_pt_num = multimask_min_pt_num
        self.multimask_max_pt_num = multimask_max_pt_num
        self.multimask_output_for_tracking = multimask_output_for_tracking
        self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
        self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid

        # Part 4: SAM-style prompt encoder (for both mask and point inputs)
        # and SAM-style mask decoder for the final mask output
        self.image_size = image_size
        self.backbone_stride = backbone_stride
        self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
        self.pred_obj_scores = pred_obj_scores
        self.pred_obj_scores_mlp = pred_obj_scores_mlp
        self.fixed_no_obj_ptr = fixed_no_obj_ptr
        self.soft_no_obj_ptr = soft_no_obj_ptr
        if self.fixed_no_obj_ptr:
            assert self.pred_obj_scores
            assert self.use_obj_ptrs_in_encoder
        if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
            self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
            trunc_normal_(self.no_obj_ptr, std=0.02)
        self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj

        self._build_sam_heads()
        self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
        self.max_cond_frames_in_attn = max_cond_frames_in_attn

        # Model compilation
        if compile_image_encoder:
            # Compile the forward function (not the full module) to allow loading checkpoints.
            print(
                "Image encoder compilation is enabled. First forward pass will be slow."
            )
            self.image_encoder.forward = torch.compile(
                self.image_encoder.forward,
                mode="max-autotune",
                fullgraph=True,
                dynamic=False,
            )

    @property
    def device(self):
        return next(self.parameters()).device

    def forward(self, *args, **kwargs):
        raise NotImplementedError(
            "Please use the corresponding methods in SAM2VideoPredictor for inference."
            "See notebooks/video_predictor_example.ipynb for an example."
        )

    def _build_sam_heads(self):
        """Build SAM-style prompt encoder and mask decoder."""
        self.sam_prompt_embed_dim = self.hidden_dim
        self.sam_image_embedding_size = self.image_size // self.backbone_stride

        # build PromptEncoder and MaskDecoder from SAM
        # (their hyperparameters like `mask_in_chans=16` are from SAM code)
        self.sam_prompt_encoder = PromptEncoder(
            embed_dim=self.sam_prompt_embed_dim,
            image_embedding_size=(
                self.sam_image_embedding_size,
                self.sam_image_embedding_size,
            ),
            input_image_size=(self.image_size, self.image_size),
            mask_in_chans=16,
        )
        self.sam_mask_decoder = MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=self.sam_prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=self.sam_prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
            use_high_res_features=self.use_high_res_features_in_sam,
            iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
            pred_obj_scores=self.pred_obj_scores,
            pred_obj_scores_mlp=self.pred_obj_scores_mlp,
            use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
            **(self.sam_mask_decoder_extra_args or {}),
        )
        if self.use_obj_ptrs_in_encoder:
            # a linear projection on SAM output tokens to turn them into object pointers
            self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
            if self.use_mlp_for_obj_ptr_proj:
                self.obj_ptr_proj = MLP(
                    self.hidden_dim, self.hidden_dim, self.hidden_dim, 3
                )
        else:
            self.obj_ptr_proj = torch.nn.Identity()
        if self.proj_tpos_enc_in_obj_ptrs:
            # a linear projection on temporal positional encoding in object pointers to
            # avoid potential interference with spatial positional encoding
            self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
        else:
            self.obj_ptr_tpos_proj = torch.nn.Identity()

    def _forward_sam_heads(
        self,
        backbone_features,
        point_inputs=None,
        mask_inputs=None,
        high_res_features=None,
        multimask_output=False,
    ):
        """
        Forward SAM prompt encoders and mask heads.

        Inputs:
        - backbone_features: image features of [B, C, H, W] shape
        - point_inputs: a dictionary with "point_coords" and "point_labels", where
          1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
             absolute pixel-unit coordinate in (x, y) format of the P input points
          2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
             positive clicks, 0 means negative clicks, and -1 means padding
        - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
          same spatial size as the image.
        - high_res_features: either 1) None or 2) a list of length 2 containing
          two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
          which will be used as high-resolution feature maps for SAM decoder.
        - multimask_output: if it's True, we output 3 candidate masks and their 3
          corresponding IoU estimates, and if it's False, we output only 1 mask and
          its corresponding IoU estimate.

        Outputs:
        - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
          `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
          output mask logits (before sigmoid) for the low-resolution masks, with 4x
          the resolution (1/4 stride) of the input backbone_features.
        - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
          if `multimask_output=True` and M = 1 if `multimask_output=False`),
          upsampled from the low-resolution masks, with the same size as the image
          (stride is 1 pixel).
        - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
          if `multimask_output=False`), the estimated IoU of each output mask.
        - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
          If `multimask_output=True`, it's the mask with the highest IoU estimate.
          If `multimask_output=False`, it's the same as `low_res_multimasks`.
        - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
          If `multimask_output=True`, it's the mask with the highest IoU estimate.
          If `multimask_output=False`, it's the same as `high_res_multimasks`.
        - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
          based on the output token from the SAM mask decoder.
        """
        B = backbone_features.size(0)
        device = backbone_features.device
        assert backbone_features.size(1) == self.sam_prompt_embed_dim
        assert backbone_features.size(2) == self.sam_image_embedding_size
        assert backbone_features.size(3) == self.sam_image_embedding_size

        # a) Handle point prompts
        if point_inputs is not None:
            sam_point_coords = point_inputs["point_coords"]
            sam_point_labels = point_inputs["point_labels"]
            assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
        else:
            # If no points are provided, pad with an empty point (with label -1)
            sam_point_coords = torch.zeros(B, 1, 2, device=device)
            sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)

        # b) Handle mask prompts
        if mask_inputs is not None:
            # If mask_inputs is provided, downsize it into low-res mask input if needed
            # and feed it as a dense mask prompt into the SAM mask encoder
            assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
            if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
                sam_mask_prompt = F.interpolate(
                    mask_inputs.float(),
                    size=self.sam_prompt_encoder.mask_input_size,
                    align_corners=False,
                    mode="bilinear",
                    antialias=True,  # use antialias for downsampling
                )
            else:
                sam_mask_prompt = mask_inputs
        else:
            # Otherwise, simply feed None (and SAM's prompt encoder will add
            # a learned `no_mask_embed` to indicate no mask input in this case).
            sam_mask_prompt = None

        sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
            points=(sam_point_coords, sam_point_labels),
            boxes=None,
            masks=sam_mask_prompt,
        )
        (
            low_res_multimasks,
            ious,
            sam_output_tokens,
            object_score_logits,
        ) = self.sam_mask_decoder(
            image_embeddings=backbone_features,
            image_pe=self.sam_prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            repeat_image=False,  # the image is already batched
            high_res_features=high_res_features,
        )
        if self.pred_obj_scores:
            is_obj_appearing = object_score_logits > 0

            # Mask used for spatial memories is always a *hard* choice between obj and no obj,
            # consistent with the actual mask prediction
            low_res_multimasks = torch.where(
                is_obj_appearing[:, None, None],
                low_res_multimasks,
                NO_OBJ_SCORE,
            )

        # convert masks from possibly bfloat16 (or float16) to float32
        # (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
        low_res_multimasks = low_res_multimasks.float()
        high_res_multimasks = F.interpolate(
            low_res_multimasks,
            size=(self.image_size, self.image_size),
            mode="bilinear",
            align_corners=False,
        )

        sam_output_token = sam_output_tokens[:, 0]
        if multimask_output:
            # take the best mask prediction (with the highest IoU estimation)
            best_iou_inds = torch.argmax(ious, dim=-1)
            batch_inds = torch.arange(B, device=device)
            low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
            high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
            if sam_output_tokens.size(1) > 1:
                sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
        else:
            low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks

        # Extract object pointer from the SAM output token (with occlusion handling)
        obj_ptr = self.obj_ptr_proj(sam_output_token)
        if self.pred_obj_scores:
            # Allow *soft* no obj ptr, unlike for masks
            if self.soft_no_obj_ptr:
                # Only hard possible with gt
                assert not self.teacher_force_obj_scores_for_mem
                lambda_is_obj_appearing = object_score_logits.sigmoid()
            else:
                lambda_is_obj_appearing = is_obj_appearing.float()

            if self.fixed_no_obj_ptr:
                obj_ptr = lambda_is_obj_appearing * obj_ptr
            obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr

        return (
            low_res_multimasks,
            high_res_multimasks,
            ious,
            low_res_masks,
            high_res_masks,
            obj_ptr,
            object_score_logits,
        )

    def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
        """
        Directly turn binary `mask_inputs` into output mask logits without using SAM.
        (same input and output shapes as in _forward_sam_heads above).
        """
        # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
        out_scale, out_bias = 20.0, -10.0  # sigmoid(-10.0)=4.5398e-05
        mask_inputs_float = mask_inputs.float()
        high_res_masks = mask_inputs_float * out_scale + out_bias
        low_res_masks = F.interpolate(
            high_res_masks,
            size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
            align_corners=False,
            mode="bilinear",
            antialias=True,  # use antialias for downsampling
        )
        # a dummy IoU prediction of all 1's under mask input
        ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
        if not self.use_obj_ptrs_in_encoder:
            # all zeros as a dummy object pointer (of shape [B, C])
            obj_ptr = torch.zeros(
                mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device
            )
        else:
            # produce an object pointer using the SAM decoder from the mask input
            _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
                backbone_features=backbone_features,
                mask_inputs=self.mask_downsample(mask_inputs_float),
                high_res_features=high_res_features,
            )
        # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
        # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
        # on the object_scores from the SAM decoder.
        is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
        is_obj_appearing = is_obj_appearing[..., None]
        lambda_is_obj_appearing = is_obj_appearing.float()
        object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
        if self.pred_obj_scores:
            if self.fixed_no_obj_ptr:
                obj_ptr = lambda_is_obj_appearing * obj_ptr
            obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr

        return (
            low_res_masks,
            high_res_masks,
            ious,
            low_res_masks,
            high_res_masks,
            obj_ptr,
            object_score_logits,
        )

    def forward_image(self, img_batch: torch.Tensor):
        """Get the image feature on the input batch."""
        backbone_out = self.image_encoder(img_batch)
        if self.use_high_res_features_in_sam:
            # precompute projected level 0 and level 1 features in SAM decoder
            # to avoid running it again on every SAM click
            backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(
                backbone_out["backbone_fpn"][0]
            )
            backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(
                backbone_out["backbone_fpn"][1]
            )
        return backbone_out

    def _prepare_backbone_features(self, backbone_out):
        """Prepare and flatten visual features."""
        backbone_out = backbone_out.copy()
        assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
        assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels

        feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
        vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]

        feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
        # flatten NxCxHxW to HWxNxC
        vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
        vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]

        return backbone_out, vision_feats, vision_pos_embeds, feat_sizes

    def _prepare_memory_conditioned_features(
        self,
        frame_idx,
        is_init_cond_frame,
        current_vision_feats,
        current_vision_pos_embeds,
        feat_sizes,
        output_dict,
        num_frames,
        track_in_reverse=False,  # tracking in reverse time order (for demo usage)
    ):
        """Fuse the current frame's visual feature map with previous memory."""
        B = current_vision_feats[-1].size(1)  # batch size on this frame
        C = self.hidden_dim
        H, W = feat_sizes[-1]  # top-level (lowest-resolution) feature size
        device = current_vision_feats[-1].device
        # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
        # In this case, we skip the fusion with any memory.
        if self.num_maskmem == 0:  # Disable memory and skip fusion
            pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
            return pix_feat

        num_obj_ptr_tokens = 0
        # Step 1: condition the visual features of the current frame on previous memories
        if not is_init_cond_frame:
            # Retrieve the memories encoded with the maskmem backbone
            to_cat_memory, to_cat_memory_pos_embed = [], []
            # Add conditioning frames' output first (all cond frames have t_pos=0 for
            # when getting temporal positional embedding below)
            assert len(output_dict["cond_frame_outputs"]) > 0
            # Select a maximum number of temporally closest cond frames for cross attention
            cond_outputs = output_dict["cond_frame_outputs"]
            selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
                frame_idx, cond_outputs, self.max_cond_frames_in_attn
            )
            t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
            # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
            # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
            # We also allow taking the memory frame non-consecutively (with r>1), in which case
            # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame.
            r = self.memory_temporal_stride_for_eval
            for t_pos in range(1, self.num_maskmem):
                t_rel = self.num_maskmem - t_pos  # how many frames before current frame
                if t_rel == 1:
                    # for t_rel == 1, we take the last frame (regardless of r)
                    if not track_in_reverse:
                        # the frame immediately before this frame (i.e. frame_idx - 1)
                        prev_frame_idx = frame_idx - t_rel
                    else:
                        # the frame immediately after this frame (i.e. frame_idx + 1)
                        prev_frame_idx = frame_idx + t_rel
                else:
                    # for t_rel >= 2, we take the memory frame from every r-th frames
                    if not track_in_reverse:
                        # first find the nearest frame among every r-th frames before this frame
                        # for r=1, this would be (frame_idx - 2)
                        prev_frame_idx = ((frame_idx - 2) // r) * r
                        # then seek further among every r-th frames
                        prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
                    else:
                        # first find the nearest frame among every r-th frames after this frame
                        # for r=1, this would be (frame_idx + 2)
                        prev_frame_idx = -(-(frame_idx + 2) // r) * r
                        # then seek further among every r-th frames
                        prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
                out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
                if out is None:
                    # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
                    # frames, we still attend to it as if it's a non-conditioning frame.
                    out = unselected_cond_outputs.get(prev_frame_idx, None)
                t_pos_and_prevs.append((t_pos, out))

            for t_pos, prev in t_pos_and_prevs:
                if prev is None:
                    continue  # skip padding frames
                # "maskmem_features" might have been offloaded to CPU in demo use cases,
                # so we load it back to GPU (it's a no-op if it's already on GPU).
                feats = prev["maskmem_features"].cuda(non_blocking=True)
                to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
                # Spatial positional encoding (it might have been offloaded to CPU in eval)
                maskmem_enc = prev["maskmem_pos_enc"][-1].cuda()
                maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
                # Temporal positional encoding
                maskmem_enc = (
                    maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
                )
                to_cat_memory_pos_embed.append(maskmem_enc)

            # Construct the list of past object pointers
            if self.use_obj_ptrs_in_encoder:
                max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
                # First add those object pointers from selected conditioning frames
                # (optionally, only include object pointers in the past during evaluation)
                if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
                    ptr_cond_outputs = {
                        t: out
                        for t, out in selected_cond_outputs.items()
                        if (t >= frame_idx if track_in_reverse else t <= frame_idx)
                    }
                else:
                    ptr_cond_outputs = selected_cond_outputs
                pos_and_ptrs = [
                    # Temporal pos encoding contains how far away each pointer is from current frame
                    (abs(frame_idx - t), out["obj_ptr"])
                    for t, out in ptr_cond_outputs.items()
                ]
                # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
                for t_diff in range(1, max_obj_ptrs_in_encoder):
                    t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
                    if t < 0 or (num_frames is not None and t >= num_frames):
                        break
                    out = output_dict["non_cond_frame_outputs"].get(
                        t, unselected_cond_outputs.get(t, None)
                    )
                    if out is not None:
                        pos_and_ptrs.append((t_diff, out["obj_ptr"]))
                # If we have at least one object pointer, add them to the cross attention
                if len(pos_and_ptrs) > 0:
                    pos_list, ptrs_list = zip(*pos_and_ptrs)
                    # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
                    obj_ptrs = torch.stack(ptrs_list, dim=0)
                    # a temporal positional embedding based on how far each object pointer is from
                    # the current frame (sine embedding normalized by the max pointer num).
                    if self.add_tpos_enc_to_obj_ptrs:
                        t_diff_max = max_obj_ptrs_in_encoder - 1
                        tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
                        obj_pos = torch.tensor(pos_list, device=device)
                        obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
                        obj_pos = self.obj_ptr_tpos_proj(obj_pos)
                        obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
                    else:
                        obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
                    if self.mem_dim < C:
                        # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
                        obj_ptrs = obj_ptrs.reshape(
                            -1, B, C // self.mem_dim, self.mem_dim
                        )
                        obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
                        obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
                    to_cat_memory.append(obj_ptrs)
                    to_cat_memory_pos_embed.append(obj_pos)
                    num_obj_ptr_tokens = obj_ptrs.shape[0]
                else:
                    num_obj_ptr_tokens = 0
        else:
            # for initial conditioning frames, encode them without using any previous memory
            if self.directly_add_no_mem_embed:
                # directly add no-mem embedding (instead of using the transformer encoder)
                pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
                pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
                return pix_feat_with_mem

            # Use a dummy token on the first frame (to avoid empty memory input to transformer encoder)
            to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
            to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]

        # Step 2: Concatenate the memories and forward through the transformer encoder
        memory = torch.cat(to_cat_memory, dim=0)
        memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)

        pix_feat_with_mem = self.memory_attention(
            curr=current_vision_feats,
            curr_pos=current_vision_pos_embeds,
            memory=memory,
            memory_pos=memory_pos_embed,
            num_obj_ptr_tokens=num_obj_ptr_tokens,
        )
        # reshape the output (HW)BC => BCHW
        pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
        return pix_feat_with_mem

    def _encode_new_memory(
        self,
        current_vision_feats,
        feat_sizes,
        pred_masks_high_res,
        is_mask_from_pts,
    ):
        """Encode the current image and its prediction into a memory feature."""
        B = current_vision_feats[-1].size(1)  # batch size on this frame
        C = self.hidden_dim
        H, W = feat_sizes[-1]  # top-level (lowest-resolution) feature size
        # top-level feature, (HW)BC => BCHW
        pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
        if self.non_overlap_masks_for_mem_enc and not self.training:
            # optionally, apply non-overlapping constraints to the masks (it's applied
            # in the batch dimension and should only be used during eval, where all
            # the objects come from the same video under batch size 1).
            pred_masks_high_res = self._apply_non_overlapping_constraints(
                pred_masks_high_res
            )
        # scale the raw mask logits with a temperature before applying sigmoid
        binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
        if binarize and not self.training:
            mask_for_mem = (pred_masks_high_res > 0).float()
        else:
            # apply sigmoid on the raw mask logits to turn them into range (0, 1)
            mask_for_mem = torch.sigmoid(pred_masks_high_res)
        # apply scale and bias terms to the sigmoid probabilities
        if self.sigmoid_scale_for_mem_enc != 1.0:
            mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
        if self.sigmoid_bias_for_mem_enc != 0.0:
            mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
        maskmem_out = self.memory_encoder(
            pix_feat, mask_for_mem, skip_mask_sigmoid=True  # sigmoid already applied
        )
        maskmem_features = maskmem_out["vision_features"]
        maskmem_pos_enc = maskmem_out["vision_pos_enc"]

        return maskmem_features, maskmem_pos_enc

    def track_step(
        self,
        frame_idx,
        is_init_cond_frame,
        current_vision_feats,
        current_vision_pos_embeds,
        feat_sizes,
        point_inputs,
        mask_inputs,
        output_dict,
        num_frames,
        track_in_reverse=False,  # tracking in reverse time order (for demo usage)
        # Whether to run the memory encoder on the predicted masks. Sometimes we might want
        # to skip the memory encoder with `run_mem_encoder=False`. For example,
        # in demo we might call `track_step` multiple times for each user click,
        # and only encode the memory when the user finalizes their clicks. And in ablation
        # settings like SAM training on static images, we don't need the memory encoder.
        run_mem_encoder=True,
        # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
        prev_sam_mask_logits=None,
    ):
        current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
        # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
        if len(current_vision_feats) > 1:
            high_res_features = [
                x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
                for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
            ]
        else:
            high_res_features = None
        if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
            # When use_mask_input_as_output_without_sam=True, we directly output the mask input
            # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
            pix_feat = current_vision_feats[-1].permute(1, 2, 0)
            pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
            sam_outputs = self._use_mask_as_output(
                pix_feat, high_res_features, mask_inputs
            )
        else:
            # fuse the visual feature with previous memory features in the memory bank
            pix_feat_with_mem = self._prepare_memory_conditioned_features(
                frame_idx=frame_idx,
                is_init_cond_frame=is_init_cond_frame,
                current_vision_feats=current_vision_feats[-1:],
                current_vision_pos_embeds=current_vision_pos_embeds[-1:],
                feat_sizes=feat_sizes[-1:],
                output_dict=output_dict,
                num_frames=num_frames,
                track_in_reverse=track_in_reverse,
            )
            # apply SAM-style segmentation head
            # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
            # e.g. in demo where such logits come from earlier interaction instead of correction sampling
            # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
            if prev_sam_mask_logits is not None:
                assert point_inputs is not None and mask_inputs is None
                mask_inputs = prev_sam_mask_logits
            multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
            sam_outputs = self._forward_sam_heads(
                backbone_features=pix_feat_with_mem,
                point_inputs=point_inputs,
                mask_inputs=mask_inputs,
                high_res_features=high_res_features,
                multimask_output=multimask_output,
            )
        (
            _,
            _,
            _,
            low_res_masks,
            high_res_masks,
            obj_ptr,
            _,
        ) = sam_outputs

        current_out["pred_masks"] = low_res_masks
        current_out["pred_masks_high_res"] = high_res_masks
        current_out["obj_ptr"] = obj_ptr

        # Finally run the memory encoder on the predicted mask to encode
        # it into a new memory feature (that can be used in future frames)
        if run_mem_encoder and self.num_maskmem > 0:
            high_res_masks_for_mem_enc = high_res_masks
            maskmem_features, maskmem_pos_enc = self._encode_new_memory(
                current_vision_feats=current_vision_feats,
                feat_sizes=feat_sizes,
                pred_masks_high_res=high_res_masks_for_mem_enc,
                is_mask_from_pts=(point_inputs is not None),
            )
            current_out["maskmem_features"] = maskmem_features
            current_out["maskmem_pos_enc"] = maskmem_pos_enc
        else:
            current_out["maskmem_features"] = None
            current_out["maskmem_pos_enc"] = None

        return current_out

    def _use_multimask(self, is_init_cond_frame, point_inputs):
        """Whether to use multimask output in the SAM head."""
        num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
        multimask_output = (
            self.multimask_output_in_sam
            and (is_init_cond_frame or self.multimask_output_for_tracking)
            and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
        )
        return multimask_output

    def _apply_non_overlapping_constraints(self, pred_masks):
        """
        Apply non-overlapping constraints to the object scores in pred_masks. Here we
        keep only the highest scoring object at each spatial location in pred_masks.
        """
        batch_size = pred_masks.size(0)
        if batch_size == 1:
            return pred_masks

        device = pred_masks.device
        # "max_obj_inds": object index of the object with the highest score at each location
        max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
        # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
        batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
        keep = max_obj_inds == batch_obj_inds
        # suppress overlapping regions' scores below -10.0 so that the foreground regions
        # don't overlap (here sigmoid(-10.0)=4.5398e-05)
        pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
        return pred_masks
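Note: the frame-index arithmetic in `_prepare_memory_conditioned_features` is easiest to see in isolation. The helper below is a standalone illustration written for this listing (it is not part of the uploaded code); it reproduces the same selection rule for forward tracking only, where the immediately preceding frame is always kept and the remaining `num_maskmem - 2` memories are taken from every `r`-th frame before it.

# Standalone sketch of the memory-frame selection rule used above (forward tracking).
def select_memory_frame_indices(frame_idx: int, num_maskmem: int = 7, r: int = 1):
    indices = []
    for t_pos in range(1, num_maskmem):
        t_rel = num_maskmem - t_pos  # how many frames before the current frame
        if t_rel == 1:
            # the latest memory is always the immediately preceding frame
            prev_frame_idx = frame_idx - 1
        else:
            # snap (frame_idx - 2) down to a multiple of r, then step back in r-sized strides
            prev_frame_idx = ((frame_idx - 2) // r) * r - (t_rel - 2) * r
        indices.append((t_pos, prev_frame_idx))
    return indices

# With the defaults (num_maskmem=7, r=1), frame 10 attends to frames 4..9:
print(select_memory_frame_indices(10))        # [(1, 4), (2, 5), ..., (6, 9)]
# With r=5 (the XMem/Cutie stride) the memories are sparser: -15, -10, -5, 0, 5 and 9;
# negative indices simply have no entry in `output_dict` and are skipped as padding.
print(select_memory_frame_indices(10, r=5))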