Commit 3a68fa5
sunshineatnoon committed
Parent(s): 89c0378
editing
Files changed:
- 04-18/exp_args.txt +60 -0
- README.md +1 -1
- app.py +177 -62
- data/masks/124084_0_label.png +0 -0
- libs/__pycache__/__init__.cpython-37.pyc +0 -0
- libs/__pycache__/__init__.cpython-38.pyc +0 -0
- libs/__pycache__/flow_transforms.cpython-37.pyc +0 -0
- libs/__pycache__/flow_transforms.cpython-38.pyc +0 -0
- libs/__pycache__/nnutils.cpython-37.pyc +0 -0
- libs/__pycache__/nnutils.cpython-38.pyc +0 -0
- libs/__pycache__/options.cpython-37.pyc +0 -0
- libs/__pycache__/options.cpython-38.pyc +0 -0
- libs/__pycache__/test_base.cpython-37.pyc +0 -0
- libs/__pycache__/test_base.cpython-38.pyc +0 -0
- libs/__pycache__/utils.cpython-37.pyc +0 -0
- libs/__pycache__/utils.cpython-38.pyc +0 -0
- models/week0417/__pycache__/loss.cpython-37.pyc +0 -0
- models/week0417/__pycache__/model.cpython-37.pyc +0 -0
- models/week0417/__pycache__/nnutils.cpython-37.pyc +0 -0
- models/week0417/__pycache__/taming_blocks.cpython-37.pyc +0 -0
- swapae/models/__pycache__/__init__.cpython-37.pyc +0 -0
- swapae/models/__pycache__/base_model.cpython-37.pyc +0 -0
- swapae/models/networks/__pycache__/__init__.cpython-37.pyc +0 -0
- swapae/models/networks/__pycache__/base_network.cpython-37.pyc +0 -0
- swapae/models/networks/__pycache__/stylegan2_layers.cpython-37.pyc +0 -0
- swapae/models/networks/stylegan2_op/__pycache__/__init__.cpython-37.pyc +0 -0
- swapae/models/networks/stylegan2_op/__pycache__/fused_act.cpython-37.pyc +0 -0
- swapae/models/networks/stylegan2_op/__pycache__/upfirdn2d.cpython-37.pyc +0 -0
- swapae/util/__pycache__/__init__.cpython-37.pyc +0 -0
- swapae/util/__pycache__/html.cpython-37.pyc +0 -0
- swapae/util/__pycache__/iter_counter.cpython-37.pyc +0 -0
- swapae/util/__pycache__/metric_tracker.cpython-37.pyc +0 -0
- swapae/util/__pycache__/util.cpython-37.pyc +0 -0
- swapae/util/__pycache__/visualizer.cpython-37.pyc +0 -0
- tmp/0.png +0 -0
- tmp/1.png +0 -0
- tmp/2.png +0 -0
- tmp/3.png +0 -0
- tmp/4.png +0 -0
- tmp/5.png +0 -0
- tmp/6.png +0 -0
- tmp/7.png +0 -0
- tmp/8.png +0 -0
- tmp/9.png +0 -0
04-18/exp_args.txt
ADDED
@@ -0,0 +1,60 @@
+add_clustering_epoch: 0 [default: 1000]
+add_gcn_epoch: 0 [default: None]
+add_self_loops: 1
+add_texture_epoch: 0 [default: 1000]
+batch_size: 1
+beta: 0.999
+config_file: models/week0417/json/single_scale_grouping_ft.json
+crop_size: 224
+data_path: images
+dataset: dataset [default: None]
+dec_input_mode: sine_wave_noise [default: None]
+display_freq: 100
+exp_name: 04-18/ [default: None]
+gumbel: 0
+hidden_dim: 256 [default: None]
+img_path: None
+l1_loss_wt: 1.0
+lambda_GAN: 1 [default: None]
+lambda_L1: 1 [default: None]
+lambda_style_loss: 1.0 [default: None]
+local_rank: None
+log_freq: 10
+lr: 5e-05 [default: 0.1]
+lr_decay_freq: 3000
+maxIter: 1000
+model_name: model [default: None]
+momentum: 0.5
+nChannel: 100
+nConv: 2
+n_cluster: 10 [default: None]
+n_layers_D: 3 [default: None]
+nepochs: 20 [default: None]
+netE_nc_steepness: 2.0
+netE_num_downsampling_gl: 2
+netE_num_downsampling_sp: 4
+netE_scale_capacity: 1.0
+netG_num_base_resnet_layers: 2
+netG_resnet_ch: 256
+netG_scale_capacity: 1.0
+no_ganFeat_loss: False
+num_D: 2 [default: None]
+num_classes: 0
+out_dir: ./04-18/ [default: None]
+patch_size: 40
+perceptual_loss_wt: 1.0
+pretrained_ae: /home/xli/WORKDIR/07-16/transformer/cpk.pth
+pretrained_path: /home/xtli/WORKDIR/04-15/single_scale_grouping_resume/cpk.pth [default: None]
+project_name: test_time
+save_freq: 1000 [default: 2000]
+sine_weight: 1 [default: None]
+sp_num: None
+spatial_code_ch: 8
+spatial_code_dim: 32 [default: 256]
+temperature: 23 [default: 1]
+test_time: 0
+texture_code_ch: 256
+use_slic: True
+use_wandb: False
+work_dir: ./
+workers: 4
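These `name: value [default: x]` lines read like a dump of the parsed command-line options, with a `[default: ...]` suffix wherever the run overrides the parser default. A minimal sketch of how such a dump can be written with argparse (the `dump_args` helper below is hypothetical, not code from this repo):

import argparse

# Hypothetical helper: write each parsed option to a file, marking values
# that differ from the parser's default, in the format seen above.
def dump_args(parser: argparse.ArgumentParser, args: argparse.Namespace, path: str) -> None:
    with open(path, "w") as f:
        for name in sorted(vars(args)):
            value = getattr(args, name)
            default = parser.get_default(name)
            suffix = f" [default: {default}]" if value != default else ""
            f.write(f"{name}: {value}{suffix}\n")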
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
 colorFrom: gray
 colorTo: purple
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.10.0
 app_file: app.py
 pinned: false
 ---
app.py
CHANGED
@@ -69,10 +69,10 @@ class Tester(TesterBase):
     def to_pil(self, tensor):
         return transforms.ToPILImage()(tensor.cpu().squeeze().clamp(0.0, 1.0)).convert("RGB")

-    def
+    def display_synthesis(self):
         with st.spinner('Running...'):
             with torch.no_grad():
-                grouping_mask = self.
+                grouping_mask = self.model_forward_synthesis(self.data, self.slic, return_type = 'grouping')

             data = (self.data + 1) / 2.0

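Note the unchanged `to_pil` helper above: the tensor is clamped to [0, 1] before conversion because torchvision's `ToPILImage` multiplies float inputs by 255 and casts to uint8, so out-of-range values would distort the image. In isolation:

import torch
from torchvision import transforms

# Values outside [0, 1] must be clamped before ToPILImage scales by 255.
t = torch.rand(3, 224, 224) * 1.2 - 0.1          # deliberately out of range
pil = transforms.ToPILImage()(t.clamp(0.0, 1.0)).convert("RGB")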
@@ -124,7 +124,7 @@ class Tester(TesterBase):
             tex_size = st.slider('', 0, 1000, 256)
             tex_size = (tex_size // 8) * 8
             with torch.no_grad():
-                tex = self.
+                tex = self.model_forward_synthesis(self.data, self.slic, tex_idx = tex_idx, tex_size = tex_size, return_type = 'tex')
             col1, col2, col3, col4 = st.columns([1, 1, 4, 1])
             with col1:
                 st.markdown("")
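The slider value is snapped down to a multiple of 8 before the forward pass, presumably because the decoder input grid is built at 1/8 of the output resolution (`H = tex_size // 8` in `model_forward_synthesis` below). For example:

tex_size = 250
tex_size = (tex_size // 8) * 8   # 248: the largest multiple of 8 not exceeding 250
assert tex_size % 8 == 0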
@@ -140,33 +140,159 @@ class Tester(TesterBase):
         with col4:
             st.markdown("")
             st.markdown('<p class="big-font">You can choose another image from the examplar images on the top and start again!</p>', unsafe_allow_html=True)
-            #torch.cuda.empty_cache()

-
-
-
-
+    def model_forward_synthesis(self, rgb_img, slic, epoch = 1000, test_time = False,
+                                test = True, tex_idx = None, tex_size = 256,
+                                return_type = 'tex', fill_idx = None, remove_idx = None):
+        args = self.args
+        B, _, imgH, imgW = rgb_img.shape
+
+        # Encoder: img (B, 3, H, W) -> feature (B, C, imgH//8, imgW//8)
+        conv_feat, _ = self.model.enc(rgb_img)
+        B, C, H, W = conv_feat.shape
+
+        # Texture code for each superpixel
+        tex_code = self.model.ToTexCode(conv_feat)
+
+        code = F.interpolate(tex_code, size = (imgH, imgW), mode = 'bilinear', align_corners = False)
+        pool_code = poolfeat(code, slic, avg = True)
+
+        prop_code, sp_assign, conv_feats = self.model.gcn(pool_code, slic, (args.add_clustering_epoch <= epoch))
+        softmax = F.softmax(sp_assign * args.temperature, dim = 1)
+        if return_type == 'grouping':
+            return torch.argmax(sp_assign.cpu(), dim = 1)
+
+        tex_seg = poolfeat(conv_feats, softmax, avg = True)
+        seg = label2one_hot_torch(torch.argmax(softmax, dim = 1).unsqueeze(1), C = softmax.shape[1])
+
+        sampled_code = tex_seg[:, tex_idx, :]
+        rec_tex = sampled_code.view(1, -1, 1, 1).repeat(1, 1, tex_size, tex_size)
+        sine_wave = self.model.get_sine_wave(rec_tex, 'rec')
+        H = tex_size // 8; W = tex_size // 8
+        noise = torch.randn(B, self.model.sine_wave_dim, H, W).to(tex_code.device)
+        dec_input = torch.cat((sine_wave, noise), dim = 1)
+
+        weight = self.model.ChannelWeight(rec_tex)
+        weight = F.adaptive_avg_pool2d(weight, output_size = (1)).view(weight.shape[0], -1, 1, 1)
+        weight = torch.sigmoid(weight)
+        dec_input *= weight
+
+        rep_rec = self.model.G(dec_input, rec_tex)
+        rep_rec = (rep_rec + 1) / 2.0
+        return rep_rec
+
+    def display_editing(self):
+        with st.spinner('Running...'):
+            with torch.no_grad():
+                grouping_mask = self.model_forward_editing(self.data, self.slic, return_type = 'grouping')
+
+            data = (self.data + 1) / 2.0
+
+            seg = grouping_mask.view(-1, 1, args.crop_size, args.crop_size)
+            color_vq = self.draw_color_seg(seg)
+            color_vq = color_vq * 0.8 + data.cpu() * 0.2
+
+            st.markdown('<p class="big-font">Given the image you chose, our model decomposes the image into ten texture segments, each depicts one kind of texture in the image.</p>', unsafe_allow_html=True)
+            col1, col2, col3, col4 = st.columns(4)
+            with col1:
+                st.markdown("")
+
+            with col2:
+                st.markdown("Chosen image")
+                st.image(self.to_pil(data))
+
+            with col3:
+                st.markdown("Grouping mask")
+                st.image(self.to_pil(color_vq))
+
+            with col4:
+                st.markdown("")
+
+            seg_onehot = label2one_hot_torch(seg, C = 10)
+            parts = data.cpu() * seg_onehot.squeeze().unsqueeze(1)
+
+            st.markdown('<p class="big-font">We show all texture segments below.</p>', unsafe_allow_html=True)
+            tmp_img_list = []
+            for i in range(parts.shape[0]):
+                part_img = self.to_pil(parts[i])
+                out_path = 'tmp/{}.png'.format(i)
+                part_img.save(out_path)
+
+                with open(out_path, "rb") as image:
+                    encoded = base64.b64encode(image.read()).decode()
+                tmp_img_list.append(f"data:image/jpeg;base64,{encoded}")
+
+            tex_idx = clickable_images(
                 tmp_img_list,
                 titles=[f"Group #{str(i)}" for i in range(len(tmp_img_list))],
                 div_style={"display": "flex", "justify-content": "center", "flex-wrap": "wrap"},
-                img_style={"margin": "5px", "height": "
-                key=
+                img_style={"margin": "5px", "height": "150px"},
+                key=2
             )
-
-
-
-
+
+            st.markdown('<p class="big-font">Choose the texture segment for each group in the given mask below.</p>', unsafe_allow_html=True)
+            given_mask = Image.open('data/masks/124084_0_label.png').convert("L")
+            given_mask = np.asarray(given_mask)
+            given_mask = torch.from_numpy(given_mask)
+            H, W = given_mask.shape[0], given_mask.shape[1]
+            given_mask = label2one_hot_torch(given_mask.view(1, 1, H, W), C = (given_mask.max()+1))
+            mask_img_list = []
+            for i in range(given_mask.shape[1]):
+                part_img = self.to_pil(given_mask[0, i])
+                out_path = 'tmp/{}.png'.format(i)
+                part_img.save(out_path)
+
+                with open(out_path, "rb") as image:
+                    encoded = base64.b64encode(image.read()).decode()
+                mask_img_list.append(f"data:image/jpeg;base64,{encoded}")
+
+            part_idx = clickable_images(
+                mask_img_list,
                 div_style={"display": "flex", "justify-content": "center", "flex-wrap": "wrap"},
-                img_style={"margin": "5px", "height": "
-                key=
+                img_style={"margin": "5px", "height": "150px"},
+                key=1
             )
-            rec = self.model_forward(self.data, self.slic, return_type = 'editing', fill_idx = fill_idx, remove_idx = remove_idx)
-            st.image(self.to_pil(rec))
-            """

-
+            cols = st.columns(len(mask_img_list))
+            options = []
+            for i, col in enumerate(cols):
+                with col:
+                    option = st.selectbox(
+                        "",
+                        ([str(ii) for ii in range(10)]),
+                        key = i)
+                    options.append(int(option))
+            print(options)
+
+            if len(options) > 0:
+                with st.spinner('Running...'):
+                    st.markdown('<p class="big-font">Edited image is shown below.</p>', unsafe_allow_html=True)
+                    #tex_size = st.slider('', 0, 1000, 256)
+                    #tex_size = (tex_size // 8) * 8
+                    with torch.no_grad():
+                        edited = self.model_forward_editing(self.data, self.slic, options=options, given_mask=given_mask, return_type = 'edited')
+                    col1, col2, col3, col4 = st.columns([1, 1, 4, 1])
+                    with col1:
+                        st.markdown("")
+
+                    with col2:
+                        st.markdown("Input image")
+                        img = F.interpolate(self.data, size = edited.shape[-2:], mode = 'bilinear', align_corners = False)
+                        st.image(self.to_pil((img + 1) / 2.0))
+                        print(img.shape, edited.shape)
+
+                    with col3:
+                        st.markdown("Synthesized texture image")
+                        st.image(self.to_pil(edited))
+
+                    with col4:
+                        st.markdown("")
+                        st.markdown('<p class="big-font">You can choose another image from the examplar images on the top and start again!</p>', unsafe_allow_html=True)
+
+    def model_forward_editing(self, rgb_img, slic, epoch = 1000, test_time = False,
                              test = True, tex_idx = None, tex_size = 256,
-                             return_type = '
+                             return_type = 'edited', fill_idx = None, remove_idx = None,
+                             options = None, given_mask = None):
         args = self.args
         B, _, imgH, imgW = rgb_img.shape

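`poolfeat` and `label2one_hot_torch` come from this repo's `libs` helpers and are not shown in the diff. As a rough sketch of the assumed behavior (not the project's actual implementation), average-pooling per-pixel features over a soft assignment map can be written as:

import torch

# Assumed behavior of poolfeat(feat, assign, avg=True):
# feat:   (B, C, H, W) per-pixel features
# assign: (B, K, H, W) soft assignment, summing to 1 over K at each pixel
def pool_by_assignment(feat: torch.Tensor, assign: torch.Tensor) -> torch.Tensor:
    B, C = feat.shape[:2]
    K = assign.shape[1]
    f = feat.reshape(B, C, -1)                          # (B, C, H*W)
    a = assign.reshape(B, K, -1)                        # (B, K, H*W)
    num = torch.einsum("bcn,bkn->bkc", f, a)            # weighted feature sum per group
    den = a.sum(dim=-1).unsqueeze(-1).clamp_min(1e-8)   # total weight per group
    return num / den                                    # (B, K, C): one mean code per group

This matches how `tex_seg` is indexed afterwards: `tex_seg[:, tex_idx, :]` selects one pooled code vector per texture group.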
@@ -185,47 +311,29 @@ class Tester(TesterBase):
         if return_type == 'grouping':
             return torch.argmax(sp_assign.cpu(), dim = 1)

-
         tex_seg = poolfeat(conv_feats, softmax, avg = True)
         seg = label2one_hot_torch(torch.argmax(softmax, dim = 1).unsqueeze(1), C = softmax.shape[1])

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        remove_mask = seg[:, remove_idx:remove_idx+1]
-        fill_tex = tex_seg[:, fill_idx, :].view(1, -1, 1, 1).repeat(1, 1, imgH, imgW)
-        rec_tex = rec_tex * (1 - remove_mask) + fill_tex * remove_mask
-
-        sine_wave = self.model.get_sine_wave(rec_tex, 'rec')
-        H = imgH // 8; W = imgW // 8
-        noise = torch.randn(B, self.model.sine_wave_dim, H, W).to(tex_code.device)
-        dec_input = torch.cat((sine_wave, noise), dim = 1)
-        weight = self.model.ChannelWeight(rec_tex)
-        weight = F.adaptive_avg_pool2d(weight, output_size = (1)).view(weight.shape[0], -1, 1, 1)
-        weight = torch.sigmoid(weight)
-        dec_input *= weight
-
-        rep_rec = self.model.G(dec_input, rec_tex)
-        rep_rec = (rep_rec + 1) / 2.0
-        return rep_rec
-
+        given_mask = F.interpolate(given_mask, size = (512, 512), mode = 'bilinear', align_corners = False)
+        rec_tex = torch.zeros((1, tex_seg.shape[-1], 512, 512))
+        for i in range(given_mask.shape[1]):
+            label = options[i]
+            code = tex_seg[0, label, :].view(1, -1, 1, 1).repeat(1, 1, 512, 512)
+            rec_tex += code * given_mask[:, i:i+1]
+        tex_size = 512
+        sine_wave = self.model.get_sine_wave(rec_tex, 'rec')
+        H = tex_size // 8; W = tex_size // 8
+        noise = torch.randn(B, self.model.sine_wave_dim, H, W).to(tex_code.device)
+        dec_input = torch.cat((sine_wave, noise), dim = 1)
+
+        weight = self.model.ChannelWeight(rec_tex)
+        weight = F.adaptive_avg_pool2d(weight, output_size = (1)).view(weight.shape[0], -1, 1, 1)
+        weight = torch.sigmoid(weight)
+        dec_input *= weight
+
+        rep_rec = self.model.G(dec_input, rec_tex)
+        rep_rec = (rep_rec + 1) / 2.0
+        return rep_rec

     def load_data(self, data_path):
         rgb_img = Image.open(data_path)
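The editing forward pass drops the old fill/remove logic and instead paints every region of a user-supplied mask with the texture code chosen for it. Condensed, under the shape assumptions visible above (`tex_seg` is `(1, K, C)`, `given_mask` is one-hot `(1, R, S, S)` after interpolation):

import torch

# Fill each mask region with one pooled texture code (sketch of the loop above).
def paint_codes(tex_seg: torch.Tensor, given_mask: torch.Tensor, options: list) -> torch.Tensor:
    _, R, S, _ = given_mask.shape
    C = tex_seg.shape[-1]
    rec_tex = torch.zeros(1, C, S, S)
    for i in range(R):
        code = tex_seg[0, options[i], :].view(1, C, 1, 1)   # region i's chosen code
        rec_tex = rec_tex + code * given_mask[:, i:i+1]     # broadcast over region mask
    return rec_tex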
@@ -253,12 +361,12 @@ class Tester(TesterBase):
         self.model = self.model.module
         return

+    """
     def test(self):
-        """ Test function
-        """
         #for iteration in tqdm(range(args.nsamples)):
         self.test_step(0)
         self.display(0, 'train')
+    """

 def main():
     #torch.cuda.empty_cache()
@@ -300,7 +408,14 @@ def main():
     tester.define_model()
     tester.load_data(img_path)
     tester.load_model(args.pretrained_path)
-
+    app_idx = st.selectbox('Please select between texture synthesis or editing',
+                           ["Texture Synthesis", "Texture Editing"])
+    if app_idx == 'Texture Editing':
+        st.header("Texture Editing")
+        tester.display_editing()
+    else:
+        st.header("Texture Synthesis")
+        tester.display_synthesis()

 if __name__ == '__main__':
     os.system("pip install torch-geometric==1.7.2")
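Throughout `display_editing`, images are handed to `clickable_images` as base64 data URIs, since the component expects image URLs rather than arrays (this appears to be the `st-clickable-images` Streamlit component; the exact package is an assumption). The encoding pattern, isolated:

import base64

# Turn a saved image file into an inline data URI for the image-grid component.
def to_data_uri(path: str, mime: str = "image/png") -> str:
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    return f"data:{mime};base64,{encoded}"

(The diff labels the URIs `image/jpeg` although the saved files are PNGs; browsers generally sniff the actual image type, so they still render.)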
data/masks/124084_0_label.png
ADDED
libs/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/__init__.cpython-37.pyc and b/libs/__pycache__/__init__.cpython-37.pyc differ
libs/__pycache__/__init__.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/__init__.cpython-38.pyc and b/libs/__pycache__/__init__.cpython-38.pyc differ
libs/__pycache__/flow_transforms.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/flow_transforms.cpython-37.pyc and b/libs/__pycache__/flow_transforms.cpython-37.pyc differ
libs/__pycache__/flow_transforms.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/flow_transforms.cpython-38.pyc and b/libs/__pycache__/flow_transforms.cpython-38.pyc differ
libs/__pycache__/nnutils.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/nnutils.cpython-37.pyc and b/libs/__pycache__/nnutils.cpython-37.pyc differ
libs/__pycache__/nnutils.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/nnutils.cpython-38.pyc and b/libs/__pycache__/nnutils.cpython-38.pyc differ
libs/__pycache__/options.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/options.cpython-37.pyc and b/libs/__pycache__/options.cpython-37.pyc differ
libs/__pycache__/options.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/options.cpython-38.pyc and b/libs/__pycache__/options.cpython-38.pyc differ
libs/__pycache__/test_base.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/test_base.cpython-37.pyc and b/libs/__pycache__/test_base.cpython-37.pyc differ
libs/__pycache__/test_base.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/test_base.cpython-38.pyc and b/libs/__pycache__/test_base.cpython-38.pyc differ
libs/__pycache__/utils.cpython-37.pyc
CHANGED
Binary files a/libs/__pycache__/utils.cpython-37.pyc and b/libs/__pycache__/utils.cpython-37.pyc differ
libs/__pycache__/utils.cpython-38.pyc
CHANGED
Binary files a/libs/__pycache__/utils.cpython-38.pyc and b/libs/__pycache__/utils.cpython-38.pyc differ
models/week0417/__pycache__/loss.cpython-37.pyc
CHANGED
Binary files a/models/week0417/__pycache__/loss.cpython-37.pyc and b/models/week0417/__pycache__/loss.cpython-37.pyc differ
models/week0417/__pycache__/model.cpython-37.pyc
CHANGED
Binary files a/models/week0417/__pycache__/model.cpython-37.pyc and b/models/week0417/__pycache__/model.cpython-37.pyc differ
models/week0417/__pycache__/nnutils.cpython-37.pyc
CHANGED
Binary files a/models/week0417/__pycache__/nnutils.cpython-37.pyc and b/models/week0417/__pycache__/nnutils.cpython-37.pyc differ
models/week0417/__pycache__/taming_blocks.cpython-37.pyc
CHANGED
Binary files a/models/week0417/__pycache__/taming_blocks.cpython-37.pyc and b/models/week0417/__pycache__/taming_blocks.cpython-37.pyc differ
swapae/models/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/swapae/models/__pycache__/__init__.cpython-37.pyc and b/swapae/models/__pycache__/__init__.cpython-37.pyc differ
swapae/models/__pycache__/base_model.cpython-37.pyc
CHANGED
Binary files a/swapae/models/__pycache__/base_model.cpython-37.pyc and b/swapae/models/__pycache__/base_model.cpython-37.pyc differ
swapae/models/networks/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/__pycache__/__init__.cpython-37.pyc and b/swapae/models/networks/__pycache__/__init__.cpython-37.pyc differ
swapae/models/networks/__pycache__/base_network.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/__pycache__/base_network.cpython-37.pyc and b/swapae/models/networks/__pycache__/base_network.cpython-37.pyc differ
swapae/models/networks/__pycache__/stylegan2_layers.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/__pycache__/stylegan2_layers.cpython-37.pyc and b/swapae/models/networks/__pycache__/stylegan2_layers.cpython-37.pyc differ
swapae/models/networks/stylegan2_op/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/stylegan2_op/__pycache__/__init__.cpython-37.pyc and b/swapae/models/networks/stylegan2_op/__pycache__/__init__.cpython-37.pyc differ
swapae/models/networks/stylegan2_op/__pycache__/fused_act.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/stylegan2_op/__pycache__/fused_act.cpython-37.pyc and b/swapae/models/networks/stylegan2_op/__pycache__/fused_act.cpython-37.pyc differ
swapae/models/networks/stylegan2_op/__pycache__/upfirdn2d.cpython-37.pyc
CHANGED
Binary files a/swapae/models/networks/stylegan2_op/__pycache__/upfirdn2d.cpython-37.pyc and b/swapae/models/networks/stylegan2_op/__pycache__/upfirdn2d.cpython-37.pyc differ
swapae/util/__pycache__/__init__.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/__init__.cpython-37.pyc and b/swapae/util/__pycache__/__init__.cpython-37.pyc differ
swapae/util/__pycache__/html.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/html.cpython-37.pyc and b/swapae/util/__pycache__/html.cpython-37.pyc differ
swapae/util/__pycache__/iter_counter.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/iter_counter.cpython-37.pyc and b/swapae/util/__pycache__/iter_counter.cpython-37.pyc differ
swapae/util/__pycache__/metric_tracker.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/metric_tracker.cpython-37.pyc and b/swapae/util/__pycache__/metric_tracker.cpython-37.pyc differ
swapae/util/__pycache__/util.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/util.cpython-37.pyc and b/swapae/util/__pycache__/util.cpython-37.pyc differ
swapae/util/__pycache__/visualizer.cpython-37.pyc
CHANGED
Binary files a/swapae/util/__pycache__/visualizer.cpython-37.pyc and b/swapae/util/__pycache__/visualizer.cpython-37.pyc differ
tmp/0.png
CHANGED
tmp/1.png
CHANGED
tmp/2.png
CHANGED
tmp/3.png
CHANGED
tmp/4.png
CHANGED
tmp/5.png
CHANGED
tmp/6.png
CHANGED
tmp/7.png
CHANGED
tmp/8.png
CHANGED
tmp/9.png
CHANGED