Merge branch 'main' of https://github.com/MaxReimann/WISE-Editing into main
- README.md +11 -0
- Whitebox_style_transfer.py +8 -4
- pages/1_🎨_Apply_preset.py +3 -1
- pages/2_🖌️_Local_edits.py +2 -0
- pages/4_📖_Readme.py +4 -4
- tasks.py +2 -2
README.md
CHANGED
@@ -40,3 +40,14 @@ Then run the streamlit app using `streamlit run Whitebox_style_transfer.py`
 ### Further notes
 Pull Requests and further improvements welcome.
 Please note that the shown effect is a minimal pipeline in terms of stylization capability, the much more feature-rich oilpaint and watercolor pipelines we show in our ECCV paper cannot be open-sourced due to IP reasons.
+
+``` latex
+@misc{loetzsch2022wise,
+      title={WISE: Whitebox Image Stylization by Example-based Learning},
+      author={Lötzsch, Winfried and Reimann, Max and Büssemeyer, Martin and Semmo, Amir and Döllner, Jürgen and Trapp, Matthias},
+      year={2022},
+      eprint={2207.14606},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
+```
Whitebox_style_transfer.py
CHANGED
@@ -41,6 +41,9 @@ if "click_counter" not in st.session_state:
 
 if "action" not in st.session_state:
     st.session_state["action"] = ""
+
+if "user" not in st.session_state:
+    st.session_state["user"] = hash(time.time())
 
 content_urls = [
     {
@@ -129,7 +132,7 @@ def img_choice_panel(imgtype, urls, default_choice, expanded):
         st.write("uploaded.")
 
     last_clicked = last_image_clicked(type=imgtype)
-    print("last_clicked", last_clicked, "clicked", clicked, "action", st.session_state["action"] )
+    print(st.session_state["user"], " last_clicked", last_clicked, "clicked", clicked, "action", st.session_state["action"] )
     if not upload_pressed and clicked != "": # trigger when no file uploaded
         if last_clicked != clicked: # only activate when content was actually clicked
             store_img_from_id(clicked, urls, imgtype)
@@ -146,6 +149,7 @@ def optimize(effect, preset, result_image_placeholder):
     style = st.session_state["Style_im"]
     st.session_state["optimize_next"] = False
     with st.spinner(text="Optimizing parameters.."):
+        print("optimizing for user", st.session_state["user"])
         if HUGGING_FACE:
             optimize_on_server(content, style, result_image_placeholder)
         else:
@@ -245,11 +249,11 @@ if st.session_state["action"] == "uploaded":
     content_img, _vp = optimize_next(result_image_placeholder)
 elif st.session_state["action"] in ("switch_page_from_local_edits", "switch_page_from_presets", "slider_change") or \
         content_id == "uploaded" or style_id == "uploaded":
-    print("restore param")
+    print(st.session_state["user"], "restore param")
     _vp = st.session_state["result_vp"]
     content_img = st.session_state["effect_input"]
 else:
-    print("load_params")
+    print(st.session_state["user"], "load_params")
     content_img, _vp = load_params(content_id, style_id)#, effect)
 
 vp = torch.clone(_vp)
@@ -284,7 +288,7 @@ with coll2:
 
     others_idx = set(range(len(effect.vpd.vp_ranges))) - set([effect.vpd.name2idx[name] for name in sum(params_mapping.values(), [])])
     others_names = [effect.vpd.vp_ranges[i][0] for i in sorted(list(others_idx))]
-    other_param = st.selectbox("Other parameters: ", others_names)
+    other_param = st.selectbox("Other parameters: ", ["hueShift"] + [n for n in others_names if n != "hueShift"] )
     create_slider(other_param)
 
 
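Most of the changes in this file tag the existing debug prints with a per-session identifier. A minimal sketch of the pattern (not the repository's exact code; the `log` helper is hypothetical): `st.session_state` persists across Streamlit reruns within a single browser session, so hashing the timestamp of the first run yields a stable id for that visitor.

```python
import time
import streamlit as st

# Sketch of the per-session logging pattern used in this commit.
# st.session_state survives reruns within one browser session, so the id
# is created once per visitor and reused on every subsequent rerun.
if "user" not in st.session_state:
    st.session_state["user"] = hash(time.time())  # coarse per-session id

def log(*args):
    # Hypothetical helper: prefix server-side prints with the session id
    # so concurrent users can be told apart in the Space's logs.
    print(st.session_state["user"], *args)

log("restore param")
```

The `st.selectbox` change in the last hunk only reorders the options: Streamlit preselects the first entry (the `index` argument defaults to 0), so putting `"hueShift"` first makes it the default choice in the "Other parameters" dropdown.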
pages/1_🎨_Apply_preset.py
CHANGED
@@ -116,8 +116,10 @@ with torch.no_grad():
     img_res = Image.fromarray((torch_to_np(result_cuda) * 255.0).astype(np.uint8))
     coll2.image(img_res)
 
+print(st.session_state["user"], " edited preset")
+
 apply_btn = st.sidebar.button("Apply")
 if apply_btn:
     st.session_state["result_vp"] = vp
 
-st.info("Note: Press apply to make changes permanent")
+st.info("Note: Press apply to make changes permanent")
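Note that this page reads `st.session_state["user"]`, which is only initialized on the main `Whitebox_style_transfer.py` page. A hypothetical guard (not part of this commit) against a visitor opening the subpage directly could look like this:

```python
import time
import streamlit as st

# Hypothetical guard, not in the diff: if this subpage is opened directly,
# the "user" key set by the main page does not exist yet and the print
# below would raise a KeyError without this fallback.
if "user" not in st.session_state:
    st.session_state["user"] = hash(time.time())

print(st.session_state["user"], " edited preset")
```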
pages/2_🖌️_Local_edits.py
CHANGED
@@ -222,6 +222,8 @@ if st.session_state.local_edit_action in ("slider", "param_change", "init"):
     print("set redraw")
     st.session_state.local_edit_action = "redraw"
 
+if "objects" in canvas_result.json_data and canvas_result.json_data["objects"] != []:
+    print(st.session_state["user"], " edited local param canvas")
 
 print("plot masks")
 texts = []
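The added check inspects the drawing canvas's return value before logging. As a rough sketch, assuming the page uses the `streamlit-drawable-canvas` component (which the `canvas_result` name suggests), `json_data["objects"]` holds the list of strokes drawn so far, so the print only fires once the user has actually drawn something:

```python
import streamlit as st
from streamlit_drawable_canvas import st_canvas

# Sketch: st_canvas returns a result whose json_data holds the fabric.js
# scene; "objects" is the list of strokes the user has drawn.
canvas_result = st_canvas(stroke_width=10, height=400, width=400, key="canvas")

user = st.session_state["user"] if "user" in st.session_state else "unknown"
if canvas_result.json_data is not None and canvas_result.json_data.get("objects"):
    print(user, " edited local param canvas")
```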
pages/4_📖_Readme.py
CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
 
 st.title("White-box Style Transfer Editing")
 
-print("
+print(st.session_state["user"], " opened readme")
 st.markdown("""
 This app demonstrates the editing capabilities of the White-box Style Transfer Editing (WISE) framework.
 It optimizes the parameters of classical image processing filters to match a given style image.
@@ -21,9 +21,9 @@ st.markdown("""
 - Strokes stay on the canvas unless manually deleted by clicking the trash button. To remove them from the canvas after each stroke, tick the corresponding checkbox in the sidebar.
 
 ### Links & Paper
-[Project page](https://ivpg.hpi3d.de/wise/),
-[arxiv link](https://arxiv.org/abs/2207.14606)
-[demo code](https://github.com/MaxReimann/WISE-Editing)
+**[Project page](https://ivpg.hpi3d.de/wise/),
+[arxiv link](https://arxiv.org/abs/2207.14606),
+[demo code](https://github.com/MaxReimann/WISE-Editing)**
 
 "WISE: Whitebox Image Stylization by Example-based Learning", by Winfried Lötzsch*, Max Reimann*, Martin Büßemeyer, Amir Semmo, Jürgen Döllner, Matthias Trapp, in ECCV 2022
 
tasks.py
CHANGED
@@ -108,7 +108,7 @@ def optimize_on_server(content, style, result_image_placeholder):
     style = pil_resize_long_edge_to(style, 1024)
     style.save(style_path)
     files = {'style-image': open(style_path, "rb"), "content-image": open(content_path, "rb")}
-    print("start-optimizing")
+    print("start-optimizing. Time: ", datetime.datetime.now())
     url = WORKER_URL + "/upload"
     task_id_res = requests.post(url, files=files)
     if task_id_res.status_code != 200:
@@ -140,4 +140,4 @@ def optimize_params(effect, preset, content, style, result_image_placeholder):
         write_video=False, base_dir=base_dir,
         iter_callback=lambda i: progress_bar.progress(
             float(i) / ST_CONFIG["n_iterations"]))
-    st.session_state["effect_input"], st.session_state["result_vp"] = content_img_cuda.detach(), vp.cuda().detach()
+    st.session_state["effect_input"], st.session_state["result_vp"] = content_img_cuda.detach(), vp.cuda().detach()
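The new timestamped log line calls `datetime.datetime.now()`, which assumes `datetime` is importable in `tasks.py`; the import is not visible in the hunk. If the module did not already import it, the minimal addition would be roughly:

```python
import datetime

# Assumed to exist at the top of tasks.py; the diff only shows the call site.
print("start-optimizing. Time: ", datetime.datetime.now())
```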