BertChristiaens commited on
Commit
5981c2d
1 Parent(s): 1c669b7
Files changed (3) hide show
  1. .gitattributes +1 -0
  2. app.py +9 -2
  3. explanation.py +22 -1
.gitattributes CHANGED
@@ -42,3 +42,4 @@ content/example_1.jpg filter=lfs diff=lfs merge=lfs -text
42
  content/output_0.png filter=lfs diff=lfs merge=lfs -text
43
  content/output_1.png filter=lfs diff=lfs merge=lfs -text
44
  content/Schermafbeelding[[:space:]]2023-05-05[[:space:]]om[[:space:]]14.29.39.png filter=lfs diff=lfs merge=lfs -text
 
 
42
  content/output_0.png filter=lfs diff=lfs merge=lfs -text
43
  content/output_1.png filter=lfs diff=lfs merge=lfs -text
44
  content/Schermafbeelding[[:space:]]2023-05-05[[:space:]]om[[:space:]]14.29.39.png filter=lfs diff=lfs merge=lfs -text
45
+ content/* filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -284,7 +284,7 @@ def main():
284
  "in almost 30 design styles. After fetching all these images, we started adding metadata such as "
285
  "captions (from the BLIP captioning model) and segmentation maps (from the HuggingFace UperNetForSemanticSegmentation model). "
286
  )
287
- st.write("For the gathering and inference of the metadata we used the Fondant framework (https://github.com/ml6team/fondant) made by ML6 (https://www.ml6.eu/), which is an open source "
288
  "data centric framework for data preparation. The pipeline used for training this controlnet will soon be available as an "
289
  "example pipeline within Fondant and can be easily adapted for building your own dataset."
290
  )
@@ -322,6 +322,7 @@ def main():
322
 
323
  st.session_state['example_image_0'] = Image.open("content/example_0.png")
324
  st.session_state['example_image_1'] = Image.open("content/example_1.jpg")
 
325
 
326
  col_im_0, col_im_1 = st.columns(2)
327
 
@@ -329,6 +330,10 @@ def main():
329
  st.image(st.session_state['example_image_0'], caption="Example image 1", use_column_width=True)
330
  if st.button("Use example 1"):
331
  move_image('example_image_0', 'initial_image', remove_state=True, rerun=True)
 
 
 
 
332
  with col_im_1:
333
  st.image(st.session_state['example_image_1'], caption="Example image 2", use_column_width=True)
334
  if st.button("Use example 2"):
@@ -367,4 +372,6 @@ def main():
367
  make_output_image()
368
 
369
  if __name__ == "__main__":
370
- main()
 
 
 
284
  "in almost 30 design styles. After fetching all these images, we started adding metadata such as "
285
  "captions (from the BLIP captioning model) and segmentation maps (from the HuggingFace UperNetForSemanticSegmentation model). "
286
  )
287
+ st.write("For the gathering and inference of the metadata we used the Fondant framework (https://github.com/ml6team/fondant) provided by ML6 (https://www.ml6.eu/), which is an open source "
288
  "data centric framework for data preparation. The pipeline used for training this controlnet will soon be available as an "
289
  "example pipeline within Fondant and can be easily adapted for building your own dataset."
290
  )
 
322
 
323
  st.session_state['example_image_0'] = Image.open("content/example_0.png")
324
  st.session_state['example_image_1'] = Image.open("content/example_1.jpg")
325
+ st.session_state['example_image_2'] = Image.open("content/example_2.jpg")
326
 
327
  col_im_0, col_im_1 = st.columns(2)
328
 
 
330
  st.image(st.session_state['example_image_0'], caption="Example image 1", use_column_width=True)
331
  if st.button("Use example 1"):
332
  move_image('example_image_0', 'initial_image', remove_state=True, rerun=True)
333
+
334
+ st.image(st.session_state['example_image_2'], caption="Example image 3", use_column_width=True)
335
+ if st.button("Use example 3"):
336
+ move_image('example_image_2', 'initial_image', remove_state=True, rerun=True)
337
  with col_im_1:
338
  st.image(st.session_state['example_image_1'], caption="Example image 2", use_column_width=True)
339
  if st.button("Use example 2"):
 
372
  make_output_image()
373
 
374
  if __name__ == "__main__":
375
+ main()
376
+
377
+
explanation.py CHANGED
@@ -27,4 +27,25 @@ def make_regeneration_explanation():
27
  st.image("content/regen_example.png", caption="Room where all concepts except for 'bed', 'lamp', 'table' are regenerated")
28
 
29
  def make_segmentation_explanation():
30
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  st.image("content/regen_example.png", caption="Room where all concepts except for 'bed', 'lamp', 'table' are regenerated")
28
 
29
  def make_segmentation_explanation():
30
+ with st.expander("Segmentation mode", expanded=False):
31
+ st.write("In the segmentation mode, the user can use their imagination and the paint brush to place concepts in the image. "
32
+ "In the left sidebar, you can first find the high level category of the concept you want to add, such as 'lighting', 'floor', etc. "
33
+ "After selecting the category, you can select the specific concept you want to add in the 'Choose a color' dropdown. "
34
+ "This will change the color of the paint brush, which you can then use to draw on the input image. "
35
+ "The model will then regenerate the image with the concepts you have drawn and leave the rest of the image unchanged. "
36
+ )
37
+ st.image("content/sidebar segmentation.png", caption="Sidebar with segmentation options", width=300)
38
+ st.write("You can choose the freedraw mode which gives you a pencil of a certain (chosen) width or the polygon mode. With the polygon mode you can click to add a point to the polygon and close the polygon by right clicking. ")
39
+ st.write("Important: "
40
+ "it's not easy to draw a good segmentation mask. This is because you need to keep in mind the perspective of the room and the exact "
41
+ "shape of the object you want to draw within this perspective. Controlnet will follow your segmentation mask pretty well, so "
42
+ "a non-natural object shape will sometimes result in weird outputs. However, give it a try and see what you can do! "
43
+ )
44
+ st.image("content/segmentation window.png", caption="Example of a segmentation mask drawn on the input image to add a window to the room")
45
+ st.write("Tip: ")
46
+ st.write("In the concepts dropdown, you can select 'keep background' (which is a white color). Everything drawn in this color will use "
47
+ "the original underlying segmentation mask. This can be useful to help with generating other objects, since you give the model some "
48
+ "freedom to generate outside the object borders."
49
+ )
50
+ st.image("content/keep background 1.png", caption="Image with a poster drawn on the wall.")
51
+ st.image("content/keep background 2.png", caption="Image with a poster drawn on the wall surrounded by 'keep background'.")