mfranzon committed on
Commit
d9dec00
1 Parent(s): 70c3bfe

first commit, magicboard on hf

README.md CHANGED
@@ -1,13 +1,32 @@
- ---
- title: MagicBoard
- emoji: 🐨
- colorFrom: red
- colorTo: green
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Magic Board
+ Magic Board is a proof of concept (PoC) that uses Stable Diffusion to generate surprising images starting from a sketch.
+
+ Thanks to the streamlit-drawable-canvas package, it is very simple to create the perfect environment to play with Stable Diffusion, starting from a quick drawing.
+ Select the number of steps and the strength to generate the final image.
+
+ I like minimalism, so for this experiment I tried to use a low-code approach, and to spend as little time as possible ;)
+
+ ## Some technicalities
+
+ The Stable Diffusion model is the latest one provided by the diffusers library.
+
+ If you want to use it without a local checkpoint, just get an access token from Hugging Face; follow [these](https://huggingface.co/CompVis/stable-diffusion-v1-4) instructions to get access.
+ Then copy and paste the token into [config.py](./config.py).
+
+ Install the requirements and you should be able to run:
+ ```
+ streamlit run main.py
+ ```
+
+ This version also has multi-language support, at the moment just English and Italian.
+
+ ![Screen dashboard](./magic.png)
+
+
+ The final result is:
+ <p style="text-align:center;"><img src="./result.png" width="200"/></p>
+
+
+ NOTE:
+
+ This implementation uses `torch.float16` due to low VRAM availability. Setting it to `torch.float32` is strongly suggested to speed up the computation. See [utils.py](./utils.py).
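The README above points at a config.py, but the utils.py added in this commit actually reads the token from an environment variable named `sd`. A minimal sketch of providing it from Python before launching the app; the token value shown is a placeholder:

```python
import os

# Placeholder value: create a real access token at https://huggingface.co/settings/tokens
# after accepting the terms on the CompVis/stable-diffusion-v1-4 model card.
os.environ["sd"] = "hf_xxx"  # utils.py picks it up via os.environ.get('sd')
```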
locales/base.pot ADDED
@@ -0,0 +1,56 @@
+ # SOME DESCRIPTIVE TITLE.
+ # Copyright (C) YEAR ORGANIZATION
+ # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+ #
+ msgid ""
+ msgstr ""
+ "Project-Id-Version: PACKAGE VERSION\n"
+ "POT-Creation-Date: 2022-09-06 00:56+0200\n"
+ "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+ "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+ "Language-Team: LANGUAGE <LL@li.org>\n"
+ "MIME-Version: 1.0\n"
+ "Content-Type: text/plain; charset=UTF-8\n"
+ "Content-Transfer-Encoding: 8bit\n"
+ "Generated-By: pygettext.py 1.5\n"
+
+ #: main.py:10
+ msgid "Choose your language"
+ msgstr ""
+
+ #: main.py:21
+ msgid "Drawing tool:"
+ msgstr ""
+
+ #: main.py:24
+ msgid "Stroke width: "
+ msgstr ""
+
+ #: main.py:25
+ msgid "Stroke color hex: "
+ msgstr ""
+
+ #: main.py:26
+ msgid "Background color hex: "
+ msgstr ""
+
+ #: main.py:27
+ msgid "Background image:"
+ msgstr ""
+
+ #: main.py:28
+ msgid "Number of Steps: "
+ msgstr ""
+
+ #: main.py:29
+ msgid "Strength value: "
+ msgstr ""
+
+ #: main.py:50
+ msgid "Prompt to generate your cool image"
+ msgstr ""
+
+ #: main.py:51
+ msgid "write you text here to improve the image with your favourite style"
+ msgstr ""
+
locales/it/LC_MESSAGES/base.mo ADDED
Binary file (1.06 kB)
locales/it/LC_MESSAGES/base.po ADDED
@@ -0,0 +1,56 @@
+ # SOME DESCRIPTIVE TITLE.
+ # Copyright (C) YEAR ORGANIZATION
+ # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+ #
+ msgid ""
+ msgstr ""
+ "Project-Id-Version: PACKAGE VERSION\n"
+ "POT-Creation-Date: 2022-09-06 00:56+0200\n"
+ "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+ "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+ "Language-Team: LANGUAGE <LL@li.org>\n"
+ "MIME-Version: 1.0\n"
+ "Content-Type: text/plain; charset=UTF-8\n"
+ "Content-Transfer-Encoding: 8bit\n"
+ "Generated-By: pygettext.py 1.5\n"
+
+ #: main.py:10
+ msgid "Choose your language"
+ msgstr "Seleziona la lingua"
+
+ #: main.py:21
+ msgid "Drawing tool:"
+ msgstr "Strumento da disegno:"
+
+ #: main.py:24
+ msgid "Stroke width: "
+ msgstr "Spessore tratto:"
+
+ #: main.py:25
+ msgid "Stroke color hex: "
+ msgstr "Colore tratto:"
+
+ #: main.py:26
+ msgid "Background color hex: "
+ msgstr "Colore background:"
+
+ #: main.py:27
+ msgid "Background image:"
+ msgstr "Carica un'immagine:"
+
+ #: main.py:28
+ msgid "Number of Steps: "
+ msgstr "Numero di iterazioni:"
+
+ #: main.py:29
+ msgid "Strength value: "
+ msgstr "Attinenza all'originale:"
+
+ #: main.py:50
+ msgid "Prompt to generate your cool image"
+ msgstr "Testo di input per iniziare la magia"
+
+ #: main.py:51
+ msgid "write you text here to improve the image with your favourite style"
+ msgstr "Scrivi qui le indicazioni che trasformeranno la tua immagine"
+
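If the Italian strings above are edited, the compiled base.mo shipped in this commit has to be regenerated so `gettext.translation()` in main.py picks up the changes. A small sketch of one way to do that, assuming the third-party polib package (not listed in this repo's requirements):

```python
import polib  # assumption: installed separately with `pip install polib`

# Recompile the Italian catalog into the .mo file that gettext loads at runtime.
po = polib.pofile("locales/it/LC_MESSAGES/base.po")
po.save_as_mofile("locales/it/LC_MESSAGES/base.mo")
```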
main.py ADDED
@@ -0,0 +1,64 @@
+ import gettext
+
+ import numpy as np
+ import streamlit as st
+ from PIL import Image
+ from streamlit_drawable_canvas import st_canvas
+
+ from utils import pipe_image
+
+ _ = gettext.gettext
+
+ language = st.selectbox(_('Choose your language'), ['en', 'it'])
+ try:
+     localizator = gettext.translation('base', localedir='locales', languages=[language])
+     localizator.install()
+     _ = localizator.gettext
+ except OSError:
+     # No compiled catalog for this language (e.g. 'en'): keep the default strings.
+     pass
+
+ st.markdown("<h1 style='text-align:center;'> Magic Board</h1>", unsafe_allow_html=True)
+
+ # Canvas parameters, configured from the sidebar.
+ drawing_mode = st.sidebar.selectbox(
+     _("Drawing tool:"), ("freedraw", "line", "rect", "circle", "transform")
+ )
+ stroke_width = st.sidebar.slider(_("Stroke width: "), 1, 25, 3)
+ stroke_color = st.sidebar.color_picker(_("Stroke color hex: "))
+ bg_color = st.sidebar.color_picker(_("Background color hex: "), "#eee")
+ bg_image = st.sidebar.file_uploader(_("Background image:"), type=["png", "jpg"])
+ strength = st.sidebar.slider(_("Strength value: "), 0.0, 1.0, 0.1)
+
+ # Create the drawable canvas component.
+ canvas_result = st_canvas(
+     fill_color="rgba(255, 165, 0, 0.3)",  # fixed fill color with some opacity
+     stroke_width=stroke_width,
+     stroke_color=stroke_color,
+     background_color=bg_color,
+     background_image=Image.open(bg_image) if bg_image else None,
+     update_streamlit=True,
+     height=450,
+     width=680,
+     drawing_mode=drawing_mode,
+     key="canvas",
+ )
+
+ prompt = st.text_input(_('Prompt to generate your cool image'),
+                        placeholder=_('write you text here to improve the image with your favourite style'))
+
+ if st.button('Submit'):
+     img_size = (512, 512)
+     # Build the init image for img2img: background upload, canvas drawing, or a blend of both.
+     comp_img = None
+     if bg_image:
+         img_back = Image.open(bg_image).convert('RGB').resize(img_size)
+         comp_img = img_back
+     if np.any(canvas_result.image_data):
+         img_draw = Image.fromarray(canvas_result.image_data).convert('RGB').resize(img_size)
+         comp_img = img_draw
+     if bg_image and np.any(canvas_result.image_data):
+         comp_img = Image.blend(img_draw, img_back, alpha=0.5)
+
+     images = pipe_image(prompt=prompt,
+                         init_image=comp_img,
+                         strength=strength)
+     for image in images:
+         st.image(image)
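As a quick way to exercise the img2img helper outside Streamlit, here is a minimal sketch; the input filename sketch.png is hypothetical, and the `sd` token environment variable must already be set:

```python
from PIL import Image
from utils import pipe_image

# Hypothetical local sketch; main.py resizes its inputs to 512x512 before calling the pipeline.
init = Image.open("sketch.png").convert("RGB").resize((512, 512))

images = pipe_image(prompt="a watercolor painting of a koala",
                    init_image=init,
                    strength=0.6)
images[0].save("result.png")
```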
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch
+ transformers
+ scipy
+ ftfy
+ git+https://github.com/huggingface/diffusers
utils.py ADDED
@@ -0,0 +1,25 @@
+ import os
+
+ import torch
+ from diffusers import StableDiffusionImg2ImgPipeline
+
+ # Hugging Face access token, read from the `sd` environment variable.
+ TOKEN = os.environ.get('sd')
+
+ torch.cuda.empty_cache()
+ device = "cpu"
+
+
+ def pipe_image(prompt,
+                init_image=None,
+                strength=0.6):
+     # The pipeline is rebuilt on every call; the weights are cached locally by diffusers.
+     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
+                                                           revision="fp16",
+                                                           torch_dtype=torch.float32,
+                                                           use_auth_token=TOKEN)
+
+     pipe = pipe.to(device)
+     pipe.enable_attention_slicing()
+     images = pipe(prompt=prompt,
+                   init_image=init_image,
+                   strength=strength,
+                   guidance_scale=7.5,
+                   num_inference_steps=1000).images  # very slow on CPU; lower this for quick tests
+     return images
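The README's NOTE talks about switching between `torch.float16` and `torch.float32`. A hedged sketch of how that choice could be made conditional on having a GPU; this is not what the commit ships (utils.py is fixed to CPU and float32), just one possible variant:

```python
import os

import torch
from diffusers import StableDiffusionImg2ImgPipeline

TOKEN = os.environ.get("sd")  # same environment variable utils.py reads

# Half precision when a GPU is available (saves VRAM); full precision on CPU,
# where float16 kernels are poorly supported.
use_cuda = torch.cuda.is_available()
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",
    torch_dtype=torch.float16 if use_cuda else torch.float32,
    use_auth_token=TOKEN,
)
pipe = pipe.to("cuda" if use_cuda else "cpu")
```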