ashishtanwer kadirnar committed on
Commit 08a7b0a
0 Parent(s):

Duplicate from keras-dreambooth/traditional-furniture-demo

Co-authored-by: Kadir Nar <kadirnar@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +34 -0
  2. README.md +17 -0
  3. app.py +53 -0
  4. requirements.txt +2 -0
  5. utils_app.py +125 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,17 @@
+ ---
+ title: Traditional Furniture Demo
+ emoji: 👀
+ colorFrom: indigo
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.20.1
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ tags:
+ - keras-dreambooth
+ - wildcard
+ duplicated_from: keras-dreambooth/traditional-furniture-demo
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,53 @@
+ from huggingface_hub import from_pretrained_keras
+ import keras_cv
+ import gradio as gr
+ from tensorflow import keras
+
+ keras.mixed_precision.set_global_policy("mixed_float16")
+ # build the base Stable Diffusion pipeline and load the fine-tuned diffusion model weights
+ resolution = 512
+ dreambooth_model = keras_cv.models.StableDiffusion(
+     img_width=resolution, img_height=resolution, jit_compile=True,
+ )
+ loaded_diffusion_model = from_pretrained_keras("keras-dreambooth/keras-diffusion-traditional-furniture")
+ dreambooth_model._diffusion_model = loaded_diffusion_model
+
+
+ def generate_images(prompt: str, negative_prompt: str, num_imgs_to_gen: int, num_steps: int):
+     """
+     Generate images using the fine-tuned Keras DreamBooth Stable Diffusion model.
+     Args:
+         prompt (str): The text input given by the user, based on which images will be generated.
+         negative_prompt (str): Concepts the model should steer away from.
+         num_imgs_to_gen (int): The number of images to generate for the given prompt.
+         num_steps (int): The number of denoising steps.
+     Returns:
+         generated_img (List): List of images generated by the model.
+     """
+     generated_img = dreambooth_model.text_to_image(
+         prompt,
+         negative_prompt=negative_prompt,
+         batch_size=num_imgs_to_gen,
+         num_steps=num_steps,
+     )
+     return generated_img
+
+ with gr.Blocks() as demo:
+     gr.HTML("<h2 style=\"font-size: 2em; font-weight: bold\" align=\"center\">Keras Dreambooth - Traditional Furniture Demo</h2>")
+     with gr.Row():
+         with gr.Column():
+             prompt = gr.Textbox(lines=1, value="sks traditional furniture", label="Base Prompt")
+             negative_prompt = gr.Textbox(lines=1, value="deformed", label="Negative Prompt")
+             samples = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="Number of Images")
+             num_steps = gr.Slider(minimum=1, maximum=100, step=1, value=50, label="Inference Steps")
+             run = gr.Button(value="Run")
+         with gr.Column():
+             gallery = gr.Gallery(label="Outputs").style(grid=(1, 2))
+
+     run.click(generate_images, inputs=[prompt, negative_prompt, samples, num_steps], outputs=gallery)
+
+     gr.Examples([["photo of traditional furniture", "deformed", 1, 50]],
+                 [prompt, negative_prompt, samples, num_steps], gallery, generate_images)
+     gr.Markdown('\n Demo created by: <a href="https://huggingface.co/kadirnar/">Kadir Nar</a>')
+
+ demo.launch(debug=True)
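
For reference, the pipeline that app.py builds can also be exercised without the Gradio UI. The following is only a minimal sketch, assuming the same keras-dreambooth/keras-diffusion-traditional-furniture weights and the keras_cv calls shown in the hunk above; the printed shape reflects what keras_cv's text_to_image is expected to return for one 512x512 image.

    from huggingface_hub import from_pretrained_keras
    import keras_cv

    # Build the base Stable Diffusion pipeline and swap in the fine-tuned
    # diffusion model, mirroring the setup in app.py above.
    model = keras_cv.models.StableDiffusion(img_width=512, img_height=512, jit_compile=True)
    model._diffusion_model = from_pretrained_keras("keras-dreambooth/keras-diffusion-traditional-furniture")

    # Generate a single image; text_to_image returns a batch of uint8 arrays.
    images = model.text_to_image(
        "a photo of sks traditional furniture",
        negative_prompt="deformed",
        batch_size=1,
        num_steps=50,
    )
    print(images.shape)  # expected: (1, 512, 512, 3)
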
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ keras_cv
+ tensorflow
utils_app.py ADDED
@@ -0,0 +1,125 @@
+ from huggingface_hub import from_pretrained_keras
+ from keras_cv import models
+ from tensorflow import keras
+ import tensorflow as tf
+ import gradio as gr
+
+
+ keras.mixed_precision.set_global_policy("mixed_float16")
+
+ keras_model_list = [
+     "kadirnar/dreambooth_diffusion_model_v5",
+     "kadirnar/dreambooth_diffusion_model_v3"
+ ]
+
+ stable_prompt_list = [
+     "a photo of sks traditional furniture",
+ ]
+
+ stable_negative_prompt_list = [
+     "bad, ugly",
+     "deformed"
+ ]
+
+ def keras_stable_diffusion(
+     model_path: str,
+     prompt: str,
+     negative_prompt: str,
+     guidance_scale: float,
+     num_inference_step: int,
+     height: int,
+     width: int,
+ ):
+     # build the base Stable Diffusion pipeline at the requested resolution
+     sd_dreambooth_model = models.StableDiffusion(
+         img_width=width,
+         img_height=height
+     )
+
+     db_diffusion_model = from_pretrained_keras(model_path)
+     sd_dreambooth_model._diffusion_model = db_diffusion_model
+
+     generated_images = sd_dreambooth_model.text_to_image(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         num_steps=num_inference_step,
+         unconditional_guidance_scale=guidance_scale
+     )
+     # clear the Keras global state so repeated generations do not leak memory
+     tf.keras.backend.clear_session()
+
+     return generated_images
+
+ def keras_stable_diffusion_app():
+     with gr.Blocks():
+         with gr.Row():
+             with gr.Column():
+                 keras_text2image_model_path = gr.Dropdown(
+                     choices=keras_model_list,
+                     value=keras_model_list[0],
+                     label='Text-to-Image Model Id'
+                 )
+
+                 keras_text2image_prompt = gr.Textbox(
+                     lines=1,
+                     value=stable_prompt_list[0],
+                     label='Prompt'
+                 )
+
+                 keras_text2image_negative_prompt = gr.Textbox(
+                     lines=1,
+                     value=stable_negative_prompt_list[0],
+                     label='Negative Prompt'
+                 )
+
+                 with gr.Accordion("Advanced Options", open=False):
+                     keras_text2image_guidance_scale = gr.Slider(
+                         minimum=0.1,
+                         maximum=15,
+                         step=0.1,
+                         value=7.5,
+                         label='Guidance Scale'
+                     )
+
+                     keras_text2image_num_inference_step = gr.Slider(
+                         minimum=1,
+                         maximum=100,
+                         step=1,
+                         value=50,
+                         label='Num Inference Steps'
+                     )
+
+                     keras_text2image_height = gr.Slider(
+                         minimum=128,
+                         maximum=1280,
+                         step=32,
+                         value=512,
+                         label='Image Height'
+                     )
+
+                     keras_text2image_width = gr.Slider(
+                         minimum=128,
+                         maximum=1280,
+                         step=32,
+                         value=512,
+                         label='Image Width'
+                     )
+
+                 keras_text2image_predict = gr.Button(value='Generate')
+
+             with gr.Column():
+                 output_image = gr.Gallery(label='Output')
+
+             keras_text2image_predict.click(
+                 fn=keras_stable_diffusion,
+                 inputs=[
+                     keras_text2image_model_path,
+                     keras_text2image_prompt,
+                     keras_text2image_negative_prompt,
+                     keras_text2image_guidance_scale,
+                     keras_text2image_num_inference_step,
+                     keras_text2image_height,
+                     keras_text2image_width
+                 ],
+                 outputs=output_image
+             )
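
Note that utils_app.py only defines the UI builder; nothing else in this commit imports or launches it. One way it could be wired up, shown here only as a hedged sketch with a hypothetical entry script that is not part of this Space, is:

    import gradio as gr
    from utils_app import keras_stable_diffusion_app

    # keras_stable_diffusion_app() lays out its controls and registers the click
    # handler inside the surrounding Blocks context.
    with gr.Blocks() as demo:
        keras_stable_diffusion_app()

    demo.launch()
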