viba98 commited on
Commit
c1b02ca
·
1 Parent(s): 07ee45e
Files changed (2) hide show
  1. app.py +234 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import nullcontext
2
+ import gradio as gr
3
+ from torch import autocast
4
+ from removebg import RemoveBg
5
+ import os
6
+ import torch
7
+
8
+ import PIL
9
+ from PIL import Image
10
+
11
+ from diffusers import StableDiffusionPipeline
12
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
13
+
14
def image_grid(imgs, rows, cols):
    """Paste a list of PIL images into a single rows x cols grid image.

    Args:
        imgs: list of PIL.Image objects, all assumed to be the same size
            (the first image's size is used for every cell).
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB PIL.Image of size (cols * w, rows * h).

    Raises:
        ValueError: if len(imgs) != rows * cols.
    """
    # Explicit error instead of `assert`, which is stripped under `python -O`.
    if len(imgs) != rows * cols:
        raise ValueError(f"expected {rows * cols} images, got {len(imgs)}")

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        # Row-major placement: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
24
+
25
pretrained_model_name_or_path = "vmanot/valiant-effort-one"  #@param {type:"string"}

# Pick the device first so every component below can honour it.
device = "cuda" if torch.cuda.is_available() else "cpu"
# autocast is CUDA-only here; fall back to a no-op context manager on CPU.
context = autocast if device == "cuda" else nullcontext
# float16 is unsupported for most CPU ops; use float32 there.
dtype = torch.float16 if device == "cuda" else torch.float32

tokenizer = CLIPTokenizer.from_pretrained(
    pretrained_model_name_or_path,
    subfolder="tokenizer",
)
text_encoder = CLIPTextModel.from_pretrained(
    pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=dtype
)

pipe = StableDiffusionPipeline.from_pretrained(
    pretrained_model_name_or_path,
    revision="main",
    torch_dtype=dtype,  # was hard-coded float16: fails on CPU-only hosts
    text_encoder=text_encoder,
    tokenizer=tokenizer,
).to(device)  # was hard-coded "cuda": crashed on machines without a GPU

disable_safety = True

if disable_safety:
    # Bypass the NSFW safety checker entirely: return images unchanged.
    # NOTE(review): newer diffusers versions expect the second element to be
    # a list of bools (one per image); a bare False works with the version
    # pinned in requirements.txt — confirm before upgrading diffusers.
    def null_safety(images, **kwargs):
        return images, False
    pipe.safety_checker = null_safety


num_samples = 2  #@param {type:"number"}
num_rows = 2  #@param {type:"number"}
57
+
58
def infer(prompt, n_samples, steps, scale):
    """Generate `n_samples` images for `prompt` and strip their backgrounds.

    Args:
        prompt: text prompt for Stable Diffusion.
        n_samples: number of images to generate in one batch.
        steps: number of denoising inference steps.
        scale: classifier-free guidance scale.

    Returns:
        The list of generated PIL images. Background removal happens as a
        side effect: each image is saved as im<idx>.png and sent to the
        remove.bg API, which writes its output to disk.
    """
    # SECURITY(review): the remove.bg API key was hard-coded in source.
    # Prefer the REMOVEBG_API_KEY environment variable; the old literal is
    # kept only as a fallback so existing deployments keep working.
    # Hoisted out of the loop: one client serves every image.
    rmbg = RemoveBg(
        os.environ.get("REMOVEBG_API_KEY", "K74BwTbRWVgpszyvJ8VvXPmv"),
        "error.log",
    )
    with context("cuda"):
        images = pipe(
            n_samples * [prompt], guidance_scale=scale, num_inference_steps=steps
        ).images
        for idx, im in enumerate(images):
            name = 'im' + str(idx) + '.png'
            im.save(name)
            # Network call; result file is written next to the original.
            rmbg.remove_background_from_img_file(name)
    return images
72
+
73
# Custom CSS injected into gr.Blocks below: brand colors for buttons,
# sizing/centering for the container and gallery, and dark-mode overrides
# for the footer and logo.
css = """
        a {
            color: inherit;
            text-decoration: underline;
        }
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: #9d66e5;
            background: #9d66e5;
        }
        input[type='range'] {
            accent-color: green;
        }
        .dark input[type='range'] {
            accent-color: green;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-options {
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .logo{ filter: invert(1); }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4{
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
"""
153
+
154
block = gr.Blocks(css=css)

# Each example row maps onto [text, samples, steps, scale] in gr.Examples
# below. BUGFIX: the original rows had only three values and gr.Examples
# passed only [text, samples, scale] to infer(prompt, n_samples, steps,
# scale), so clicking an example raised a TypeError; a steps value (the
# slider default, 25) is now included.
examples = [
    [
        'Yoda',
        2,
        25,
        7.5,
    ],
    [
        'Abraham Lincoln',
        2,
        25,
        7.5,
    ],
    [
        'George Washington',
        2,
        25,
        7,
    ],
]

with block:
    # Static page header (logo + title).
    gr.HTML(
        """
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
              <div>
                <img class="logo" src="https://lambdalabs.com/static/images/lambda-logo.svg" alt="Lambda Logo"
                    style="margin: auto; max-width: 7rem;">
                <h1 style="font-weight: 900; font-size: 3rem;">
                  Icon Generator
                </h1>
              </div>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            # Prompt input and generate button share one row.
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )

        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(grid=[2], height="auto")

        with gr.Row(elem_id="advanced-options"):
            samples = gr.Slider(label="Images", minimum=1, maximum=4, value=2, step=1)
            steps = gr.Slider(label="Steps", minimum=5, maximum=50, value=25, step=5)
            scale = gr.Slider(
                label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
            )

        # BUGFIX: `steps` was missing from inputs; infer takes four arguments.
        ex = gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[text, samples, steps, scale],
            outputs=gallery,
            cache_examples=False,
        )
        ex.dataset.headers = [""]

        # Both Enter-in-textbox and the button trigger generation.
        text.submit(infer, inputs=[text, samples, steps, scale], outputs=gallery)
        btn.click(infer, inputs=[text, samples, steps, scale], outputs=gallery)
    gr.HTML(
        """
            <div class="footer">
                <p> Gradio Demo by 🤗 Hugging Face
                </p>
            </div>
        """
    )

block.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ diffusers==0.4.1
+ transformers==4.22.1
+ ftfy
+ ipywidgets>=7,<8
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ scipy
+ datasets
+ gradio
+ removebg