salomonsky committed
Commit 3c2650c
1 Parent(s): 38ad96d

Update app.py

Files changed (1)
  1. app.py +136 -15
app.py CHANGED
@@ -11,7 +11,7 @@ from PIL import Image
 
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
-basemodel = "black-forest-labs/FLUX.1-dev"
+basemodel = "XLabs-AI/flux-RealismLora"
 MAX_SEED = np.iinfo(np.int32).max
 
 CSS = """
@@ -27,12 +27,11 @@ JS = """function () {
 }
 }"""
 
-lora_add = gr.Textbox(
-    label="Add Flux LoRA",
-    info="Copy the HF LoRA model name here",
-    lines=1,
-    value="XLabs-AI/flux-RealismLora"
-)
+def enable_lora(lora_add):
+    if not lora_add:
+        return basemodel
+    else:
+        return lora_add
 
 async def generate_image(
     prompt:str,
@@ -42,7 +41,8 @@ async def generate_image(
     height:int=1024,
     scales:float=3.5,
     steps:int=24,
-    seed:int=-1):
+    seed:int=-1
+):
 
     if seed == -1:
         seed = random.randint(0, MAX_SEED)
@@ -66,6 +66,42 @@ async def generate_image(
 
     return image, seed
 
+async def upscale_image(
+    prompt:str,
+    img_path:str,
+    upscale_factor:int=2,
+    controlnet_scale:float=0.6,
+    controlnet_decay:float=1,
+    condition_scale:int=6,
+    tile_width:int=112,
+    tile_height:int=144,
+    denoise_strength:float=0.35,
+    num_inference_steps:int=18,
+    solver:str="DDIM"
+):
+    client = AsyncInferenceClient()
+    try:
+        result = await client.image_to_image(
+            prompt=prompt,
+            input_image=img_path,
+            negative_prompt="",
+            seed=42,
+            upscale_factor=upscale_factor,
+            controlnet_scale=controlnet_scale,
+            controlnet_decay=controlnet_decay,
+            condition_scale=condition_scale,
+            tile_width=tile_width,
+            tile_height=tile_height,
+            denoise_strength=denoise_strength,
+            num_inference_steps=num_inference_steps,
+            solver=solver,
+            model="finegrain/finegrain-image-enhancer",
+        )
+    except Exception as e:
+        raise gr.Error(f"Error in {e}")
+
+    return result[0]
+
 async def gen(
     prompt:str,
     lora_add:str="",
@@ -75,20 +111,31 @@ async def gen(
     scales:float=3.5,
     steps:int=24,
     seed:int=-1,
+    upscale_factor:int=2,
+    controlnet_scale:float=0.6,
+    controlnet_decay:float=1,
+    condition_scale:int=6,
+    tile_width:int=112,
+    tile_height:int=144,
+    denoise_strength:float=0.35,
+    num_inference_steps:int=18,
+    solver:str="DDIM",
     progress=gr.Progress(track_tqdm=True)
 ):
     model = enable_lora(lora_add)
     print(model)
-    image, seed = await generate_image(prompt,model,lora_word,width,height,scales,steps,seed)
-    return image, seed
-
+    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
+    upscale_img = await upscale_image(prompt, image, upscale_factor, controlnet_scale, controlnet_decay, condition_scale, tile_width, tile_height, denoise_strength, num_inference_steps, solver)
+    return image, upscale_img, seed
+
 with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
     gr.HTML("<h1><center>Flux Lab Light</center></h1>")
     gr.HTML("<p><center>Powered By HF Inference API</center></p>")
     with gr.Row():
         with gr.Column(scale=4):
             with gr.Row():
-                img = gr.Image(type="filepath", label='flux Generated Image', height=600)
+                img = gr.Image(type="filepath", label='Flux Image', height=600)
+                upscale_img = gr.Image(type="filepath", label='Upscale Image', height=600)
             with gr.Row():
                 prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
                 sendBtn = gr.Button(scale=1, variant='primary')
@@ -133,7 +180,7 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
                 label="Add Flux LoRA",
                 info="Copy the HF LoRA model name here",
                 lines=1,
-                placeholder="Please use Warm status model",
+                value="XLabs-AI/flux-RealismLora"
             )
             lora_word = gr.Textbox(
                 label="Add Flux LoRA Trigger Word",
@@ -141,6 +188,71 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
                 lines=1,
                 value="",
             )
+            upscale_factor = gr.Radio(
+                label="UpScale Factor",
+                choices=[
+                    2, 3, 4
+                ],
+                value=2,
+                scale=2
+            )
+            controlnet_scale = gr.Slider(
+                label="ControlNet Scale",
+                minimum=0.1,
+                maximum=1.0,
+                step=0.1,
+                value=0.6
+            )
+            controlnet_decay = gr.Slider(
+                label="ControlNet Decay",
+                minimum=0.1,
+                maximum=1.0,
+                step=0.1,
+                value=1
+            )
+            condition_scale = gr.Slider(
+                label="Condition Scale",
+                minimum=1,
+                maximum=10,
+                step=1,
+                value=6
+            )
+            tile_width = gr.Slider(
+                label="Tile Width",
+                minimum=64,
+                maximum=256,
+                step=16,
+                value=112
+            )
+            tile_height = gr.Slider(
+                label="Tile Height",
+                minimum=64,
+                maximum=256,
+                step=16,
+                value=144
+            )
+            denoise_strength = gr.Slider(
+                label="Denoise Strength",
+                minimum=0.1,
+                maximum=1.0,
+                step=0.1,
+                value=0.35
+            )
+            num_inference_steps = gr.Slider(
+                label="Num Inference Steps",
+                minimum=1,
+                maximum=50,
+                step=1,
+                value=18
+            )
+            solver = gr.Radio(
+                label="Solver",
+                choices=[
+                    "DDIM", "DPM"
+                ],
+                value="DDIM",
+                scale=2
+            )
 
     gr.on(
         triggers=[
@@ -156,9 +268,18 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
             height,
             scales,
             steps,
-            seed
+            seed,
+            upscale_factor,
+            controlnet_scale,
+            controlnet_decay,
+            condition_scale,
+            tile_width,
+            tile_height,
+            denoise_strength,
+            num_inference_steps,
+            solver
         ],
-        outputs=[img, seed]
+        outputs=[img, upscale_img, seed]
    )
 
 if __name__ == "__main__":
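
For reference, a minimal sketch of how the two-stage pipeline added by this commit could be exercised outside the Gradio UI. This is not part of the commit: it assumes the file is importable as `app`, that `generate_image` keeps the positional argument order implied by the call inside `gen()` (prompt, model, lora_word, width, height, scales, steps, seed), and that `HF_TOKEN` is set so `AsyncInferenceClient` can reach the Inference API; the prompt text is purely illustrative.

```python
# Hypothetical driver script (not in the commit): runs the commit's
# generate -> upscale flow without launching the Gradio demo.
import asyncio

from app import enable_lora, generate_image, upscale_image  # assumed module name

async def main():
    # An empty string falls back to `basemodel`; any HF repo id selects that LoRA.
    model = enable_lora("XLabs-AI/flux-RealismLora")

    # Stage 1: FLUX text-to-image (argument order mirrors the call in gen()).
    image, seed = await generate_image(
        "a photorealistic portrait, golden hour",  # prompt (illustrative)
        model,        # model / LoRA repo id
        "",           # lora_word (trigger word)
        1024, 1024,   # width, height
        3.5, 24, -1,  # scales, steps, seed (-1 = random seed)
    )

    # Stage 2: Finegrain image enhancer, using the defaults added in this commit.
    upscaled = await upscale_image(
        "a photorealistic portrait, golden hour",
        image,
        upscale_factor=2,
    )
    print("seed:", seed, "upscaled output:", upscaled)

asyncio.run(main())
```

Note that because `gen()` now returns three values (image, upscale_img, seed), the `gr.on()` wiring is extended accordingly: the new upscaler controls are appended to `inputs` and `outputs` becomes `[img, upscale_img, seed]`.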