Jonny001 committed (verified)
Commit 50043a1 · Parent(s): 3528cb9

Update app.py

Files changed (1): app.py (+17, -27)
app.py CHANGED
@@ -3,44 +3,34 @@
 #model3 = gr.load("models/prashanth970/flux-lora-uncensored")
 
 import gradio as gr
-import random
-import os
 
+model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
+model2 = gr.load("models/prashanth970/flux-lora-uncensored")
 
-model = gr.load("models/pimpilikipilapi1/NSFW_master")
 
-def generate_image(text, seed, width, height, guidance_scale, num_inference_steps):
-    if seed is not None:
-        random.seed(seed)
-
-    result_image = model(text)
-
-    print(f"Width: {width}, Height: {height}, Guidance Scale: {guidance_scale}, Inference Steps: {num_inference_steps}")
-
+def generate_image(text, selected_model):
+    if selected_model == "Model 1 (NSFW Master)":
+        result_image = model1(text)
+    elif selected_model == "Model 2 (Flux Lora Uncensored)":
+        result_image = model2(text)
+    else:
+        return "Invalid model selection."
     return result_image
 
-def randomize_parameters():
-    seed = random.randint(0, 999999)
-    width = random.randint(512, 2048)
-    height = random.randint(512, 2048)
-    guidance_scale = round(random.uniform(0.1, 20.0), 1)
-    num_inference_steps = random.randint(1, 40)
-
-    return seed, width, height, guidance_scale, num_inference_steps
 
 interface = gr.Interface(
     fn=generate_image,
     inputs=[
         gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
-        gr.Slider(label="Seed", minimum=0, maximum=999999, step=1),
-        gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1024),
-        gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=1024),
-        gr.Slider(label="Guidance Scale", minimum=0.1, maximum=20.0, step=0.1, value=3.0),
-        gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
+        gr.Radio(
+            ["Model 1 (NSFW Master)", "Model 2 (Flux Lora Uncensored)"],
+            label="Select Model",
+            value="Model 1 (NSFW Master)",
+        ),
     ],
     outputs=gr.Image(label="Generated Image"),
-    theme="NoCrypt/miku",
-    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
+    theme="Yntec/HaleyCH_Theme_Orange",
+    description="Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
 )
 
-interface.launch()
+interface.launch()
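
Note: the commit replaces the single slider-driven pipeline with two Hub models loaded via gr.load and selected through a gr.Radio input. For reference, the same label-to-model routing could also be written as a dictionary lookup instead of an if/elif chain; the sketch below is illustrative only (not the committed code) and assumes both hosted models load successfully:

import gradio as gr

# Map each radio label to its loaded model; keys must match the gr.Radio choices exactly.
MODELS = {
    "Model 1 (NSFW Master)": gr.load("models/pimpilikipilapi1/NSFW_master"),
    "Model 2 (Flux Lora Uncensored)": gr.load("models/prashanth970/flux-lora-uncensored"),
}

def generate_image(text, selected_model):
    model = MODELS.get(selected_model)
    if model is None:
        # Raising gr.Error surfaces the problem in the UI; returning a plain string
        # to a gr.Image output would be interpreted as a file path instead.
        raise gr.Error("Invalid model selection.")
    return model(text)

Called directly, generate_image("a castle at sunset", "Model 1 (NSFW Master)") would route the prompt to the first loaded model, mirroring what the radio selection does in the interface.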