Jonny001 committed on
Commit
190c4ad
·
verified ·
1 Parent(s): 25cb0a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -26
app.py CHANGED
@@ -1,42 +1,49 @@
1
  import gradio as gr
 
 
2
 
3
-
4
# Registry of selectable models: display name -> Hugging Face model path.
# Kept as a pair list first so the ordering of entries is explicit.
_MODEL_PATHS = [
    ("Flux Lora", "models/prashanth970/flux-lora-uncensored"),
    ("TrioHMH Flux", "models/DiegoJR1973/NSFW-TrioHMH-Flux"),
    ("Master", "models/pimpilikipilapi1/NSFW_master"),
]
models = dict(_MODEL_PATHS)
9
 
 
 
 
10
 
11
def generate_image(text, model_name):
    """Generate an image from *text* with the model selected in the UI.

    Parameters:
        text: The prompt string to render.
        model_name: A key of the module-level ``models`` dict; the mapped
            Hugging Face path is loaded via ``gr.load``.

    Returns:
        A ``gr.Image`` wrapping the result when the model yields a path
        string or raw bytes, the raw model result otherwise, or ``None``
        when loading or inference fails.
    """
    model_path = models[model_name]
    print(f"Fetching model from: {model_path}")

    try:
        # NOTE(review): the model is re-loaded on every call, which is slow;
        # left as-is to keep behavior identical for callers.
        model = gr.load(model_path)
        result_image = model(text)
        # The original had two byte-identical branches for str and bytes;
        # they collapse into one isinstance check with a tuple.
        if isinstance(result_image, (str, bytes)):
            return gr.Image(value=result_image)
        return result_image
    except Exception as e:
        # Best-effort: report the failure and return None so the UI shows
        # an empty output instead of crashing the app.
        print(f"Error loading model: {e}")
        return None
28
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
# Build the UI components first, then wire them into the Interface.
_prompt_box = gr.Textbox(
    label="Type here your imagination:",
    placeholder="Type your description here...",
)
_model_picker = gr.Dropdown(
    label="Select Model",
    choices=list(models.keys()),
    value="Flux Lora",
)

# One prompt + one model choice in, a single generated image out.
interface = gr.Interface(
    fn=generate_image,
    inputs=[_prompt_box, _model_picker],
    outputs=gr.Image(label="Generated Image"),
    theme="NoCrypt/miku",
    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)

interface.launch()
 
1
  import gradio as gr
2
+ import random
3
+ import os
4
 
5
# Display name -> Hugging Face model path. All models are loaded eagerly at
# import time so each inference call reuses the already-loaded handle.
_MODEL_PATHS = {
    "Face Projection": "models/Purz/face-projection",
    "Flux LoRA Uncensored": "models/prashanth970/flux-lora-uncensored",
    "NSFW TrioHMH Flux": "models/DiegoJR1973/NSFW-TrioHMH-Flux",
    "NSFW Master": "models/pimpilikipilapi1/NSFW_master",
}
models = {name: gr.load(path) for name, path in _MODEL_PATHS.items()}
12
 
13
def generate_image(text, seed, width, height, guidance_scale, num_inference_steps):
    """Run the prompt through every loaded model and return one image per model.

    Parameters:
        text: The prompt string passed to each model.
        seed: Optional RNG seed taken from the UI slider.
        width, height, guidance_scale, num_inference_steps: Generation
            settings from the UI sliders.

    Returns:
        A list of model outputs, one per entry in ``models``, in the dict's
        insertion order (matching the Interface's output components).
    """
    # NOTE(review): this seeds Python's global `random` module, but nothing
    # below uses `random` — the models are called with only `text`, so the
    # seed has no visible effect on generation. Confirm intent.
    if seed is not None:
        random.seed(seed)

    # Fan the prompt out to every model; each value is that model's output.
    result_images = {}
    for model_name, model in models.items():
        result_images[model_name] = model(text)

    # NOTE(review): width/height/guidance_scale/num_inference_steps are only
    # printed here and never forwarded to the models — the sliders currently
    # do not influence the output. Verify against gr.load's call signature.
    print(f"Width: {width}, Height: {height}, Guidance Scale: {guidance_scale}, Inference Steps: {num_inference_steps}")

    return [result_images[model_name] for model_name in models]
24
+
25
def randomize_parameters():
    """Draw a fresh random set of generation parameters.

    Returns:
        A 5-tuple ``(seed, width, height, guidance_scale,
        num_inference_steps)`` matching the UI sliders' ranges:
        seed in [0, 999999], width/height in [512, 2048],
        guidance_scale in [0.1, 20.0] rounded to one decimal,
        num_inference_steps in [1, 40].
    """
    # Draw in the same order as the sliders are listed in the Interface.
    new_seed = random.randint(0, 999999)
    dims = (random.randint(512, 2048), random.randint(512, 2048))
    scale = round(random.uniform(0.1, 20.0), 1)
    steps = random.randint(1, 40)
    return new_seed, dims[0], dims[1], scale, steps
33
 
34
# Assemble the input controls first, then hand everything to gr.Interface.
_inputs = [
    gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example..."),
    gr.Slider(label="Seed", minimum=0, maximum=999999, step=1),
    gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1024),
    gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=1024),
    gr.Slider(label="Guidance Scale", minimum=0.1, maximum=20.0, step=0.1, value=3.0),
    gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
]

# One output image per loaded model, labeled with the model's display name.
_outputs = [gr.Image(label=model_name) for model_name in models]

interface = gr.Interface(
    fn=generate_image,
    inputs=_inputs,
    outputs=_outputs,
    theme="NoCrypt/miku",
    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)

interface.launch()