AlekseyCalvin committed
Commit e14aae1
1 Parent(s): e40b201

Update app.py

Files changed (1): app.py +120 -122
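In short: the commit replaces the token-gated `DiffusionPipeline` on `sayakpaul/FLUX.1-merged` (plus the accelerate/transformers imports and the multi-image `generate_images` helper) with a `FluxPipeline` on `John6666/nsfw-master-flux-lora-merged-with-flux1-dev-fp16-v10-fp8-flux`, kept in bfloat16 and moved between CUDA and CPU around each single-image `generate_image` call. LoRA selection moves from repo strings to gallery indices and gains optional `trigger_position` and `aspect` handling, and the UI is reworked into a prompt row, an Advanced Settings accordion, and `gr.on` wiring that fires on both the Generate button and prompt submit.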
app.py CHANGED
@@ -1,51 +1,36 @@
-import os
 import gradio as gr
-import numpy as np
 import json
-from accelerate import dispatch_model, infer_auto_device_map
-from accelerate.utils import get_balanced_memory
-from torch.cuda.amp import autocast
+import logging
+import argparse
 import torch
-import spaces  # Import this first to avoid CUDA initialization issues
+import os
+from os import path
+from PIL import Image
+import spaces
+import copy
 import random
 import time
-from PIL import Image
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel
-from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+from huggingface_hub import hf_hub_download
+from diffusers import FluxTransformer2DModel, FluxPipeline
+import safetensors.torch
+from safetensors.torch import load_file
+import gc
 
-# Use the 'waffles' environment variable as the access token
-hf_token = os.getenv('waffles')
-
-# Ensure the token is loaded correctly
-if not hf_token:
-    raise ValueError("Hugging Face API token not found. Please set the 'waffles' environment variable.")
-
-# Define the device
-dtype = torch.bfloat16
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    n_gpu = torch.cuda.device_count()
-    torch.cuda.get_device_name(0)
-else:
-    device = torch.device("cpu")
-
-count0 = torch.zeros(1).to(device)
-count1 = torch.zeros(1).to(device)
-count2 = torch.zeros(1).to(device)
+cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
+os.environ["TRANSFORMERS_CACHE"] = cache_path
+os.environ["HF_HUB_CACHE"] = cache_path
+os.environ["HF_HOME"] = cache_path
+
+torch.backends.cuda.matmul.allow_tf32 = True
+
+pipe = FluxPipeline.from_pretrained("John6666/nsfw-master-flux-lora-merged-with-flux1-dev-fp16-v10-fp8-flux", torch_dtype=torch.bfloat16)
+pipe.to(device="cuda", dtype=torch.bfloat16)
 
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
     loras = json.load(f)
-
-# Initialize the base model with authentication and specify the device
-# Initialize the base model with authentication and specify the device
-pipe = DiffusionPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=dtype, token=hf_token).to(device)
-
-MAX_SEED = 2**32 - 1
-MAX_IMAGE_SIZE = 2048
-
+
+MAX_SEED = 2**32 - 1
 
 class calculateDuration:
     def __init__(self, activity_name=""):
@@ -63,36 +48,62 @@ class calculateDuration:
         else:
             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
-@spaces.GPU(duration=90)
-def generate_images(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, num_images, progress):
-    generator = torch.Generator(device=device).manual_seed(seed)
-    images = []
+
+def update_selection(evt: gr.SelectData, width, height):
+    selected_lora = loras[evt.index]
+    new_placeholder = f"Type a prompt for {selected_lora['title']}"
+    lora_repo = selected_lora["repo"]
+    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
+    if "aspect" in selected_lora:
+        if selected_lora["aspect"] == "portrait":
+            width = 768
+            height = 1024
+        elif selected_lora["aspect"] == "landscape":
+            width = 1024
+            height = 768
+    return (
+        gr.update(placeholder=new_placeholder),
+        updated_text,
+        evt.index,
+        width,
+        height,
+    )
+
+@spaces.GPU(duration=70)
+def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
+    pipe.to("cuda")
+    generator = torch.Generator(device="cuda").manual_seed(seed)
 
-    with calculateDuration("Generating images"):
-        for _ in range(num_images):
-            # Generate each image
-            image = pipe(
-                prompt=f"{prompt} {trigger_word}",
-                num_inference_steps=steps,
-                guidance_scale=cfg_scale,
-                width=width,
-                height=height,
-                generator=generator,
-                joint_attention_kwargs={"scale": lora_scale},
-            ).images[0]
-            images.append(image)
-    return images
-
-def run_lora(prompt, cfg_scale, steps, selected_repo, randomize_seed, seed, width, height, lora_scale, num_images, progress=gr.Progress(track_tqdm=True)):
-    if not selected_repo:
+    with calculateDuration("Generating image"):
+        # Generate image
+        image = pipe(
+            prompt=f"{prompt} {trigger_word}",
+            num_inference_steps=steps,
+            guidance_scale=cfg_scale,
+            width=width,
+            height=height,
+            generator=generator,
+            joint_attention_kwargs={"scale": lora_scale},
+        ).images[0]
+    return image
+
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+    if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
-    selected_lora = next((lora for lora in loras if lora["repo"] == selected_repo), None)
-    if not selected_lora:
-        raise gr.Error("Selected LoRA not found.")
-
+    selected_lora = loras[selected_index]
     lora_path = selected_lora["repo"]
     trigger_word = selected_lora["trigger_word"]
+    if trigger_word:
+        if "trigger_position" in selected_lora:
+            if selected_lora["trigger_position"] == "prepend":
+                prompt_mash = f"{trigger_word} {prompt}"
+            else:
+                prompt_mash = f"{prompt} {trigger_word}"
+        else:
+            prompt_mash = f"{trigger_word} {prompt}"
+    else:
+        prompt_mash = prompt
 
     # Load LoRA weights
     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
@@ -106,15 +117,10 @@ def run_lora(prompt, cfg_scale, steps, selected_repo, randomize_seed, seed, widt
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    images = generate_images(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, num_images, progress)
-    pipe.to("cuda")
+    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
+    pipe.to("cpu")
     pipe.unload_lora_weights()
-    return images, seed
-
-def update_selection(evt: gr.SelectData):
-    index = evt.index
-    selected_lora = loras[index]
-    return f"Selected LoRA: {selected_lora['title']}", selected_lora["repo"]
+    return image, seed
 
 run_lora.zerogpu = True
 
@@ -123,78 +129,70 @@ css = '''
 #title{text-align: center}
 #title h1{font-size: 3em; display:inline-flex; align-items:center}
 #title img{width: 100px; margin-right: 0.5em}
-#gallery .grid-wrap{height: auto; width: auto;}
-#gallery .gallery-item{width: 50px; height: 50px; margin: 0px;} /* Make buttons 50% height and width */
-#gallery img{width: 100%; height: 100%; object-fit: cover;} /* Resize images to fit buttons */
-#info_blob {
-    background-color: #f0f0f0;
-    border: 2px solid #ccc;
-    padding: 10px;
-    margin: 10px 0;
-    text-align: center;
-    font-size: 1.2em;
-    font-weight: bold;
-    color: #333;
-    border-radius: 8px;
-}
+#gallery .grid-wrap{height: 10vh}
 '''
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     title = gr.HTML(
-        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory on Schnell LoRas </h1>""",
+        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory </h1>""",
         elem_id="title",
     )
-
-    # Info blob stating what the app is running
+    # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob"> Activist, Futurist, and Realist LoRa-stocked Quick-Use Image Manufactory (over Flux Schnell)</div>"""
+        """<div id="info_blob"> Activist & Futurealist LoRa-stocked Img Manufactory (on Flux Dev HYPER (8-Step))</div>"""
     )
 
-    selected_lora_text = gr.Markdown("Selected LoRA: None")
-    selected_repo = gr.State(value="")
-
-    # Prompt takes the full line
-    prompt = gr.Textbox(label="Prompt", lines=5, placeholder="Type a prompt after selecting a LoRA", elem_id="full_line_prompt")
-
+    # Info blob stating what the app is running
+    info_blob = gr.HTML(
+        """<div id="info_blob">Prephrase prompts w/: 1.RCA style 2. HST style autochrome 3. HST style 4.TOK hybrid 5.2004 photo 6.HST style 7.LEN Vladimir Lenin 8.TOK portra 9.HST portrait 10.flmft 11.HST in Peterhof 12.HST Soviet kodachrome 13. SOTS art 14.HST 15.photo 16.pficonics 17.wh3r3sw4ld0 18.retrofuturism 19-24.HST style photo 25.vintage cover </div>"""
+    )
+    selected_index = gr.State(None)
     with gr.Row():
-        with gr.Column(scale=1):  # LoRA collection on the left
+        with gr.Column(scale=3):
+            prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select LoRa/Style & type prompt!")
+        with gr.Column(scale=1, elem_id="gen_column"):
+            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
+    with gr.Row():
+        with gr.Column(scale=3):
+            selected_info = gr.Markdown("")
             gallery = gr.Gallery(
                 [(item["image"], item["title"]) for item in loras],
-                label="LoRA Gallery",
+                label="LoRA Inventory",
                 allow_preview=False,
                 columns=3,
                 elem_id="gallery"
             )
-        with gr.Column(scale=1):  # Generated images on the right
-            result = gr.Gallery(label="Generated Images")
-            seed = gr.Number(label="Seed", value=0, interactive=False)
-
-    with gr.Column():
-        with gr.Row():
-            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=1)
-            steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=4)
-
-        with gr.Row():
-            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
-
-        with gr.Row():
-            randomize_seed = gr.Checkbox(True, label="Randomize seed")
-            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.95)
-            num_images = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)
+
+        with gr.Column(scale=4):
+            result = gr.Image(label="Generated Image")
+
+    with gr.Row():
+        with gr.Accordion("Advanced Settings", open=True):
+            with gr.Column():
+                with gr.Row():
+                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=6)
+
+                with gr.Row():
+                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                with gr.Row():
+                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1.5, step=0.01, value=0.9)
 
     gallery.select(
-        fn=update_selection,
-        inputs=[],
-        outputs=[selected_lora_text, selected_repo]
+        update_selection,
+        inputs=[width, height],
+        outputs=[prompt, selected_info, selected_index, width, height]
    )
 
-    generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
-    generate_button.click(
-        run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_repo, randomize_seed, seed, width, height, lora_scale, num_images],
+    gr.on(
+        triggers=[generate_button.click, prompt.submit],
+        fn=run_lora,
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
         outputs=[result, seed]
     )
 
-app.queue()
-app.launch()
+app.queue(default_concurrency_limit=2).launch(show_error=True)
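Note on data shape: the new code reads item["image"] and item["title"] when building the gallery, selected_lora["repo"], selected_lora["title"], and selected_lora["trigger_word"] in run_lora, and the optional trigger_position and aspect keys in the prompt and selection logic (the actual LoRA load call sits outside the hunks shown). Below is a minimal sketch of the entry shape loras.json appears to need, plus a load-time check; the field names come from the code above, while the values and the REQUIRED_KEYS helper are illustrative, not part of the commit.

import json

# Hypothetical loras.json entry; values are placeholders for illustration only.
example_entry = {
    "image": "thumbs/example.png",      # thumbnail shown in the gr.Gallery
    "title": "Example LoRA",            # gallery caption and prompt placeholder
    "repo": "user/example-flux-lora",   # Hub repo the LoRA weights load from
    "trigger_word": "TOK style",        # mixed into the prompt by run_lora
    "trigger_position": "prepend",      # optional: "prepend" puts it before the prompt
    "aspect": "portrait",               # optional: presets width/height to 768x1024
}

# Keys read unconditionally by app.py; a missing one would raise KeyError at runtime.
REQUIRED_KEYS = ("image", "title", "repo", "trigger_word")

with open("loras.json") as f:
    for i, entry in enumerate(json.load(f)):
        missing = [k for k in REQUIRED_KEYS if k not in entry]
        if missing:
            raise ValueError(f"loras.json entry {i} is missing {missing}")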