aifeifei798 committed
Commit b4dc422
• 1 Parent(s): 4d8a9dd

Update app.py

Files changed (1)
  1. app.py +67 -54
app.py CHANGED
@@ -9,8 +9,8 @@ import torch
 from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
 from huggingface_hub import hf_hub_download
 from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
-from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 from openai import OpenAI
+import config
 
 # Get the API key from the environment variables
 api_key = os.getenv("MISTRAL_API_KEY")
@@ -57,12 +57,18 @@ css="""
 }
 """
 @spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True), guidance_scale=3.5):
+def infer(prompt, quality_select, styles_Radio, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True), guidance_scale=3.5):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
-
+    if quality_select:
+        prompt += ", masterpiece, best quality, very aesthetic, absurdres"
+    if styles_Radio:
+        for style_name in styles_Radio:
+            for style in config.style_list:
+                if style["name"] == style_name:
+                    prompt += style["prompt"].replace("{prompt}", "")
     image = pipe(
         prompt = prompt,
         width = width,
@@ -155,62 +161,69 @@ def predict(message, history, additional_dropdown):
 with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column(scale=1):
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                placeholder="Enter your prompt",
-                container=False
-            )
-            run_button = gr.Button("Run")
-            result = gr.Image(label="Result", show_label=False, interactive=False)
-
-            with gr.Accordion("Advanced Settings", open=False):
-
-                seed = gr.Slider(
-                    label="Seed",
-                    minimum=0,
-                    maximum=MAX_SEED,
-                    step=1,
-                    value=0,
-                )
-
-                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-                with gr.Row():
-
-                    width = gr.Slider(
-                        label="Width",
-                        minimum=256,
-                        maximum=MAX_IMAGE_SIZE,
-                        step=32,
-                        value=1024,
-                    )
-
-                    height = gr.Slider(
-                        label="Height",
-                        minimum=256,
-                        maximum=MAX_IMAGE_SIZE,
-                        step=32,
-                        value=1024,
-                    )
-
-                with gr.Row():
-
-                    num_inference_steps = gr.Slider(
-                        label="Number of inference steps",
-                        minimum=1,
-                        maximum=50,
-                        step=1,
-                        value=4,
-                    )
-                    guidancescale = gr.Slider(
-                        label="Guidance scale",
-                        minimum=0,
-                        maximum=10,
-                        step=0.1,
-                        value=3.5,
-                    )
+            with gr.Tab("Generator"):
+                prompt = gr.Text(
+                    label="Prompt",
+                    show_label=False,
+                    placeholder="Enter your prompt",
+                    max_lines = 12,
+                    container=False
+                )
+                run_button = gr.Button("Run")
+                result = gr.Image(label="Result", show_label=False, interactive=False)
+
+                with gr.Accordion("Advanced Settings", open=False):
+
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=0,
+                    )
+
+                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                    with gr.Row():
+
+                        width = gr.Slider(
+                            label="Width",
+                            minimum=256,
+                            maximum=MAX_IMAGE_SIZE,
+                            step=32,
+                            value=1024,
+                        )
+
+                        height = gr.Slider(
+                            label="Height",
+                            minimum=256,
+                            maximum=MAX_IMAGE_SIZE,
+                            step=32,
+                            value=1024,
+                        )
+
+                    with gr.Row():
+
+                        num_inference_steps = gr.Slider(
+                            label="Number of inference steps",
+                            minimum=1,
+                            maximum=50,
+                            step=1,
+                            value=4,
+                        )
+                        guidancescale = gr.Slider(
+                            label="Guidance scale",
+                            minimum=0,
+                            maximum=10,
+                            step=0.1,
+                            value=3.5,
+                        )
+            with gr.Tab("Styles"):
+                quality_select = gr.Checkbox(label="high quality")
+                styles_name = [style["name"] for style in config.style_list]
+                styles_Radio = gr.Dropdown(styles_name,label="Styles",multiselect=True)
+
         with gr.Column(scale=3,elem_id="col-container"):
             gr.ChatInterface(
                 predict,
@@ -231,7 +244,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps, guidancescale],
+        inputs = [prompt, quality_select, styles_Radio, seed, randomize_seed, width, height, num_inference_steps, guidancescale],
         outputs = [result, seed]
     )
 if __name__ == "__main__":
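
The new styles logic depends on a local `config` module that the commit starts importing but does not show. The diff only implies its shape: each entry of `config.style_list` is read via `style["name"]` (shown in the Styles dropdown) and `style["prompt"]` (a template whose `{prompt}` placeholder is stripped before the remainder is appended to the user prompt). A minimal sketch of such a module is below; the style names and template text are invented for illustration, and the repository's actual config.py may differ.

```python
# config.py -- hypothetical sketch, not the repository's actual file.
# app.py only relies on two keys per entry: "name" (listed in the
# Styles dropdown) and "prompt" (a template containing "{prompt}").
style_list = [
    {
        "name": "Anime",
        "prompt": "{prompt}, anime style, clean line art, vibrant colors",
    },
    {
        "name": "Photographic",
        "prompt": "{prompt}, cinematic photo, 35mm film, shallow depth of field",
    },
]
```

With entries like these, selecting "Anime" makes `infer` append `style["prompt"].replace("{prompt}", "")` to the user text, so a prompt of "a cat" becomes "a cat, anime style, clean line art, vibrant colors" (with the quality tags inserted first when "high quality" is checked).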