KingNish committed (verified)
Commit 5079df0 · 1 Parent(s): 4a8a4c2

Update app.py

Files changed (1):
  1. app.py +30 -35
app.py CHANGED
@@ -6,32 +6,24 @@ import gradio as gr
 import numpy as np
 import torch
 from PIL import Image
-from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
+from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, DiffusionPipeline
 from custom_pipeline import CosStableDiffusionXLInstructPix2PixPipeline
 from huggingface_hub import hf_hub_download
 from huggingface_hub import InferenceClient
+from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
 repo = "fluently/Fluently-XL-Final"
-
-pipe_best = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
-pipe_best.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
-pipe_best.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
-pipe_best.set_adapters(["lora","dalle"], adapter_weights=[1.5, 0.5])
-pipe_best.to("cuda")
-
-pipe_3D = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
-pipe_3D.load_lora_weights("artificialguybr/3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="3D")
-pipe_3D.set_adapters(["3D"])
-pipe_3D.to("cuda")
-
-pipe_logo = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
-pipe_logo.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL", weight_name="LogoRedmond_LogoRedAF.safetensors", adapter_name="logo")
-pipe_logo.set_adapters(["logo"])
-pipe_logo.to("cuda")
+pipe = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
+pipe.set_adapters("lora")
+pipe.to("cuda")
+
+refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+refiner.to("cuda")
 
 help_text = """
 To optimize image results:
@@ -66,7 +58,7 @@ pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_
 pipe_edit.to("cuda")
 
 # Generator
-@spaces.GPU(duration=45, queue=False)
+@spaces.GPU(duration=30, queue=False)
 def king(type ,
         input_image ,
         instruction: str ,
@@ -77,7 +69,7 @@ def king(type ,
         image_cfg_scale: float = 1.7,
         width: int = 1024,
         height: int = 1024,
-        style="BEST",
+        guidance_scale: float = 6,
        use_resolution_binning: bool = True,
        progress=gr.Progress(track_tqdm=True),
        ):
@@ -99,14 +91,23 @@ def king(type ,
     if randomize_seed:
         seed = random.randint(0, 99999)
     generator = torch.Generator().manual_seed(seed)
-    if style=="3D":
-        instruction = f"3DRenderAF, 3D Render, {instruction}"
-        image = pipe_3D( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
-    elif style=="Logo":
-        instruction = f"LogoRedAF, {instruction}"
-        image = pipe_logo( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
-    else:
-        image = pipe_best( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    image = pipe(
+        prompt = instruction,
+        guidance_scale = 6,
+        num_inference_steps = steps,
+        width = width,
+        height = height,
+        generator = generator,
+        output_type="latent",
+    ).images
+
+    image = refiner(
+        prompt=instruction,
+        guidance_scale=6,
+        num_inference_steps=25,
+        image=image,
+        generator=generator,
+    ).images[0]
     return seed, image
 
 client = InferenceClient()
@@ -176,15 +177,10 @@ with gr.Blocks(css=css) as demo:
         type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True, info="AI will select option based on your query, but if it selects wrong, please choose correct one.")
         with gr.Column(scale=1):
             generate_button = gr.Button("Generate")
-        with gr.Row():
-            style = gr.Radio(choices=["BEST","3D","Logo"],label="Style", value="BEST", interactive=True)
+
         with gr.Row():
             input_image = gr.Image(label="Image", type="pil", interactive=True)
 
-        with gr.Row():
-            width = gr.Number(value=1024, step=16,label="Width", interactive=True)
-            height = gr.Number(value=1024, step=16,label="Height", interactive=True)
-
         with gr.Row():
             text_cfg_scale = gr.Number(value=7.3, step=0.1, label="Text CFG", interactive=True)
             image_cfg_scale = gr.Number(value=1.7, step=0.1,label="Image CFG", interactive=True)
@@ -207,7 +203,9 @@ with gr.Blocks(css=css) as demo:
     )
 
     gr.Markdown(help_text)
+
    instruction.change(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
+
    input_image.upload(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
 
    gr.on(triggers=[
@@ -223,9 +221,6 @@ with gr.Blocks(css=css) as demo:
            seed,
            text_cfg_scale,
            image_cfg_scale,
-            width,
-            height,
-            style
        ],
        outputs=[seed, input_image],
    )
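
In short, this commit drops the three style-specific SDXL pipelines (pipe_best, pipe_3D, pipe_logo) in favor of a single LoRA-equipped base pipeline plus the SDXL refiner: the base run stops at latents (output_type="latent") and the refiner turns those latents into the final image. The sketch below isolates that two-stage handoff. It is not part of the commit; the model IDs, guidance scale, and refiner step count mirror the diff, while the prompt, seed, base step count, and output filename are illustrative only.

# Minimal sketch of the base-to-refiner handoff used above (assumes a diffusers version with SDXL and PEFT LoRA support).
import torch
from diffusers import StableDiffusionXLPipeline, DiffusionPipeline, AutoencoderKL

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# Base text-to-image pipeline with the same checkpoint and LoRA as app.py.
base = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae)
base.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
base.set_adapters("lora")
base.to("cuda")

# SDXL refiner, loaded the same way as in the diff.
refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
refiner.to("cuda")

generator = torch.Generator().manual_seed(42)  # illustrative seed

# Stage 1: produce latents instead of a decoded image.
latents = base(
    prompt="a lighthouse at sunset",  # illustrative prompt
    guidance_scale=6,
    num_inference_steps=25,  # illustrative; the app takes this from the UI
    width=1024,
    height=1024,
    generator=generator,
    output_type="latent",
).images

# Stage 2: the refiner consumes the latents via image= and returns a PIL image.
refined = refiner(
    prompt="a lighthouse at sunset",
    guidance_scale=6,
    num_inference_steps=25,
    image=latents,
    generator=generator,
).images[0]
refined.save("refined.png")  # illustrative filename

Handing latents straight to the refiner is why the base call sets output_type="latent": it skips a VAE decode/re-encode round trip between the two stages.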