alfredplpl committed
Commit e75a87d · verified · 1 Parent(s): db13200

Update app.py

Files changed (1):
  1. app.py +16 -4
app.py CHANGED

@@ -1,13 +1,25 @@
 import gradio as gr
 import numpy as np
 import random
-from diffusers import DiffusionPipeline
+from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 import torch
 import spaces
 
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
+
 device = "cuda"
 
-pipe = DiffusionPipeline.from_pretrained("aipicasso/emi-2",torch_dtype=torch.bfloat16)
+token=os.environ["TOKEN"]
+
+scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id,subfolder="scheduler",token=token)
+pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.bfloat16,token=token)
+
+negative_ti_file = hf_hub_download(repo_id="Aikimi/unaestheticXL_Negative_TI", filename="unaestheticXLv31.safetensors")
+state_dict = load_file(negative_ti_file)
+pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
+pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
+
 pipe = pipe.to(device)
 
 
@@ -17,7 +29,7 @@ MAX_IMAGE_SIZE = 2048
 @spaces.GPU
 def infer(seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     prompt="1girl"
-    negative_prompt="bad hands"
+    negative_prompt="unaestheticXLv31"
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
@@ -55,7 +67,7 @@ with gr.Blocks(css=css) as demo:
             run_button = gr.Button("Run", scale=0)
 
         result = gr.Image(label="Result", show_label=False)
-        generated_prompt = gr.TextArea(show_label=False)
+        generated_prompt = gr.TextArea(label="Generated prompt", show_label=False)
         with gr.Accordion("Advanced Settings", open=False):
 
             seed = gr.Slider(
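
For reference, here is a minimal, self-contained sketch of the new loading path assembled from the hunks above. The diff shows neither an import of os nor a definition of model_id, so both are filled in as assumptions here; the model_id value is a placeholder, not something the commit confirms.

# Hedged sketch of the setup introduced by this commit, not the Space's exact file.
import os

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"
token = os.environ["TOKEN"]            # token read from the Space's environment, passed to from_pretrained
model_id = "your-org/your-sdxl-model"  # assumption: placeholder; the diff leaves model_id undefined

# Euler Ancestral scheduler taken from the model repo's scheduler subfolder,
# then an SDXL pipeline built around it in bfloat16.
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    model_id, subfolder="scheduler", token=token
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.bfloat16, token=token
)

# SDXL has two text encoders, so the negative textual-inversion embedding is
# registered once per encoder under the same trigger token.
negative_ti_file = hf_hub_download(
    repo_id="Aikimi/unaestheticXL_Negative_TI",
    filename="unaestheticXLv31.safetensors",
)
state_dict = load_file(negative_ti_file)
pipe.load_textual_inversion(
    state_dict["clip_g"], token="unaestheticXLv31",
    text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2,
)
pipe.load_textual_inversion(
    state_dict["clip_l"], token="unaestheticXLv31",
    text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer,
)

pipe = pipe.to(device)

With the embedding registered, the hard-coded negative_prompt inside infer() changes from "bad hands" to the trigger token "unaestheticXLv31", so the learned negative embedding is applied through both SDXL text encoders at generation time.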