sub314xxl Manjushri committed on
Commit
b8daa73
0 Parent(s):

Duplicate from Manjushri/SDXL-1.0


Co-authored-by: Manjushri Bodhisattva <Manjushri@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +60 -0
  4. requirements.txt +8 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: SDXL-1.0
+ emoji: ⚡
+ colorFrom: green
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: Manjushri/SDXL-1.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,60 @@
+ import gradio as gr
+ import torch
+ import numpy as np
+ import modin.pandas as pd
+ from PIL import Image
+ from diffusers import DiffusionPipeline
+ from huggingface_hub import login
+ import os
+
+ login(token=os.environ.get('HF_KEY'))
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ torch.cuda.max_memory_allocated(device='cuda')
+ torch.cuda.empty_cache()
+
+ def genie (prompt, negative_prompt, height, width, scale, steps, seed, upscaler):
+     torch.cuda.max_memory_allocated(device='cuda')
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+     pipe = pipe.to(device)
+     pipe.enable_xformers_memory_efficient_attention()
+     torch.cuda.empty_cache()
+     generator = torch.Generator(device=device).manual_seed(seed)
+     int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
+     torch.cuda.empty_cache()
+     if upscaler == 'Yes':
+         torch.cuda.max_memory_allocated(device='cuda')
+         pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+         pipe = pipe.to(device)
+         pipe.enable_xformers_memory_efficient_attention()
+         image = pipe(prompt=prompt, image=int_image).images[0]
+         torch.cuda.empty_cache()
+         torch.cuda.max_memory_allocated(device='cuda')
+         pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+         pipe.to("cuda")
+         pipe.enable_xformers_memory_efficient_attention()
+         upscaled = pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
+         torch.cuda.empty_cache()
+         return (image, upscaled)
+     else:
+         torch.cuda.empty_cache()
+         torch.cuda.max_memory_allocated(device=device)
+         pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+         pipe = pipe.to(device)
+         pipe.enable_xformers_memory_efficient_attention()
+         image = pipe(prompt=prompt, negative_prompt=negative_prompt, image=int_image).images[0]
+         torch.cuda.empty_cache()
+         return (image, image)
+
+ gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit. A Token is Any Word, Number, Symbol, or Punctuation. Everything Over 77 Will Be Truncated!'),
+     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
+     gr.Slider(512, 1024, 768, step=128, label='Height'),
+     gr.Slider(512, 1024, 768, step=128, label='Width'),
+     gr.Slider(1, 15, 10, step=.25, label='Guidance Scale: How Closely the AI follows the Prompt'),
+     gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
+     gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True, label='Seed'),
+     gr.Radio(['Yes', 'No'], label='Upscale?')],
+     outputs=['image', 'image'],
+     title="Stable Diffusion XL 1.0 GPU",
+     description="SDXL 1.0 GPU. <br><br><b>WARNING: Capable of producing NSFW (Softcore) images.</b>",
+     article = "Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)
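
The heart of app.py is a two-stage SDXL pipeline: the base model renders the prompt to latents (output_type="latent"), the refiner decodes those latents into the final image, and when the Upscale radio is set to 'Yes' an extra sd-x2-latent-upscaler pass doubles the resolution of the refined image. A minimal standalone sketch of the base-plus-refiner path, with the Gradio UI and CUDA memory bookkeeping stripped out, might look like the following (it assumes a CUDA GPU with enough VRAM for fp16 SDXL; the prompt and output filename are illustrative only):

# Minimal sketch of the base -> refiner flow used by genie() above.
# Assumptions: CUDA GPU, diffusers + torch installed; prompt/filename are examples.
import torch
from diffusers import DiffusionPipeline

base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to("cuda")

# Ask the base model for latents instead of a decoded image.
latents = base(
    prompt="a lighthouse on a cliff at dawn",
    negative_prompt="blurry, low quality",
    num_inference_steps=50, guidance_scale=10,
    height=768, width=768, output_type="latent",
).images

refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to("cuda")

# The refiner consumes the base latents as its img2img input and decodes the result.
image = refiner(prompt="a lighthouse on a cliff at dawn", image=latents).images[0]
image.save("sdxl.png")

The 'Yes' branch of genie() does the same thing and then feeds the refined image, together with the negative prompt, through stabilityai/sd-x2-latent-upscaler for 5 steps at guidance_scale=0, returning both the refined and the upscaled image to the two image outputs.
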
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ transformers
+ diffusers
+ accelerate
+ torch
+ ftfy
+ modin[all]
+ xformers
+ invisible_watermark
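
These are the packages app.py and its diffusers pipelines need at runtime. A plausible way to run the Space locally (an assumption; the commit itself only ships the files) would be to install them with pip install -r requirements.txt, set the HF_KEY environment variable that login() in app.py reads, and start python app.py; the gr.Interface(...).launch(debug=True, max_threads=80) call then serves the UI. The fp16 model variants and xformers attention used above effectively require a CUDA GPU.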