Saif-M fffiloni committed on
Commit
48c1a0f
0 Parent(s):

Duplicate from gradio-templates/text-to-image-gradio-template

Browse files

Co-authored-by: Sylvain Filoni <fffiloni@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +12 -0
  3. app.py +146 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Text-to-Image Gradio Template
3
+ emoji: 🖼
4
+ colorFrom: purple
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 4.26.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch

# Run on GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # fp16 weights + xformers attention keep GPU memory use low.
    # NOTE(review): removed the template's `torch.cuda.max_memory_allocated(device)`
    # call — it only *queries* the peak-allocation statistic and discards the
    # result; it does not limit, reserve, or configure anything.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
else:
    # CPU fallback: default (full) precision — fp16 is poorly supported on CPU.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)

# Move the pipeline once, after either branch (was duplicated in both branches).
pipe = pipe.to(device)

# Largest seed the UI slider / RNG will produce.
MAX_SEED = np.iinfo(np.int32).max
# Upper bound (pixels) for the width/height sliders.
MAX_IMAGE_SIZE = 1024
20
+
21
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    """Generate one image from the prompt and return it as a PIL image.

    When `randomize_seed` is true the given `seed` is replaced by a random
    one in [0, MAX_SEED]; the generator is seeded so results are reproducible
    for a fixed seed.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    rng = torch.Generator().manual_seed(seed)

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=rng,
    )
    return output.images[0]
39
+
40
+ examples = [
41
+ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
42
+ "An astronaut riding a green horse",
43
+ "A delicious ceviche cheesecake slice",
44
+ ]
45
+
46
+ css="""
47
+ #col-container {
48
+ margin: 0 auto;
49
+ max-width: 520px;
50
+ }
51
+ """
52
+
53
+ if torch.cuda.is_available():
54
+ power_device = "GPU"
55
+ else:
56
+ power_device = "CPU"
57
+
58
# Build the UI. Component creation order defines the page layout; the
# `demo` name is what Gradio/Spaces pick up as the app entry point.
with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):
        # Header: title plus the device currently in use.
        gr.Markdown(f"""
        # Text-to-Image Gradio Template
        Currently running on {power_device}.
        """)

        # Prompt entry row: free-text prompt next to the trigger button.
        with gr.Row():
            prompt_input = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            generate_btn = gr.Button("Run", scale=0)

        output_image = gr.Image(label="Result", show_label=False)

        # Generation parameters, collapsed by default.
        with gr.Accordion("Advanced Settings", open=False):
            # Hidden by default; kept so the click wiring has a stable slot.
            negative_prompt_input = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed_slider = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed_box = gr.Checkbox(label="Randomize seed", value=True)

            # Output resolution, stepped by 32 px as the model expects.
            with gr.Row():
                width_slider = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height_slider = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )

            # Sampler settings (sdxl-turbo defaults: guidance 0.0, few steps).
            with gr.Row():
                guidance_slider = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,
                )
                steps_slider = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=12,
                    step=1,
                    value=2,
                )

        # Clicking an example copies it into the prompt box.
        gr.Examples(examples=examples, inputs=[prompt_input])

    # Wire the button to the generation function; input order must match
    # infer()'s parameter order.
    generate_btn.click(
        fn=infer,
        inputs=[
            prompt_input,
            negative_prompt_input,
            seed_slider,
            randomize_seed_box,
            width_slider,
            height_slider,
            guidance_slider,
            steps_slider,
        ],
        outputs=[output_image],
    )

demo.queue().launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ accelerate
2
+ diffusers
3
+ invisible_watermark
4
+ torch
5
+ transformers
6
+ xformers