DeadfoxX committed
Commit c66b58c
1 Parent(s): 9c673eb

Update app.py

Files changed (1)
  1. app.py +26 -145
app.py CHANGED
@@ -1,154 +1,35 @@
- from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
- import gradio as gr
- import torch
- from PIL import Image
-
- model_id = 'Souleater_Diffusion/souleater-diffusion.ckpt'
- prefix = ''
-
- scheduler = DPMSolverMultistepScheduler(
-     beta_start=0.00085,
-     beta_end=0.012,
-     beta_schedule="scaled_linear",
-     num_train_timesteps=1000,
-     trained_betas=None,
-     predict_epsilon=True,
-     thresholding=True,
-     algorithm_type="dpmsolver++",
-     solver_type="midpoint",
-     lower_order_final=True,
- )
-
- pipe = StableDiffusionPipeline.from_pretrained(
-     model_id,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     scheduler=scheduler)
-
- pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
-     model_id,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     scheduler=scheduler)
-
- if torch.cuda.is_available():
-     pipe = pipe.to("cuda")
-     pipe_i2i = pipe_i2i.to("cuda")
-
- def error_str(error, title="Error"):
-     return f"""#### {title}
-             {error}""" if error else ""
-
- def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=True):
-     generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-     prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
-     try:
-         if img is not None:
-             return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
-         else:
-             return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
-     except Exception as e:
-         return None, error_str(e)
-
- def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-     result = pipe(
-         prompt,
-         negative_prompt = neg_prompt,
-         num_inference_steps = int(steps),
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator)
-
-     return replace_nsfw_images(result)
-
- def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-     ratio = min(height / img.height, width / img.width)
-     img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
-     result = pipe_i2i(
-         prompt,
-         negative_prompt = neg_prompt,
-         init_image = img,
-         num_inference_steps = int(steps),
-         strength = strength,
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator)
-
-     return replace_nsfw_images(result)
-
- def replace_nsfw_images(results):
-     for i in range(len(results.images)):
-         if results.nsfw_content_detected[i]:
-             results.images[i] = Image.open("nsfw.png")
-     return results.images[0]
-
- css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
- """
- with gr.Blocks(css=css) as demo:
-     gr.HTML(
-         f"""
-         <div class="main-div">
-             <div>
-                 <h1>Stable Diffusion 2</h1>
-             </div>
-             <p>
-                 Demo for <a href="https://huggingface.co/stabilityai/stable-diffusion-2">Stable Diffusion 2</a> Stable Diffusion model.<br>
-                 Add the following tokens to your prompts for the model to work properly: <b></b>.
-             </p>
-             Running on <b>{"GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"}</b>
-         </div>
-         """
-     )
-     with gr.Row():
-
-         with gr.Column(scale=55):
-             with gr.Group():
-                 with gr.Row():
-                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder=f"{prefix} [your prompt]").style(container=False)
-                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
-                 image_out = gr.Image(height=512)
-             error_output = gr.Markdown()
-
-         with gr.Column(scale=45):
-             with gr.Tab("Options"):
-                 with gr.Group():
-                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-                     auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=True)
-
-                     with gr.Row():
-                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-                         steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
-
-                     with gr.Row():
-                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
-                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
-                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-             with gr.Tab("Image to image"):
-                 with gr.Group():
-                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-     auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
-
-     inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
-     outputs = [image_out, error_output]
-     prompt.submit(inference, inputs=inputs, outputs=outputs)
-     generate.click(inference, inputs=inputs, outputs=outputs)
-
-     gr.HTML("""
-     <div style="border-top: 1px solid #303030;">
-         <br>
-         <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
-     </div>
-     """)
-
- demo.queue(concurrency_count=1)
- demo.launch()
+ import tensorflow as tf
+ import transformers
+
+ # Load the model and its tokenizer
+ model = transformers.TFGPT2LMHeadModel.from_pretrained("souleater-diffusion.ckpt")
+ tokenizer = transformers.GPT2Tokenizer.from_pretrained("souleater-diffusion.ckpt")
+
+ # Generate output from the model for a given prompt
+ def generate_picture(prompt):
+     # Encode the prompt into token ids with a batch dimension
+     input_ids = tokenizer.encode(prompt, return_tensors="tf")
+     output = model.generate(input_ids)
+     # Decode the generated token ids back into text
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     return generated_text
+
+ # GUI to enter the prompts
+ from tkinter import *
+
+ root = Tk()
+ root.title("souleater-diffusion.ckpt Model")
+
+ prompt_entry = Entry(root)
+ prompt_entry.pack()
+
+ def generate_callback():
+     prompt = prompt_entry.get()
+     result = generate_picture(prompt)
+     result_label.config(text=result)
+
+ generate_button = Button(root, text="Generate", command=generate_callback)
+ generate_button.pack()
+
+ result_label = Label(root, text="")
+ result_label.pack()
+
+ root.mainloop()