bsjd committed
Commit
42dfc4e
1 parent: 95ebb11

Create app.py

Files changed (1)
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
+ import os
+ import subprocess
+ import sys
+ import gradio as gr
+ import torch
+ import random
+ import numpy as np
+ from PIL import Image
+
+ # Setup: clone the TotoroUI fork of ComfyUI and work from it
+ os.chdir('/content')
+ subprocess.run(['git', 'clone', '-b', 'totoro2',
+                 'https://github.com/camenduru/ComfyUI', '/content/TotoroUI'], check=True)
+ os.chdir('/content/TotoroUI')
+
+ # Write requirements.txt with the pinned dependencies
+ requirements_content = """torch
+ torchsde
+ einops
+ diffusers
+ accelerate
+ xformers==0.0.26.post1
+ gradio"""
+
+ with open("requirements.txt", "w") as f:
+     f.write(requirements_content)
+
+ # Install dependencies from requirements.txt
+ subprocess.run([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'], check=True)
+
+ # Install aria2 for fast, segmented downloads
+ subprocess.run(['apt', '-y', 'install', '-qq', 'aria2'], check=True)
+
+ # Download the SD3 medium checkpoint (CLIP and T5-XXL fp8 text encoders included)
+ subprocess.run(['aria2c', '--console-log-level=error', '-c', '-x', '16', '-s', '16', '-k', '1M',
+                 'https://huggingface.co/adamo1139/stable-diffusion-3-medium-ungated/resolve/main/sd3_medium_incl_clips_t5xxlfp8.safetensors',
+                 '-d', '/content/TotoroUI/model',
+                 '-o', 'sd3_medium_incl_clips_t5xxlfp8.safetensors'], check=True)
+
+ # Add TotoroUI to sys.path
+ sys.path.append('/content/TotoroUI')
+
+ # These imports must come after the sys.path change
+ import node_helpers
+ from totoro.sd import load_checkpoint_guess_config
+ import nodes
+
+ # Check for CUDA availability
+ use_cuda = torch.cuda.is_available()
+
+ # Load the checkpoint; returns the model patcher, CLIP, VAE, and CLIP-vision model
+ model_patcher, clip, vae, clipvision = load_checkpoint_guess_config(
+     "/content/TotoroUI/model/sd3_medium_incl_clips_t5xxlfp8.safetensors",
+     output_vae=True, output_clip=True, embedding_directory=None
+ )
+
+ def zero_out(conditioning):
+     # Zero the conditioning tensor and its pooled output, mirroring
+     # ComfyUI's ConditioningZeroOut node used in SD3 workflows
+     c = []
+     for t in conditioning:
+         d = t[1].copy()
+         if "pooled_output" in d:
+             d["pooled_output"] = torch.zeros_like(d["pooled_output"])
+         n = [torch.zeros_like(t[0]), d]
+         c.append(n)
+     return (c, )
+
+ def generate_image(prompt, negative_prompt, steps):
+     with torch.inference_mode():
+         # SD3 empty latent: 16 channels at 1/8 of the 1024x1024 target resolution
+         latent = {"samples": torch.ones([1, 16, 1024 // 8, 1024 // 8]) * 0.0609}
+
+         # Encode the positive prompt
+         cond, pooled = clip.encode_from_tokens(clip.tokenize(prompt), return_pooled=True)
+         cond = [[cond, {"pooled_output": pooled}]]
+
+         # Encode the negative prompt
+         n_cond, n_pooled = clip.encode_from_tokens(clip.tokenize(negative_prompt), return_pooled=True)
+         n_cond = [[n_cond, {"pooled_output": n_pooled}]]
+
+         # Apply the real negative conditioning only for the first 10% of sampling,
+         # then switch to a zeroed-out copy for the remaining steps
+         n_cond1 = node_helpers.conditioning_set_values(n_cond, {"start_percent": 0, "end_percent": 0.1})
+         n_cond2 = zero_out(n_cond)
+         n_cond2 = node_helpers.conditioning_set_values(n_cond2[0], {"start_percent": 0.1, "end_percent": 1.0})
+         n_cond = n_cond1 + n_cond2
+
+         # Random 64-bit seed
+         seed = random.randint(0, 18446744073709551615)
+
+         sample = nodes.common_ksampler(
+             model=model_patcher,
+             seed=seed,
+             steps=steps,
+             cfg=4.5,
+             sampler_name="dpmpp_2m",
+             scheduler="sgm_uniform",
+             positive=cond,
+             negative=n_cond,
+             latent=latent,
+             denoise=1
+         )
+
+         sample = sample[0]["samples"].to(torch.float16)
+
+         # Decode with the VAE, moving it to the GPU when available
+         if use_cuda:
+             vae.first_stage_model.cuda()
+         decoded = vae.decode_tiled(sample).detach().cpu()
+
+         # Scale to 8-bit and return the first image in the batch as a PIL image
+         return Image.fromarray(np.array(decoded * 255, dtype=np.uint8)[0])
+
+ # Gradio interface
+ interface = gr.Interface(
+     fn=generate_image,
+     inputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Textbox(label="Negative Prompt"),
+         gr.Slider(label="Steps", minimum=1, maximum=200, step=1, value=28)
+     ],
+     outputs=gr.Image(label="Generated Image")
+ )
+
+ interface.launch()
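
A quick way to sanity-check the pipeline without the UI is to call generate_image directly; a minimal sketch (the prompt strings and output path are placeholders, not part of the commit):

    # Hypothetical smoke test: generate_image returns a PIL image
    img = generate_image("a red fox in fresh snow, golden hour", "blurry, low quality", 28)
    img.save("/content/smoke_test.png")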
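
Because the script assumes a Colab-style runtime (hard-coded /content paths), the default launch() address is only reachable from inside that VM. Under that assumption, one option is to swap the final line for a public Gradio link:

    # Colab-friendly variant: prints a public *.gradio.live URL
    interface.launch(share=True)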