fixing design block
- S2I/commons/controller.py +2 -3
- S2I/modules/models.py +4 -5
- S2I/modules/sketch2image.py +4 -4
S2I/commons/controller.py

@@ -56,8 +56,7 @@ class Sketch2ImageController():
 
     def artwork(self, options, image, prompt, prompt_template, style_name, seed, val_r, faster, model_name, type_flag, prompt_quality):
         self.load_pipeline(zero_options=options)
-
-        prompt_enhanced = prompt_template.replace("{prompt}", prompt_enhanced)
+        prompt = prompt_template.replace("{prompt}", prompt)
 
         # if type_flag == 'live-sketch':
         #     img = Image.fromarray(np.array(image["composite"])[:, :, -1])
@@ -80,7 +79,7 @@ class Sketch2ImageController():
         noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device)
 
         with torch.no_grad():
-            output_image = self.pipe.generate(c_t,
+            output_image = self.pipe.generate(c_t, prompt, prompt_quality, r=val_r, noise_map=noise, half_model=faster, model_name=model_name)
 
         output_pil = F.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
 
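The first hunk fixes a NameError: the old controller filled the style template with `prompt_enhanced`, a name that was never assigned in `artwork`. After the change, the controller templates the raw `prompt` and leaves enhancement to the pipeline, forwarding `prompt_quality` through `pipe.generate`. A minimal runnable sketch of that flow, where `StubPipe`, the placeholder objects, and this reduced `artwork()` are illustrative stand-ins rather than the real classes:

# Minimal sketch of the fixed controller flow; StubPipe stands in for the
# real Sketch2ImagePipeline, and object() for the torch tensors.
class StubPipe:
    def generate(self, c_t, prompt, prompt_quality, r, noise_map, half_model, model_name):
        # The real pipeline enhances `prompt` internally based on `prompt_quality`.
        return f"<image for {prompt!r} ({prompt_quality})>"

def artwork(pipe, prompt, prompt_template, prompt_quality, val_r, faster, model_name):
    # Old code read prompt_template.replace("{prompt}", prompt_enhanced),
    # referencing prompt_enhanced before assignment -- a NameError at call time.
    prompt = prompt_template.replace("{prompt}", prompt)
    c_t, noise = object(), object()  # stand-ins for the conditioning tensor and torch.randn(...)
    return pipe.generate(c_t, prompt, prompt_quality,
                         r=val_r, noise_map=noise, half_model=faster, model_name=model_name)

print(artwork(StubPipe(), "a fox", "a photo of {prompt}", "short-sentences", 0.4, "float16", "sd"))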
S2I/modules/models.py

@@ -64,12 +64,11 @@ class PrimaryModel:
         sd = torch.load(p_ckpt, map_location="cpu")
         return sd
     def from_pretrained(self, model_name, r):
-        if
-
-        self.global_medium_prompt = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance", device='cuda')
+        if self.global_medium_prompt is None:
+            self.global_medium_prompt = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance", device='cuda')
 
-
-
+        if self.global_long_prompt is None:
+            self.global_long_prompt = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance-Long", device='cuda')
 
         if self.global_tokenizer is None:
             self.global_tokenizer = AutoTokenizer.from_pretrained("myn0908/stable-diffusion-3", subfolder="tokenizer_2")
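The `is None` guards make `from_pretrained` idempotent: each Lamini enhancement pipeline is created once, on first use, and reused on every later call instead of being re-instantiated. The same memoization pattern sketched without a GPU or model downloads, where `load_summarizer` is a hypothetical stand-in for `transformers.pipeline("summarization", model=..., device='cuda')`:

# Memoization pattern implemented by the new guards; load_summarizer is a
# fake stand-in for the Hugging Face summarization pipeline constructor.
def load_summarizer(model_id):
    print(f"loading {model_id}")
    return lambda text: [{"summary_text": text}]

class PrimaryModel:
    def __init__(self):
        self.global_medium_prompt = None
        self.global_long_prompt = None

    def from_pretrained(self):
        if self.global_medium_prompt is None:  # load on first call only
            self.global_medium_prompt = load_summarizer("gokaygokay/Lamini-Prompt-Enchance")
        if self.global_long_prompt is None:
            self.global_long_prompt = load_summarizer("gokaygokay/Lamini-Prompt-Enchance-Long")

m = PrimaryModel()
m.from_pretrained()  # prints two "loading ..." lines
m.from_pretrained()  # prints nothing: both attributes are already cached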
S2I/modules/sketch2image.py

@@ -13,14 +13,15 @@ class Sketch2ImagePipeline(PrimaryModel):
         super().__init__()
         self.timestep = torch.tensor([999], device="cuda").long()
 
-    def generate(self, c_t, prompt=None, prompt_tokens=None, r=1.0, noise_map=None, half_model=None, model_name=None):
+    def generate(self, c_t, prompt=None, prompt_quality=None, prompt_tokens=None, r=1.0, noise_map=None, half_model=None, model_name=None):
         self.from_pretrained(model_name=model_name, r=r)
+        prompt_enhanced = self.automatic_enhance_prompt(prompt, prompt_quality)
         assert (prompt is None) != (prompt_tokens is None), "Either prompt or prompt_tokens should be provided"
 
         if half_model == 'float16':
-            output_image = self._generate_fp16(c_t,
+            output_image = self._generate_fp16(c_t, prompt_enhanced, prompt_tokens, r, noise_map)
         else:
-            output_image = self._generate_full_precision(c_t,
+            output_image = self._generate_full_precision(c_t, prompt_enhanced, prompt_tokens, r, noise_map)
 
         return output_image
 
@@ -74,7 +75,6 @@ class Sketch2ImagePipeline(PrimaryModel):
         set_weights_and_activate_adapters(self.global_vae, ["vae_skip"], [r])
 
     def automatic_enhance_prompt(self, input_prompt, model_choice):
-        self.from_pretrained(model_name=None, r=None)
         if model_choice == "short-sentences":
            result = self.global_medium_prompt("Enhance the description: " + input_prompt)
            enhanced_text = result[0]['summary_text']