Spaces: Running on Zero
CiaraRowles
committed on
Update controlnet/callable_functions.py
controlnet/callable_functions.py
CHANGED
@@ -10,7 +10,7 @@ from transformers import AutoProcessor, SiglipVisionModel
 
 
 
-def process_single_image(image_path, image=None):
+def process_single_image(model,image_path, image=None):
 
     # Set up model components
     unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
@@ -19,7 +19,7 @@ def process_single_image(image_path, image=None):
     stylecodes_model= stylecodes_model.to("cuda")
 
 
-    stylecodes_model.load_model(
+    stylecodes_model.load_model(model)
     # Load and preprocess image
     if image is None:
         image = Image.open(image_path).convert("RGB")
@@ -40,7 +40,7 @@ def process_single_image(image_path, image=None):
     return code
 
 
-def process_single_image_both_ways(image_path, prompt, num_inference_steps,image=None):
+def process_single_image_both_ways(model,image_path, prompt, num_inference_steps,image=None):
     # Load and preprocess image
     # Set up model components
     unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
@@ -56,7 +56,7 @@ def process_single_image_both_ways(image_path, prompt, num_inference_steps,image
         steps_offset=1,
     )
 
-    stylecodes_model.load_model(
+    stylecodes_model.load_model(model)
 
     pipe = StableDiffusionPipelineXSv2.from_pretrained(
         "runwayml/stable-diffusion-v1-5",
@@ -95,7 +95,7 @@ def process_single_image_both_ways(image_path, prompt, num_inference_steps,image
     # Save the output image
 
 
-def make_stylecode(image_path, image=None):
+def make_stylecode(model,image_path, image=None):
 
     # Set up model components
     unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
@@ -104,7 +104,7 @@ def make_stylecode(image_path, image=None):
     stylecodes_model= stylecodes_model.to("cuda")
 
 
-    stylecodes_model.load_model(
+    stylecodes_model.load_model(model)
     # Load and preprocess image
     if image is None:
         image = Image.open(image_path).convert("RGB")
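
For context, the commit changes each of the three entry points (process_single_image, process_single_image_both_ways, make_stylecode) to take the model as a new first argument and pass it straight to stylecodes_model.load_model(...), instead of whatever value was previously supplied inside each function. Below is a minimal usage sketch under that assumption; the checkpoint value and image filename are hypothetical, and the exact object load_model() expects (a path, a state dict, a loaded module) is not shown in this diff.

from controlnet.callable_functions import process_single_image, process_single_image_both_ways

# Hypothetical checkpoint value -- substitute whatever stylecodes_model.load_model() expects.
model = "models/stylecodes_checkpoint.bin"

# Extract a stylecode from a reference image (process_single_image returns `code`).
code = process_single_image(model, "reference.png")

# Encode the same image and run generation with a prompt in one call.
process_single_image_both_ways(model, "reference.png", prompt="a watercolor landscape", num_inference_steps=20)

Presumably the point is that the caller (for example the Space's app) now chooses the checkpoint once and threads it through, rather than each helper loading a fixed one.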