OmPrakashSingh1704 committed
Commit 2893544
1 Parent(s): e098fac
One_Model
options/Banner_Model/Image2Image.py
CHANGED
@@ -8,10 +8,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using device for I2I: {device}")
 
 # Load the inpainting pipeline
-pipe = AutoPipelineForInpainting.from_pretrained(
-    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
-    torch_dtype=torch.float16, variant="fp16").to(device)
-
 
 def resize_image(image, height, width):
     """Resize image tensor to the desired height and width."""
@@ -30,6 +26,11 @@ def dummy(img):
 
 
 def I2I(prompt, image, width=1024, height=1024, guidance_scale=8.0, num_inference_steps=20, strength=0.99):
+
+    pipe = AutoPipelineForInpainting.from_pretrained(
+        "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
+        torch_dtype=torch.float16, variant="fp16").to(device)
+
     img_url, mask = dummy(image)
 
     # Resize image and mask to the target dimensions (height x width)
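Note: this hunk moves the SDXL inpainting pipeline from module import time into the I2I call, so the Space no longer loads the model at startup; the trade-off is that every call now rebuilds the pipeline from scratch. A minimal sketch of one way to keep the lazy load without paying it on every call (the get_inpaint_pipe helper is hypothetical, not part of this commit):

import torch
from functools import lru_cache
from diffusers import AutoPipelineForInpainting

device = "cuda" if torch.cuda.is_available() else "cpu"

@lru_cache(maxsize=1)
def get_inpaint_pipe():
    # First call pays the full load cost; later calls reuse the cached object.
    return AutoPipelineForInpainting.from_pretrained(
        "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
        torch_dtype=torch.float16, variant="fp16",
    ).to(device)

I2I would then call get_inpaint_pipe() in place of the per-call from_pretrained.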
options/Banner_Model/Image2Image_2.py
CHANGED
@@ -5,17 +5,18 @@ from PIL import Image
 
 device= "cuda" if torch.cuda.is_available() else "cpu"
 print("Using device for I2I_2:", device)
-processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
 
-checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
-controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
-pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
-).to(device)
-pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-# pipe.enable_model_cpu_offload()
 
 def I2I_2(image, prompt,size,num_inference_steps):
+    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+
+    checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
+    controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
+    pipe = StableDiffusionControlNetPipeline.from_pretrained(
+        "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
+    ).to(device)
+    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_model_cpu_offload()
     if not isinstance(image, Image.Image):
        image = Image.fromarray(image)
     image.resize((size,size))
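Note: besides deferring construction into I2I_2, this hunk also re-enables pipe.enable_model_cpu_offload(), which was previously commented out. If the diffusers guidance applies here, offloading manages device placement itself, so the explicit .to(device) beforehand is usually unnecessary and can blunt the memory savings. A sketch under that assumption, not a claim about this commit's intent:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

# Build on CPU and let offloading shuttle submodules to the GPU only
# while they run, instead of pinning the whole pipeline there.
controlnet = ControlNetModel.from_pretrained(
    "ControlNet-1-1-preview/control_v11p_sd15_lineart", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "radames/stable-diffusion-v1-5-img2img",
    controlnet=controlnet, torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()  # no prior .to("cuda") needed in this pattern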
options/Video_model/Model.py
CHANGED
@@ -7,17 +7,19 @@ login(token=os.getenv("TOKEN"))
 # Check if CUDA (GPU) is available, otherwise use CPU
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Load the pipeline and move it to the appropriate device (GPU or CPU)
-pipeline = StableVideoDiffusionPipeline.from_pretrained(
-    "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float32
-).to(device)
-
-# Enable model offloading if using the CPU
-if device == "cpu":
-    pipeline.enable_model_cpu_offload()
 
 # Function to generate the video
 def Video(image):
+
+
+    pipeline = StableVideoDiffusionPipeline.from_pretrained(
+        "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float32
+    ).to(device)
+
+    # Enable model offloading if using the CPU
+    if device == "cpu":
+        pipeline.enable_model_cpu_offload()
+
     image = Image.fromarray(image)
     image = image.resize((1024, 576))
 
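Note: the same lazy-loading pattern is applied to the video model. Stable Video Diffusion weights are several gigabytes, so rebuilding the pipeline inside every Video call is slow. A sketch of a module-level lazy singleton that keeps startup fast but loads only once (the _PIPELINE cache and get_video_pipeline helper are illustrations, not in the commit):

import torch
from diffusers import StableVideoDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
_PIPELINE = None  # populated on first use

def get_video_pipeline():
    # Load Stable Video Diffusion on the first request,
    # instead of at import time or on every call.
    global _PIPELINE
    if _PIPELINE is None:
        _PIPELINE = StableVideoDiffusionPipeline.from_pretrained(
            "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
            torch_dtype=torch.float32,
        ).to(device)
    return _PIPELINE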
options/Video_model/__pycache__/Model.cpython-310.pyc
CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ