File size: 1,418 Bytes
9caffc7 f918f30 9caffc7 f918f30 9caffc7 f918f30 9caffc7 3e374e0 9caffc7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr
from PIL import Image
# Load the virtual try-on diffusion pipeline from the Hugging Face Hub
# and move it onto the best available device.
model_id = "ares1123/virtual-dress-try-on"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = StableDiffusionPipeline.from_pretrained(model_id)
pipeline.to(device)
def virtual_try_on(image, clothing_image):
    """Run the try-on pipeline on a person photo and a clothing photo.

    Args:
        image: PIL.Image of the person; its size drives the output resolution.
        clothing_image: PIL.Image of the garment; resized to match `image`.

    Returns:
        The first generated PIL.Image from the pipeline output.
    """
    width, height = image.size
    # Stable Diffusion requires spatial dimensions that are multiples of 8.
    # Clamp to at least 8 so images smaller than 8 px don't round down to a
    # zero-size resize, which would crash.
    width = max((width // 8) * 8, 8)
    height = max((height // 8) * 8, 8)
    # Resize both inputs to the model-compatible resolution
    image = image.resize((width, height))
    clothing_image = clothing_image.resize((width, height))
    # Text prompt guiding the generation
    prompt = "A person wearing new clothes"
    # NOTE(review): StableDiffusionPipeline is a text-to-image pipeline; its
    # __call__ does not accept `image` or `conditioning_image` kwargs, so this
    # call will raise a TypeError at runtime. An img2img or ControlNet
    # pipeline is likely intended — confirm against the model card before
    # changing the pipeline class.
    result = pipeline(prompt=prompt, image=image, conditioning_image=clothing_image)
    try_on_image = result.images[0]
    return try_on_image
# Build a minimal Gradio UI for manual testing: two image uploads in,
# one generated image out.
person_input = gr.Image(type="pil", label="User Image")
garment_input = gr.Image(type="pil", label="Clothing Image")
interface = gr.Interface(
    fn=virtual_try_on,
    inputs=[person_input, garment_input],
    outputs=gr.Image(type="pil"),
    title="Virtual Dress Try-On",
    description="Upload an image of yourself and a clothing image to try it on virtually!",
)
# Serve the app; share=True exposes a temporary public URL
interface.launch(share=True)
|