insideman committed
Commit 0779147 · verified · 1 Parent(s): 2e8966b

Update app.py

Files changed (1)
  1. app.py +30 -37
app.py CHANGED
@@ -1,52 +1,45 @@
 import gradio as gr
-import torch
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
-from PIL import Image
-import numpy as np
 
-def load_model():
-    controlnet = ControlNetModel.from_pretrained("Kwai-Kolors/Kolors-Virtual-Try-On")
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        "Kwai-Kolors/Kolors-Virtual-Try-On",
-        controlnet=controlnet,
-        torch_dtype=torch.float16
-    )
-    if torch.cuda.is_available():
-        pipe = pipe.to("cuda")
-    return pipe
-
-# Load the model globally
-try:
-    model = load_model()
-    print("Model loaded successfully!")
-except Exception as e:
-    print(f"Error while loading the model: {str(e)}")
 
 def virtual_try_on(person_image, garment_image):
     """
-    Virtual try-on process
     """
     try:
-        # Convert the images to the appropriate format
-        if person_image is None or garment_image is None:
-            return None, "Error: Both images are required"
-
-        # Model inference
-        output = model(
-            person_image,
-            garment_image,
-            num_inference_steps=30,
-            guidance_scale=7.5
         )
 
-        # Get the result image
-        result_image = output.images[0]
         return result_image, "Success"
-
     except Exception as e:
         return None, f"Error: {str(e)}"
 
-# Gradio interface
 demo = gr.Interface(
     fn=virtual_try_on,
     inputs=[
@@ -57,7 +50,7 @@ demo = gr.Interface(
         gr.Image(type="pil", label="Result"),
         gr.Text(label="Status")
     ],
-    title="Virtual Try-On",
     description="Upload a person image and a garment image to see how the garment would look on the person."
 )
 
 
 import gradio as gr
+from huggingface_hub import InferenceClient
+import PIL.Image
+import io
 
+client = InferenceClient(
+    model="Kwai-Kolors/Kolors-Virtual-Try-On"
+)
 
 def virtual_try_on(person_image, garment_image):
     """
+    Process the virtual try-on request
+    Args:
+        person_image: PIL Image of the person
+        garment_image: PIL Image of the garment
+    Returns:
+        PIL Image of the result
     """
     try:
+        # Convert images to bytes
+        person_bytes = io.BytesIO()
+        garment_bytes = io.BytesIO()
+        person_image.save(person_bytes, format='PNG')
+        garment_image.save(garment_bytes, format='PNG')
+
+        # Make API request
+        response = client.post(
+            json={
+                "inputs": [
+                    {"image": person_bytes.getvalue()},
+                    {"image": garment_bytes.getvalue()}
+                ]
+            }
         )
 
+        # Convert response to image
+        result_image = PIL.Image.open(io.BytesIO(response))
         return result_image, "Success"
     except Exception as e:
         return None, f"Error: {str(e)}"
 
+# Create Gradio interface
 demo = gr.Interface(
     fn=virtual_try_on,
     inputs=[
 
         gr.Image(type="pil", label="Result"),
         gr.Text(label="Status")
     ],
+    title="Virtual Try-On API",
     description="Upload a person image and a garment image to see how the garment would look on the person."
 )
56