VikramSingh178 committed
Commit 6e67c16
1 Parent(s): 6b857b8

Former-commit-id: 522ceaeca59f5a0db3c0e1e300b6ac98b69656af

__pycache__/config_settings.cpython-310.pyc ADDED
Binary file (598 Bytes)
 
api/models/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/api/models/__pycache__/__init__.cpython-310.pyc and b/api/models/__pycache__/__init__.cpython-310.pyc differ
 
api/models/__pycache__/sdxl_input.cpython-310.pyc CHANGED
Binary files a/api/models/__pycache__/sdxl_input.cpython-310.pyc and b/api/models/__pycache__/sdxl_input.cpython-310.pyc differ
 
api/routers/__pycache__/painting.cpython-310.pyc CHANGED
Binary files a/api/routers/__pycache__/painting.cpython-310.pyc and b/api/routers/__pycache__/painting.cpython-310.pyc differ
 
api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc CHANGED
Binary files a/api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc and b/api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc differ
 
api/routers/painting.py CHANGED
@@ -1,22 +1,20 @@
- from pathlib import Path
  import os
  import uuid
  from typing import List, Tuple, Any, Dict
- from fastapi import APIRouter, File, UploadFile, HTTPException, Form, Depends, Body
+ from fastapi import APIRouter, File, UploadFile, HTTPException, Form
  from pydantic import BaseModel, Field
  from PIL import Image
  import lightning.pytorch as pl
  from scripts.api_utils import pil_to_s3_json, pil_to_b64_json, ImageAugmentation, accelerator
- from inpainting_pipeline import AutoPaintingPipeline, load_pipeline
+ from scripts.inpainting_pipeline import AutoPaintingPipeline, load_pipeline
  from hydra import compose, initialize
  from async_batcher.batcher import AsyncBatcher
  import json
  from functools import lru_cache
- import asyncio
  pl.seed_everything(42)
  router = APIRouter()
 
- # Initialize Hydra configuration
+ 
  with initialize(version_base=None, config_path="../../configs"):
      cfg = compose(config_name="inpainting")
 
@@ -31,7 +29,6 @@ def load_pipeline_wrapper():
      """
      pipeline = load_pipeline(cfg.model, accelerator(), enable_compile=True)
      return pipeline
- 
  inpainting_pipeline = load_pipeline_wrapper()
 
  class InpaintingRequest(BaseModel):
@@ -45,7 +42,8 @@ class InpaintingRequest(BaseModel):
      guidance_scale: float = Field(..., description="Guidance scale for inference")
      mode: str = Field(..., description="Mode for output ('b64_json' or 's3_json')")
      num_images: int = Field(..., description="Number of images to generate")
- 
+     use_augmentation: bool = Field(True, description="Whether to use image augmentation")
+ 
  class InpaintingBatchRequestModel(BaseModel):
      """
      Model representing a batch request for inpainting inference.
@@ -68,14 +66,35 @@ async def save_image(image: UploadFile) -> str:
          f.write(await image.read())
      return file_path
 
- def run_inference(cfg, image_path: str, mask_image_path: str, request: InpaintingRequest):
+ def augment_image(image_path, target_width, target_height, roi_scale, segmentation_model_name, detection_model_name):
+     """
+     Augment an image by extending its dimensions and generating masks.
+ 
+     Args:
+         image_path (str): Path to the image file.
+         target_width (int): Target width for augmentation.
+         target_height (int): Target height for augmentation.
+         roi_scale (float): Scale factor for region of interest.
+         segmentation_model_name (str): Name of the segmentation model.
+         detection_model_name (str): Name of the detection model.
+ 
+     Returns:
+         Tuple[Image.Image, Image.Image]: Augmented image and inverted mask.
+     """
+     image = Image.open(image_path)
+     image_augmentation = ImageAugmentation(target_width, target_height, roi_scale)
+     image = image_augmentation.extend_image(image)
+     mask = image_augmentation.generate_mask_from_bbox(image, segmentation_model_name, detection_model_name)
+     inverted_mask = image_augmentation.invert_mask(mask)
+     return image, inverted_mask
+ 
+ def run_inference(cfg, image_path: str, request: InpaintingRequest):
      """
      Run inference using an inpainting pipeline on an image.
 
      Args:
          cfg (dict): Configuration dictionary.
          image_path (str): Path to the image file.
-         mask_image_path (str): Path to the mask image file.
          request (InpaintingRequest): Pydantic model containing inference parameters.
 
      Returns:
@@ -84,8 +103,17 @@ def run_inference(cfg, image_path: str, mask_image_path: str, request: Inpaintin
      Raises:
          ValueError: If an invalid mode is provided.
      """
-     image = Image.open(image_path)
-     mask_image = Image.open(mask_image_path)
+     if request.use_augmentation:
+         image, mask_image = augment_image(image_path,
+                                           cfg['target_width'],
+                                           cfg['target_height'],
+                                           cfg['roi_scale'],
+                                           cfg['segmentation_model'],
+                                           cfg['detection_model'])
+     else:
+         image = Image.open(image_path)
+         mask_image = None
+ 
      painting_pipeline = AutoPaintingPipeline(
          pipeline=inpainting_pipeline,
          image=image,
@@ -107,33 +135,26 @@ def run_inference(cfg, image_path: str, mask_image_path: str, request: Inpaintin
          raise ValueError("Invalid mode. Supported modes are 'b64_json' and 's3_json'.")
 
  class InpaintingBatcher(AsyncBatcher):
-     def __init__(self, max_batch_size: int):
-         super().__init__(max_batch_size)
- 
-     async def process_batch(self, batch: Tuple[List[str], List[str], List[InpaintingRequest]]) -> List[Dict[str, Any]]:
+     async def process_batch(self, batch: Tuple[List[str], List[InpaintingRequest]]) -> List[Dict[str, Any]]:
          """
          Process a batch of images and requests for inpainting inference.
 
          Args:
-             batch (Tuple[List[str], List[str], List[InpaintingRequest]]): Tuple of image paths, mask image paths, and corresponding requests.
+             batch (Tuple[List[str], List[InpaintingRequest]]): Tuple of image paths and corresponding requests.
 
          Returns:
              List[Dict[str, Any]]: List of resulting images in the specified mode ('b64_json' or 's3_json').
          """
-         image_paths, mask_image_paths, requests = batch
+         image_paths, requests = batch
          results = []
-         for image_path, mask_image_path, request in zip(image_paths, mask_image_paths, requests):
-             try:
-                 result = run_inference(cfg, image_path, mask_image_path, request)
-                 results.append(result)
-             except Exception as e:
-                 results.append({"error": str(e)})
+         for image_path, request in zip(image_paths, requests):
+             result = run_inference(cfg, image_path, request)
+             results.append(result)
          return results
 
  @router.post("/inpainting")
  async def inpainting_inference(
      image: UploadFile = File(...),
-     mask_image: UploadFile = File(...),
      request_data: str = Form(...),
  ):
      """
@@ -141,7 +162,6 @@ async def inpainting_inference(
 
      Args:
          image (UploadFile): Uploaded image file.
-         mask_image (UploadFile): Uploaded mask image file.
          request_data (str): JSON string of the request parameters.
 
      Returns:
@@ -152,10 +172,9 @@ async def inpainting_inference(
      """
      try:
          image_path = await save_image(image)
-         mask_image_path = await save_image(mask_image)
          request_dict = json.loads(request_data)
          request = InpaintingRequest(**request_dict)
-         result = run_inference(cfg, image_path, mask_image_path, request)
+         result = run_inference(cfg, image_path, request)
          return result
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))
@@ -163,7 +182,6 @@ async def inpainting_inference(
  @router.post("/inpainting/batch")
  async def inpainting_batch_inference(
      images: List[UploadFile] = File(...),
-     mask_images: List[UploadFile] = File(...),
      request_data: str = Form(...),
  ):
      """
@@ -171,7 +189,6 @@ async def inpainting_batch_inference(
 
      Args:
          images (List[UploadFile]): List of uploaded image files.
-         mask_images (List[UploadFile]): List of uploaded mask image files.
          request_data (str): JSON string of the request parameters.
 
      Returns:
@@ -185,13 +202,12 @@ async def inpainting_batch_inference(
          batch_request = InpaintingBatchRequestModel(**request_dict)
          requests = batch_request.requests
 
-         if len(images) != len(requests) or len(images) != len(mask_images):
-             raise HTTPException(status_code=400, detail="The number of images, mask images, and requests must match.")
+         if len(images) != len(requests):
+             raise HTTPException(status_code=400, detail="The number of images and requests must match.")
 
          batcher = InpaintingBatcher(max_batch_size=64)
-         image_paths = [save_image(image) for image in images]
-         mask_image_paths = [save_image(mask_image) for mask_image in mask_images]
-         results = await batcher.process_batch((image_paths, mask_image_paths, requests))
+         image_paths = [await save_image(image) for image in images]
+         results = batcher.process_batch((image_paths, requests))
 
          return results
      except Exception as e:
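
With this commit the client no longer uploads a mask; the server derives one via `augment_image` whenever `use_augmentation` is set. A minimal sketch of a call against the updated `/inpainting` route follows. The host, port, and router prefix are assumptions borrowed from the endpoint URLs in `ui/ui.py`, and the prompt-related fields mirror the `request_data` built there, since the full `InpaintingRequest` model is not visible in this hunk:

```python
import json
import requests

# Assumed local deployment; the path prefix is copied from the endpoints in ui/ui.py.
url = "http://localhost:7860/api/v1/product-diffusion/inpainting"

# Mirrors InpaintingRequest: with use_augmentation=True the mask is generated
# server-side from the uploaded product image, so no mask file is sent.
request_data = {
    "prompt": "Product on the Kitchen used for cooking",
    "negative_prompt": "blurry, deformed",
    "num_inference_steps": 20,
    "strength": 0.8,
    "guidance_scale": 7.5,
    "mode": "s3_json",
    "num_images": 1,
    "use_augmentation": True,
}

with open("sample_data/image.png", "rb") as f:
    files = {"image": ("image.png", f, "image/png")}
    resp = requests.post(url, files=files, data={"request_data": json.dumps(request_data)})
resp.raise_for_status()
print(resp.json())
```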
api/routers/sdxl_text_to_image.py CHANGED
@@ -1,4 +1,4 @@
- import config
+ import scripts.config as config
  from fastapi import APIRouter, HTTPException
  from typing import List
  from diffusers import DiffusionPipeline
api/yolov8l.pt.REMOVED.git-id ADDED
@@ -0,0 +1 @@
+ 85f4cb9e9454a5944aebcc4ac73fd5571f3ddcd3
picpilot.egg-info/SOURCES.txt CHANGED
@@ -16,8 +16,9 @@ scripts/config.py
  scripts/inpainting_pipeline.py
  scripts/kandinsky3_inpainting.py
  scripts/load_pipeline.py
- scripts/logger.py
  scripts/products10k_captions.py
  scripts/s3_manager.py
  scripts/sdxl_lora_inference.py
- scripts/sdxl_lora_tuner.py
+ scripts/sdxl_lora_tuner.py
+ ui/__init__.py
+ ui/ui.py
picpilot.egg-info/top_level.txt CHANGED
@@ -1,2 +1,3 @@
  api
  scripts
+ ui
sample_data/mask_image.png ADDED
scripts/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (127 Bytes)
 
scripts/__pycache__/api_utils.cpython-310.pyc ADDED
Binary file (5.71 kB)
 
scripts/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.72 kB)
 
scripts/__pycache__/inpainting_pipeline.cpython-310.pyc ADDED
Binary file (2.64 kB)
 
scripts/__pycache__/s3_manager.cpython-310.pyc ADDED
Binary file (2.4 kB)
 
scripts/api_utils.py CHANGED
@@ -3,10 +3,10 @@ from ultralytics import YOLO
  from transformers import SamModel, SamProcessor
  import numpy as np
  from PIL import Image, ImageOps
- from config import SEGMENTATION_MODEL_NAME, DETECTION_MODEL_NAME
+ from scripts.config import SEGMENTATION_MODEL_NAME, DETECTION_MODEL_NAME
  from diffusers.utils import load_image
  import gc
- from s3_manager import S3ManagerService
+ from scripts.s3_manager import S3ManagerService
  import io
  from io import BytesIO
  import base64
scripts/inpainting_pipeline.py CHANGED
@@ -1,7 +1,7 @@
  import torch
  from diffusers import AutoPipelineForInpainting,DiffusionPipeline
  from diffusers.utils import load_image
- from api_utils import accelerator, ImageAugmentation
+ from scripts.api_utils import accelerator, ImageAugmentation
  import hydra
  from omegaconf import DictConfig
  from PIL import Image
scripts/kandinsky3_inpainting.py CHANGED
@@ -1,12 +1,11 @@
- import sys
  import torch
- sys.path.append('../')
  from Kandinsky.kandinsky3 import get_inpainting_pipeline
- from api_utils import ImageAugmentation
+ from scripts.api_utils import ImageAugmentation,accelerator
  from diffusers.utils import load_image
+ import numpy as np
  from PIL import Image
 
- device_map = torch.device('cuda:0')
+ device_map = torch.device(accelerator())
  dtype_map = {
      'unet': torch.float16,
      'text_encoder': torch.float16,
@@ -18,12 +17,13 @@ pipe = get_inpainting_pipeline(
      device_map, dtype_map,
  )
 
- augmenter = ImageAugmentation(target_width=2560, target_height=1440)
- image = Image.open(image_path='/home/product_diffusion_api/sample_data/example1.jpg')
- extended_image = augmenter.extend_image(image)
- mask_image = augmenter.generate_mask_from_bbox(extended_image, segmentation_model='facebook/sam-vit-base', detection_model='yolov8s')
- mask_image = augmenter.invert_mask(mask_image)
+ image = Image.open('/home/PicPilot/sample_data/image.png')
+ mask_image = Image.open('/home/PicPilot/sample_data/mask_image.png')
+ image = load_image(image=image)
+ mask_image = np.array(mask_image)
 
 
- image = pipe( "Product on the Kitchen used for cooking", extended_image, mask_image)
+ 
+ 
+ image = pipe( "Product on the Kitchen used for cooking", image, mask_image)
  image.save('output.jpg')
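
The script now consumes a precomputed `sample_data/mask_image.png` instead of generating a mask inline. For reference, a hedged sketch of how such a mask could be produced with the same helpers the API route uses (mirroring `augment_image` in `api/routers/painting.py`); the target size and model names are taken from the removed lines above, while the `roi_scale` value is an illustrative assumption:

```python
from PIL import Image
from scripts.api_utils import ImageAugmentation

# 2560x1440 and the model names come from the removed code; roi_scale=0.5
# is an assumed value for ImageAugmentation's third parameter.
augmenter = ImageAugmentation(target_width=2560, target_height=1440, roi_scale=0.5)
image = augmenter.extend_image(Image.open('sample_data/image.png'))
mask = augmenter.generate_mask_from_bbox(image, 'facebook/sam-vit-base', 'yolov8s')
inverted_mask = augmenter.invert_mask(mask)
inverted_mask.save('sample_data/mask_image.png')
```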
scripts/load_pipeline.py CHANGED
@@ -2,7 +2,7 @@ from config import MODEL_NAME,ADAPTER_NAME
  import torch
  from diffusers import DiffusionPipeline
  from wandb.integration.diffusers import autolog
- from config import PROJECT_NAME
+ from scripts.config import PROJECT_NAME
  autolog(init=dict(project=PROJECT_NAME))
 
 
ui/__pycache__/ui.cpython-310.pyc ADDED
Binary file (3.55 kB)
 
ui/ui.py CHANGED
@@ -1,92 +1,41 @@
  import gradio as gr
  import numpy as np
  import requests
- from io import BytesIO
  import json
  from PIL import Image
- from pydantic import BaseModel, Field
  from diffusers.utils import load_image
+ from io import BytesIO
 
  # API endpoints
- sdxl_inference_endpoint = 'https://vikramsingh178-picpilot-server.hf.space/api/v1/product-diffusion/sdxl_v0_lora_inference'
- kandinsky_inpainting_inference = 'https://vikramsingh178-picpilot-server.hf.space/api/v1/product-diffusion/inpainting'
- 
- class InputRequestSDXL(BaseModel):
-     prompt: str
-     num_inference_steps: int
-     guidance_scale: float
-     negative_prompt: str
-     num_images: int
-     mode: str
- 
- class InpaintingRequestKandinsky(BaseModel):
-     prompt: str = Field(..., description="Prompt text for inference")
-     negative_prompt: str = Field(..., description="Negative prompt text for inference")
-     num_inference_steps: int = Field(..., description="Number of inference steps")
-     strength: float = Field(..., description="Strength of the inference")
-     guidance_scale: float = Field(..., description="Guidance scale for inference")
-     mode: str = Field(..., description="Mode for output ('b64_json' or 's3_json')")
-     num_images: int = Field(..., description="Number of images to generate")
+ sdxl_inference_endpoint = 'http://localhost:7860/api/v1/product-diffusion/sdxl_v0_lora_inference'
+ kandinsky_inpainting_inference = 'http://localhost:7860/api/v1/product-diffusion/inpainting'
 
  def generate_sdxl_lora_image(prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode):
-     payload = InputRequestSDXL(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         num_inference_steps=num_inference_steps,
-         guidance_scale=guidance_scale,
-         num_images=num_images,
-         mode=mode
-     ).dict()
- 
-     try:
-         response = requests.post(sdxl_inference_endpoint, json=payload)
-         response.raise_for_status()
-         response_data = response.json()
-         url = response_data['url']
-         image = load_image(url)
-         return image
-     except requests.exceptions.RequestException as e:
-         print(f"Error in SDXL-Lora API request: {e}")
-         return None
- 
- def process_masked_image(img):
-     if img is None or "composite" not in img:
-         return None, None
- 
-     base_image = Image.fromarray(img["composite"]).convert("RGB")
- 
-     if "layers" in img and len(img["layers"]) > 0:
-         alpha_channel = img["layers"][0][:, :, 3]
-         mask = np.where(alpha_channel == 0, 0, 255).astype(np.uint8)
-         mask = Image.fromarray(mask).convert("L")
-     else:
-         mask = Image.new("L", base_image.size, 0)
- 
-     return base_image, mask
- 
- def generate_outpainting(prompt, negative_prompt, num_inference_steps, strength, guidance_scale, mode, num_images, masked_image, width, height):
-     base_image, mask = process_masked_image(masked_image)
- 
-     if base_image is None or mask is None:
-         return None, None
- 
-     # Resize base image and mask
-     base_image_resized = base_image.resize((width, height))
-     mask_resized = mask.resize((width, height))
+     payload = {
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "num_inference_steps": num_inference_steps,
+         "guidance_scale": guidance_scale,
+         "num_images": num_images,
+         "mode": mode
+     }
 
-     # Convert the resized images to bytes
+     response = requests.post(sdxl_inference_endpoint, json=payload)
+     response.raise_for_status()
+     response_data = response.json()
+     url = response_data['url']
+     image = load_image(url)
+     return image
+ 
+ def generate_outpainting(prompt, negative_prompt, num_inference_steps, strength, guidance_scale, mode, num_images, image, width, height):
+     # Convert the image to bytes
      img_byte_arr = BytesIO()
-     base_image_resized.save(img_byte_arr, format='PNG')
+     image.save(img_byte_arr, format='PNG')
      img_byte_arr = img_byte_arr.getvalue()
 
-     mask_byte_arr = BytesIO()
-     mask_resized.save(mask_byte_arr, format='PNG')
-     mask_byte_arr = mask_byte_arr.getvalue()
- 
      # Prepare the files for multipart/form-data
      files = {
-         'image': ('image.png', img_byte_arr, 'image/png'),
-         'mask_image': ('mask.png', mask_byte_arr, 'image/png'),
+         'image': ('image.png', img_byte_arr, 'image/png')
      }
 
      # Prepare the request data
@@ -97,7 +46,9 @@ def generate_outpainting(prompt, negative_prompt, num_inference_steps, strength,
          "strength": strength,
          "guidance_scale": guidance_scale,
          "mode": mode,
-         "num_images": num_images
+         "num_images": num_images,
+         "width": width,
+         "height": height
      }
 
      # Convert request_data to a JSON string
@@ -108,30 +59,15 @@ def generate_outpainting(prompt, negative_prompt, num_inference_steps, strength,
          'request_data': request_data_json
      }
 
-     try:
-         response = requests.post(kandinsky_inpainting_inference, files=files, data=form_data)
-         response.raise_for_status()
-         response_data = response.json()
-         url = response_data['url']
-         outpainted_image = load_image(url)
-         return mask_resized, outpainted_image
-     except requests.exceptions.RequestException as e:
-         print(f"Error in Kandinsky Inpainting API request: {e}")
-         return None, None
- 
- def generate_mask_preview(img):
-     base_image, mask = process_masked_image(img)
-     if mask is None:
-         return None
-     return mask
- 
- def resize_image(img, width, height):
-     if img is None:
-         return None
-     resized_img = img.resize((width, height))
-     return resized_img
+     response = requests.post(kandinsky_inpainting_inference, files=files, data=form_data)
+     response.raise_for_status()
+     response_data = response.json()
+     image_url = response_data['image_url']
+     mask_url = response_data['mask_url']
+     outpainted_image = load_image(image_url)
+     mask_image = load_image(mask_url)
+     return outpainted_image, mask_image
 
- # Gradio interface setup
  with gr.Blocks(theme='VikramSingh178/Webui-Theme') as demo:
      with gr.Tab("SdxL-Lora"):
          with gr.Row():
@@ -149,32 +85,25 @@ with gr.Blocks(theme='VikramSingh178/Webui-Theme') as demo:
          image_preview = gr.Image(label="Generated Image (SDXL-Lora)", show_download_button=True, show_share_button=True, container=True)
          generate_button.click(generate_sdxl_lora_image, inputs=[prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode], outputs=[image_preview])
 
- 
      with gr.Tab("Inpainting"):
          with gr.Row():
              with gr.Column():
                  with gr.Group():
-                     masked_image = gr.ImageMask(label="Upload Image and Draw Mask", format='png')
+                     input_image = gr.Image(label="Upload Image", type="pil")
                      prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
                      negative_prompt= gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here")
                      num_inference_steps = gr.Slider(minimum=1, maximum=100, step=1, value=20, label="Inference Steps")
                      strength = gr.Slider(minimum=0.1, maximum=1, step=0.1, value=0.8, label="Strength")
                      guidance_scale = gr.Slider(minimum=1.0, maximum=10.0, step=0.1, value=7.5, label="Guidance Scale")
-                     num_images= gr.Slider(minimum=1, maximum=10, step=1, value=1, label="Number of Images")
+                     num_images = gr.Slider(minimum=1, maximum=10, step=1, value=1, label="Number of Images")
                      mode_kandinsky = gr.Dropdown(choices=["s3_json", "b64_json"], value="s3_json", label="Mode")
                      width_slider = gr.Slider(minimum=512, maximum=1024, step=1, value=800, label="Image Width")
                      height_slider = gr.Slider(minimum=512, maximum=1024, step=1, value=800, label="Image Height")
-                     resize_button = gr.Button("Resize Image", variant='secondary')
                      generate_button = gr.Button("Generate Inpainting", variant='primary')
-                     generate_mask_button_painting = gr.Button("Generate Mask", variant='primary')
 
              with gr.Column(scale=1):
-                 mask_preview= gr.Image(label="Mask Preview", show_download_button=True, container=True)
                  outpainted_image_preview = gr.Image(label="Outpainted Image (Kandinsky)", show_download_button=True, show_share_button=True, container=True)
-         resize_button.click(resize_image, inputs=[masked_image, width_slider, height_slider], outputs=[masked_image])
-         generate_mask_button_painting.click(generate_mask_preview, inputs=masked_image, outputs=[mask_preview])
-         generate_button.click(generate_outpainting,
-                               inputs=[prompt, negative_prompt, num_inference_steps, strength, guidance_scale, mode_kandinsky, num_images, masked_image, width_slider, height_slider],
-                               outputs=[mask_preview, outpainted_image_preview])
+                 mask_image_preview = gr.Image(label="Generated Mask", show_download_button=True, show_share_button=True, container=True)
+         generate_button.click(generate_outpainting, inputs=[prompt, negative_prompt, num_inference_steps, strength, guidance_scale, mode_kandinsky, num_images, input_image, width_slider, height_slider], outputs=[outpainted_image_preview, mask_image_preview])
 
- demo.launch()
+ demo.launch()
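
Since the endpoints now point at `localhost:7860`, the demo is exercised against a locally served API rather than the hosted Space. A rough sketch of a two-process local run; the uvicorn module path `api.main:app` is a guess, so substitute the repo's actual FastAPI entrypoint:

```python
# Hypothetical local run; the module path api.main:app is an assumption.
import subprocess
import time

# Serve the API where ui/ui.py now expects it (localhost:7860).
api = subprocess.Popen(["uvicorn", "api.main:app", "--port", "7860"])
time.sleep(5)  # crude wait for the server to come up

# Launch the Gradio demo in a second process.
ui = subprocess.Popen(["python", "ui/ui.py"])
api.wait()
```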