VikramSingh178 committed
Commit d2a2d86
1 Parent(s): d8b5430

chore: Add inpainting and outpainting router to product diffusion API

product_diffusion_api/endpoints.py CHANGED
@@ -1,7 +1,7 @@
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from routers import sdxl_text_to_image
-
+from routers import painting
 
 
 
@@ -16,6 +16,8 @@ app.add_middleware(
 )
 
 app.include_router(sdxl_text_to_image.router, prefix='/api/v1/product-diffusion')
+app.include_router(painting.router, prefix='/api/v1/product-diffusion')
+
 
 
 
product_diffusion_api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc CHANGED
Binary files a/product_diffusion_api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc and b/product_diffusion_api/routers/__pycache__/sdxl_text_to_image.cpython-310.pyc differ
 
product_diffusion_api/routers/painting.py ADDED
@@ -0,0 +1,7 @@
+from fastapi import APIRouter, HTTPException
+
+
+
+
+
+router = APIRouter()
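
Note: in this commit the painting router is only declared; no inpainting or outpainting endpoints exist yet. The sketch below is an assumption of how such an endpoint could later hang off this router, not the project's actual API: the PaintingRequest model, its fields, and the handler body are all hypothetical.

from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()

# Hypothetical request model; field names are assumptions, not taken from the repository.
class PaintingRequest(BaseModel):
    image_url: str            # source product image
    prompt: str               # text prompt for the masked/extended region
    mode: str = "inpainting"  # "inpainting" or "outpainting"

@router.post("/painting")
async def painting(request: PaintingRequest):
    # Placeholder only; the actual diffusion call is not part of this commit.
    return {"mode": request.mode, "status": "not_implemented"}
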
product_diffusion_api/routers/sdxl_text_to_image.py CHANGED
@@ -231,6 +231,7 @@ async def sdxl_v0_lora_inference_batch(data: BatchInputFormat):
                 item.num_images,
                 item.num_inference_steps,
                 item.guidance_scale,
+                item.mode,
             )
             output_json = inference.run_inference()
             processed_requests.append(output_json)
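
Note: forwarding item.mode to the inference call implies each batch item now carries a mode field. The request models are not shown in this diff, so the pydantic sketch below is an assumption about their shape, limited to the fields visible in the hunk above.

from typing import List
from pydantic import BaseModel

# Assumed per-item shape; only fields referenced in the diff are listed, and the
# real item model and the batch_input attribute name may differ in the repository.
class InputFormat(BaseModel):
    num_images: int
    num_inference_steps: int
    guidance_scale: float
    mode: str  # new field consumed as item.mode

class BatchInputFormat(BaseModel):
    batch_input: List[InputFormat]
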
scripts/config.py CHANGED
@@ -3,6 +3,8 @@ ADAPTER_NAME = "VikramSingh178/sdxl-lora-finetune-product-caption"
 VAE_NAME= "madebyollin/sdxl-vae-fp16-fix"
 DATASET_NAME= "hahminlew/kream-product-blip-captions"
 PROJECT_NAME = "Product Photography"
+PRODUCTS_10k_DATASET = "amaye15/Products-10k"
+
 
 class Config:
     def __init__(self):
scripts/products10k_captions.py ADDED
@@ -0,0 +1,18 @@
+from datasets import load_dataset
+from config import PRODUCTS_10k_DATASET
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from tqdm import tqdm
+import torch
+
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+dataset = load_dataset(PRODUCTS_10k_DATASET)
+
+
+def image_captioning(processor, model):
+    # Stub: BLIP caption generation for the Products-10k images is not implemented yet.
+    pass
+
+
+
+
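
Note: as committed, the captioning script stops at a stub: the BLIP processor and model are imported but never instantiated, and image_captioning has no body. The sketch below shows one way the imported pieces could be wired together; the checkpoint name, dataset split, and image column are assumptions rather than anything taken from the repository.

from datasets import load_dataset
from transformers import BlipProcessor, BlipForConditionalGeneration
from tqdm import tqdm
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Assumed checkpoint; the commit does not specify which BLIP weights to use.
checkpoint = "Salesforce/blip-image-captioning-base"
processor = BlipProcessor.from_pretrained(checkpoint)
model = BlipForConditionalGeneration.from_pretrained(checkpoint).to(device)

# Same dataset id as PRODUCTS_10k_DATASET in scripts/config.py; the "train" split and
# "image" column are assumptions about the dataset layout.
dataset = load_dataset("amaye15/Products-10k", split="train")

def image_captioning(processor, model, image):
    # Encode one PIL image and decode the generated caption.
    inputs = processor(images=image, return_tensors="pt").to(device)
    output_ids = model.generate(**inputs, max_new_tokens=30)
    return processor.decode(output_ids[0], skip_special_tokens=True)

captions = [image_captioning(processor, model, example["image"]) for example in tqdm(dataset)]
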