VikramSingh178 committed
Commit 76afedf
1 Parent(s): e9cb859

chore: Update Dockerfile and requirements.txt


Former-commit-id: 0caea23b864f7ac1e3663f3f59551de462daf546
Former-commit-id: 69e7898d5bb389e4e83b2115cd78a7088774cb9c

Dockerfile CHANGED
@@ -1,14 +1,14 @@
 # Use the official Python base image
-FROM python:3.11-slim
+FROM python:3.10-slim
 
 # Set the initial working directory
-WORKDIR /api
+WORKDIR /app
 
 # Copy the requirements.txt file from the api directory
-COPY api/requirements.txt ./
+COPY ../api/requirements.txt ./
 
 # Install dependencies specified in requirements.txt
-RUN pip install --no-cache-dir --upgrade -r requirements.txt
+RUN pip install -r requirements.txt
 
 # Create a non-root user and set up the environment
 RUN useradd -m -u 1000 user
api/requirements.txt CHANGED
@@ -7,7 +7,7 @@ lightning==2.2.3
 logfire==0.42.0
 Pillow==10.3.0
 pydantic==2.7.4
-torch==2.2.0
+torch
 utils==1.0.2
 uvicorn==0.30.1
 boto3
api/routers/sdxl_text_to_image.py CHANGED
@@ -10,7 +10,6 @@ import uuid
10
  from diffusers import DiffusionPipeline
11
  import torch
12
  from functools import lru_cache
13
- from s3_manager import S3ManagerService
14
  from PIL import Image
15
  import io
16
  from utils import accelerator
 
10
  from diffusers import DiffusionPipeline
11
  import torch
12
  from functools import lru_cache
 
13
  from PIL import Image
14
  import io
15
  from utils import accelerator
gradio-ui/ui.py DELETED
@@ -1,59 +0,0 @@
-import gradio as gr
-import requests
-from pydantic import BaseModel
-from diffusers.utils import load_image
-
-
-SDXL_LORA_API_URL = 'http://127.0.0.1:8000/api/v1/product-diffusion/sdxl_v0_lora_inference'
-
-# Define the InpaintingRequest model
-class InpaintingRequest(BaseModel):
-    prompt: str
-    num_inference_steps: int
-    guidance_scale: float
-    negative_prompt: str
-    num_images: int
-    mode: str
-
-def generate_sdxl_lora_image(prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode):
-    # Prepare the payload for SDXL LORA API
-    payload = InpaintingRequest(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
-        num_images=num_images,
-        mode=mode
-    ).model_dump()
-
-    response = requests.post(SDXL_LORA_API_URL, json=payload)
-    response_json = response.json()
-    url = response_json['url']
-
-    image = load_image(url)
-    return image
-
-with gr.Blocks(theme='gradio/soft') as demo:
-    with gr.Tab("SDXL LORA TEXT-TO-IMAGE"):
-        with gr.Row():
-            with gr.Column(scale=1):
-
-                prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
-                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here")
-
-
-
-            with gr.Column(scale=1):
-                num_inference_steps = gr.Slider(minimum=1, maximum=1000, step=1, value=20, label="Inference Steps")
-                guidance_scale = gr.Slider(minimum=1.0, maximum=10.0, step=0.1, value=7.5, label="Guidance Scale")
-                num_images = gr.Slider(minimum=1, maximum=10, step=1, value=1, label="Number of Images")
-                mode = gr.Dropdown(choices=["s3_json", "b64_json"], value="s3_json", label="Mode")
-                generate_button = gr.Button("Generate Image")
-
-
-        image_preview = gr.Image(label="Generated Image", height=512, width=512,scale=1,show_download_button=True,show_share_button=True,container=True)
-
-        generate_button.click(generate_sdxl_lora_image, inputs=[prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode], outputs=[image_preview])
-
-
-demo.launch()
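The deleted file was only a thin Gradio client around the FastAPI endpoint, so the endpoint can still be exercised directly. Below is a minimal sketch using requests, assuming the API is still served locally on port 8000 and still returns a JSON body with a url field; the endpoint path, payload fields, and mode values are taken from the removed ui.py, while the prompt text is illustrative only.

import requests

# Endpoint and payload shape come from the deleted gradio-ui/ui.py;
# adjust the host/port if the API is not running locally.
SDXL_LORA_API_URL = "http://127.0.0.1:8000/api/v1/product-diffusion/sdxl_v0_lora_inference"

payload = {
    "prompt": "a product photo of a watch on a marble table",   # illustrative prompt
    "num_inference_steps": 20,
    "guidance_scale": 7.5,
    "negative_prompt": "blurry, low quality",
    "num_images": 1,
    "mode": "s3_json",   # the removed UI offered "s3_json" or "b64_json"
}

response = requests.post(SDXL_LORA_API_URL, json=payload)
response.raise_for_status()
print(response.json()["url"])   # assumes the response still carries a 'url' key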
requirements.txt CHANGED
@@ -1,25 +0,0 @@
-diffusers
-datasets
-fastapi
-wandb
-lightning
-torchvision
-pandas
-numpy
-rich
-tqdm
-transformers
-fastapi
-uvicorn
-matplotlib
-accelerate
-torchvision
-ftfy
-tensorboard
-Jinja2
-datasets
-peft
-async-batcher
-ultralytics
-opencv-python-headless
-boto3
scripts/inpainting_pipeline.py CHANGED
@@ -5,7 +5,6 @@ from utils import accelerator, ImageAugmentation
 import hydra
 from omegaconf import DictConfig
 from PIL import Image
-from functools import lru_cache
 
 
 def load_pipeline(model_name: str, device, enable_compile: bool = True):
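Only the signature of load_pipeline appears in this hunk. For orientation, here is a minimal sketch of what a helper with this signature commonly does with the diffusers API; the body below is an assumption for illustration, not the project's actual implementation.

import torch
from diffusers import DiffusionPipeline


def load_pipeline(model_name: str, device, enable_compile: bool = True):
    # Hypothetical body: load the pretrained pipeline and move it to the target device.
    pipeline = DiffusionPipeline.from_pretrained(model_name)
    pipeline.to(device)
    if enable_compile:
        # torch.compile (PyTorch 2.x) can speed up repeated UNet forward passes.
        pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead")
    return pipeline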