VikramSingh178 committed
Commit 4d01864
1 Parent(s): 4b8ee81

chore: Update SDXL LORA TEXT-TO-IMAGE gradio UI theme to 'gradio/soft'


Former-commit-id: 731cce08b3feac70284eaa11f48f8de3855baaec [formerly fb82dce3a4067c5c2b3b03e7dfab059ee9372a80]
Former-commit-id: e9d63ad1c7d652c7928f28c89c2513c4f8b4d289

api/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (143 Bytes)
 
api/models/__pycache__/sdxl_input.cpython-310.pyc ADDED
Binary file (523 Bytes)
 
api/routers/sdxl_text_to_image.py CHANGED
@@ -60,7 +60,7 @@ loaded_pipeline = load_pipeline(config.MODEL_NAME, config.ADAPTER_NAME, config.E
 
 
 # SDXLLoraInference class for running inference
-class SDXLLoraInference(AsyncBatcher):
+class SDXLLoraInference:
     """
     Class for performing SDXL Lora inference.
 
@@ -182,9 +182,9 @@ async def sdxl_v0_lora_inference(data: InputFormat):
 
 @router.post("/sdxl_v0_lora_inference/batch")
 async def sdxl_v0_lora_inference_batch(data: List[InputFormat]):
-    batcher = SDXLLoraBatcher(max_batch_size=64, max_queue_time=0.001)
+    batcher = SDXLLoraBatcher(max_batch_size=64)
     try:
-        predictions = await batcher.process(batch=data)
+        predictions = batcher.process_batch(data)
         return predictions
     except Exception as e:
         print(f"Error in /sdxl_v0_lora_inference/batch: {e}")
gradio-ui/ui.py ADDED
@@ -0,0 +1,59 @@
+import gradio as gr
+import requests
+from pydantic import BaseModel
+from diffusers.utils import load_image
+
+
+SDXL_LORA_API_URL = 'http://127.0.0.1:8000/api/v1/product-diffusion/sdxl_v0_lora_inference'
+
+# Define the InpaintingRequest model
+class InpaintingRequest(BaseModel):
+    prompt: str
+    num_inference_steps: int
+    guidance_scale: float
+    negative_prompt: str
+    num_images: int
+    mode: str
+
+def generate_sdxl_lora_image(prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode):
+    # Prepare the payload for SDXL LORA API
+    payload = InpaintingRequest(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
+        num_images=num_images,
+        mode=mode
+    ).model_dump()
+
+    response = requests.post(SDXL_LORA_API_URL, json=payload)
+    response_json = response.json()
+    url = response_json['url']
+
+    image = load_image(url)
+    return image
+
+with gr.Blocks(theme='gradio/soft') as demo:
+    with gr.Tab("SDXL LORA TEXT-TO-IMAGE"):
+        with gr.Row():
+            with gr.Column(scale=1):
+                prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
+                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here")
+
+            with gr.Column(scale=1):
+                num_inference_steps = gr.Slider(minimum=1, maximum=1000, step=1, value=20, label="Inference Steps")
+                guidance_scale = gr.Slider(minimum=1.0, maximum=10.0, step=0.1, value=7.5, label="Guidance Scale")
+                num_images = gr.Slider(minimum=1, maximum=10, step=1, value=1, label="Number of Images")
+                mode = gr.Dropdown(choices=["s3_json", "b64_json"], value="s3_json", label="Mode")
+                generate_button = gr.Button("Generate Image")
+
+        image_preview = gr.Image(label="Generated Image", height=512, width=512, scale=1, show_download_button=True, show_share_button=True, container=True)
+
+        generate_button.click(generate_sdxl_lora_image, inputs=[prompt, negative_prompt, num_inference_steps, guidance_scale, num_images, mode], outputs=[image_preview])
+
+demo.launch()
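For reference, the same endpoints can be exercised without the Gradio UI. The sketch below mirrors the InpaintingRequest payload and the 'url' response field used in ui.py; the prompt text is illustrative, and the batch route's response schema is not shown in this commit, so treat it as an assumption-laden example rather than documented behavior.

# Sketch: call the API directly with requests, assuming the FastAPI server runs on 127.0.0.1:8000.
import requests

payload = {
    "prompt": "a studio photo of a perfume bottle on a marble table",  # illustrative prompt
    "num_inference_steps": 20,
    "guidance_scale": 7.5,
    "negative_prompt": "blurry, low quality",
    "num_images": 1,
    "mode": "s3_json",
}

# Single-image route: ui.py reads a 'url' field from the JSON response.
r = requests.post("http://127.0.0.1:8000/api/v1/product-diffusion/sdxl_v0_lora_inference", json=payload)
print(r.json()["url"])

# Batch route: accepts a JSON list of the same payloads; its response shape is not shown in this diff.
r = requests.post("http://127.0.0.1:8000/api/v1/product-diffusion/sdxl_v0_lora_inference/batch", json=[payload, payload])
print(r.status_code)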
scripts/__pycache__/config.cpython-310.pyc CHANGED
Binary files a/scripts/__pycache__/config.cpython-310.pyc and b/scripts/__pycache__/config.cpython-310.pyc differ