wemab39501 committed on
Commit
9dbaece
1 Parent(s): 3970b50

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -17
app.py CHANGED
@@ -39,28 +39,28 @@ def infer(prompt):
39
  #generator = torch.Generator(device=device).manual_seed(seed)
40
  #print("Is GPU busy? ", is_gpu_busy)
41
  images = []
42
- #if(not is_gpu_busy):
43
- # is_gpu_busy = True
44
- # images_list = pipe(
45
- # [prompt] * samples,
46
- # num_inference_steps=steps,
47
- # guidance_scale=scale,
48
  #generator=generator,
49
- # )
50
- # is_gpu_busy = False
51
- # safe_image = Image.open(r"unsafe.png")
52
- # for i, image in enumerate(images_list["sample"]):
53
  # if(images_list["nsfw_content_detected"][i]):
54
  # images.append(safe_image)
55
  # else:
56
- # images.append(image)
57
  #else:
58
- url = os.getenv('JAX_BACKEND_URL')
59
- payload = {'prompt': prompt}
60
- images_request = requests.post(url, json = payload)
61
- for image in images_request.json()["images"]:
62
- image_b64 = (f"data:image/jpeg;base64,{image}")
63
- images.append(image_b64)
64
 
65
  return images
66
 
 
39
  #generator = torch.Generator(device=device).manual_seed(seed)
40
  #print("Is GPU busy? ", is_gpu_busy)
41
  images = []
42
+ if(not is_gpu_busy):
43
+ is_gpu_busy = True
44
+ images_list = pipe(
45
+ [prompt] * samples,
46
+ num_inference_steps=steps,
47
+ guidance_scale=scale,
48
  #generator=generator,
49
+ )
50
+ is_gpu_busy = False
51
+ #safe_image = Image.open(r"unsafe.png")
52
+ # for i, image in enumerate(images_list["sample"]):
53
  # if(images_list["nsfw_content_detected"][i]):
54
  # images.append(safe_image)
55
  # else:
56
+ images.append(image)
57
  #else:
58
+ #url = os.getenv('JAX_BACKEND_URL')
59
+ #payload = {'prompt': prompt}
60
+ #images_request = requests.post(url, json = payload)
61
+ #for image in images_request.json()["images"]:
62
+ # image_b64 = (f"data:image/jpeg;base64,{image}")
63
+ # images.append(image_b64)
64
 
65
  return images
66