jonathanpark committed on
Commit
c6d0824
1 Parent(s): 081c459

manually unpack base64 urls

Browse files
Files changed (1) hide show
  1. handler.py +43 -30
handler.py CHANGED
@@ -2,9 +2,24 @@ from typing import Dict, List, Any
2
  import torch
3
  from PIL import Image
4
  from io import BytesIO
5
- from urllib import request
6
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  # set device
9
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
10
 
@@ -36,38 +51,36 @@ class EndpointHandler():
36
  prompt = data.pop("inputs", data)
37
  url = data.pop("url", data)
38
 
39
- with request.urlopen(url) as response:
40
- data = response.read()
41
- init_image = Image.open(data).convert("RGB")
42
- init_image = Image.open(url)
43
- init_image.thumbnail((512, 512))
44
 
45
 
46
- params = data.pop("parameters", data)
47
 
48
- # hyperparamters
49
- num_inference_steps = params.pop("num_inference_steps", 25)
50
- guidance_scale = params.pop("guidance_scale", 7.5)
51
- negative_prompt = params.pop("negative_prompt", None)
52
- prompt = params.pop("prompt", None)
53
- height = params.pop("height", None)
54
- width = params.pop("width", None)
55
- manual_seed = params.pop("manual_seed", -1)
56
 
57
- out = None
58
 
59
- generator = torch.Generator(device='cuda')
60
- generator.manual_seed(manual_seed)
61
- # run img2img pipeline
62
- out = self.imgPipe(prompt,
63
- image=init_image,
64
- num_inference_steps=num_inference_steps,
65
- guidance_scale=guidance_scale,
66
- num_images_per_prompt=1,
67
- negative_prompt=negative_prompt,
68
- height=height,
69
- width=width
70
- )
71
 
72
- # return first generated PIL image
73
- return out.images[0]
 
2
  import torch
3
  from PIL import Image
4
  from io import BytesIO
 
5
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
6
 
7
+ import base64
8
+ import requests
9
+ from io import BytesIO
10
+ from PIL import Image
11
+
12
def load_image(image_url):
    """Load a PIL image from a base64 data URI or a standard HTTP(S) URL.

    Args:
        image_url: Either a ``data:`` URI whose payload is base64-encoded
            image bytes, or a plain URL pointing at an image resource.

    Returns:
        A ``PIL.Image.Image`` (mode is whatever PIL detects; callers
        convert with ``.convert("RGB")`` as needed).

    Raises:
        binascii.Error: if the base64 payload is malformed.
        requests.HTTPError: if fetching the URL returns an error status.
        PIL.UnidentifiedImageError: if the decoded bytes are not an image.
    """
    if image_url.startswith('data:'):
        # Data URI format: "data:<mediatype>;base64,<payload>".
        # partition() is safe on a malformed URI (no comma -> empty
        # payload, rejected cleanly by b64decode/Image.open) where
        # split(',')[1] would raise a bare IndexError.
        _, _, encoded = image_url.partition(',')
        image = Image.open(BytesIO(base64.b64decode(encoded)))
    else:
        # Plain URL: set a timeout so a stalled server cannot hang the
        # inference handler, and fail fast on HTTP error statuses
        # instead of handing error-page bytes to PIL.
        response = requests.get(image_url, timeout=30)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))
    return image
22
+
23
  # set device
24
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
25
 
 
51
  prompt = data.pop("inputs", data)
52
  url = data.pop("url", data)
53
 
54
+ init_image = load_image(url).convert("RGB")
55
+ init_image = Image.open(url)
56
+ init_image.thumbnail((512, 512))
 
 
57
 
58
 
59
+ params = data.pop("parameters", data)
60
 
61
+ # hyperparamters
62
+ num_inference_steps = params.pop("num_inference_steps", 25)
63
+ guidance_scale = params.pop("guidance_scale", 7.5)
64
+ negative_prompt = params.pop("negative_prompt", None)
65
+ prompt = params.pop("prompt", None)
66
+ height = params.pop("height", None)
67
+ width = params.pop("width", None)
68
+ manual_seed = params.pop("manual_seed", -1)
69
 
70
+ out = None
71
 
72
+ generator = torch.Generator(device='cuda')
73
+ generator.manual_seed(manual_seed)
74
+ # run img2img pipeline
75
+ out = self.imgPipe(prompt,
76
+ image=init_image,
77
+ num_inference_steps=num_inference_steps,
78
+ guidance_scale=guidance_scale,
79
+ num_images_per_prompt=1,
80
+ negative_prompt=negative_prompt,
81
+ height=height,
82
+ width=width
83
+ )
84
 
85
+ # return first generated PIL image
86
+ return out.images[0]