0xadamm committed
Commit 8a4950c
1 Parent(s): ba9ac78

added handler.py

Files changed (2)
  1. .vscode/settings.json +6 -0
  2. handler.py +109 -0
.vscode/settings.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "[python]": {
+         "editor.defaultFormatter": "ms-python.black-formatter"
+     },
+     "python.formatting.provider": "none"
+ }
handler.py ADDED
@@ -0,0 +1,109 @@
+ # handler.py
+
+ from PIL import Image
+ from diffusers import (
+     StableDiffusionControlNetImg2ImgPipeline,
+     ControlNetModel,
+     DDIMScheduler,
+ )
+ from diffusers.utils import load_image
+ import torch
+ import openai
+ from io import BytesIO
+ import base64
+ import os
+ import qrcode
+
+
+ class QRImageHandler:
+     def __init__(
+         self,
+         controlnet_path="DionTimmer/controlnet_qrcode-control_v11p_sd21",
+         pipeline_path="stabilityai/stable-diffusion-2-1",
+     ):
+         # Load the QR-code ControlNet in half precision to reduce GPU memory use.
+         self.controlnet = ControlNetModel.from_pretrained(
+             controlnet_path, torch_dtype=torch.float16
+         )
+
+         self.pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+             pipeline_path,
+             controlnet=self.controlnet,
+             safety_checker=None,
+             torch_dtype=torch.float16,
+         )
+
+         # Memory optimizations: xformers attention (requires xformers installed)
+         # and offloading idle model components to the CPU.
+         self.pipe.enable_xformers_memory_efficient_attention()
+         self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)
+         self.pipe.enable_model_cpu_offload()
+
+     @staticmethod
+     def resize_for_condition_image(input_image: Image.Image, resolution: int):
+         # Scale the shorter side up to `resolution`, then snap both sides to
+         # multiples of 64 as the diffusion UNet expects.
+         input_image = input_image.convert("RGB")
+         W, H = input_image.size
+         k = float(resolution) / min(H, W)
+         H *= k
+         W *= k
+         H = int(round(H / 64.0)) * 64
+         W = int(round(W / 64.0)) * 64
+         img = input_image.resize((W, H), resample=Image.LANCZOS)
+         return img
+
+     def __call__(
+         self,
+         prompt,
+         negative_prompt,
+         qrcode_data,
+         guidance_scale,
+         controlnet_conditioning_scale,
+         strength,
+         generator_seed,
+         width,
+         height,
+         num_inference_steps,
+     ):
+         # Read the key from the environment; never hard-code secrets in source.
+         openai.api_key = os.environ["OPENAI_API_KEY"]
+
+         # Render the payload as a high-error-correction QR code so it stays
+         # scannable after stylization.
+         qr = qrcode.QRCode(
+             version=1,
+             error_correction=qrcode.constants.ERROR_CORRECT_H,
+             box_size=10,
+             border=4,
+         )
+         qr.add_data(qrcode_data)
+         qr.make(fit=True)
+         img = qr.make_image(fill_color="black", back_color="white")
+
+         # Resize the QR image to a 768px width, preserving aspect ratio.
+         basewidth = 768
+         wpercent = basewidth / float(img.size[0])
+         hsize = int(float(img.size[1]) * wpercent)
+         qrcode_image = img.resize((basewidth, hsize), Image.LANCZOS)
+
+         # Generate a base image with DALL-E and use it as the img2img init image.
+         response = openai.Image.create(prompt=prompt, n=1, size="1024x1024")
+         image_url = response.data[0].url
+         init_image = load_image(image_url)
+
+         control_image = qrcode_image
+         init_image = self.resize_for_condition_image(init_image, 768)
+
+         # Seed the generator for reproducible sampling.
+         generator = torch.manual_seed(generator_seed)
+
+         # Run the ControlNet img2img pipeline; the control image steers the
+         # diffusion toward the QR pattern while the prompt sets the style.
+         output = self.pipe(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             image=init_image,
+             control_image=control_image,
+             width=width,
+             height=height,
+             guidance_scale=guidance_scale,
+             controlnet_conditioning_scale=controlnet_conditioning_scale,
+             generator=generator,
+             strength=strength,
+             num_inference_steps=num_inference_steps,
+         )
+
+         # Encode the result as a base64 PNG string for transport.
+         pil_image = output.images[0]
+         buffered = BytesIO()
+         pil_image.save(buffered, format="PNG")
+         image_base64 = base64.b64encode(buffered.getvalue()).decode()
+
+         return image_base64
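
For reference, a minimal usage sketch of the new handler. This driver is not part of the commit: the file name, parameter values, and output path are illustrative, and it assumes OPENAI_API_KEY is set in the environment and a CUDA-capable GPU is available.

# example_usage.py -- hypothetical driver, not included in this commit
import base64

from handler import QRImageHandler

handler = QRImageHandler()  # downloads the ControlNet and SD 2.1 weights on first run
image_base64 = handler(
    prompt="a medieval castle on a cliff, detailed oil painting",
    negative_prompt="blurry, low quality, ugly",
    qrcode_data="https://example.com",
    guidance_scale=7.5,              # illustrative values only
    controlnet_conditioning_scale=1.5,
    strength=0.9,
    generator_seed=42,
    width=768,
    height=768,
    num_inference_steps=50,
)

# The handler returns a base64-encoded PNG; decode it back to bytes to save.
with open("qr_art.png", "wb") as f:
    f.write(base64.b64decode(image_base64))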