Coercer committed on
Commit
eaebbb3
verified
1 Parent(s): f83fb1d

Upload ametralladora_API.py

Browse files
Files changed (1) hide show
  1. ametralladora_API.py +229 -0
ametralladora_API.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #This is an example that uses the websockets api to know when a prompt execution is done
2
+ #Once the prompt execution is done it downloads the images using the /history endpoint
3
+
4
+ import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
5
+ import uuid
6
+ import json
7
+ import urllib.request
8
+ import urllib.parse
9
+
10
# Host:port of the locally running ComfyUI server (ComfyUI's default port is 8188).
server_address = "127.0.0.1:8188"
# Random per-process client id; the server uses it to route websocket status
# messages for our queued prompts back to this connection.
client_id = str(uuid.uuid4())
13
def queue_prompt(prompt, prompt_id):
    """POST the workflow *prompt* to the ComfyUI /prompt endpoint for execution.

    The server associates the job with our module-level client_id so progress
    messages arrive on our websocket, and with *prompt_id* so we can match them.
    """
    payload = json.dumps(
        {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
    ).encode('utf-8')
    request = urllib.request.Request(
        "http://{}/prompt".format(server_address), data=payload
    )
    # Response body is not needed; reading it just drains the connection.
    urllib.request.urlopen(request).read()
18
+
19
def get_image(filename, subfolder, folder_type):
    """Download one rendered image's raw bytes from the ComfyUI /view endpoint."""
    query = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    view_url = "http://{}/view?{}".format(server_address, query)
    with urllib.request.urlopen(view_url) as resp:
        return resp.read()
24
+
25
def get_history(prompt_id):
    """Fetch the server-side execution history for *prompt_id* as a parsed dict."""
    history_url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(history_url) as resp:
        return json.loads(resp.read())
28
+
29
def get_images(ws, prompt):
    """Queue *prompt*, wait on websocket *ws* until execution completes, then
    download all output images.

    Returns a dict mapping output node id -> list of raw image byte strings
    (empty list for output nodes that produced no images).
    """
    prompt_id = str(uuid.uuid4())
    queue_prompt(prompt, prompt_id)

    # Block until the server reports that our prompt finished executing:
    # an "executing" message whose node is None and whose prompt_id is ours.
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            # Binary frames carry latent previews. To decode one:
            #   bytesIO = BytesIO(frame[8:])
            #   preview_image = Image.open(bytesIO)  # PIL image, store it globally
            continue
        message = json.loads(frame)
        if message['type'] == 'executing':
            data = message['data']
            if data['node'] is None and data['prompt_id'] == prompt_id:
                break  # execution is done

    # Pull the finished job's outputs from /history and fetch each image.
    output_images = {}
    history = get_history(prompt_id)[prompt_id]
    for node_id, node_output in history['outputs'].items():
        collected = []
        for image in node_output.get('images', []):
            collected.append(
                get_image(image['filename'], image['subfolder'], image['type'])
            )
        output_images[node_id] = collected

    return output_images
58
+
59
# ComfyUI workflow graph in API (export) format, parsed with json.loads below.
# Nodes: KSampler ("3") driven by checkpoint loader ("4") + DMD2 LoRA ("11")
# + Deep Shrink model patch ("12"); positive CLIP encode ("10") and negative
# CLIP encode ("7"); 1024x1024 empty latent ("16"); VAE decode ("8") and
# SaveImage ("9"). The Xx_PROMPT_xX / Xx_NEGPROMPT_xX placeholder strings are
# replaced at runtime before queuing.
prompt_text = """
{
"3": {
"inputs": {
"seed": 473371463840349,
"steps": 8,
"cfg": 1,
"sampler_name": "lcm",
"scheduler": "beta",
"denoise": 1,
"model": [
"12",
0
],
"positive": [
"10",
0
],
"negative": [
"7",
0
],
"latent_image": [
"16",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "novaFurryXL_illustriousV110.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Cargar Punto de Control"
}
},
"7": {
"inputs": {
"text": "Xx_NEGPROMPT_xX",
"clip": [
"11",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Codificar Texto CLIP (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "Decodificaci贸n VAE"
}
},
"9": {
"inputs": {
"filename_prefix": "Fast",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Guardar Imagen"
}
},
"10": {
"inputs": {
"text": "Xx_PROMPT_xX",
"clip": [
"11",
1
]
},
"class_type": "CLIPTextEncodeWithBreak",
"_meta": {
"title": "CLIPTextEncode with BREAK syntax"
}
},
"11": {
"inputs": {
"lora_name": "dmd2_sdxl_4step_lora_fp16.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"4",
0
],
"clip": [
"4",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Cargar LoRA"
}
},
"12": {
"inputs": {
"block_number": 3,
"downscale_factor": 2,
"start_percent": 0,
"end_percent": 0.5,
"downscale_after_skip": true,
"downscale_method": "bicubic",
"upscale_method": "bicubic",
"model": [
"11",
0
]
},
"class_type": "PatchModelAddDownscale",
"_meta": {
"title": "PatchModelAddDownscale (Kohya Deep Shrink)"
}
},
"16": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Imagen Latente Vac铆a"
}
}
}
"""
206
+
207
prompt = json.loads(prompt_text)

# Set the text prompt for our positive CLIPTextEncode (node "10").
prompt["10"]["inputs"]["text"] = "masterpiece best quality man"

# Set the text prompt for our negative CLIPTextEncode (node "7").
# Bug fix: original said "worst quailty" — the misspelled token would not
# match the intended "worst quality" conditioning.
prompt["7"]["inputs"]["text"] = "worst quality"

# Override the sampler seed (node "3") so runs are reproducible.
prompt["3"]["inputs"]["seed"] = 5345435

ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)
# Close explicitly: in long-lived hosts that call this repeatedly (e.g. a
# Gradio app) leaked connections eventually cause random timeouts.
ws.close()
221
# Display every downloaded output image (requires the third-party Pillow
# package). The original comment claimed this code was "commented out", but
# it runs — the comment was misleading and has been corrected. Imports are
# hoisted out of the inner loop instead of re-executing per image.
from PIL import Image
import io

for node_id in images:
    for image_data in images[node_id]:
        image = Image.open(io.BytesIO(image_data))
        image.show()
229
+