surena26 committed on
Commit
6ecc84a
·
verified ·
1 Parent(s): 650bd72

Upload folder using huggingface_hub

Browse files
ComfyUI/script_examples/basic_api_example.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from urllib import request, parse
3
+ import random
4
+
5
+ #This is the ComfyUI api prompt format.
6
+
7
+ #If you want it for a specific workflow you can "enable dev mode options"
8
+ #in the settings of the UI (gear beside the "Queue Size: ") this will enable
9
+ #a button on the UI to save workflows in api format.
10
+
11
+ #keep in mind ComfyUI is pre alpha software so this format will change a bit.
12
+
13
+ #this is the one for the default workflow
14
# Default ComfyUI workflow serialized in "API format": a JSON object whose
# keys are node ids and whose values give each node's class_type and inputs.
# An input is either a literal value or a [source_node_id, output_index] link.
prompt_text = """
{
    "3": {
        "class_type": "KSampler",
        "inputs": {
            "cfg": 8,
            "denoise": 1,
            "latent_image": [
                "5",
                0
            ],
            "model": [
                "4",
                0
            ],
            "negative": [
                "7",
                0
            ],
            "positive": [
                "6",
                0
            ],
            "sampler_name": "euler",
            "scheduler": "normal",
            "seed": 8566257,
            "steps": 20
        }
    },
    "4": {
        "class_type": "CheckpointLoaderSimple",
        "inputs": {
            "ckpt_name": "v1-5-pruned-emaonly.ckpt"
        }
    },
    "5": {
        "class_type": "EmptyLatentImage",
        "inputs": {
            "batch_size": 1,
            "height": 512,
            "width": 512
        }
    },
    "6": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "masterpiece best quality girl"
        }
    },
    "7": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "bad hands"
        }
    },
    "8": {
        "class_type": "VAEDecode",
        "inputs": {
            "samples": [
                "3",
                0
            ],
            "vae": [
                "4",
                2
            ]
        }
    },
    "9": {
        "class_type": "SaveImage",
        "inputs": {
            "filename_prefix": "ComfyUI",
            "images": [
                "8",
                0
            ]
        }
    }
}
"""
102
+
103
def queue_prompt(prompt):
    """POST a workflow graph to the ComfyUI /prompt endpoint.

    Args:
        prompt: dict in ComfyUI API format (node_id -> {class_type, inputs}).

    Returns:
        The server's decoded JSON reply (e.g. contains the queued prompt id),
        so callers can track the job; callers that ignore it are unaffected.
    """
    data = json.dumps({"prompt": prompt}).encode('utf-8')
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    # Fix: the original called request.urlopen(req) and dropped the response
    # object, leaking the HTTP connection. Close it deterministically and
    # return the reply instead of discarding it.
    with request.urlopen(req) as response:
        return json.loads(response.read())
108
+
109
+
110
# Parse the workflow template into a mutable dict keyed by node id.
prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
prompt["6"]["inputs"]["text"] = "masterpiece best quality man"

#set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

# Submit the modified workflow to a locally running ComfyUI server.
queue_prompt(prompt)
119
+
120
+
ComfyUI/script_examples/websockets_api_example.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #This is an example that uses the websockets api to know when a prompt execution is done
2
+ #Once the prompt execution is done it downloads the images using the /history endpoint
3
+
4
+ import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
5
+ import uuid
6
+ import json
7
+ import urllib.request
8
+ import urllib.parse
9
+
10
+ server_address = "127.0.0.1:8188"
11
+ client_id = str(uuid.uuid4())
12
+
13
def queue_prompt(prompt):
    """POST a workflow to /prompt, tagged with our client_id so that
    execution-progress messages for it arrive on our websocket.

    Args:
        prompt: dict in ComfyUI API format (node_id -> {class_type, inputs}).

    Returns:
        The decoded JSON reply; it contains the 'prompt_id' of the queued job.
    """
    data = json.dumps({"prompt": prompt, "client_id": client_id}).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    # Fix: close the HTTP response deterministically instead of leaking it
    # to the garbage collector.
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
18
+
19
def get_image(filename, subfolder, folder_type):
    """Download one output image's raw bytes from the server's /view endpoint."""
    query = urllib.parse.urlencode({
        "filename": filename,
        "subfolder": subfolder,
        "type": folder_type,
    })
    url = "http://{}/view?{}".format(server_address, query)
    with urllib.request.urlopen(url) as response:
        return response.read()
24
+
25
def get_history(prompt_id):
    """Fetch the recorded execution history (including outputs) for prompt_id."""
    url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read())
28
+
29
def get_images(ws, prompt):
    """Queue *prompt*, block on the websocket until execution finishes,
    then download every output image via the /history and /view endpoints.

    Args:
        ws: a connected websocket-client WebSocket registered with our client_id.
        prompt: workflow dict in ComfyUI API format.

    Returns:
        dict mapping output node_id -> list of raw image bytes.
    """
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                # node == None for our prompt_id signals the whole graph is done.
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break
        else:
            continue  # previews are binary data

    # Fix: the original wrapped this loop in a redundant outer
    # `for o in history['outputs']:` (with `o` unused), which re-downloaded
    # every image once per output node. The final dict is identical; the
    # duplicate network fetches are gone.
    history = get_history(prompt_id)[prompt_id]
    for node_id, node_output in history['outputs'].items():
        if 'images' in node_output:
            output_images[node_id] = [
                get_image(image['filename'], image['subfolder'], image['type'])
                for image in node_output['images']
            ]

    return output_images
55
+
56
# Default ComfyUI workflow in API format (same graph as the basic example):
# keys are node ids; inputs are literals or [source_node_id, output_index] links.
prompt_text = """
{
    "3": {
        "class_type": "KSampler",
        "inputs": {
            "cfg": 8,
            "denoise": 1,
            "latent_image": [
                "5",
                0
            ],
            "model": [
                "4",
                0
            ],
            "negative": [
                "7",
                0
            ],
            "positive": [
                "6",
                0
            ],
            "sampler_name": "euler",
            "scheduler": "normal",
            "seed": 8566257,
            "steps": 20
        }
    },
    "4": {
        "class_type": "CheckpointLoaderSimple",
        "inputs": {
            "ckpt_name": "v1-5-pruned-emaonly.ckpt"
        }
    },
    "5": {
        "class_type": "EmptyLatentImage",
        "inputs": {
            "batch_size": 1,
            "height": 512,
            "width": 512
        }
    },
    "6": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "masterpiece best quality girl"
        }
    },
    "7": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "bad hands"
        }
    },
    "8": {
        "class_type": "VAEDecode",
        "inputs": {
            "samples": [
                "3",
                0
            ],
            "vae": [
                "4",
                2
            ]
        }
    },
    "9": {
        "class_type": "SaveImage",
        "inputs": {
            "filename_prefix": "ComfyUI",
            "images": [
                "8",
                0
            ]
        }
    }
}
"""
144
+
145
# Parse the workflow template into a mutable dict keyed by node id.
prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
prompt["6"]["inputs"]["text"] = "masterpiece best quality man"

#set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

# Connect the websocket (with our client_id) before queueing, so no
# execution messages for our prompt are missed.
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)

#Commented out code to display the output images:

# for node_id in images:
#     for image_data in images[node_id]:
#         from PIL import Image
#         import io
#         image = Image.open(io.BytesIO(image_data))
#         image.show()
164
+
ComfyUI/script_examples/websockets_api_example_ws_images.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #This is an example that uses the websockets api and the SaveImageWebsocket node to get images directly without
2
+ #them being saved to disk
3
+
4
+ import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
5
+ import uuid
6
+ import json
7
+ import urllib.request
8
+ import urllib.parse
9
+
10
+ server_address = "127.0.0.1:8188"
11
+ client_id = str(uuid.uuid4())
12
+
13
def queue_prompt(prompt):
    """POST a workflow to /prompt, tagged with our client_id so that
    execution-progress messages for it arrive on our websocket.

    Args:
        prompt: dict in ComfyUI API format (node_id -> {class_type, inputs}).

    Returns:
        The decoded JSON reply; it contains the 'prompt_id' of the queued job.
    """
    data = json.dumps({"prompt": prompt, "client_id": client_id}).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    # Fix: close the HTTP response deterministically instead of leaking it
    # to the garbage collector.
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
18
+
19
def get_image(filename, subfolder, folder_type):
    """Download one output image's raw bytes from the server's /view endpoint."""
    params = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    view_url = "http://{}/view?{}".format(server_address, urllib.parse.urlencode(params))
    with urllib.request.urlopen(view_url) as response:
        return response.read()
24
+
25
def get_history(prompt_id):
    """Fetch the recorded execution history (including outputs) for prompt_id."""
    history_url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(history_url) as response:
        return json.loads(response.read())
28
+
29
def get_images(ws, prompt):
    """Queue *prompt* and collect the images that the SaveImageWebsocket
    node streams back over the websocket (nothing is written to disk).

    Args:
        ws: a connected websocket-client WebSocket registered with our client_id.
        prompt: workflow dict in ComfyUI API format.

    Returns:
        dict mapping node id -> list of binary image payloads.
    """
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    current_node = ""
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            # Binary frame: keep it only while the websocket-save node is
            # executing; the first 8 bytes are dropped (presumably a protocol
            # header — matches the server's binary message framing).
            if current_node == 'save_image_websocket_node':
                output_images.setdefault(current_node, []).append(frame[8:])
            continue
        message = json.loads(frame)
        if message['type'] != 'executing':
            continue
        data = message['data']
        if data['prompt_id'] != prompt_id:
            continue
        if data['node'] is None:
            break  # execution is done
        current_node = data['node']

    return output_images
51
+
52
# ComfyUI workflow in API format. Identical to the default graph except the
# final node: SaveImageWebsocket streams images back over the websocket
# instead of saving them to disk.
prompt_text = """
{
    "3": {
        "class_type": "KSampler",
        "inputs": {
            "cfg": 8,
            "denoise": 1,
            "latent_image": [
                "5",
                0
            ],
            "model": [
                "4",
                0
            ],
            "negative": [
                "7",
                0
            ],
            "positive": [
                "6",
                0
            ],
            "sampler_name": "euler",
            "scheduler": "normal",
            "seed": 8566257,
            "steps": 20
        }
    },
    "4": {
        "class_type": "CheckpointLoaderSimple",
        "inputs": {
            "ckpt_name": "v1-5-pruned-emaonly.ckpt"
        }
    },
    "5": {
        "class_type": "EmptyLatentImage",
        "inputs": {
            "batch_size": 1,
            "height": 512,
            "width": 512
        }
    },
    "6": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "masterpiece best quality girl"
        }
    },
    "7": {
        "class_type": "CLIPTextEncode",
        "inputs": {
            "clip": [
                "4",
                1
            ],
            "text": "bad hands"
        }
    },
    "8": {
        "class_type": "VAEDecode",
        "inputs": {
            "samples": [
                "3",
                0
            ],
            "vae": [
                "4",
                2
            ]
        }
    },
    "save_image_websocket_node": {
        "class_type": "SaveImageWebsocket",
        "inputs": {
            "images": [
                "8",
                0
            ]
        }
    }
}
"""
139
+
140
# Parse the workflow template into a mutable dict keyed by node id.
prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
prompt["6"]["inputs"]["text"] = "masterpiece best quality man"

#set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

# Connect the websocket (with our client_id) before queueing, so no
# execution messages or image frames for our prompt are missed.
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)

#Commented out code to display the output images:

# for node_id in images:
#     for image_data in images[node_id]:
#         from PIL import Image
#         import io
#         image = Image.open(io.BytesIO(image_data))
#         image.show()
159
+