txt2videoAnimateDiffworkflow.json
{
"last_node_id": 77,
"last_link_id": 159,
"nodes": [
{
"id": 21,
"type": "VAEEncode",
"pos": [
1522,
1422
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 30,
"mode": 0,
"inputs": [
{
"name": "pixels",
"type": "IMAGE",
"link": 29
},
{
"name": "vae",
"type": "VAE",
"link": 154
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
31
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEEncode"
}
},
{
"id": 20,
"type": "VHS_DuplicateImages",
"pos": [
1112,
1436
],
"size": {
"0": 315,
"1": 78
},
"flags": {},
"order": 29,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 76
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
29
],
"shape": 3,
"slot_index": 0
},
{
"name": "count",
"type": "INT",
"links": null,
"shape": 3
}
],
"properties": {
"Node name for S&R": "VHS_DuplicateImages"
},
"widgets_values": {
"multiply_by": 16
}
},
{
"id": 22,
"type": "ADE_AnimateDiffLoaderWithContext",
"pos": [
504,
-119
],
"size": {
"0": 342.5999755859375,
"1": 190
},
"flags": {},
"order": 26,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 46
},
{
"name": "context_options",
"type": "CONTEXT_OPTIONS",
"link": null
},
{
"name": "motion_lora",
"type": "MOTION_LORA",
"link": 33
},
{
"name": "motion_model_settings",
"type": "MOTION_MODEL_SETTINGS",
"link": null
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
43
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ADE_AnimateDiffLoaderWithContext"
},
"widgets_values": [
"mm_sd_v15_v2.ckpt",
"sqrt_linear (AnimateDiff)",
1,
true
]
},
{
"id": 26,
"type": "IPAdapterApply",
"pos": [
1342,
-178
],
"size": {
"0": 315,
"1": 258
},
"flags": {},
"order": 34,
"mode": 0,
"inputs": [
{
"name": "ipadapter",
"type": "IPADAPTER",
"link": 38
},
{
"name": "clip_vision",
"type": "CLIP_VISION",
"link": 39
},
{
"name": "image",
"type": "IMAGE",
"link": 68
},
{
"name": "model",
"type": "MODEL",
"link": 43
},
{
"name": "attn_mask",
"type": "MASK",
"link": null
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
44
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "IPAdapterApply"
},
"widgets_values": [
0.73,
0.3,
"original",
0,
0.407,
false
]
},
{
"id": 27,
"type": "CLIPVisionLoader",
"pos": [
942,
-10
],
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "CLIP_VISION",
"type": "CLIP_VISION",
"links": [
39
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPVisionLoader"
},
"widgets_values": [
"SD1.5\\pytorch_model.bin"
]
},
{
"id": 25,
"type": "IPAdapterModelLoader",
"pos": [
959,
-143
],
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "IPADAPTER",
"type": "IPADAPTER",
"links": [
38
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "IPAdapterModelLoader"
},
"widgets_values": [
"ip-adapter_sd15.bin"
]
},
{
"id": 8,
"type": "PreviewImage",
"pos": [
2605,
-137
],
"size": {
"0": 428.0494689941406,
"1": 410.13134765625
},
"flags": {},
"order": 38,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 10
}
],
"properties": {
"Node name for S&R": "PreviewImage"
}
},
{
"id": 31,
"type": "VAEDecode",
"pos": [
987,
795
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 32,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 60
},
{
"name": "vae",
"type": "VAE",
"link": 153
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
62,
68,
98
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 7,
"type": "VAEDecode",
"pos": [
2217,
-162
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 37,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 8
},
{
"name": "vae",
"type": "VAE",
"link": 82
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
10,
105
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 43,
"type": "RIFE VFI",
"pos": [
2109,
-17
],
"size": {
"0": 443.4000244140625,
"1": 198
},
"flags": {},
"order": 39,
"mode": 0,
"inputs": [
{
"name": "frames",
"type": "IMAGE",
"link": 105
},
{
"name": "optional_interpolation_states",
"type": "INTERPOLATION_STATES",
"link": null
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
106
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "RIFE VFI"
},
"widgets_values": [
"rife47.pth",
10,
2,
true,
true,
1
]
},
{
"id": 39,
"type": "VAELoader",
"pos": [
940,
138
],
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 2,
"mode": 0,
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
82,
83
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"vae-ft-mse-840000-ema-pruned.safetensors"
]
},
{
"id": 37,
"type": "VAEDecode",
"pos": [
399,
1296
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 27,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 73
},
{
"name": "vae",
"type": "VAE",
"link": 74
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
75,
76
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 63,
"type": "Note",
"pos": [
-101,
1468
],
"size": {
"0": 244.24563598632812,
"1": 174.30503845214844
},
"flags": {},
"order": 3,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"First KSampler: renders the base picture from which the animation will be created."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 64,
"type": "Note",
"pos": [
840,
1386
],
"size": {
"0": 244.24563598632812,
"1": 174.30503845214844
},
"flags": {},
"order": 4,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"Second KSampler: creates variations of the original picture. Increasing the denoise creates more variation in the picture, which leads to more animation but also less coherence. Lower values work better for fire and smoke."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 33,
"type": "VAEEncode",
"pos": [
1343,
752
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 35,
"mode": 0,
"inputs": [
{
"name": "pixels",
"type": "IMAGE",
"link": 98
},
{
"name": "vae",
"type": "VAE",
"link": 83
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
104
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEEncode"
}
},
{
"id": 29,
"type": "CLIPTextEncode",
"pos": [
180,
279
],
"size": {
"0": 210,
"1": 54
},
"flags": {
"collapsed": false
},
"order": 23,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 67
},
{
"name": "text",
"type": "STRING",
"link": 56,
"widget": {
"name": "text"
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
151
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 71,
"type": "Note",
"pos": [
-757,
93
],
"size": {
"0": 343.9232177734375,
"1": 135.80557250976562
},
"flags": {},
"order": 5,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"SD 1.5 model for animation. I like to use epicrealism_naturalSin for realistic pictures and Mistoon Anime for anime."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 73,
"type": "Note",
"pos": [
-556,
1442
],
"size": {
"0": 305.2947692871094,
"1": 79.59019470214844
},
"flags": {},
"order": 6,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"I love this LoRA for XL. It's not mandatory, but I love it.\n\nxl_more_art"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 34,
"type": "CheckpointLoaderSimple",
"pos": [
-1030,
1286
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 7,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
121
],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
122
],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [
74,
153,
154
],
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"sd_xl_turbo_1.0_fp16.safetensors"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 75,
"type": "Note",
"pos": [
-1267,
738
],
"size": {
"0": 284.1612854003906,
"1": 110.83155822753906
},
"flags": {},
"order": 8,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"Adding a style can affect the render negatively or positively; more experimentation is required."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 76,
"type": "Note",
"pos": [
-1244,
942
],
"size": {
"0": 210,
"1": 58
},
"flags": {},
"order": 9,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"This will set the final resolution of the animation"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 24,
"type": "VHS_VideoCombine",
"pos": [
2057,
426
],
"size": [
970,
676
],
"flags": {},
"order": 40,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 106
}
],
"outputs": [],
"properties": {
"Node name for S&R": "VHS_VideoCombine"
},
"widgets_values": {
"frame_rate": 16,
"loop_count": 0,
"filename_prefix": "AnimateDiff",
"format": "video/h264-mp4",
"pingpong": false,
"save_image": true,
"crf": 20,
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "AnimateDiff_07812.mp4",
"subfolder": "",
"type": "output",
"format": "video/h264-mp4"
}
}
}
},
{
"id": 74,
"type": "Note",
"pos": [
-1463,
99
],
"size": {
"0": 595.811279296875,
"1": 89.92710876464844
},
"flags": {
"collapsed": false
},
"order": 10,
"mode": 0,
"title": "INTRODUCTION",
"properties": {
"text": ""
},
"widgets_values": [
"This workflow is highly optimized. It uses SDXL Turbo, an SD 1.5 model, and LCM.\n\nThese models are not prepared for high resolutions.\n\nI tried 512x512, 600x400, and 700x400.\n"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 36,
"type": "EmptyLatentImage",
"pos": [
-952,
910
],
"size": {
"0": 315,
"1": 106
},
"flags": {},
"order": 11,
"mode": 0,
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
72
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
800,
400,
1
]
},
{
"id": 38,
"type": "PreviewImage",
"pos": [
153,
1477
],
"size": {
"0": 210,
"1": 246
},
"flags": {},
"order": 28,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 75
}
],
"properties": {
"Node name for S&R": "PreviewImage"
}
},
{
"id": 51,
"type": "LoraLoader",
"pos": [
-566,
1284
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 18,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 121
},
{
"name": "clip",
"type": "CLIP",
"link": 122
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
148,
149
],
"shape": 3,
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
147,
157
],
"shape": 3,
"slot_index": 1
}
],
"properties": {
"Node name for S&R": "LoraLoader"
},
"widgets_values": [
"xl_more_art-full_v1.safetensors",
1,
1
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
-480,
1180
],
"size": {
"0": 210,
"1": 54
},
"flags": {
"collapsed": false
},
"order": 21,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 157
},
{
"name": "text",
"type": "STRING",
"link": 79,
"widget": {
"name": "text"
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
71,
81
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 66,
"type": "Note",
"pos": [
-942,
471
],
"size": {
"0": 365.60003662109375,
"1": 155.20001220703125
},
"flags": {},
"order": 12,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"Main prompt and BatchPromptSchedule.\n\nI wasn't able to find a good way to pass text to BatchPromptSchedule, so since we have two BatchPromptSchedule nodes, they need to have the same schedule and you will have to copy and paste it into both nodes.\n\nIf you don't want to use the prompt schedule, copy the following text into both nodes:\n\n\"0\" :\"\",\n\"1\" :\"\""
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 65,
"type": "Note",
"pos": [
1797.030856323242,
152.557113342285
],
"size": {
"0": 264.6352844238281,
"1": 176.0887908935547
},
"flags": {},
"order": 13,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"Third KSampler: interpolates the pictures obtained in the second KSampler to produce the animation. Higher denoise values smooth the animation and can sometimes correct inconsistencies, but they also degrade the quality of the pictures."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 18,
"type": "ModelSamplingDiscrete",
"pos": [
43,
-297
],
"size": {
"0": 315,
"1": 82
},
"flags": {},
"order": 22,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 24
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
46
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelSamplingDiscrete"
},
"widgets_values": [
"lcm",
false
]
},
{
"id": 23,
"type": "ADE_AnimateDiffLoRALoader",
"pos": [
24,
-43
],
"size": {
"0": 355.20001220703125,
"1": 82
},
"flags": {},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "prev_motion_lora",
"type": "MOTION_LORA",
"link": null
}
],
"outputs": [
{
"name": "MOTION_LORA",
"type": "MOTION_LORA",
"links": [
33
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ADE_AnimateDiffLoRALoader"
},
"widgets_values": [
"v2_lora_ZoomIn.ckpt",
0.606
]
},
{
"id": 17,
"type": "LoraLoader",
"pos": [
-355,
278
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 19,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 48
},
{
"name": "clip",
"type": "CLIP",
"link": 49
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
24
],
"shape": 3,
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
67,
85
],
"shape": 3,
"slot_index": 1
}
],
"properties": {
"Node name for S&R": "LoraLoader"
},
"widgets_values": [
"lcm-lora-sdv1-5.safetensors",
0.3,
1
]
},
{
"id": 72,
"type": "Note",
"pos": [
-1009,
1147
],
"size": {
"0": 278.58172607421875,
"1": 80.27725219726562
},
"flags": {},
"order": 15,
"mode": 0,
"properties": {
"text": ""
},
"widgets_values": [
"Model for creating the pictures that will be the base of the animation. A manga-style model usually works better for anime."
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 32,
"type": "PreviewImage",
"pos": [
1380,
930
],
"size": {
"0": 354.6648254394531,
"1": 401.10565185546875
},
"flags": {},
"order": 33,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 62
}
],
"properties": {
"Node name for S&R": "PreviewImage"
}
},
{
"id": 19,
"type": "KSampler",
"pos": [
871,
1101
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 31,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 148
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 87
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 81
},
{
"name": "latent_image",
"type": "LATENT",
"link": 31
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
60
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
143599132557193,
"fixed",
3,
0.98,
"euler_ancestral",
"karras",
0.5
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
1742,
-179
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 36,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 44
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 150
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 151
},
{
"name": "latent_image",
"type": "LATENT",
"link": 104
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
8
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
1110580512300929,
"fixed",
13,
5.01,
"uni_pc_bh2",
"normal",
0.71
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 1,
"type": "CheckpointLoaderSimple",
"pos": [
-747,
284
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 16,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
48
],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
49
],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [],
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"epicrealism_naturalSin.safetensors"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 41,
"type": "BatchPromptSchedule",
"pos": [
-323,
766
],
"size": {
"0": 555.8451538085938,
"1": 303.3249816894531
},
"flags": {},
"order": 20,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 147
},
{
"name": "pre_text",
"type": "STRING",
"link": null,
"widget": {
"name": "pre_text"
}
},
{
"name": "app_text",
"type": "STRING",
"link": 158,
"widget": {
"name": "app_text"
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
87,
146
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "BatchPromptSchedule"
},
"widgets_values": [
"\"0\" :\"serious, eyes open\",\n\"8\" :\"serious, eyes closed\"\n",
16,
"\"0\" :\"looking side\", \"8\" :\"looking front\", \"16\" :\"\"",
"",
4,
4,
4,
4
]
},
{
"id": 40,
"type": "BatchPromptSchedule",
"pos": [
301,
764
],
"size": {
"0": 599.68505859375,
"1": 286.6015319824219
},
"flags": {},
"order": 24,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 85
},
{
"name": "pre_text",
"type": "STRING",
"link": null,
"widget": {
"name": "pre_text"
}
},
{
"name": "app_text",
"type": "STRING",
"link": 159,
"widget": {
"name": "app_text"
}
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
150
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "BatchPromptSchedule"
},
"widgets_values": [
"\"0\" :\"serious, eyes open\",\n\"8\" :\"serious, eyes closed\"\n\n",
16,
"\"0\" :\"looking side\", \"8\" :\"looking front\", \"16\" :\"\"",
"",
4,
4,
4,
4
]
},
{
"id": 35,
"type": "KSampler",
"pos": [
272,
1149
],
"size": {
"0": 401.2197570800781,
"1": 262
},
"flags": {},
"order": 25,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 149
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 146
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 71
},
{
"name": "latent_image",
"type": "LATENT",
"link": 72
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
73
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
380066260484349,
"fixed",
3,
1.32,
"euler_ancestral",
"karras",
1
]
},
{
"id": 5,
"type": "SDXLPromptStyler",
"pos": [
-961,
667
],
"size": {
"0": 400,
"1": 200
},
"flags": {},
"order": 17,
"mode": 0,
"outputs": [
{
"name": "text_positive",
"type": "STRING",
"links": [
158,
159
],
"shape": 3,
"slot_index": 0
},
{
"name": "text_negative",
"type": "STRING",
"links": [
56,
79
],
"shape": 3,
"slot_index": 1
}
],
"properties": {
"Node name for S&R": "SDXLPromptStyler"
},
"widgets_values": [
"a fireman saving a little girl in a fire",
"",
"sai-photographic",
"No"
]
}
],
"links": [
[
8,
3,
0,
7,
0,
"LATENT"
],
[
10,
7,
0,
8,
0,
"IMAGE"
],
[
24,
17,
0,
18,
0,
"MODEL"
],
[
29,
20,
0,
21,
0,
"IMAGE"
],
[
31,
21,
0,
19,
3,
"LATENT"
],
[
33,
23,
0,
22,
2,
"MOTION_LORA"
],
[
38,
25,
0,
26,
0,
"IPADAPTER"
],
[
39,
27,
0,
26,
1,
"CLIP_VISION"
],
[
43,
22,
0,
26,
3,
"MODEL"
],
[
44,
26,
0,
3,
0,
"MODEL"
],
[
46,
18,
0,
22,
0,
"MODEL"
],
[
48,
1,
0,
17,
0,
"MODEL"
],
[
49,
1,
1,
17,
1,
"CLIP"
],
[
56,
5,
1,
29,
1,
"STRING"
],
[
60,
19,
0,
31,
0,
"LATENT"
],
[
62,
31,
0,
32,
0,
"IMAGE"
],
[
67,
17,
1,
29,
0,
"CLIP"
],
[
68,
31,
0,
26,
2,
"IMAGE"
],
[
71,
6,
0,
35,
2,
"CONDITIONING"
],
[
72,
36,
0,
35,
3,
"LATENT"
],
[
73,
35,
0,
37,
0,
"LATENT"
],
[
74,
34,
2,
37,
1,
"VAE"
],
[
75,
37,
0,
38,
0,
"IMAGE"
],
[
76,
37,
0,
20,
0,
"IMAGE"
],
[
79,
5,
1,
6,
1,
"STRING"
],
[
81,
6,
0,
19,
2,
"CONDITIONING"
],
[
82,
39,
0,
7,
1,
"VAE"
],
[
83,
39,
0,
33,
1,
"VAE"
],
[
85,
17,
1,
40,
0,
"CLIP"
],
[
87,
41,
0,
19,
1,
"CONDITIONING"
],
[
98,
31,
0,
33,
0,
"IMAGE"
],
[
104,
33,
0,
3,
3,
"LATENT"
],
[
105,
7,
0,
43,
0,
"IMAGE"
],
[
106,
43,
0,
24,
0,
"IMAGE"
],
[
121,
34,
0,
51,
0,
"MODEL"
],
[
122,
34,
1,
51,
1,
"CLIP"
],
[
146,
41,
0,
35,
1,
"CONDITIONING"
],
[
147,
51,
1,
41,
0,
"CLIP"
],
[
148,
51,
0,
19,
0,
"MODEL"
],
[
149,
51,
0,
35,
0,
"MODEL"
],
[
150,
40,
0,
3,
1,
"CONDITIONING"
],
[
151,
29,
0,
3,
2,
"CONDITIONING"
],
[
153,
34,
2,
31,
1,
"VAE"
],
[
154,
34,
2,
21,
1,
"VAE"
],
[
157,
51,
1,
6,
0,
"CLIP"
],
[
158,
5,
0,
41,
2,
"STRING"
],
[
159,
5,
0,
40,
2,
"STRING"
]
],
"groups": [],
"config": {},
"extra": {},
"version": 0.4
}
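A minimal Python sketch for inspecting this graph-format export outside ComfyUI. It assumes the JSON above is saved locally as txt2videoAnimateDiffworkflow.json (assumed filename) and relies only on the node/link structure shown here: it summarizes the graph and prints the embedded Note texts. Note that this is the editor (graph) layout, not the API-format prompt, so it is only a read-only inspection sketch.

import json

# Minimal sketch (assumed filename): load the exported graph and summarize it.
with open("txt2videoAnimateDiffworkflow.json", "r", encoding="utf-8") as f:
    workflow = json.load(f)

# "nodes" holds each node's id, type, position and widget values;
# "links" wires output slots to input slots between those nodes.
print(f"{len(workflow['nodes'])} nodes, {len(workflow['links'])} links")

for node in workflow["nodes"]:
    print(f"#{node['id']:>2} {node['type']}")
    if node["type"] == "Note":
        # Note nodes carry the author's inline documentation as widget values.
        for text in node.get("widgets_values", []):
            print(f"    {text}")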