{ "id": 133814, "modelId": 122813, "name": "V0.5", "createdAt": "2023-08-04T22:34:29.238Z", "updatedAt": "2023-08-04T22:44:18.162Z", "status": "Published", "publishedAt": "2023-08-04T22:44:18.158Z", "trainedWords": [ "zdyna_pose" ], "trainingStatus": null, "trainingDetails": null, "baseModel": "SDXL 1.0", "baseModelType": "Standard", "earlyAccessTimeFrame": 0, "description": null, "stats": { "downloadCount": 4198, "ratingCount": 390, "rating": 4.99, "thumbsUpCount": 589 }, "model": { "name": "Action SDXL", "type": "LORA", "nsfw": false, "poi": false }, "files": [ { "id": 97584, "sizeKB": 166557.58984375, "name": "!action-sdxl-V0.5.safetensors", "type": "Model", "metadata": { "fp": null, "size": null, "format": "SafeTensor" }, "pickleScanResult": "Success", "pickleScanMessage": "No Pickle imports", "virusScanResult": "Success", "virusScanMessage": null, "scannedAt": "2023-08-04T22:40:43.094Z", "hashes": { "AutoV1": "D04FB835", "AutoV2": "5F6DAB6A3F", "SHA256": "5F6DAB6A3FCD45F729197D24B5D4303B3DEE33B881253C8047B9AA1F15388DA2", "CRC32": "5FCDA9DC", "BLAKE3": "52ACCC054CF691F9F860C86868EBB732BF9E2F3F4EFFC503587BD072D551EE55" }, "primary": true, "downloadUrl": "https://civitai.com/api/download/models/133814" } ], "images": [ { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0c86eff8-658e-4e56-ade2-4e07ef258430/width=450/1876540.jpeg", "nsfw": "Soft", "width": 1152, "height": 1728, "hash": "UBAe5Ox]Obpeu6x^%hx__4x]H=RjI8ofRNR4", "type": "image", "metadata": { "hash": "UBAe5Ox]Obpeu6x^%hx__4x]H=RjI8ofRNR4", "size": 2560156, "width": 1152, "height": 1728 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1152, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "sepia" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 425046296258221, "end_at_step": 37, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 37, "return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" }, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and 
debris flying in the air, stormy sky, raining" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "cgi, 3d" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 0.6, "strength_model": 0.6 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, "class_type": "VAELoader" }, "52": { "inputs": { "vae": [ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 9, "seed": 1075896118322412, "model": [ "4", 0 ], "steps": 10, "denoise": 0.5500000000000002, "negative": [ "63", 0 ], "positive": [ "62", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "62": { "inputs": { "clip": [ "49", 1 ], "text": "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a dramatic pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects, volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" }, "class_type": "CLIPTextEncode" }, "63": { "inputs": { "clip": [ "49", 1 ], "text": "cgi, 3d, photo" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 0, 11, 3, "LATENT" ], [ 76, 49, 1, 62, 0, "CLIP" ], [ 77, 49, 1, 63, 0, "CLIP" ], [ 78, 62, 0, 54, 1, "CONDITIONING" ], [ 79, 63, 0, 54, 2, "CONDITIONING" ], [ 80, 4, 0, 54, 0, "MODEL" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { "text": "" }, 
"widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." ] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 5, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": "VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", "outputs": [ 
{ "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": "samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, "size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 37, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 6, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. 
This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop it's process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the left over noise to the next KSampler (assuming there is another one)." ] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 7, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 19, "pos": [ 1124, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 8, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1152, 1 ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { "0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 14, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48, 76, 77 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ 
"XL/!action-sdxl-V0.5.safetensors", 0.6, 0.6 ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 9, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 10, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 37, "fixed" ] }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting" ] }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "sepia" ] }, { "id": 62, "pos": [ 770, 926 ], "mode": 0, "size": { "0": 438.6286926269531, "1": 153.9615936279297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 76, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 78 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a dramatic pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects, volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" ] }, { "id": 63, "pos": [ 769, 1119 ], "mode": 0, "size": { "0": 439.6286926269531, "1": 148.1879119873047 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 77, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 79 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d, photo" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 11, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45, 80 ], 
"slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 16, "pos": [ 769, 694 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 13, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d" ] }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 348.63818359375, "1": 180.3082733154297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 12, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 425046296258221, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 37, "enable" ] }, { "id": 55, "pos": [ 1630, 930 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", "flags": {}, "order": 27, "inputs": [ { "link": 80, "name": "model", "type": "MODEL" }, { "link": 78, "name": "positive", "type": "CONDITIONING" }, { "link": 79, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": 
[ 1075896118322412, "randomize", 10, 9, "euler_ancestral", "normal", 0.5500000000000002 ] } ], "config": {}, "groups": [ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscaled Image", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Refined Image", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 80, "last_node_id": 63 } }, "steps": 40, "width": 768, "height": 1152, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], "controlNets": [], "negativePrompt": "sepia", "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 0.6, "strengthClip": 0.6 } ] } }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/46a5b83a-9138-437e-9511-51efccfb26ec/width=450/1876535.jpeg", "nsfw": "None", "width": 1152, "height": 1728, "hash": "U9FFgFzT05~p?HM{PVSe]iKQ02J;-=%L-o-V", "type": "image", "metadata": { "hash": "U9FFgFzT05~p?HM{PVSe]iKQ02J;-=%L-o-V", "size": 2408976, "width": 1152, "height": 1728 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1152, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 44317258845217, "end_at_step": 37, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 37, 
"return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" }, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 0.6, "strength_model": 0.6 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, "class_type": "VAELoader" }, "52": { "inputs": { "vae": [ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 9, "seed": 779155592775634, "model": [ "4", 0 ], "steps": 10, "denoise": 0.5000000000000001, "negative": [ "63", 0 ], "positive": [ "62", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "62": { "inputs": { "clip": [ "49", 1 ], "text": "digital painting of a female superhero casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" }, "class_type": "CLIPTextEncode" }, "63": { "inputs": { "clip": [ "49", 1 ], "text": "flame fire" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 
0, 11, 3, "LATENT" ], [ 76, 49, 1, 62, 0, "CLIP" ], [ 77, 49, 1, 63, 0, "CLIP" ], [ 78, 62, 0, 54, 1, "CONDITIONING" ], [ 79, 63, 0, 54, 2, "CONDITIONING" ], [ 80, 4, 0, 54, 0, "MODEL" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." 
] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 5, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": "VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": "samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 12, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the 
air volumetric lighting, bright background lighting, orange and teal color grading" ] }, { "id": 16, "pos": [ 769, 657 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 13, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, "size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 37, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 6, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. 
Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop it's process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the left over noise to the next KSampler (assuming there is another one)." ] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 7, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 19, "pos": [ 1124, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 8, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1152, 1 ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { "0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 14, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48, 76, 77 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ "XL/!action-sdxl-V0.5.safetensors", 0.6, 0.6 ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 9, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, 
"default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 10, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 37, "fixed" ] }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" ] }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 62, "pos": [ 770, 926 ], "mode": 0, "size": { "0": 438.6286926269531, "1": 153.9615936279297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 76, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 78 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting of a female superhero casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" ] }, { "id": 63, "pos": [ 769, 1119 ], "mode": 0, "size": { "0": 439.6286926269531, "1": 148.1879119873047 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 77, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 79 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "flame fire" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 11, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45, 80 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", 
"flags": {}, "order": 27, "inputs": [ { "link": 80, "name": "model", "type": "MODEL" }, { "link": 78, "name": "positive", "type": "CONDITIONING" }, { "link": 79, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": [ 779155592775634, "randomize", 10, 9, "euler_ancestral", "normal", 0.5000000000000001 ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 44317258845217, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 37, "enable" ] }, { "id": 55, "pos": [ 1630, 930 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } } ], "config": {}, "groups": [ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscale", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Group", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 80, "last_node_id": 63 } }, "steps": 40, "width": 768, "height": 1152, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], 
"controlNets": [], "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 0.6, "strengthClip": 0.6 } ] } }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/e9f621c5-89da-4993-bace-e602077483e9/width=450/1876534.jpeg", "nsfw": "None", "width": 1152, "height": 1472, "hash": "UJEo;_}=afVYAHS~s:VYD*9v9akC-:rq$%tR", "type": "image", "metadata": { "hash": "UJEo;_}=afVYAHS~s:VYD*9v9akC-:rq$%tR", "size": 2456669, "width": 1152, "height": 1472 }, "availability": "Public", "sizeKB": null, "meta": null }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/dab6c85e-a435-43e6-bda8-97f218a27011/width=450/1876536.jpeg", "nsfw": "Soft", "width": 1152, "height": 2016, "hash": "UED0N5rs_400PCxvE0e-0fE0I9^+xvWX-Us=", "type": "image", "metadata": { "hash": "UED0N5rs_400PCxvE0e-0fE0I9^+xvWX-Us=", "size": 2848717, "width": 1152, "height": 2016 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1344, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "sepia" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 998063890160206, "end_at_step": 33, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 33, "return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" }, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "cgi, 3d" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 1, "strength_model": 1 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, "class_type": "VAELoader" }, "52": { "inputs": { "vae": 
[ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 9, "seed": 386231378085028, "model": [ "4", 0 ], "steps": 10, "denoise": 0.5000000000000001, "negative": [ "63", 0 ], "positive": [ "62", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "62": { "inputs": { "clip": [ "49", 1 ], "text": "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a dramatic pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects, volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" }, "class_type": "CLIPTextEncode" }, "63": { "inputs": { "clip": [ "49", 1 ], "text": "cgi, 3d, photo" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 0, 11, 3, "LATENT" ], [ 76, 49, 1, 62, 0, "CLIP" ], [ 77, 49, 1, 63, 0, "CLIP" ], [ 78, 62, 0, 54, 1, "CONDITIONING" ], [ 79, 63, 0, 54, 2, "CONDITIONING" ], [ 80, 4, 0, 54, 0, "MODEL" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. 
This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." ] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 5, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": "VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": 
"samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, "size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 33, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 6, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. 
Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop it's process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the left over noise to the next KSampler (assuming there is another one)." ] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 7, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 19, "pos": [ 1124, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 8, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting" ] }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], 
"bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "sepia" ] }, { "id": 62, "pos": [ 770, 926 ], "mode": 0, "size": { "0": 438.6286926269531, "1": 153.9615936279297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 76, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 78 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a dramatic pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects, volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" ] }, { "id": 63, "pos": [ 769, 1119 ], "mode": 0, "size": { "0": 439.6286926269531, "1": 148.1879119873047 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 77, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 79 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d, photo" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 9, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45, 80 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", "flags": {}, "order": 27, "inputs": [ { "link": 80, "name": "model", "type": "MODEL" }, { "link": 78, "name": "positive", "type": "CONDITIONING" }, { "link": 79, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": [ 386231378085028, "randomize", 10, 9, "euler_ancestral", "normal", 0.5000000000000001 ] }, { "id": 16, "pos": [ 769, 694 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 13, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d" ] }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 348.63818359375, "1": 180.3082733154297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 12, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting, dutch 
angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, wind, dust, paper, confetti and debris flying in the air, stormy sky, raining" ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { "0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 14, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48, 76, 77 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ "XL/!action-sdxl-V0.5.safetensors", 1, 1 ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 10, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 33, "fixed" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 11, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1344, 1 ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 998063890160206, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 33, "enable" ] }, { "id": 55, "pos": [ 1630, 930 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } } ], "config": {}, "groups": [ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 
45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscaled Image", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Refined Image", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 80, "last_node_id": 63 } }, "steps": 40, "width": 768, "height": 1344, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, foreshortening, man in trenchcoat standing above viewer in a zdyna_pose pointing a pistol at viewer in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], "controlNets": [], "negativePrompt": "sepia", "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 1, "strengthClip": 1 } ] } }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/d1fafbae-6794-4a32-a680-7c2265b426b5/width=450/1876537.jpeg", "nsfw": "Soft", "width": 1152, "height": 2016, "hash": "ULH2ci-T?as+ys9ZNHf89|Rj9Gf,?bV[V@%L", "type": "image", "metadata": { "hash": "ULH2ci-T?as+ys9ZNHf89|Rj9Gf,?bV[V@%L", "size": 3114384, "width": 1152, "height": 2016 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1344, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, female soldier posing in a zdyna_pose running through a forest in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "gun weapon" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 203822501806021, "end_at_step": 33, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 33, "return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" }, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "digital painting of a female soldier running in a dense forest with motion blur, blurred foreground, wind dust paper shrapnel and debris flying in the air, bright morning sunlight shining through 
the tree canopy and volumetric lighting, cinematic lighting and color grading" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "cgi, 3d, photo" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 0.6, "strength_model": 0.6 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, "class_type": "VAELoader" }, "52": { "inputs": { "vae": [ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 9, "seed": 801044258762705, "model": [ "4", 0 ], "steps": 10, "denoise": 0.6000000000000002, "negative": [ "63", 0 ], "positive": [ "62", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "62": { "inputs": { "clip": [ "49", 1 ], "text": "from below, dutch angle, foreshortening, digital painting of a female soldier running in a dense forest with motion blur, blurred foreground, wind dust paper shrapnel and debris flying in the air, bright morning sunlight shining through the tree canopy and volumetric lighting, cinematic lighting and color grading" }, "class_type": "CLIPTextEncode" }, "63": { "inputs": { "clip": [ "49", 1 ], "text": "cgi, 3d, photo" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 0, 11, 3, "LATENT" ], [ 76, 49, 1, 62, 0, "CLIP" ], [ 77, 49, 1, 63, 0, "CLIP" ], [ 78, 62, 0, 54, 1, "CONDITIONING" ], [ 79, 63, 0, 54, 2, "CONDITIONING" ], [ 80, 4, 0, 54, 0, "MODEL" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { 
"text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." ] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 5, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": "VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", 
"outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": "samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, "size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 33, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 6, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. 
This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop it's process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the left over noise to the next KSampler (assuming there is another one)." ] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 7, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { "0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 14, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48, 76, 77 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ "XL/!action-sdxl-V0.5.safetensors", 0.6, 0.6 ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 8, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": 
"#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, female soldier posing in a zdyna_pose running through a forest in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" ] }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "gun weapon" ] }, { "id": 63, "pos": [ 769, 1119 ], "mode": 0, "size": { "0": 439.6286926269531, "1": 148.1879119873047 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 77, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 79 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d, photo" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 9, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45, 80 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 16, "pos": [ 769, 694 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 13, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "cgi, 3d, photo" ] }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 348.63818359375, "1": 180.3082733154297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 12, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting of a female soldier running in a dense forest with motion blur, blurred foreground, wind dust paper shrapnel and debris flying in the air, bright morning sunlight shining through the tree canopy and volumetric lighting, cinematic lighting and color grading" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 10, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1344, 1 ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": 
"#233", "flags": {}, "order": 11, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 33, "fixed" ] }, { "id": 62, "pos": [ 770, 926 ], "mode": 0, "size": { "0": 438.6286926269531, "1": 153.9615936279297 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 76, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 78 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "from below, dutch angle, foreshortening, digital painting of a female soldier running in a dense forest with motion blur, blurred foreground, wind dust paper shrapnel and debris flying in the air, bright morning sunlight shining through the tree canopy and volumetric lighting, cinematic lighting and color grading" ] }, { "id": 19, "pos": [ 1125, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 203822501806021, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 33, "enable" ] }, { "id": 55, "pos": [ 1628, 929 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", "flags": {}, "order": 27, "inputs": [ { "link": 80, "name": "model", "type": "MODEL" }, { "link": 78, "name": "positive", "type": "CONDITIONING" }, { "link": 79, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": [ 801044258762705, "randomize", 10, 9, "euler_ancestral", "normal", 
0.6000000000000002 ] } ], "config": {}, "groups": [ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscaled Image", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Refined Image", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 80, "last_node_id": 63 } }, "steps": 40, "width": 768, "height": 1344, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, female soldier posing in a zdyna_pose running through a forest in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], "controlNets": [], "negativePrompt": "gun weapon", "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 0.6, "strengthClip": 0.6 } ] } }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/8ba57955-1a5a-46b6-ac39-2bbcc9fe97ed/width=450/1876532.jpeg", "nsfw": "None", "width": 1152, "height": 1728, "hash": "U7GIfnqZ00%%804T.9D$=_Mc0eD4IXtSyX={", "type": "image", "metadata": { "hash": "U7GIfnqZ00%%804T.9D$=_Mc0eD4IXtSyX={", "size": 2523068, "width": 1152, "height": 1728 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1152, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 1102447933008394, "end_at_step": 37, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 37, "return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" 
}, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 0.6, "strength_model": 0.6 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, "class_type": "VAELoader" }, "52": { "inputs": { "vae": [ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 9, "seed": 796955245168101, "model": [ "4", 0 ], "steps": 10, "denoise": 0.5000000000000001, "negative": [ "63", 0 ], "positive": [ "62", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "62": { "inputs": { "clip": [ "49", 1 ], "text": "digital painting of a female superhero casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" }, "class_type": "CLIPTextEncode" }, "63": { "inputs": { "clip": [ "49", 1 ], "text": "flame fire" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 0, 11, 3, "LATENT" ], [ 76, 49, 1, 62, 0, "CLIP" ], [ 77, 49, 1, 63, 0, 
"CLIP" ], [ 78, 62, 0, 54, 1, "CONDITIONING" ], [ 79, 63, 0, 54, 2, "CONDITIONING" ], [ 80, 4, 0, 54, 0, "MODEL" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." 
] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 5, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": "VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": "samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 12, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the 
air volumetric lighting, bright background lighting, orange and teal color grading" ] }, { "id": 16, "pos": [ 769, 657 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 13, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, "size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 37, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 6, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. 
Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop its process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the leftover noise to the next KSampler (assuming there is another one)." ] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 7, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 19, "pos": [ 1124, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 8, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1152, 1 ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { "0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 14, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48, 76, 77 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ "XL/!action-sdxl-V0.5.safetensors", 0.6, 0.6 ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 9, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, 
"default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 10, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 37, "fixed" ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 1102447933008394, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 37, "enable" ] }, { "id": 55, "pos": [ 1630, 930 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" ] }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 62, "pos": [ 770, 926 ], "mode": 0, "size": [ 438.62869960379885, 153.96159338106554 ], "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 76, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 78 ], "slot_index": 0 } ], 
"properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting of a female superhero casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" ] }, { "id": 63, "pos": [ 769, 1119 ], "mode": 0, "size": [ 439.62869960379885, 148.18791697079632 ], "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 77, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 79 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "flame fire" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 11, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45, 80 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", "flags": {}, "order": 27, "inputs": [ { "link": 80, "name": "model", "type": "MODEL" }, { "link": 78, "name": "positive", "type": "CONDITIONING" }, { "link": 79, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": [ 796955245168101, "randomize", 10, 9, "euler_ancestral", "normal", 0.5000000000000001 ] } ], "config": {}, "groups": [ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscale", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Group", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 80, "last_node_id": 63 } }, "steps": 40, "width": 768, "height": 1152, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], "controlNets": 
[], "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 0.6, "strengthClip": 0.6 } ] } }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/495555c7-0bea-4cca-83e8-62cd2de371d0/width=450/1876533.jpeg", "nsfw": "None", "width": 1152, "height": 1472, "hash": "UIE:PK^h4oEM~q%LIAIUS6S5Dii_pIR*V@az", "type": "image", "metadata": { "hash": "UIE:PK^h4oEM~q%LIAIUS6S5Dii_pIR*V@az", "size": 2022512, "width": 1152, "height": 1472 }, "availability": "Public", "sizeKB": null, "meta": null }, { "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/3f841dea-1767-4dfb-99b6-095d6ec3b49d/width=450/1876531.jpeg", "nsfw": "None", "width": 1152, "height": 1728, "hash": "UKHL9+^1O@_3~p?atlIUI=g4E1M{X99ZE1R+", "type": "image", "metadata": { "hash": "UKHL9+^1O@_3~p?atlIUI=g4E1M{X99ZE1R+", "size": 2202609, "width": 1152, "height": 1728 }, "availability": "Public", "sizeKB": null, "meta": { "vaes": [ "sdxl_vae_0.9.safetensors" ], "Model": "XL/sd_xl_base_1.0", "comfy": { "prompt": { "4": { "inputs": { "ckpt_name": "XL/sd_xl_base_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "5": { "inputs": { "width": 768, "height": 1152, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "6": { "inputs": { "clip": [ "49", 1 ], "text": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "7": { "inputs": { "clip": [ "49", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "10": { "inputs": { "cfg": 8, "model": [ "49", 0 ], "steps": 40, "negative": [ "7", 0 ], "positive": [ "6", 0 ], "add_noise": "enable", "scheduler": "karras", "noise_seed": 1123645071018022, "end_at_step": 37, "latent_image": [ "5", 0 ], "sampler_name": "dpmpp_2s_ancestral", "start_at_step": 0, "return_with_leftover_noise": "enable" }, "class_type": "KSamplerAdvanced" }, "11": { "inputs": { "cfg": 8, "model": [ "12", 0 ], "steps": 40, "negative": [ "16", 0 ], "positive": [ "15", 0 ], "add_noise": "disable", "scheduler": "normal", "noise_seed": 0, "end_at_step": 10000, "latent_image": [ "10", 0 ], "sampler_name": "euler", "start_at_step": 37, "return_with_leftover_noise": "disable" }, "class_type": "KSamplerAdvanced" }, "12": { "inputs": { "ckpt_name": "XL/sd_xl_refiner_1.0.safetensors" }, "class_type": "CheckpointLoaderSimple" }, "15": { "inputs": { "clip": [ "12", 1 ], "text": "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting, orange and teal color grading" }, "class_type": "CLIPTextEncode" }, "16": { "inputs": { "clip": [ "12", 1 ], "text": "" }, "class_type": "CLIPTextEncode" }, "17": { "inputs": { "vae": [ "50", 0 ], "samples": [ "11", 0 ] }, "class_type": "VAEDecode" }, "19": { "inputs": { "images": [ "17", 0 ], "filename_prefix": "action-sdxl-hero" }, "class_type": "SaveImage" }, "49": { "inputs": { "clip": [ "4", 1 ], "model": [ "4", 0 ], "lora_name": "XL/!action-sdxl-V0.5.safetensors", "strength_clip": 0.6, "strength_model": 0.6 }, "class_type": "LoraLoader" }, "50": { "inputs": { "vae_name": "sdxl_vae_0.9.safetensors" }, 
"class_type": "VAELoader" }, "52": { "inputs": { "vae": [ "50", 0 ], "samples": [ "10", 0 ] }, "class_type": "VAEDecode" }, "53": { "inputs": { "images": [ "52", 0 ] }, "class_type": "PreviewImage" }, "54": { "inputs": { "cfg": 6, "seed": 85133334682496, "model": [ "12", 0 ], "steps": 6, "denoise": 0.35, "negative": [ "61", 0 ], "positive": [ "60", 0 ], "scheduler": "normal", "latent_image": [ "59", 0 ], "sampler_name": "euler_ancestral" }, "class_type": "KSampler" }, "55": { "inputs": { "vae": [ "50", 0 ], "pixels": [ "17", 0 ] }, "class_type": "VAEEncode" }, "56": { "inputs": { "vae": [ "50", 0 ], "samples": [ "54", 0 ] }, "class_type": "VAEDecode" }, "57": { "inputs": { "images": [ "56", 0 ], "filename_prefix": "ComfyUI" }, "class_type": "SaveImage" }, "59": { "inputs": { "samples": [ "55", 0 ], "scale_by": 1.5, "upscale_method": "nearest-exact" }, "class_type": "LatentUpscaleBy" }, "60": { "inputs": { "clip": [ "12", 1 ], "text": "digital painting, dutch angle, from below, female superhero in fighting stance casting lightning from her outstretched arms in a blue suit with orange capewith a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" }, "class_type": "CLIPTextEncode" }, "61": { "inputs": { "clip": [ "12", 1 ], "text": "" }, "class_type": "CLIPTextEncode" } }, "workflow": { "extra": {}, "links": [ [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 14, 12, 0, 11, 0, "MODEL" ], [ 19, 12, 1, 15, 0, "CLIP" ], [ 20, 12, 1, 16, 0, "CLIP" ], [ 23, 15, 0, 11, 1, "CONDITIONING" ], [ 24, 16, 0, 11, 2, "CONDITIONING" ], [ 25, 11, 0, 17, 0, "LATENT" ], [ 27, 5, 0, 10, 3, "LATENT" ], [ 28, 17, 0, 19, 0, "IMAGE" ], [ 38, 45, 0, 11, 4, "INT" ], [ 41, 45, 0, 10, 4, "INT" ], [ 43, 47, 0, 10, 5, "INT" ], [ 44, 47, 0, 11, 5, "INT" ], [ 45, 4, 0, 49, 0, "MODEL" ], [ 46, 4, 1, 49, 1, "CLIP" ], [ 47, 49, 1, 6, 0, "CLIP" ], [ 48, 49, 1, 7, 0, "CLIP" ], [ 49, 49, 0, 10, 0, "MODEL" ], [ 50, 50, 0, 17, 1, "VAE" ], [ 51, 50, 0, 52, 1, "VAE" ], [ 52, 10, 0, 52, 0, "LATENT" ], [ 53, 52, 0, 53, 0, "IMAGE" ], [ 54, 17, 0, 55, 0, "IMAGE" ], [ 55, 50, 0, 55, 1, "VAE" ], [ 60, 54, 0, 56, 0, "LATENT" ], [ 61, 50, 0, 56, 1, "VAE" ], [ 62, 56, 0, 57, 0, "IMAGE" ], [ 63, 55, 0, 59, 0, "LATENT" ], [ 64, 59, 0, 54, 3, "LATENT" ], [ 65, 10, 0, 11, 3, "LATENT" ], [ 71, 12, 0, 54, 0, "MODEL" ], [ 72, 12, 1, 60, 0, "CLIP" ], [ 73, 12, 1, 61, 0, "CLIP" ], [ 74, 60, 0, 54, 1, "CONDITIONING" ], [ 75, 61, 0, 54, 2, "CONDITIONING" ] ], "nodes": [ { "id": 36, "pos": [ -547.435085420599, -83.41178857372952 ], "mode": 0, "size": { "0": 315.70074462890625, "1": 147.9551239013672 }, "type": "Note", "color": "#323", "flags": {}, "order": 0, "title": "Note - Load Checkpoint BASE", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n \nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. 
This also applies to LoRas and all their deviations" ] }, { "id": 37, "pos": [ -555.6248838808347, 346.6448286818297 ], "mode": 0, "size": { "0": 330, "1": 140 }, "type": "Note", "color": "#323", "flags": {}, "order": 1, "title": "Note - Load Checkpoint REFINER", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This is a checkpoint model loader. \n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRas and all their deviations." ] }, { "id": 41, "pos": [ -537.4124915743454, 1105.4474989519647 ], "mode": 0, "size": { "0": 322.8022766113281, "1": 163.92898559570312 }, "type": "Note", "color": "#332922", "flags": {}, "order": 2, "title": "Note - VAE Decoder", "bgcolor": "#593930", "properties": { "text": "" }, "widgets_values": [ "This node will take the latent data from the KSampler and, using the VAE, it will decode it into visible data\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG.\n\nUsing the better 0.9 VAE here." ] }, { "id": 50, "pos": [ -540.4124915743454, 916.447498951965 ], "mode": 0, "size": { "0": 315, "1": 58 }, "type": "VAELoader", "color": "#332922", "flags": {}, "order": 3, "bgcolor": "#593930", "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 50, 51, 55, 61 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAELoader" }, "widgets_values": [ "sdxl_vae_0.9.safetensors" ] }, { "id": 4, "pos": [ -563.4350854205987, -233.41178857372964 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 4, "title": "Load Checkpoint - BASE", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 45 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 46 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_base_1.0.safetensors" ] }, { "id": 12, "pos": [ -565.6248838808347, 195.64482868182944 ], "mode": 0, "size": { "0": 350, "1": 100 }, "type": "CheckpointLoaderSimple", "color": "#323", "flags": {}, "order": 5, "title": "Load Checkpoint - REFINER", "bgcolor": "#535", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 14, 71 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 19, 20, 72, 73 ], "shape": 3, "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "XL/sd_xl_refiner_1.0.safetensors" ] }, { "id": 42, "pos": [ 85.52377753723336, 872.2042730290383 ], "mode": 0, "size": { "0": 259.1498107910156, "1": 228.0334930419922 }, "type": "Note", "color": "#323", "flags": {}, "order": 6, "title": "Note - Empty Latent Image", "bgcolor": "#535", "properties": { "text": "" }, "widgets_values": [ "This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536" ] }, { "id": 17, "pos": [ -536.4740797149254, 1026.6492009476544 ], "mode": 0, "size": { "0": 200, "1": 50 }, "type": 
"VAEDecode", "color": "#332922", "flags": { "collapsed": true }, "order": 23, "inputs": [ { "link": 25, "name": "samples", "type": "LATENT" }, { "link": 50, "name": "vae", "type": "VAE" } ], "bgcolor": "#593930", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 28, 54 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 52, "pos": [ -365.4740797149253, 1026.6492009476544 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#223", "flags": { "collapsed": true }, "order": 20, "inputs": [ { "link": 52, "name": "samples", "type": "LATENT" }, { "link": 51, "name": "vae", "type": "VAE" } ], "bgcolor": "#335", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 53 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 53, "pos": [ 352, 96 ], "mode": 0, "size": { "0": 369.5992736816406, "1": 326.2545166015625 }, "type": "PreviewImage", "color": "#223", "flags": {}, "order": 22, "inputs": [ { "link": 53, "name": "images", "type": "IMAGE" } ], "bgcolor": "#335", "properties": { "Node name for S&R": "PreviewImage" } }, { "id": 7, "pos": [ 380, 470 ], "mode": 0, "size": { "0": 343.5692138671875, "1": 152.20408630371094 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 18, "inputs": [ { "link": 48, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 6, "pos": [ 23, 467 ], "mode": 0, "size": { "0": 339.08404541015625, "1": 157.1740264892578 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 17, "inputs": [ { "link": 47, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading" ] }, { "id": 15, "pos": [ 764, 474 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 13, "inputs": [ { "link": 19, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 23 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "zdyna_pose, digital painting, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting, orange and teal color grading" ] }, { "id": 16, "pos": [ 769, 657 ], "mode": 0, "size": { "0": 340, "1": 140 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 14, "inputs": [ { "link": 20, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 24 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 11, "pos": [ 766, 91 ], "mode": 0, 
"size": { "0": 300, "1": 340 }, "type": "KSamplerAdvanced", "color": "#323", "flags": {}, "order": 21, "title": "KSampler (Advanced) - REFINER", "inputs": [ { "link": 14, "name": "model", "type": "MODEL", "slot_index": 0 }, { "link": 23, "name": "positive", "type": "CONDITIONING" }, { "link": 24, "name": "negative", "type": "CONDITIONING" }, { "link": 65, "name": "latent_image", "type": "LATENT" }, { "link": 38, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 44, "name": "start_at_step", "type": "INT", "widget": { "name": "start_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 0 } ] } } ], "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 25 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "disable", 0, "fixed", 40, 8, "euler", "normal", 37, 10000, "disable" ] }, { "id": 40, "pos": [ 118, -443 ], "mode": 0, "size": { "0": 451.5049743652344, "1": 424.4164123535156 }, "type": "Note", "color": "#223", "flags": {}, "order": 7, "title": "Note - KSampler ADVANCED General Information", "bgcolor": "#335", "properties": { "text": "" }, "widgets_values": [ "Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all it's leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job completed.\n - steps = This is the amount of iterations you would like to run the positive and negative CLIP prompts through. Each Step will add (positive) or remove (negative) pixels based on what stable diffusion \"thinks\" should be there according to the model's training\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\" where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type, and unfortunately different samplers and schedulers have better results with fewer steps, while others have better success with higher steps. This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number the KSampler will start out it's process of de-noising the picture or \"removing the random noise to reveal the picture within\". The first KSampler usually starts with Step 0. Starting at step 0 is the same as setting denoise to 1.0 in the regular Sampler node.\n - end_at_step = This is the step number the KSampler will stop it's process of de-noising the picture. If there is any remaining leftover noise and return_with_leftover_noise is enabled, then it will pass on the left over noise to the next KSampler (assuming there is another one)." 
] }, { "id": 48, "pos": [ 450, 970 ], "mode": 0, "size": { "0": 213.90769958496094, "1": 110.17156982421875 }, "type": "Note", "color": "#233", "flags": {}, "order": 8, "bgcolor": "#355", "properties": { "text": "" }, "widgets_values": [ "These can be used to control the total sampling steps and the step at which the sampling switches to the refiner." ] }, { "id": 60, "pos": [ 762, 926 ], "mode": 0, "size": { "0": 467.8367919921875, "1": 150.28089904785156 }, "type": "CLIPTextEncode", "color": "#232", "flags": {}, "order": 15, "inputs": [ { "link": 72, "name": "clip", "type": "CLIP" } ], "bgcolor": "#353", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 74 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "digital painting, dutch angle, from below, female superhero in fighting stance casting lightning from her outstretched arms in a blue suit with orange capewith a tall city in the background in a low camera angle actions scene, foreground objects, wind, dust, paper, confetti and debris flying in the air volumetric lighting, bright background lighting" ] }, { "id": 61, "pos": [ 765, 1114 ], "mode": 0, "size": { "0": 462.8668518066406, "1": 146.4709014892578 }, "type": "CLIPTextEncode", "color": "#322", "flags": {}, "order": 16, "inputs": [ { "link": 73, "name": "clip", "type": "CLIP" } ], "bgcolor": "#533", "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 75 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "" ] }, { "id": 56, "pos": [ 1630, 1020 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEDecode", "color": "#432", "flags": {}, "order": 28, "inputs": [ { "link": 60, "name": "samples", "type": "LATENT" }, { "link": 61, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 62 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 59, "pos": [ 1580, 1160 ], "mode": 0, "size": { "0": 269.4000244140625, "1": 86.66625213623047 }, "type": "LatentUpscaleBy", "color": "#432", "flags": {}, "order": 26, "inputs": [ { "link": 63, "name": "samples", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 64 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "LatentUpscaleBy" }, "widgets_values": [ "nearest-exact", 1.5 ] }, { "id": 57, "pos": [ 1892.9643219513464, 89.30937549607815 ], "mode": 0, "size": { "0": 990.9550170898438, "1": 1179.1431884765625 }, "type": "SaveImage", "flags": {}, "order": 29, "inputs": [ { "link": 62, "name": "images", "type": "IMAGE" } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 19, "pos": [ 1124, 87 ], "mode": 0, "size": { "0": 727.26904296875, "1": 744.083984375 }, "type": "SaveImage", "color": "#323", "flags": {}, "order": 24, "inputs": [ { "link": 28, "name": "images", "type": "IMAGE" } ], "bgcolor": "#535", "properties": {}, "widgets_values": [ "action-sdxl-hero" ] }, { "id": 5, "pos": [ 65.52377753723313, 722.2042730290377 ], "mode": 0, "size": { "0": 300, "1": 110 }, "type": "EmptyLatentImage", "color": "#323", "flags": {}, "order": 9, "bgcolor": "#535", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 27 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 1152, 1 ] }, { "id": 49, "pos": [ -542.6685800589798, 621.9482242943083 ], "mode": 0, "size": { 
"0": 315, "1": 126 }, "type": "LoraLoader", "color": "#2a363b", "flags": {}, "order": 12, "inputs": [ { "link": 45, "name": "model", "type": "MODEL" }, { "link": 46, "name": "clip", "type": "CLIP" } ], "bgcolor": "#3f5159", "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 49 ], "shape": 3, "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 47, 48 ], "shape": 3, "slot_index": 1 } ], "properties": { "Node name for S&R": "LoraLoader" }, "widgets_values": [ "XL/!action-sdxl-V0.5.safetensors", 0.6, 0.6 ] }, { "id": 45, "pos": [ 444, 724 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 10, "title": "steps", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 38, 41 ], "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] } } ], "properties": {}, "widgets_values": [ 40, "fixed" ] }, { "id": 47, "pos": [ 450, 840 ], "mode": 0, "size": { "0": 210, "1": 82 }, "type": "PrimitiveNode", "color": "#233", "flags": {}, "order": 11, "title": "end_at_step", "bgcolor": "#355", "outputs": [ { "name": "INT", "type": "INT", "links": [ 43, 44 ], "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 0 } ], "properties": {}, "widgets_values": [ 37, "fixed" ] }, { "id": 10, "pos": [ 39.585034524037376, 92.33500626534348 ], "mode": 0, "size": { "0": 300, "1": 334 }, "type": "KSamplerAdvanced", "color": "#223", "flags": {}, "order": 19, "title": "KSampler (Advanced) - BASE", "inputs": [ { "link": 49, "name": "model", "type": "MODEL" }, { "link": 11, "name": "positive", "type": "CONDITIONING" }, { "link": 12, "name": "negative", "type": "CONDITIONING" }, { "link": 27, "name": "latent_image", "type": "LATENT" }, { "link": 41, "name": "steps", "type": "INT", "widget": { "name": "steps", "config": [ "INT", { "max": 10000, "min": 1, "default": 20 } ] }, "slot_index": 4 }, { "link": 43, "name": "end_at_step", "type": "INT", "widget": { "name": "end_at_step", "config": [ "INT", { "max": 10000, "min": 0, "default": 10000 } ] }, "slot_index": 5 } ], "bgcolor": "#335", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 52, 65 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSamplerAdvanced" }, "widgets_values": [ "enable", 1123645071018022, "fixed", 40, 8, "dpmpp_2s_ancestral", "karras", 0, 37, "enable" ] }, { "id": 55, "pos": [ 1630, 930 ], "mode": 0, "size": { "0": 210, "1": 46 }, "type": "VAEEncode", "color": "#432", "flags": {}, "order": 25, "inputs": [ { "link": 54, "name": "pixels", "type": "IMAGE" }, { "link": 55, "name": "vae", "type": "VAE" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 63 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEEncode" } }, { "id": 54, "pos": [ 1250, 960 ], "mode": 0, "size": { "0": 315, "1": 262 }, "type": "KSampler", "color": "#432", "flags": {}, "order": 27, "inputs": [ { "link": 71, "name": "model", "type": "MODEL" }, { "link": 74, "name": "positive", "type": "CONDITIONING" }, { "link": 75, "name": "negative", "type": "CONDITIONING" }, { "link": 64, "name": "latent_image", "type": "LATENT" } ], "bgcolor": "#653", "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 60 ], "shape": 3, "slot_index": 0 } ], "properties": { "Node name for S&R": "KSampler" }, "widgets_values": [ 85133334682496, "randomize", 6, 6, "euler_ancestral", "normal", 0.35 ] } ], "config": {}, "groups": 
[ { "color": "#a1309b", "title": "Load in BASE SDXL Model", "bounding": [ -574, -313, 369, 399 ] }, { "color": "#a1309b", "title": "Load in REFINER SDXL Model", "bounding": [ -585, 106, 391, 400 ] }, { "color": "#a1309b", "title": "Empty Latent Image", "bounding": [ 45, 648, 339, 464 ] }, { "color": "#b06634", "title": "VAE Decoder", "bounding": [ -571, 846, 378, 434 ] }, { "color": "#8AA", "title": "Step Control", "bounding": [ 434, 648, 243, 458 ] }, { "color": "#3f789e", "title": "Load in LoRA", "bounding": [ -560, 541, 353, 230 ] }, { "color": "#88A", "title": "Starting Base Image", "bounding": [ 12, 11, 731, 626 ] }, { "color": "#b58b2a", "title": "Upscale", "bounding": [ 1877, 9, 1023, 1277 ] }, { "color": "#a1309b", "title": "Group", "bounding": [ 753, 10, 1113, 835 ] }, { "color": "#b58b2a", "title": "Upscale Prompt", "bounding": [ 752, 850, 1116, 428 ] } ], "version": 0.4, "last_link_id": 75, "last_node_id": 61 } }, "steps": 40, "width": 768, "height": 1152, "models": [ "XL/sd_xl_base_1.0.safetensors", "XL/sd_xl_refiner_1.0.safetensors" ], "prompt": "zdyna_pose, dutch angle, from below, female superhero in a zdyna_pose casting lightning from her outstretched arms with a tall city in the background in a low camera angle actions scene, foreground objects dust and volumetric lighting, bright cinematic lighting, orange and teal color grading", "sampler": "DPM++ 2S a Karras", "cfgScale": 8, "scheduler": "karras", "upscalers": [], "controlNets": [], "additionalResources": [ { "name": "XL/!action-sdxl-V0.5.safetensors", "type": "lora", "strength": 0.6, "strengthClip": 0.6 } ] } } ], "downloadUrl": "https://civitai.com/api/download/models/133814" }