What are Flux.1 Tools?
For Flux.1, derivative models equivalent to ControlNet and IP-Adapter have been released by Flux officially, separate from the base model.
FLUX.1 Fill... Model for inpainting / outpainting
FLUX.1 Depth / FLUX.1 Canny... Models that redraw while keeping the shape, using structure-based guides (Depth / Canny)
FLUX.1 Redux... An IP-Adapter-like model for Flux that mass-produces variations that look exactly like the reference image
FLUX.1 Fill
It can be used just like an inpainting model.
Model Download
📂ComfyUI/
└── 📂models/
└── 📂diffusion_models/
└── FLUX.1-Fill-dev_fp8.safetensors
Workflow

{
"id": "18404b37-92b0-4d11-a39c-ae941838eb83",
"revision": 0,
"last_node_id": 49,
"last_link_id": 75,
"nodes": [
{
"id": 42,
"type": "DualCLIPLoader",
"pos": [
127.73647806467098,
235.59098740254555
],
"size": [
270,
130
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [
59,
60
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": [
"clip_l.safetensors",
"t5xxl_fp8_e4m3fn.safetensors",
"flux",
"default"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1667.7084821851195,
189.31698654463506
],
"size": [
140,
46
],
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 52
},
{
"name": "vae",
"type": "VAE",
"link": 62
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
64
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 33,
"type": "CLIPTextEncode",
"pos": [
449.6776859504133,
378.47933884297515
],
"size": [
307.77605192326325,
100.04636029960699
],
"flags": {
"collapsed": true
},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 60
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
68
]
}
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
""
]
},
{
"id": 43,
"type": "VAELoader",
"pos": [
510.65253242761105,
443.7682260598112
],
"size": [
240.81669781621986,
58
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
62,
69
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"ae.safetensors"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 35,
"type": "FluxGuidance",
"pos": [
779.6668528783277,
190.47933884297524
],
"size": [
211.60000610351562,
58
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 73
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
74
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "FluxGuidance"
},
"widgets_values": [
3.5
],
"color": "#2a363b",
"bgcolor": "#3f5159"
},
{
"id": 41,
"type": "UNETLoader",
"pos": [
1016.2713254111645,
52.81322568183804
],
"size": [
270,
82
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
61
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "UNETLoader"
},
"widgets_values": [
"Flux.1\\FLUX.1-Fill-dev_fp8.safetensors",
"fp8_e4m3fn"
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 48,
"type": "InpaintModelConditioning",
"pos": [
1024.783207305155,
209.14883998160443
],
"size": [
262.0525920360633,
138
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "positive",
"type": "CONDITIONING",
"link": 74
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 68
},
{
"name": "vae",
"type": "VAE",
"link": 69
},
{
"name": "pixels",
"type": "IMAGE",
"link": 70
},
{
"name": "mask",
"type": "MASK",
"link": 71
}
],
"outputs": [
{
"name": "positive",
"type": "CONDITIONING",
"links": [
75
]
},
{
"name": "negative",
"type": "CONDITIONING",
"links": [
67
]
},
{
"name": "latent",
"type": "LATENT",
"links": [
72
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "InpaintModelConditioning"
},
"widgets_values": [
true
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 46,
"type": "SaveImage",
"pos": [
1832.2378782422484,
189.31698654463506
],
"size": [
311.3000000000002,
437.89999999999986
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 64
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76"
},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
449.6776859504133,
190.47933884297524
],
"size": [
301.84503173828125,
128.01304626464844
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 59
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
73
]
}
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"A chocolate cake topped with lots of whipped cream"
]
},
{
"id": 31,
"type": "KSampler",
"pos": [
1328.1790861279906,
189.31698654463506
],
"size": [
315,
262
],
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 61
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 75
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 67
},
{
"name": "latent_image",
"type": "LATENT",
"link": 72
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
52
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "KSampler"
},
"widgets_values": [
1234,
"fixed",
20,
1,
"euler",
"normal",
1
]
},
{
"id": 49,
"type": "LoadImage",
"pos": [
517.235385243143,
558.48031578176
],
"size": [
232.798828125,
421
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
70
]
},
{
"name": "MASK",
"type": "MASK",
"links": [
71
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage",
"image": "clipspace/clipspace-painted-masked-1765287192968.png [input]"
},
"widgets_values": [
"clipspace/clipspace-painted-masked-1765287192968.png [input]",
"image"
]
}
],
"links": [
[
52,
31,
0,
8,
0,
"LATENT"
],
[
59,
42,
0,
6,
0,
"CLIP"
],
[
60,
42,
0,
33,
0,
"CLIP"
],
[
61,
41,
0,
31,
0,
"MODEL"
],
[
62,
43,
0,
8,
1,
"VAE"
],
[
64,
8,
0,
46,
0,
"IMAGE"
],
[
67,
48,
1,
31,
2,
"CONDITIONING"
],
[
68,
33,
0,
48,
1,
"CONDITIONING"
],
[
69,
43,
0,
48,
2,
"VAE"
],
[
70,
49,
0,
48,
3,
"IMAGE"
],
[
71,
49,
1,
48,
4,
"MASK"
],
[
72,
48,
2,
31,
3,
"LATENT"
],
[
73,
6,
0,
35,
0,
"CONDITIONING"
],
[
74,
35,
0,
48,
0,
"CONDITIONING"
],
[
75,
48,
0,
31,
1,
"CONDITIONING"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 1,
"offset": [
-371.73647806467096,
-9.813225681838041
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
- 🟪 Load flux1-fill-dev.safetensors with the Load Diffusion Model node.
- 🟩 Even in a Flux.1 workflow, add the InpaintModelConditioning node just like in Stable Diffusion 1.5 inpainting.
  - Input the image and mask.
FLUX.1 Depth / FLUX.1 Canny
It can be used in much the same way as ControlNet Depth / Canny.
Model Download
📂ComfyUI/
└── 📂models/
└── 📂diffusion_models/
├── flux1-depth-dev-fp8.safetensors
└── flux1-canny-dev-fp8.safetensors
Workflow

- 🟩 Input the control image to the InstructPixToPixConditioning node.
- 🟦 Since this example uses Depth, create a depth map with Depth Anything V2.
  - Since the size of this depth map becomes the output image size as is, resize it to an appropriate size.

For the Canny version, input the Canny edge image with the same configuration.
FLUX.1 Redux
FLUX.1 Redux is a model where you pass one or more reference images and generate variations strongly biased towards those images.
It is close to IP-Adapter, but Redux largely ignores prompts, and the appearance of the reference image comes through almost unchanged.
Model Download
Redux is loaded as a "style model" separate from the Flux body. Furthermore, CLIP-ViT for encoding reference images is also required.
📂ComfyUI/
└── 📂models/
├── 📂style_models/
│ └── flux1-redux-dev.safetensors
└── 📂clip_vision/
└── sigclip_vision_patch14_384.safetensors
Workflow

{
"id": "18404b37-92b0-4d11-a39c-ae941838eb83",
"revision": 0,
"last_node_id": 57,
"last_link_id": 85,
"nodes": [
{
"id": 42,
"type": "DualCLIPLoader",
"pos": [
133.94569129526244,
241.80020063313714
],
"size": [
270,
130
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [
59,
60
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": [
"clip_l.safetensors",
"t5xxl_fp8_e4m3fn.safetensors",
"flux",
"default"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 33,
"type": "CLIPTextEncode",
"pos": [
455.88689918100494,
384.68855207356654
],
"size": [
307.77605192326325,
100.04636029960699
],
"flags": {
"collapsed": true
},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 60
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
77
]
}
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
""
]
},
{
"id": 43,
"type": "VAELoader",
"pos": [
1380.0483387249926,
48.75996159700135
],
"size": [
240.81669781621986,
58
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
62
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"ae.safetensors"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
455.88689918100494,
196.68855207356683
],
"size": [
301.84503173828125,
128.01304626464844
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 59
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
73
]
}
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
""
]
},
{
"id": 35,
"type": "FluxGuidance",
"pos": [
785.8760661089191,
196.68855207356683
],
"size": [
211.60000610351562,
58
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 73
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
78
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "FluxGuidance"
},
"widgets_values": [
3.5
],
"color": "#2a363b",
"bgcolor": "#3f5159"
},
{
"id": 31,
"type": "KSampler",
"pos": [
1305.8650365412125,
170.3087220818249
],
"size": [
315,
262
],
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 61
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 79
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 77
},
{
"name": "latent_image",
"type": "LATENT",
"link": 85
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
52
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "KSampler"
},
"widgets_values": [
1234,
"fixed",
20,
1,
"euler",
"normal",
1
]
},
{
"id": 56,
"type": "CLIPVisionLoader",
"pos": [
445.16712421704347,
479.5911591706802
],
"size": [
262.4344965208136,
58
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP_VISION",
"type": "CLIP_VISION",
"links": [
84
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPVisionLoader"
},
"widgets_values": [
"sigclip_vision_patch14_384.safetensors"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 41,
"type": "UNETLoader",
"pos": [
993.9572758243881,
33.8049612190282
],
"size": [
270,
82
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
61
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "UNETLoader"
},
"widgets_values": [
"Flux.1\\flux1-dev-fp8.safetensors",
"fp8_e4m3fn"
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 51,
"type": "StyleModelApply",
"pos": [
1033.5552493234652,
191.37947593796866
],
"size": [
234.041015625,
122
],
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 78
},
{
"name": "style_model",
"type": "STYLE_MODEL",
"link": 83
},
{
"name": "clip_vision_output",
"type": "CLIP_VISION_OUTPUT",
"link": 81
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
79
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "StyleModelApply"
},
"widgets_values": [
0.8,
"multiply"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 52,
"type": "LoadImage",
"pos": [
405.65494752030884,
598.3620620113231
],
"size": [
300.58064630681804,
411.79999999999995
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
82
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (81).png",
"image"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 54,
"type": "CLIPVisionEncode",
"pos": [
749.4881556345088,
580.5318789637175
],
"size": [
229.505859375,
78
],
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "clip_vision",
"type": "CLIP_VISION",
"link": 84
},
{
"name": "image",
"type": "IMAGE",
"link": 82
}
],
"outputs": [
{
"name": "CLIP_VISION_OUTPUT",
"type": "CLIP_VISION_OUTPUT",
"links": [
81
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPVisionEncode"
},
"widgets_values": [
"none"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 55,
"type": "StyleModelLoader",
"pos": [
751.8228819013434,
465.8276548359569
],
"size": [
224.28119043036054,
58
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "STYLE_MODEL",
"type": "STYLE_MODEL",
"links": [
83
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "StyleModelLoader"
},
"widgets_values": [
"flux1-redux-dev.safetensors"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1650.361803182814,
170.3087220818249
],
"size": [
158.00671836871538,
46
],
"flags": {},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 52
},
{
"name": "vae",
"type": "VAE",
"link": 62
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
64
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 57,
"type": "EmptySD3LatentImage",
"pos": [
1036.935107923672,
394.05015464800863
],
"size": [
230.66115702479328,
106
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
85
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "EmptySD3LatentImage"
},
"widgets_values": [
1024,
1024,
1
]
},
{
"id": 46,
"type": "SaveImage",
"pos": [
1837.8652881931307,
170.3087220818249
],
"size": [
413.78700000000026,
494.9998999999999
],
"flags": {},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 64
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76"
},
"widgets_values": [
"ComfyUI"
]
}
],
"links": [
[
52,
31,
0,
8,
0,
"LATENT"
],
[
59,
42,
0,
6,
0,
"CLIP"
],
[
60,
42,
0,
33,
0,
"CLIP"
],
[
61,
41,
0,
31,
0,
"MODEL"
],
[
62,
43,
0,
8,
1,
"VAE"
],
[
64,
8,
0,
46,
0,
"IMAGE"
],
[
73,
6,
0,
35,
0,
"CONDITIONING"
],
[
77,
33,
0,
31,
2,
"CONDITIONING"
],
[
78,
35,
0,
51,
0,
"CONDITIONING"
],
[
79,
51,
0,
31,
1,
"CONDITIONING"
],
[
81,
54,
0,
51,
2,
"CLIP_VISION_OUTPUT"
],
[
82,
52,
0,
54,
1,
"IMAGE"
],
[
83,
55,
0,
51,
1,
"STYLE_MODEL"
],
[
84,
56,
0,
54,
0,
"CLIP_VISION"
],
[
85,
57,
0,
31,
3,
"LATENT"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.6830134553650705,
"offset": [
119.78480870473761,
161.3615387809718
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
- 🟩 Add the Apply Style Model node and connect the style model and CLIP Vision Encode.
  - Connect sigclip_vision_patch14_384.safetensors and the reference image to the CLIP Vision Encode node.
Mixing Multiple Images
If you chain multiple Apply Style Model groups one after another, you can also reference and mix multiple images.

{
"id": "18404b37-92b0-4d11-a39c-ae941838eb83",
"revision": 0,
"last_node_id": 60,
"last_link_id": 93,
"nodes": [
{
"id": 42,
"type": "DualCLIPLoader",
"pos": [
133.94569129526244,
241.80020063313714
],
"size": [
270,
130
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [
59,
60
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": [
"clip_l.safetensors",
"t5xxl_fp8_e4m3fn.safetensors",
"flux",
"default"
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 33,
"type": "CLIPTextEncode",
"pos": [
455.88689918100494,
384.68855207356654
],
"size": [
307.77605192326325,
100.04636029960699
],
"flags": {
"collapsed": true
},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 60
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
77
]
}
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
""
]
},
{
"id": 43,
"type": "VAELoader",
"pos": [
1662.3558629931397,
53.47049654330015
],
"size": [
240.81669781621986,
58
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
62
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"ae.safetensors"
],
"color": "#322",
"bgcolor": "#533"
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
455.88689918100494,
196.68855207356683
],
"size": [
301.84503173828125,
128.01304626464844
],
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 59
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
73
]
}
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
""
]
},
{
"id": 35,
"type": "FluxGuidance",
"pos": [
792.8746692736214,
196.68855207356683
],
"size": [
211.60000610351562,
58
],
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 73
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
78
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "FluxGuidance"
},
"widgets_values": [
3.5
],
"color": "#2a363b",
"bgcolor": "#3f5159"
},
{
"id": 56,
"type": "CLIPVisionLoader",
"pos": [
445.16712421704347,
479.5911591706802
],
"size": [
262.4344965208136,
58
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "CLIP_VISION",
"type": "CLIP_VISION",
"links": [
84,
91
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPVisionLoader"
},
"widgets_values": [
"sigclip_vision_patch14_384.safetensors"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 54,
"type": "CLIPVisionEncode",
"pos": [
749.4881556345088,
580.5318789637175
],
"size": [
229.505859375,
78
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "clip_vision",
"type": "CLIP_VISION",
"link": 84
},
{
"name": "image",
"type": "IMAGE",
"link": 82
}
],
"outputs": [
{
"name": "CLIP_VISION_OUTPUT",
"type": "CLIP_VISION_OUTPUT",
"links": [
81
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPVisionEncode"
},
"widgets_values": [
"none"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 55,
"type": "StyleModelLoader",
"pos": [
751.8228819013434,
465.8276548359569
],
"size": [
224.28119043036054,
58
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "STYLE_MODEL",
"type": "STYLE_MODEL",
"links": [
83,
87
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "StyleModelLoader"
},
"widgets_values": [
"flux1-redux-dev.safetensors"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1932.669327450961,
175.01925702812355
],
"size": [
158.00671836871538,
46
],
"flags": {},
"order": 16,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 52
},
{
"name": "vae",
"type": "VAE",
"link": 62
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
64
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 41,
"type": "UNETLoader",
"pos": [
1276.264800092535,
38.515496165327
],
"size": [
270,
82
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
61
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "UNETLoader"
},
"widgets_values": [
"Flux.1\\flux1-dev-fp8.safetensors",
"fp8_e4m3fn"
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 60,
"type": "CLIPVisionEncode",
"pos": [
1090.776019821459,
717.3463824409438
],
"size": [
229.505859375,
78
],
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "clip_vision",
"type": "CLIP_VISION",
"link": 91
},
{
"name": "image",
"type": "IMAGE",
"link": 92
}
],
"outputs": [
{
"name": "CLIP_VISION_OUTPUT",
"type": "CLIP_VISION_OUTPUT",
"links": [
93
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPVisionEncode"
},
"widgets_values": [
"none"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 57,
"type": "EmptySD3LatentImage",
"pos": [
1317.3257916201403,
386.91266403944394
],
"size": [
230.66115702479328,
106
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
85
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "EmptySD3LatentImage"
},
"widgets_values": [
1024,
1024,
1
]
},
{
"id": 46,
"type": "SaveImage",
"pos": [
2120.1728124612778,
175.01925702812355
],
"size": [
457.78700000000026,
512.5998999999999
],
"flags": {},
"order": 17,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 64
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76"
},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 58,
"type": "StyleModelApply",
"pos": [
1308.8011677108075,
196.68855207356683
],
"size": [
234.041015625,
122
],
"flags": {},
"order": 14,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 89
},
{
"name": "style_model",
"type": "STYLE_MODEL",
"link": 87
},
{
"name": "clip_vision_output",
"type": "CLIP_VISION_OUTPUT",
"link": 93
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
90
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "StyleModelApply"
},
"widgets_values": [
0.4,
"multiply"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 31,
"type": "KSampler",
"pos": [
1588.1725608093595,
175.01925702812355
],
"size": [
315,
262
],
"flags": {},
"order": 15,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 61
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 90
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 77
},
{
"name": "latent_image",
"type": "LATENT",
"link": 85
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
52
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.39",
"Node name for S&R": "KSampler"
},
"widgets_values": [
999,
"fixed",
20,
1,
"euler",
"normal",
1
]
},
{
"id": 52,
"type": "LoadImage",
"pos": [
405.65494752030884,
598.3620620113231
],
"size": [
300.58064630681804,
411.79999999999995
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
82
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (84).png",
"image"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 59,
"type": "LoadImage",
"pos": [
757.8328117072599,
736.3865654885494
],
"size": [
300.58064630681804,
411.79999999999995
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
92
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (85).png",
"image"
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 51,
"type": "StyleModelApply",
"pos": [
1039.6174137314722,
196.68855207356683
],
"size": [
234.041015625,
122
],
"flags": {},
"order": 13,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 78
},
{
"name": "style_model",
"type": "STYLE_MODEL",
"link": 83
},
{
"name": "clip_vision_output",
"type": "CLIP_VISION_OUTPUT",
"link": 81
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
89
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "StyleModelApply"
},
"widgets_values": [
0.4,
"multiply"
],
"color": "#232",
"bgcolor": "#353"
}
],
"links": [
[
52,
31,
0,
8,
0,
"LATENT"
],
[
59,
42,
0,
6,
0,
"CLIP"
],
[
60,
42,
0,
33,
0,
"CLIP"
],
[
61,
41,
0,
31,
0,
"MODEL"
],
[
62,
43,
0,
8,
1,
"VAE"
],
[
64,
8,
0,
46,
0,
"IMAGE"
],
[
73,
6,
0,
35,
0,
"CONDITIONING"
],
[
77,
33,
0,
31,
2,
"CONDITIONING"
],
[
78,
35,
0,
51,
0,
"CONDITIONING"
],
[
81,
54,
0,
51,
2,
"CLIP_VISION_OUTPUT"
],
[
82,
52,
0,
54,
1,
"IMAGE"
],
[
83,
55,
0,
51,
1,
"STYLE_MODEL"
],
[
84,
56,
0,
54,
0,
"CLIP_VISION"
],
[
85,
57,
0,
31,
3,
"LATENT"
],
[
87,
55,
0,
58,
1,
"STYLE_MODEL"
],
[
89,
51,
0,
58,
0,
"CONDITIONING"
],
[
90,
58,
0,
31,
1,
"CONDITIONING"
],
[
91,
56,
0,
60,
0,
"CLIP_VISION"
],
[
92,
59,
0,
60,
1,
"IMAGE"
],
[
93,
60,
0,
58,
2,
"CLIP_VISION_OUTPUT"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.6830134553650709,
"offset": [
-33.945691295262435,
61.484503834673
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
Problem with Redux
Again, Redux ignores most other parameters such as prompts and LoRA.
I think it is best to accept that limitation and use it purely as a "tool to mass-produce variations that look exactly like the reference image"; that said, there are also custom nodes that allow some control with prompts, for your reference.