什么是 Hires.fix?

虽然名字很帅,但做的事情并没有那么复杂。
首先在 text2image 生成图像,将该图像调整为 1.5〜2 倍大小。
将该放大图像放入 image2image,让其再次重绘。
只是将这个步骤汇总在了一起。
为什么产生了这种手法?
Stable Diffusion 1.5 推荐的分辨率是 512 × 512px,无法生成大的图像。
这主要有 2 个理由。
计算成本的问题
分辨率越高,所需的 VRAM 和计算时间就会一下子增加。
在图像生成登场的当时,优化还没有像现在这样进展,突然生成大图像是相当繁重的处理。
学习中使用的图像尺寸的问题
更本质的理由是,模型是 “以什么尺寸的图像进行学习的”。

Stable Diffusion 1.5 几乎只用了 512 × 512px 的图像进行学习。
也就是说,虽然擅长画这个尺寸附近的画,但除此以外的分辨率原本就没有练习过。
假设让漫画家突然在体育馆的墙壁上画满画。
因为平时是用原稿纸尺寸画的,恐怕会保持那个感觉,密密麻麻地排列小的分镜和角色吧。
没有练习过“使用整面墙画巨大的 1 张画”这种画法本身,原本连那种想法都涌现不出来。
Hires.fix 的诞生
于是诞生了这种分两个阶段进行的做法:首先让模型在它擅长的 512 × 512px 附近作画,将结果放大,再以放大后的图像为草稿重绘一次。
这个“先经过一次模型擅长的分辨率,再提升到高分辨率”的办法,就是 Hires.fix 背后的思想。
基础的方法

{
"id": "8b9f7796-0873-4025-be3c-0f997f67f866",
"revision": 0,
"last_node_id": 17,
"last_link_id": 37,
"nodes": [
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
582.1350317382813,
606.5799999999999
],
"size": [
244.81999999999994,
106
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
2
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
512,
512,
1
]
},
{
"id": 15,
"type": "VAEDecode",
"pos": [
2192.0144598529414,
190.6545154746329
],
"size": [
192,
46
],
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 29
},
{
"name": "vae",
"type": "VAE",
"link": 34
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
30
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "VAEDecode"
}
},
{
"id": 4,
"type": "CheckpointLoaderSimple",
"pos": [
35.04463803391465,
305.99511645379476
],
"size": [
315,
98
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
16,
31
]
},
{
"name": "CLIP",
"type": "CLIP",
"slot_index": 1,
"links": [
17,
18
]
},
{
"name": "VAE",
"type": "VAE",
"slot_index": 2,
"links": []
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"v1-5-pruned-emaonly-fp16.safetensors"
]
},
{
"id": 16,
"type": "SaveImage",
"pos": [
2413.5562680422718,
190.54464832962913
],
"size": [
440.8026035004723,
492.16667321788407
],
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 30
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33"
},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
416.1970166015625,
392.37848510742185
],
"size": [
410.75801513671877,
158.82607910156253
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 18
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
6,
22
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"worst quality, text, watermark"
]
},
{
"id": 10,
"type": "VAELoader",
"pos": [
896.9256198347109,
68.77178286934158
],
"size": [
281.0743801652891,
58
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
10,
33,
34
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"vae-ft-mse-840000-ema-pruned.safetensors"
]
},
{
"id": 17,
"type": "PreviewImage",
"pos": [
1423.6732583128128,
328.6264740212463
],
"size": [
245.18407212622105,
286.5709992486851
],
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 37
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 3,
"type": "KSampler",
"pos": [
863,
186
],
"size": [
315,
262
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 16
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 4
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 6
},
{
"name": "latent_image",
"type": "LATENT",
"link": 2
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
7
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "KSampler"
},
"widgets_values": [
10000,
"fixed",
20,
8,
"euler",
"normal",
1
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 12,
"type": "ImageScaleBy",
"pos": [
1424.2369484504152,
186
],
"size": [
210,
82
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "image",
"type": "IMAGE",
"link": 19
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
24
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "ImageScaleBy"
},
"widgets_values": [
"nearest-exact",
1.5
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
415,
186
],
"size": [
411.95503173828126,
151.0030493164063
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 17
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
4,
32
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"high quality,high detailed, RAW photo of a white fluffy puppy,rimlight,on the desk,blurry background,house plant"
]
},
{
"id": 14,
"type": "VAEEncode",
"pos": [
1661.3554226756228,
186
],
"size": [
164.5454545454545,
46
],
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "pixels",
"type": "IMAGE",
"link": 24
},
{
"name": "vae",
"type": "VAE",
"link": 33
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
26
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "VAEEncode"
}
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1205.1184742252076,
186
],
"size": [
179.27272727272702,
46
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 7
},
{
"name": "vae",
"type": "VAE",
"link": 10
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
19,
37
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 13,
"type": "KSampler",
"pos": [
1846.4738969008304,
187
],
"size": [
315,
262
],
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 31
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 32
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 22
},
{
"name": "latent_image",
"type": "LATENT",
"link": 26
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
29
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "KSampler"
},
"widgets_values": [
10000,
"fixed",
20,
8,
"euler",
"normal",
0.6
],
"color": "#432",
"bgcolor": "#653"
}
],
"links": [
[
2,
5,
0,
3,
3,
"LATENT"
],
[
4,
6,
0,
3,
1,
"CONDITIONING"
],
[
6,
7,
0,
3,
2,
"CONDITIONING"
],
[
7,
3,
0,
8,
0,
"LATENT"
],
[
10,
10,
0,
8,
1,
"VAE"
],
[
16,
4,
0,
3,
0,
"MODEL"
],
[
17,
4,
1,
6,
0,
"CLIP"
],
[
18,
4,
1,
7,
0,
"CLIP"
],
[
19,
8,
0,
12,
0,
"IMAGE"
],
[
22,
7,
0,
13,
2,
"CONDITIONING"
],
[
24,
12,
0,
14,
0,
"IMAGE"
],
[
26,
14,
0,
13,
3,
"LATENT"
],
[
29,
13,
0,
15,
0,
"LATENT"
],
[
30,
15,
0,
16,
0,
"IMAGE"
],
[
31,
4,
0,
13,
0,
"MODEL"
],
[
32,
6,
0,
13,
1,
"CONDITIONING"
],
[
33,
10,
0,
14,
1,
"VAE"
],
[
34,
10,
0,
15,
1,
"VAE"
],
[
37,
8,
0,
17,
0,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.6830134553650705,
"offset": [
64.95536196608535,
32.692317130658424
]
},
"frontendVersion": "1.34.6",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true,
"linkExtensions": [
{
"id": 31,
"parentId": 2
},
{
"id": 33,
"parentId": 5
},
{
"id": 34,
"parentId": 6
}
],
"reroutes": [
{
"id": 1,
"pos": [
441.0480618422728,
19.664788129629585
],
"linkIds": [
31
]
},
{
"id": 2,
"parentId": 1,
"pos": [
1771.4727618422721,
24.549888129629558
],
"linkIds": [
31
]
},
{
"id": 5,
"pos": [
1624.7392361419663,
86.84280922035722
],
"linkIds": [
33,
34
]
},
{
"id": 6,
"parentId": 5,
"pos": [
2154.447837765052,
96.76734244275261
],
"linkIds": [
34
]
}
]
},
"version": 0.4
}
- 🟪 text2image
- 🟦 在 Upscale Image By 节点将解码后的图像放大为 1.5 倍
- 🟨 将放大后的图像输入 image2image
保持 Latent 状态放大的方法
刚才的工作流中,流程是将 text2image 的图像一度解码为像素图像后放大,再次转换为 latent 进行 image2image。
这里产生了“不用特意变回像素图像,保持 latent 也能放大不是吗?”的想法。
但是,单纯放大 latent 的话,会发生无法容忍程度的劣化。
因此很长一段时间都不实用,但后来出现了能够进行“抑制劣化的 latent 放大”的自定义节点。
- Goktug/ComfyUI_NNLatentUpscale (forked from Ttl)
- 使用神经网络将 latent 放大。

{
"id": "8b9f7796-0873-4025-be3c-0f997f67f866",
"revision": 0,
"last_node_id": 18,
"last_link_id": 40,
"nodes": [
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
582.1350317382813,
606.5799999999999
],
"size": [
244.81999999999994,
106
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
2
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
512,
512,
1
]
},
{
"id": 15,
"type": "VAEDecode",
"pos": [
1797.4334510317049,
183.00700000000006
],
"size": [
192,
46
],
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 29
},
{
"name": "vae",
"type": "VAE",
"link": 34
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"slot_index": 0,
"links": [
30
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "VAEDecode"
},
"widgets_values": []
},
{
"id": 4,
"type": "CheckpointLoaderSimple",
"pos": [
35.04463803391465,
305.99511645379476
],
"size": [
315,
98
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
16,
31
]
},
{
"name": "CLIP",
"type": "CLIP",
"slot_index": 1,
"links": [
17,
18
]
},
{
"name": "VAE",
"type": "VAE",
"slot_index": 2,
"links": []
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"v1-5-pruned-emaonly-fp16.safetensors"
]
},
{
"id": 16,
"type": "SaveImage",
"pos": [
2020.9112680422732,
183.00700000000006
],
"size": [
440.8026035004723,
492.16667321788407
],
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 30
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33"
},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
416.1970166015625,
392.37848510742185
],
"size": [
410.75801513671877,
158.82607910156253
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 18
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
6,
22
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"worst quality, text, watermark"
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
415,
183.00700000000006
],
"size": [
411.95503173828126,
151.0030493164063
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 17
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"slot_index": 0,
"links": [
4,
32
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"high quality,high detailed, RAW photo of a white fluffy puppy,rimlight,on the desk,blurry background,house plant"
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
863,
183.00700000000006
],
"size": [
315,
262
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 16
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 4
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 6
},
{
"name": "latent_image",
"type": "LATENT",
"link": 2
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
38
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "KSampler"
},
"widgets_values": [
10000,
"fixed",
20,
8,
"euler",
"normal",
1
],
"color": "#323",
"bgcolor": "#535"
},
{
"id": 13,
"type": "KSampler",
"pos": [
1450.9556340211366,
183.00700000000006
],
"size": [
315,
262
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 31
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 32
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 22
},
{
"name": "latent_image",
"type": "LATENT",
"link": 39
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"slot_index": 0,
"links": [
29
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.33",
"Node name for S&R": "KSampler"
},
"widgets_values": [
10000,
"fixed",
20,
8,
"euler",
"normal",
0.6
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 18,
"type": "NNLatentUpscale",
"pos": [
1209.4778170105683,
183.00700000000006
],
"size": [
210,
82
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "latent",
"type": "LATENT",
"link": 38
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
39
]
}
],
"properties": {
"cnr_id": "comfyui_nnlatentupscale",
"ver": "7657841c7113345ef407c498985c141ffff38eba",
"Node name for S&R": "NNLatentUpscale"
},
"widgets_values": [
"SD 1.x",
1.5
],
"color": "#232",
"bgcolor": "#353"
},
{
"id": 10,
"type": "VAELoader",
"pos": [
1484.8812538558475,
66.90901890016424
],
"size": [
281.0743801652891,
58
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
34
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"vae-ft-mse-840000-ema-pruned.safetensors"
]
}
],
"links": [
[
2,
5,
0,
3,
3,
"LATENT"
],
[
4,
6,
0,
3,
1,
"CONDITIONING"
],
[
6,
7,
0,
3,
2,
"CONDITIONING"
],
[
16,
4,
0,
3,
0,
"MODEL"
],
[
17,
4,
1,
6,
0,
"CLIP"
],
[
18,
4,
1,
7,
0,
"CLIP"
],
[
22,
7,
0,
13,
2,
"CONDITIONING"
],
[
29,
13,
0,
15,
0,
"LATENT"
],
[
30,
15,
0,
16,
0,
"IMAGE"
],
[
31,
4,
0,
13,
0,
"MODEL"
],
[
32,
6,
0,
13,
1,
"CONDITIONING"
],
[
34,
10,
0,
15,
1,
"VAE"
],
[
38,
3,
0,
18,
0,
"LATENT"
],
[
39,
18,
0,
13,
3,
"LATENT"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.8264462809917354,
"offset": [
64.95536196608535,
33.09098109983576
]
},
"frontendVersion": "1.34.6",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true,
"reroutes": [
{
"id": 1,
"pos": [
410.0480618422728,
106.66478812962959
],
"linkIds": [
31
]
},
{
"id": 2,
"parentId": 1,
"pos": [
1411.8277618422733,
107.55688812962956
],
"linkIds": [
31
]
}
],
"linkExtensions": [
{
"id": 31,
"parentId": 2
}
]
},
"version": 0.4
}
- 🟩 将 text2image 产出的 latent 在 NNLatentUpscale 节点直接放大
- 🟨 将放大后的 latent 原样流入 image2image
虽然只是体感,但我觉得一度解码为像素图像的方法品质更好。