What is LivePortrait?
LivePortrait is a keypoint-based talking-head model that animates a single face photo according to a reference video or manual parameters.
Using keypoints placed on the face as handles, it deforms the photo much as if an AI were running Photoshop's Liquify tool dozens of times per second.
You can make it mimic the expression and head movement of a reference video, or fine-tune each part of the face (eyes, mouth, head direction, and so on) individually.
It lacks the "anything goes" flexibility of recent diffusion-based video generation models, but precisely because it is not a diffusion model it is extremely lightweight and runs in near real time.
That makes it a good fit for interactive installations, and also convenient for fine-tuning generated images, e.g. closing the eyelids slightly or turning the face slightly downwards.
Custom Nodes
image2image
Changes the face direction and expression of the input person image.
There are two main control methods.
Adjusting expressions with parameters
Mimicking the expression of a reference image
Adjusting expressions with parameters
{
"id": "dba15c18-c2e7-4547-8472-85361bc55454",
"revision": 0,
"last_node_id": 15,
"last_link_id": 15,
"nodes": [
{
"id": 10,
"type": "PreviewImage",
"pos": [
1699.0274397476157,
93.3821815696158
],
"size": [
312.7272727272725,
485.090909090909
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 14
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 8,
"type": "LoadImage",
"pos": [
1052.5387244959265,
93.3821815696158
],
"size": [
282.1988281250001,
508.7
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
13
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (105).png",
"image"
]
},
{
"id": 14,
"type": "ExpressionEditor",
"pos": [
1374.372496184271,
93.3821815696158
],
"size": [
285.0200000000002,
781.96
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"name": "src_image",
"shape": 7,
"type": "IMAGE",
"link": 13
},
{
"name": "motion_link",
"shape": 7,
"type": "EDITOR_LINK",
"link": null
},
{
"name": "sample_image",
"shape": 7,
"type": "IMAGE",
"link": null
},
{
"name": "add_exp",
"shape": 7,
"type": "EXP_DATA",
"link": null
}
],
"outputs": [
{
"name": "image",
"type": "IMAGE",
"links": [
14
]
},
{
"name": "motion_link",
"type": "EDITOR_LINK",
"links": null
},
{
"name": "save_exp",
"type": "EXP_DATA",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-advancedliveportrait",
"ver": "3bba732915e22f18af0d221b9c5c282990181f1b",
"Node name for S&R": "ExpressionEditor"
},
"widgets_values": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
"All",
1.7
],
"color": "#232",
"bgcolor": "#353"
}
],
"links": [
[
13,
8,
0,
14,
0,
"IMAGE"
],
[
14,
14,
0,
10,
0,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.7513148009015777,
"offset": [
-422.25595692540645,
168.35165163849862
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
There are a lot of parameters, but the quickest way to learn them is to experiment.
As shown in the video, it is convenient to use ▷ Run (On Change), which re-queues the workflow every time you change a value.
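For orientation, here is one possible reading of the ExpressionEditor node's widgets_values array, based on the parameter order in the comfyui-advancedliveportrait source at the revision pinned above. Treat the names as assumptions and verify them against your installed version.

# Hypothetical mapping of ExpressionEditor's widgets_values to parameter names.
# The order is assumed from the comfyui-advancedliveportrait source; verify locally.
widgets_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, "All", 1.7]

param_names = [
    "rotate_pitch", "rotate_yaw", "rotate_roll",  # head orientation
    "blink", "eyebrow", "wink",                   # eye region
    "pupil_x", "pupil_y",                         # gaze direction
    "aaa", "eee", "woo", "smile",                 # mouth shapes
    "src_ratio", "sample_ratio",                  # blend strengths
    "sample_parts",                               # which parts follow sample_image
    "crop_factor",                                # face-crop margin
]

for name, value in zip(param_names, widgets_values):
    print(f"{name} = {value!r}")

Under this reading, the 17.5 and 5.5 in the next workflow's widgets_values would be rotate_yaw and rotate_roll.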
Mimicking the expression of a reference image
LivePortrait_i2i_ref.json
{
"id": "dba15c18-c2e7-4547-8472-85361bc55454",
"revision": 0,
"last_node_id": 18,
"last_link_id": 19,
"nodes": [
{
"id": 15,
"type": "PreviewImage",
"pos": [
1701.7023235943498,
98.72387954106597
],
"size": [
312.7272727272725,
485.090909090909
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 13
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 9,
"type": "LoadImage",
"pos": [
1081.3225849102464,
207.48210643813542
],
"size": [
271.6843277493424,
470.9630353117956
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
9
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (107).png",
"image"
]
},
{
"id": 14,
"type": "LoadImage",
"pos": [
766.4372083426591,
98.72387954106597
],
"size": [
282.1988281250001,
508.70000000000005
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
14
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (105).png",
"image"
]
},
{
"id": 11,
"type": "ExpressionEditor",
"pos": [
1385.6934611021761,
98.72387954106597
],
"size": [
283.32231404958657,
756
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "src_image",
"shape": 7,
"type": "IMAGE",
"link": 14
},
{
"name": "motion_link",
"shape": 7,
"type": "EDITOR_LINK",
"link": null
},
{
"name": "sample_image",
"shape": 7,
"type": "IMAGE",
"link": 9
},
{
"name": "add_exp",
"shape": 7,
"type": "EXP_DATA",
"link": null
}
],
"outputs": [
{
"name": "image",
"type": "IMAGE",
"links": [
13
]
},
{
"name": "motion_link",
"type": "EDITOR_LINK",
"links": []
},
{
"name": "save_exp",
"type": "EXP_DATA",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-advancedliveportrait",
"ver": "3bba732915e22f18af0d221b9c5c282990181f1b",
"Node name for S&R": "ExpressionEditor"
},
"widgets_values": [
0,
17.5,
5.5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
"All",
1.7
],
"color": "#232",
"bgcolor": "#353"
}
],
"links": [
[
9,
9,
0,
11,
2,
"IMAGE"
],
[
13,
11,
0,
15,
0,
"IMAGE"
],
[
14,
14,
0,
11,
0,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.6830134553650705,
"offset": [
-175.963708342659,
209.17832045893417
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
The only change from the previous workflow is that a reference image is connected to sample_image.
With sample_parts you can choose which parts should follow the reference image:
OnlyExpression ... expression only
OnlyRotation ... head orientation only
OnlyMouth ... mouth only
OnlyEyes ... eyes only
All ... everything
After matching the reference image's expression (or orientation), you can still fine-tune the result with the parameters.
image2video
Animates the person in the image according to parameters or a reference video.
motion_link
You can create a video by preparing several expressions with chained Expression Editor (PHM) nodes (connected via motion_link) and stepping through those expressions one after another.
🟨 Advanced Live Portrait (PHM) node
Set animate_without_vid to true
In the command field below it, set which expression to apply and for how long.
Each line of the command field has the following format:

expression_index = transition_frames : hold_frames

that is, the number of frames spent transitioning to that expression, followed by the number of frames to hold it.
For example, consider the following case:
1 = 1:0
2 = 15:0
3 = 20:10
1 = 1:0
Transition frames: 1, hold frames: 0
Jumps to expression 1 in a single frame, then immediately moves on to the next expression.
2 = 15:0
Transitions from expression 1 to this expression over 15 frames, and immediately moves to the next expression.
3 = 20:10
Transitions from expression 2 to this expression over 20 frames, and then holds for 10 frames.
In this case, a video with a total of 46 frames is created.
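To make the timing concrete, here is a small, unofficial Python sketch that parses a command string in this format and tallies the total frame count. The parsing rules are inferred from the example above, not taken from the node's source.

def total_frames(command: str) -> int:
    """Tally frames for lines like '1 = 1:0' (index = transition:hold).

    Inferred from the documented example; the real node may handle
    whitespace and edge cases differently.
    """
    total = 0
    for line in command.strip().splitlines():
        _index, timing = line.split("=")
        transition, hold = timing.split(":")
        total += int(transition) + int(hold)
    return total

print(total_frames("1 = 1:0\n2 = 15:0\n3 = 20:10"))  # (1+0)+(15+0)+(20+10) = 46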
Transferring from a reference video
The motion_link approach above is a bit fiddly; in practice, I expect this to be the main way the node gets used.
LivePortrait_i2v_ref.json
{
"id": "dba15c18-c2e7-4547-8472-85361bc55454",
"revision": 0,
"last_node_id": 25,
"last_link_id": 28,
"nodes": [
{
"id": 8,
"type": "LoadImage",
"pos": [
1053.8902782149364,
-0.15546305848340092
],
"size": [
282.1988281250001,
508.7
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
17
]
},
{
"name": "MASK",
"type": "MASK",
"links": null
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LoadImage"
},
"widgets_values": [
"pasted/image (105).png",
"image"
]
},
{
"id": 21,
"type": "VHS_VideoCombine",
"pos": [
2019.7634896752875,
-1.365463058483401
],
"size": [
344.2576171874998,
780.8260157993859
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 19
},
{
"name": "audio",
"shape": 7,
"type": "AUDIO",
"link": null
},
{
"name": "meta_batch",
"shape": 7,
"type": "VHS_BatchManager",
"link": null
},
{
"name": "vae",
"shape": 7,
"type": "VAE",
"link": null
}
],
"outputs": [
{
"name": "Filenames",
"type": "VHS_FILENAMES",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-videohelpersuite",
"ver": "8923bd836bdab8b7bbdf4ed104b7d045e70c66e2",
"Node name for S&R": "VHS_VideoCombine"
},
"widgets_values": {
"frame_rate": 16,
"loop_count": 0,
"filename_prefix": "LivePortrait",
"format": "video/h264-mp4",
"pix_fmt": "yuv420p",
"crf": 19,
"save_metadata": true,
"trim_to_audio": false,
"pingpong": false,
"save_output": true,
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "LivePortrait_00002.mp4",
"subfolder": "",
"type": "output",
"format": "video/h264-mp4",
"frame_rate": 8,
"workflow": "LivePortrait_00002.png",
"fullpath": "D:\\AI\\ComfyUI_windows_portable\\ComfyUI\\output\\LivePortrait_00002.mp4"
}
}
}
},
{
"id": 20,
"type": "AdvancedLivePortrait",
"pos": [
1665.2686130082168,
-1.365463058483401
],
"size": [
311.2184264611833,
272
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "src_images",
"shape": 7,
"type": "IMAGE",
"link": 17
},
{
"name": "motion_link",
"shape": 7,
"type": "EDITOR_LINK",
"link": null
},
{
"name": "driving_images",
"shape": 7,
"type": "IMAGE",
"link": 28
}
],
"outputs": [
{
"name": "images",
"type": "IMAGE",
"links": [
19
]
}
],
"properties": {
"cnr_id": "comfyui-advancedliveportrait",
"ver": "3bba732915e22f18af0d221b9c5c282990181f1b",
"Node name for S&R": "AdvancedLivePortrait"
},
"widgets_values": [
0,
0,
1.7,
true,
false,
false,
""
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 25,
"type": "VHS_LoadVideo",
"pos": [
1371.2861499441556,
114.0272015894756
],
"size": [
261.6533203125,
460.08634187370603
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"name": "meta_batch",
"shape": 7,
"type": "VHS_BatchManager",
"link": null
},
{
"name": "vae",
"shape": 7,
"type": "VAE",
"link": null
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
28
]
},
{
"name": "frame_count",
"type": "INT",
"links": null
},
{
"name": "audio",
"type": "AUDIO",
"links": null
},
{
"name": "video_info",
"type": "VHS_VIDEOINFO",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-videohelpersuite",
"ver": "8923bd836bdab8b7bbdf4ed104b7d045e70c66e2",
"Node name for S&R": "VHS_LoadVideo"
},
"widgets_values": {
"video": "7327398-uhd_3840_2160_25fps.mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1,
"format": "None",
"choose video to upload": "image",
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "7327398-uhd_3840_2160_25fps.mp4",
"type": "input",
"format": "video/mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1
}
}
}
}
],
"links": [
[
17,
8,
0,
20,
0,
"IMAGE"
],
[
19,
20,
0,
21,
0,
"IMAGE"
],
[
28,
25,
0,
20,
2,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.9090909090909091,
"offset": [
-786.1067299591815,
216.72498248265038
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
🟨 Just feed the reference video into driving_images.
It transfers the expression and head movement of the reference video directly onto the input image.
Of course, you can also use it in combination with motion_link above.
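As a quick sanity check on the settings in the workflow above (values taken from its JSON): VHS_LoadVideo resamples the 25 fps driving video to force_rate 16 and loads at most frame_load_cap 48 frames, and VHS_VideoCombine writes the result back out at 16 fps, so the output clip runs about three seconds.

# Clip-length arithmetic for the i2v workflow above (values from its JSON).
force_rate = 16       # driving video resampled from 25 fps down to 16 fps
frame_load_cap = 48   # at most 48 frames are loaded from the driving video
out_frame_rate = 16   # frame_rate on VHS_VideoCombine

duration_s = frame_load_cap / out_frame_rate
print(f"{frame_load_cap} frames @ {out_frame_rate} fps = {duration_s:.1f} s")  # 3.0 s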
video2video
Matches the expression of the person in a base video to that of a reference video.
LivePortrait_v2v_ref.json
{
"id": "dba15c18-c2e7-4547-8472-85361bc55454",
"revision": 0,
"last_node_id": 26,
"last_link_id": 29,
"nodes": [
{
"id": 20,
"type": "AdvancedLivePortrait",
"pos": [
1673.4228458122946,
-1.365463058483401
],
"size": [
311.2184264611833,
480
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "src_images",
"shape": 7,
"type": "IMAGE",
"link": 29
},
{
"name": "motion_link",
"shape": 7,
"type": "EDITOR_LINK",
"link": null
},
{
"name": "driving_images",
"shape": 7,
"type": "IMAGE",
"link": 28
}
],
"outputs": [
{
"name": "images",
"type": "IMAGE",
"links": [
19
]
}
],
"properties": {
"cnr_id": "comfyui-advancedliveportrait",
"ver": "3bba732915e22f18af0d221b9c5c282990181f1b",
"Node name for S&R": "AdvancedLivePortrait"
},
"widgets_values": [
0,
0,
1.7,
true,
false,
false,
""
],
"color": "#432",
"bgcolor": "#653"
},
{
"id": 25,
"type": "VHS_LoadVideo",
"pos": [
1376.647308097985,
111.5478627465004
],
"size": [
261.6533203125,
460.08634187370603
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"name": "meta_batch",
"shape": 7,
"type": "VHS_BatchManager",
"link": null
},
{
"name": "vae",
"shape": 7,
"type": "VAE",
"link": null
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
28
]
},
{
"name": "frame_count",
"type": "INT",
"links": null
},
{
"name": "audio",
"type": "AUDIO",
"links": null
},
{
"name": "video_info",
"type": "VHS_VIDEOINFO",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-videohelpersuite",
"ver": "8923bd836bdab8b7bbdf4ed104b7d045e70c66e2",
"Node name for S&R": "VHS_LoadVideo"
},
"widgets_values": {
"video": "7327398-uhd_3840_2160_25fps.mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1,
"format": "None",
"choose video to upload": "image",
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "7327398-uhd_3840_2160_25fps.mp4",
"type": "input",
"format": "video/mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1
}
}
}
},
{
"id": 26,
"type": "VHS_LoadVideo",
"pos": [
1079.8717703836753,
-1.365463058483401
],
"size": [
261.6533203125,
460.08634187370603
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"name": "meta_batch",
"shape": 7,
"type": "VHS_BatchManager",
"link": null
},
{
"name": "vae",
"shape": 7,
"type": "VAE",
"link": null
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
29
]
},
{
"name": "frame_count",
"type": "INT",
"links": null
},
{
"name": "audio",
"type": "AUDIO",
"links": null
},
{
"name": "video_info",
"type": "VHS_VIDEOINFO",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-videohelpersuite",
"ver": "8923bd836bdab8b7bbdf4ed104b7d045e70c66e2",
"Node name for S&R": "VHS_LoadVideo"
},
"widgets_values": {
"video": "3762907-uhd_3840_2160_25fps.mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1,
"format": "None",
"choose video to upload": "image",
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "3762907-uhd_3840_2160_25fps.mp4",
"type": "input",
"format": "video/mp4",
"force_rate": 16,
"custom_width": 0,
"custom_height": 0,
"frame_load_cap": 48,
"skip_first_frames": 0,
"select_every_nth": 1
}
}
}
},
{
"id": 21,
"type": "VHS_VideoCombine",
"pos": [
2019.7634896752875,
-1.365463058483401
],
"size": [
480.98761718749984,
607.3055346679687
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 19
},
{
"name": "audio",
"shape": 7,
"type": "AUDIO",
"link": null
},
{
"name": "meta_batch",
"shape": 7,
"type": "VHS_BatchManager",
"link": null
},
{
"name": "vae",
"shape": 7,
"type": "VAE",
"link": null
}
],
"outputs": [
{
"name": "Filenames",
"type": "VHS_FILENAMES",
"links": null
}
],
"properties": {
"cnr_id": "comfyui-videohelpersuite",
"ver": "8923bd836bdab8b7bbdf4ed104b7d045e70c66e2",
"Node name for S&R": "VHS_VideoCombine"
},
"widgets_values": {
"frame_rate": 16,
"loop_count": 0,
"filename_prefix": "LivePortrait",
"format": "video/h264-mp4",
"pix_fmt": "yuv420p",
"crf": 19,
"save_metadata": true,
"trim_to_audio": false,
"pingpong": false,
"save_output": true,
"videopreview": {
"hidden": false,
"paused": false,
"params": {
"filename": "LivePortrait_00005.mp4",
"subfolder": "",
"type": "output",
"format": "video/h264-mp4",
"frame_rate": 16,
"workflow": "LivePortrait_00005.png",
"fullpath": "D:\\AI\\ComfyUI_windows_portable\\ComfyUI\\output\\LivePortrait_00005.mp4"
}
}
}
}
],
"links": [
[
19,
20,
0,
21,
0,
"IMAGE"
],
[
28,
25,
0,
20,
2,
"IMAGE"
],
[
29,
26,
0,
20,
0,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.8264462809917354,
"offset": [
-698.8176497225181,
308.1820919619897
]
},
"frontendVersion": "1.35.0",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"version": 0.4
}
🟨 Just feed videos into both src_images and driving_images.
You can replace only the person's expression and lip sync while keeping the base video's camera work and background as they are.
Note that both VHS_LoadVideo nodes here use the same force_rate (16) and frame_load_cap (48), so the source and driving frame streams stay aligned one-to-one.