Compare commits

...

1 Commit

Author SHA1 Message Date
Yourz
583514148a update: display name for essentials tab node
Amp-Thread-ID: https://ampcode.com/threads/T-019c7303-ab53-7341-be76-a5da1f7a657e
Co-authored-by: Amp <amp@ampcode.com>
2026-02-19 23:49:30 +08:00
10 changed files with 18 additions and 17 deletions

View File

@@ -52,7 +52,7 @@ class TencentTextToModelNode(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="TencentTextToModelNode",
display_name="Hunyuan3D: Text to Model",
display_name="Text to 3D model",
category="api node/3d/Tencent",
inputs=[
IO.Combo.Input(
@@ -166,7 +166,7 @@ class TencentImageToModelNode(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="TencentImageToModelNode",
display_name="Hunyuan3D: Image(s) to Model",
display_name="Image to 3D Model",
category="api node/3d/Tencent",
inputs=[
IO.Combo.Input(

View File

@@ -2260,7 +2260,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
def define_schema(cls) -> IO.Schema:
return IO.Schema(
node_id="KlingLipSyncAudioToVideoNode",
display_name="Kling Lip Sync Video with Audio",
display_name="Lipsync",
category="api node/video/Kling",
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
inputs=[

View File

@@ -573,7 +573,7 @@ class OpenAIChatNode(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="OpenAIChatNode",
display_name="OpenAI ChatGPT",
display_name="Text generation (LLM)",
category="api node/text/OpenAI",
description="Generate text responses from an OpenAI model.",
inputs=[

View File

@@ -961,7 +961,7 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="RecraftRemoveBackgroundNode",
display_name="Recraft Remove Background",
display_name="Remove Background",
category="api node/image/Recraft",
description="Remove background from image, and return processed image and mask.",
inputs=[

View File

@@ -622,7 +622,7 @@ class StabilityTextToAudio(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="StabilityTextToAudio",
display_name="Stability AI Text To Audio",
display_name="Music generation",
category="api node/audio/Stability AI",
description=cleandoc(cls.__doc__ or ""),
inputs=[

View File

@@ -157,7 +157,7 @@ class SaveAudio(IO.ComfyNode):
return IO.Schema(
node_id="SaveAudio",
search_aliases=["export flac"],
display_name="Save Audio (FLAC)",
display_name="Save Audio",
category="audio",
inputs=[
IO.Audio.Input("audio"),

View File

@@ -23,7 +23,7 @@ class ImageCrop(IO.ComfyNode):
return IO.Schema(
node_id="ImageCrop",
search_aliases=["trim"],
display_name="Image Crop (Deprecated)",
display_name="Crop Image (Deprecated)",
category="image/transform",
is_deprecated=True,
inputs=[
@@ -587,6 +587,7 @@ class ImageRotate(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="ImageRotate",
display_name="Rotate",
search_aliases=["turn", "flip orientation"],
category="image/transform",
inputs=[

View File

@@ -29,7 +29,7 @@ class Load3D(IO.ComfyNode):
]
return IO.Schema(
node_id="Load3D",
display_name="Load 3D & Animation",
display_name="Load 3D model",
category="3d",
is_experimental=True,
inputs=[

View File

@@ -144,7 +144,7 @@ class GetVideoComponents(io.ComfyNode):
return io.Schema(
node_id="GetVideoComponents",
search_aliases=["extract frames", "split video", "video to images", "demux"],
display_name="Get Video Components",
display_name="Extract frame",
category="image/video",
description="Extracts all components from a video: frames, audio, and framerate.",
inputs=[

View File

@@ -2105,7 +2105,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
"CheckpointLoaderSimple": "Load Checkpoint",
"VAELoader": "Load VAE",
"LoraLoader": "Load LoRA (Model and CLIP)",
"LoraLoader": "Load style (LoRA)",
"LoraLoaderModelOnly": "Load LoRA",
"CLIPLoader": "Load CLIP",
"ControlNetLoader": "Load ControlNet Model",
@@ -2116,7 +2116,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
# Conditioning
"CLIPVisionEncode": "CLIP Vision Encode",
"StyleModelApply": "Apply Style Model",
"CLIPTextEncode": "CLIP Text Encode (Prompt)",
"CLIPTextEncode": "Text",
"CLIPSetLastLayer": "CLIP Set Last Layer",
"ConditioningCombine": "Conditioning (Combine)",
"ConditioningAverage ": "Conditioning (Average)",
@@ -2147,15 +2147,15 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"LoadImage": "Load Image",
"LoadImageMask": "Load Image (as Mask)",
"LoadImageOutput": "Load Image (from Outputs)",
"ImageScale": "Upscale Image",
"ImageScale": "Resize Image",
"ImageScaleBy": "Upscale Image By",
"ImageInvert": "Invert Image",
"ImageInvert": "Invert",
"ImagePadForOutpaint": "Pad Image for Outpainting",
"ImageBatch": "Batch Images",
"ImageCrop": "Image Crop",
"ImageBatch": "Batch Image",
"ImageCrop": "Crop Image",
"ImageStitch": "Image Stitch",
"ImageBlend": "Image Blend",
"ImageBlur": "Image Blur",
"ImageBlur": "Blur",
"ImageQuantize": "Image Quantize",
"ImageSharpen": "Image Sharpen",
"ImageScaleToTotalPixels": "Scale Image to Total Pixels",