# ComfyUI/nodes.py

from __future__ import annotations
import torch
import os
import sys
import json
import hashlib
import inspect
import traceback
import math
import time
import random
import logging
from PIL import Image, ImageOps, ImageSequence
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
import comfy.controlnet
from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict, FileLocator
from comfy_api.internal import register_versions, ComfyAPIWithVersion
from comfy_api.version_list import supported_versions
from comfy_api.latest import io, ComfyExtension
import comfy.clip_vision
import comfy.model_management
from comfy.cli_args import args
import importlib
import folder_paths
import latent_preview
import node_helpers
def before_node_execution():
comfy.model_management.throw_exception_if_processing_interrupted()
def interrupt_processing(value=True):
comfy.model_management.interrupt_current_processing(value)
MAX_RESOLUTION=16384
class CLIPTextEncode(ComfyNodeABC):
@classmethod
def INPUT_TYPES(s) -> InputTypeDict:
return {
"required": {
"text": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
"clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
}
}
RETURN_TYPES = (IO.CONDITIONING,)
OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
FUNCTION = "encode"
CATEGORY = "conditioning"
DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
def encode(self, clip, text):
if clip is None:
            raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node, your checkpoint does not contain a valid clip or text encoder model.")
tokens = clip.tokenize(text)
return (clip.encode_from_tokens_scheduled(tokens), )
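
# --- Illustrative sketch (not part of ComfyUI's node API) ---
# A CONDITIONING value is a list of [cond_tensor, options_dict] pairs; the
# options dict can carry extras such as "pooled_output". Every conditioning
# node below relies on exactly this shape when it indexes t[0] / t[1].
# A minimal hand-built value, assuming a hypothetical (1, 77, 768) embedding:
def _example_conditioning_value():
    cond = torch.zeros((1, 77, 768))   # per-token embeddings
    pooled = torch.zeros((1, 768))     # pooled sentence embedding
    return [[cond, {"pooled_output": pooled}]]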
class ConditioningCombine:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning_1": ("CONDITIONING", ), "conditioning_2": ("CONDITIONING", )}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "combine"
CATEGORY = "conditioning"
def combine(self, conditioning_1, conditioning_2):
return (conditioning_1 + conditioning_2, )
class ConditioningAverage:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
"conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "addWeighted"
CATEGORY = "conditioning"
def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
out = []
if len(conditioning_from) > 1:
            logging.warning("ConditioningAverage: conditioning_from contains more than 1 cond; only the first one will be applied to conditioning_to.")
cond_from = conditioning_from[0][0]
pooled_output_from = conditioning_from[0][1].get("pooled_output", None)
for i in range(len(conditioning_to)):
t1 = conditioning_to[i][0]
pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
t0 = cond_from[:,:t1.shape[1]]
if t0.shape[1] < t1.shape[1]:
t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)
tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
t_to = conditioning_to[i][1].copy()
if pooled_output_from is not None and pooled_output_to is not None:
t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
elif pooled_output_from is not None:
t_to["pooled_output"] = pooled_output_from
n = [tw, t_to]
out.append(n)
return (out, )
class ConditioningConcat:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"conditioning_to": ("CONDITIONING",),
"conditioning_from": ("CONDITIONING",),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "concat"
CATEGORY = "conditioning"
def concat(self, conditioning_to, conditioning_from):
out = []
if len(conditioning_from) > 1:
            logging.warning("ConditioningConcat: conditioning_from contains more than 1 cond; only the first one will be applied to conditioning_to.")
cond_from = conditioning_from[0][0]
for i in range(len(conditioning_to)):
t1 = conditioning_to[i][0]
tw = torch.cat((t1, cond_from),1)
n = [tw, conditioning_to[i][1].copy()]
out.append(n)
return (out, )
class ConditioningSetArea:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning"
def append(self, conditioning, width, height, x, y, strength):
c = node_helpers.conditioning_set_values(conditioning, {"area": (height // 8, width // 8, y // 8, x // 8),
"strength": strength,
"set_area_to_bounds": False})
return (c, )
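
# Note: the area is stored in latent units as (height // 8, width // 8,
# y // 8, x // 8), matching the 8x spatial compression of the SD latent
# space; this is also why the pixel inputs above use step=8.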
class ConditioningSetAreaPercentage:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
"height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
"x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
"y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning"
def append(self, conditioning, width, height, x, y, strength):
c = node_helpers.conditioning_set_values(conditioning, {"area": ("percentage", height, width, y, x),
"strength": strength,
"set_area_to_bounds": False})
return (c, )
class ConditioningSetAreaStrength:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning"
def append(self, conditioning, strength):
c = node_helpers.conditioning_set_values(conditioning, {"strength": strength})
return (c, )
class ConditioningSetMask:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"mask": ("MASK", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"set_cond_area": (["default", "mask bounds"],),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning"
def append(self, conditioning, mask, set_cond_area, strength):
set_area_to_bounds = False
if set_cond_area != "default":
set_area_to_bounds = True
if len(mask.shape) < 3:
mask = mask.unsqueeze(0)
c = node_helpers.conditioning_set_values(conditioning, {"mask": mask,
"set_area_to_bounds": set_area_to_bounds,
"mask_strength": strength})
return (c, )
class ConditioningZeroOut:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", )}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "zero_out"
CATEGORY = "advanced/conditioning"
def zero_out(self, conditioning):
c = []
for t in conditioning:
d = t[1].copy()
pooled_output = d.get("pooled_output", None)
if pooled_output is not None:
d["pooled_output"] = torch.zeros_like(pooled_output)
conditioning_lyrics = d.get("conditioning_lyrics", None)
if conditioning_lyrics is not None:
d["conditioning_lyrics"] = torch.zeros_like(conditioning_lyrics)
n = [torch.zeros_like(t[0]), d]
c.append(n)
return (c, )
class ConditioningSetTimestepRange:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "set_range"
CATEGORY = "advanced/conditioning"
def set_range(self, conditioning, start, end):
c = node_helpers.conditioning_set_values(conditioning, {"start_percent": start,
"end_percent": end})
return (c, )
class VAEDecode:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"samples": ("LATENT", {"tooltip": "The latent to be decoded."}),
"vae": ("VAE", {"tooltip": "The VAE model used for decoding the latent."})
}
}
RETURN_TYPES = ("IMAGE",)
OUTPUT_TOOLTIPS = ("The decoded image.",)
FUNCTION = "decode"
CATEGORY = "latent"
DESCRIPTION = "Decodes latent images back into pixel space images."
def decode(self, vae, samples):
images = vae.decode(samples["samples"])
if len(images.shape) == 5: #Combine batches
images = images.reshape(-1, images.shape[-3], images.shape[-2], images.shape[-1])
return (images, )
class VAEDecodeTiled:
@classmethod
def INPUT_TYPES(s):
return {"required": {"samples": ("LATENT", ), "vae": ("VAE", ),
"tile_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 32}),
"overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}),
"temporal_size": ("INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to decode at a time."}),
"temporal_overlap": ("INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap."}),
}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "decode"
CATEGORY = "_for_testing"
def decode(self, vae, samples, tile_size, overlap=64, temporal_size=64, temporal_overlap=8):
        # clamp overlaps so tiles always cover more area than their blended seams
        if tile_size < overlap * 4:
            overlap = tile_size // 4
        if temporal_size < temporal_overlap * 2:
            temporal_overlap = temporal_size // 2
temporal_compression = vae.temporal_compression_decode()
if temporal_compression is not None:
temporal_size = max(2, temporal_size // temporal_compression)
temporal_overlap = max(1, min(temporal_size // 2, temporal_overlap // temporal_compression))
else:
temporal_size = None
temporal_overlap = None
compression = vae.spacial_compression_decode()
images = vae.decode_tiled(samples["samples"], tile_x=tile_size // compression, tile_y=tile_size // compression, overlap=overlap // compression, tile_t=temporal_size, overlap_t=temporal_overlap)
if len(images.shape) == 5: #Combine batches
images = images.reshape(-1, images.shape[-3], images.shape[-2], images.shape[-1])
return (images, )
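
# Worked example for the tiled decode above, assuming a standard VAE with 8x
# spatial compression: tile_size=512 and overlap=64 become latent tiles of
# 512 // 8 = 64 with an overlap of 64 // 8 = 8, so each decoded tile covers
# 512 pixels and blends across a 64 pixel seam.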
class VAEEncode:
@classmethod
def INPUT_TYPES(s):
return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", )}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "encode"
CATEGORY = "latent"
def encode(self, vae, pixels):
t = vae.encode(pixels[:,:,:,:3])
return ({"samples":t}, )
class VAEEncodeTiled:
@classmethod
def INPUT_TYPES(s):
return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ),
"tile_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}),
"overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}),
"temporal_size": ("INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to encode at a time."}),
"temporal_overlap": ("INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap."}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "encode"
CATEGORY = "_for_testing"
def encode(self, vae, pixels, tile_size, overlap, temporal_size=64, temporal_overlap=8):
t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap)
return ({"samples": t}, )
class VAEEncodeForInpaint:
@classmethod
def INPUT_TYPES(s):
return {"required": { "pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "encode"
CATEGORY = "latent/inpaint"
def encode(self, vae, pixels, mask, grow_mask_by=6):
x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
pixels = pixels.clone()
if pixels.shape[1] != x or pixels.shape[2] != y:
x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
#grow mask by a few pixels to keep things seamless in latent space
if grow_mask_by == 0:
mask_erosion = mask
else:
kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
padding = math.ceil((grow_mask_by - 1) / 2)
mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)
m = (1.0 - mask.round()).squeeze(1)
for i in range(3):
pixels[:,:,:,i] -= 0.5
pixels[:,:,:,i] *= m
pixels[:,:,:,i] += 0.5
t = vae.encode(pixels)
return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
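
# --- Illustrative sketch (not part of ComfyUI's node API) ---
# grow_mask_by above dilates the inpaint mask with an all-ones convolution
# kernel so the masked region bleeds a few pixels outward, hiding seams once
# the mask is carried into the 8x-smaller latent space. The same idea in
# isolation:
def _example_grow_mask(mask: torch.Tensor, grow_by: int = 6) -> torch.Tensor:
    # mask: (B, 1, H, W) with values in 0..1; returns the dilated binary mask
    kernel = torch.ones((1, 1, grow_by, grow_by))
    padding = math.ceil((grow_by - 1) / 2)
    return torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel, padding=padding), 0, 1)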
class InpaintModelConditioning:
@classmethod
def INPUT_TYPES(s):
return {"required": {"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"vae": ("VAE", ),
"pixels": ("IMAGE", ),
"mask": ("MASK", ),
"noise_mask": ("BOOLEAN", {"default": True, "tooltip": "Add a noise mask to the latent so sampling will only happen within the mask. Might improve results or completely break things depending on the model."}),
}}
RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
RETURN_NAMES = ("positive", "negative", "latent")
FUNCTION = "encode"
CATEGORY = "conditioning/inpaint"
def encode(self, positive, negative, pixels, vae, mask, noise_mask=True):
x = (pixels.shape[1] // 8) * 8
y = (pixels.shape[2] // 8) * 8
mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
orig_pixels = pixels
pixels = orig_pixels.clone()
if pixels.shape[1] != x or pixels.shape[2] != y:
x_offset = (pixels.shape[1] % 8) // 2
y_offset = (pixels.shape[2] % 8) // 2
pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
m = (1.0 - mask.round()).squeeze(1)
for i in range(3):
pixels[:,:,:,i] -= 0.5
pixels[:,:,:,i] *= m
pixels[:,:,:,i] += 0.5
concat_latent = vae.encode(pixels)
orig_latent = vae.encode(orig_pixels)
out_latent = {}
out_latent["samples"] = orig_latent
if noise_mask:
out_latent["noise_mask"] = mask
out = []
for conditioning in [positive, negative]:
c = node_helpers.conditioning_set_values(conditioning, {"concat_latent_image": concat_latent,
"concat_mask": mask})
out.append(c)
return (out[0], out[1], out_latent)
class SaveLatent:
def __init__(self):
self.output_dir = folder_paths.get_output_directory()
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT", ),
"filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ()
FUNCTION = "save"
OUTPUT_NODE = True
CATEGORY = "_for_testing"
def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
        # save metadata to support latent sharing
prompt_info = ""
if prompt is not None:
prompt_info = json.dumps(prompt)
metadata = None
if not args.disable_metadata:
metadata = {"prompt": prompt_info}
if extra_pnginfo is not None:
for x in extra_pnginfo:
metadata[x] = json.dumps(extra_pnginfo[x])
file = f"{filename}_{counter:05}_.latent"
results: list[FileLocator] = []
results.append({
"filename": file,
"subfolder": subfolder,
"type": "output"
})
file = os.path.join(full_output_folder, file)
output = {}
output["latent_tensor"] = samples["samples"].contiguous()
output["latent_format_version_0"] = torch.tensor([])
comfy.utils.save_torch_file(output, file, metadata=metadata)
return { "ui": { "latents": results } }
class LoadLatent:
@classmethod
def INPUT_TYPES(s):
input_dir = folder_paths.get_input_directory()
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
return {"required": {"latent": [sorted(files), ]}, }
CATEGORY = "_for_testing"
RETURN_TYPES = ("LATENT", )
FUNCTION = "load"
def load(self, latent):
latent_path = folder_paths.get_annotated_filepath(latent)
latent = safetensors.torch.load_file(latent_path, device="cpu")
multiplier = 1.0
if "latent_format_version_0" not in latent:
multiplier = 1.0 / 0.18215
samples = {"samples": latent["latent_tensor"].float() * multiplier}
return (samples, )
@classmethod
def IS_CHANGED(s, latent):
image_path = folder_paths.get_annotated_filepath(latent)
m = hashlib.sha256()
with open(image_path, 'rb') as f:
m.update(f.read())
return m.digest().hex()
@classmethod
def VALIDATE_INPUTS(s, latent):
if not folder_paths.exists_annotated_filepath(latent):
return "Invalid latent file: {}".format(latent)
return True
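
# --- Illustrative sketch (not part of ComfyUI's node API) ---
# Saved latents are safetensors files holding a "latent_tensor" key; the empty
# "latent_format_version_0" tensor marks files written in the current format,
# while older files must be rescaled by 1 / 0.18215 (the SD1.x latent scale
# factor), exactly as LoadLatent.load does above:
def _example_read_latent(path: str) -> torch.Tensor:
    data = safetensors.torch.load_file(path, device="cpu")
    multiplier = 1.0 if "latent_format_version_0" in data else 1.0 / 0.18215
    return data["latent_tensor"].float() * multiplier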
class CheckpointLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
"ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
FUNCTION = "load_checkpoint"
CATEGORY = "advanced/loaders"
DEPRECATED = True
def load_checkpoint(self, config_name, ckpt_name):
config_path = folder_paths.get_full_path("configs", config_name)
ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class CheckpointLoaderSimple:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}),
}
}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
OUTPUT_TOOLTIPS = ("The model used for denoising latents.",
"The CLIP model used for encoding text prompts.",
"The VAE model used for encoding and decoding images to and from latent space.")
FUNCTION = "load_checkpoint"
CATEGORY = "loaders"
DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents."
def load_checkpoint(self, ckpt_name):
ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return out[:3]
class DiffusersLoader:
@classmethod
def INPUT_TYPES(cls):
paths = []
for search_path in folder_paths.get_folder_paths("diffusers"):
if os.path.exists(search_path):
for root, subdir, files in os.walk(search_path, followlinks=True):
if "model_index.json" in files:
paths.append(os.path.relpath(root, start=search_path))
return {"required": {"model_path": (paths,), }}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
FUNCTION = "load_checkpoint"
CATEGORY = "advanced/loaders/deprecated"
def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
for search_path in folder_paths.get_folder_paths("diffusers"):
if os.path.exists(search_path):
path = os.path.join(search_path, model_path)
if os.path.exists(path):
model_path = path
break
return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class unCLIPCheckpointLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
FUNCTION = "load_checkpoint"
CATEGORY = "loaders"
def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return out
class CLIPSetLastLayer:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip": ("CLIP", ),
"stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "set_last_layer"
CATEGORY = "conditioning"
def set_last_layer(self, clip, stop_at_clip_layer):
clip = clip.clone()
clip.clip_layer(stop_at_clip_layer)
return (clip,)
class LoraLoader:
def __init__(self):
self.loaded_lora = None
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
"clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
"lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
}
}
RETURN_TYPES = ("MODEL", "CLIP")
OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.")
FUNCTION = "load_lora"
CATEGORY = "loaders"
DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together."
def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
if strength_model == 0 and strength_clip == 0:
return (model, clip)
lora_path = folder_paths.get_full_path_or_raise("loras", lora_name)
lora = None
if self.loaded_lora is not None:
if self.loaded_lora[0] == lora_path:
lora = self.loaded_lora[1]
else:
self.loaded_lora = None
if lora is None:
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
self.loaded_lora = (lora_path, lora)
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
return (model_lora, clip_lora)
class LoraLoaderModelOnly(LoraLoader):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "load_lora_model_only"
def load_lora_model_only(self, model, lora_name, strength_model):
return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)
class LoadLora:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
"clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
"lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
}
}
RETURN_TYPES = ("MODEL", "CLIP")
OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.")
FUNCTION = "load_lora"
CATEGORY = "loaders"
DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together."
def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
if strength_model == 0 and strength_clip == 0:
return (model, clip)
from comfy_execution.graph_utils import GraphBuilder
g = GraphBuilder()
# create the nodes
load_weights = g.node("LoadLoraWeights", lora_name=lora_name)
apply_weights = g.node("ApplyLoraWeights", model=model, clip=clip, lora=load_weights.out(0), strength_model=strength_model, strength_clip=strength_clip)
return {
"result": (apply_weights.out(0), apply_weights.out(1)),
"expand": g.finalize(),
}
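
# Note on the pattern above: returning {"result": ..., "expand": g.finalize()}
# asks the executor to splice the built subgraph (LoadLoraWeights ->
# ApplyLoraWeights) in place of this node, so that loading the weights and
# applying them with particular strengths are separate, independently
# cacheable steps.
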
class LoadLoraModelOnly(LoadLora):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "load_lora_model_only"
def load_lora_model_only(self, model, lora_name, strength_model):
if strength_model == 0:
return (model,)
from comfy_execution.graph_utils import GraphBuilder
g = GraphBuilder()
load_weights = g.node("LoadLoraWeights", lora_name=lora_name)
apply_weights = g.node("ApplyLoraWeights", model=model, clip=None, lora=load_weights.out(0), strength_model=strength_model, strength_clip=0)
return {
"result": (apply_weights.out(0),),
"expand": g.finalize(),
}
class LoadLoraWeights:
@classmethod
def INPUT_TYPES(s):
return {"required": { "lora_name": (folder_paths.get_filename_list("loras"), ),
}}
RETURN_TYPES = ("LORA_MODEL",)
FUNCTION = "load_lora_weights"
DEPRECATED = False
def load_lora_weights(self, lora_name):
lora_path = folder_paths.get_full_path_or_raise("loras", lora_name)
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
return (lora,)
class ApplyLoraWeights:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"clip": ("CLIP",),
"lora": ("LORA_MODEL",),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
}}
RETURN_TYPES = ("MODEL", "CLIP")
FUNCTION = "apply_lora_weights"
DEPRECATED = False
def apply_lora_weights(self, model, clip, lora, strength_model: float, strength_clip: float):
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
return (model_lora, clip_lora)
class ApplyLoraWeightsModelOnly(ApplyLoraWeights):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"lora": ("LORA_MODEL",),
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "apply_lora_weights_model_only"
DEPRECATED = False
def apply_lora_weights_model_only(self, model, lora, strength_model: float):
return (self.apply_lora_weights(model, None, lora, strength_model, 0)[0],)
class VAELoader:
@staticmethod
def vae_list():
vaes = folder_paths.get_filename_list("vae")
approx_vaes = folder_paths.get_filename_list("vae_approx")
sdxl_taesd_enc = False
sdxl_taesd_dec = False
sd1_taesd_enc = False
sd1_taesd_dec = False
sd3_taesd_enc = False
sd3_taesd_dec = False
f1_taesd_enc = False
f1_taesd_dec = False
for v in approx_vaes:
if v.startswith("taesd_decoder."):
sd1_taesd_dec = True
elif v.startswith("taesd_encoder."):
sd1_taesd_enc = True
elif v.startswith("taesdxl_decoder."):
sdxl_taesd_dec = True
elif v.startswith("taesdxl_encoder."):
sdxl_taesd_enc = True
elif v.startswith("taesd3_decoder."):
sd3_taesd_dec = True
elif v.startswith("taesd3_encoder."):
sd3_taesd_enc = True
            elif v.startswith("taef1_encoder."):
                f1_taesd_enc = True
            elif v.startswith("taef1_decoder."):
                f1_taesd_dec = True
if sd1_taesd_dec and sd1_taesd_enc:
vaes.append("taesd")
if sdxl_taesd_dec and sdxl_taesd_enc:
vaes.append("taesdxl")
if sd3_taesd_dec and sd3_taesd_enc:
vaes.append("taesd3")
if f1_taesd_dec and f1_taesd_enc:
vaes.append("taef1")
vaes.append("pixel_space")
return vaes
@staticmethod
def load_taesd(name):
sd = {}
approx_vaes = folder_paths.get_filename_list("vae_approx")
encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))
enc = comfy.utils.load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", encoder))
for k in enc:
sd["taesd_encoder.{}".format(k)] = enc[k]
dec = comfy.utils.load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", decoder))
for k in dec:
sd["taesd_decoder.{}".format(k)] = dec[k]
if name == "taesd":
sd["vae_scale"] = torch.tensor(0.18215)
sd["vae_shift"] = torch.tensor(0.0)
elif name == "taesdxl":
sd["vae_scale"] = torch.tensor(0.13025)
sd["vae_shift"] = torch.tensor(0.0)
elif name == "taesd3":
sd["vae_scale"] = torch.tensor(1.5305)
sd["vae_shift"] = torch.tensor(0.0609)
elif name == "taef1":
sd["vae_scale"] = torch.tensor(0.3611)
sd["vae_shift"] = torch.tensor(0.1159)
return sd
@classmethod
def INPUT_TYPES(s):
return {"required": { "vae_name": (s.vae_list(), )}}
RETURN_TYPES = ("VAE",)
FUNCTION = "load_vae"
CATEGORY = "loaders"
#TODO: scale factor?
def load_vae(self, vae_name):
if vae_name == "pixel_space":
sd = {}
sd["pixel_space_vae"] = torch.tensor(1.0)
elif vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]:
sd = self.load_taesd(vae_name)
else:
vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
sd = comfy.utils.load_torch_file(vae_path)
vae = comfy.sd.VAE(sd=sd)
vae.throw_exception_if_invalid()
return (vae,)
class ControlNetLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
CATEGORY = "loaders"
def load_controlnet(self, control_net_name):
controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name)
controlnet = comfy.controlnet.load_controlnet(controlnet_path)
if controlnet is None:
raise RuntimeError("ERROR: controlnet file is invalid and does not contain a valid controlnet model.")
return (controlnet,)
class DiffControlNetLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
CATEGORY = "loaders"
def load_controlnet(self, model, control_net_name):
controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name)
controlnet = comfy.controlnet.load_controlnet(controlnet_path, model)
return (controlnet,)
class ControlNetApply:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "apply_controlnet"
DEPRECATED = True
CATEGORY = "conditioning/controlnet"
def apply_controlnet(self, conditioning, control_net, image, strength):
if strength == 0:
return (conditioning, )
c = []
control_hint = image.movedim(-1,1)
for t in conditioning:
n = [t[0], t[1].copy()]
c_net = control_net.copy().set_cond_hint(control_hint, strength)
if 'control' in t[1]:
c_net.set_previous_controlnet(t[1]['control'])
n[1]['control'] = c_net
n[1]['control_apply_to_uncond'] = True
c.append(n)
return (c, )
class ControlNetApplyAdvanced:
@classmethod
def INPUT_TYPES(s):
return {"required": {"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
},
"optional": {"vae": ("VAE", ),
}
}
RETURN_TYPES = ("CONDITIONING","CONDITIONING")
RETURN_NAMES = ("positive", "negative")
FUNCTION = "apply_controlnet"
CATEGORY = "conditioning/controlnet"
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), vae=vae, extra_concat=extra_concat)
c_net.set_previous_controlnet(prev_cnet)
cnets[prev_cnet] = c_net
d['control'] = c_net
d['control_apply_to_uncond'] = False
n = [t[0], d]
c.append(n)
out.append(c)
return (out[0], out[1])
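
# Note: the cnets dict above is keyed by each cond's previous ControlNet, so
# positive and negative conds that share the same control chain reuse a single
# ControlNet copy instead of each receiving their own.
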
class UNETLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "unet_name": (folder_paths.get_filename_list("diffusion_models"), ),
"weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2"],)
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "load_unet"
CATEGORY = "advanced/loaders"
def load_unet(self, unet_name, weight_dtype):
model_options = {}
if weight_dtype == "fp8_e4m3fn":
model_options["dtype"] = torch.float8_e4m3fn
elif weight_dtype == "fp8_e4m3fn_fast":
model_options["dtype"] = torch.float8_e4m3fn
model_options["fp8_optimizations"] = True
elif weight_dtype == "fp8_e5m2":
model_options["dtype"] = torch.float8_e5m2
unet_path = folder_paths.get_full_path_or_raise("diffusion_models", unet_name)
model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options)
return (model,)
class CLIPLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image"], ),
},
"optional": {
"device": (["default", "cpu"], {"advanced": True}),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "load_clip"
CATEGORY = "advanced/loaders"
    DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl / clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\nhidream: llama-3.1 (recommended) or t5\nomnigen2: qwen vl 2.5 3B"
def load_clip(self, clip_name, type="stable_diffusion", device="default"):
clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION)
model_options = {}
if device == "cpu":
model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")
clip_path = folder_paths.get_full_path_or_raise("text_encoders", clip_name)
clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
return (clip,)
class DualCLIPLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
"clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15"], ),
},
"optional": {
"device": (["default", "cpu"], {"advanced": True}),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "load_clip"
CATEGORY = "advanced/loaders"
DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama\nhunyuan_image: qwen2.5vl 7b and byt5 small"
def load_clip(self, clip_name1, clip_name2, type, device="default"):
clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION)
clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
model_options = {}
if device == "cpu":
model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")
clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
return (clip,)
class CLIPVisionLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
}}
RETURN_TYPES = ("CLIP_VISION",)
FUNCTION = "load_clip"
CATEGORY = "loaders"
def load_clip(self, clip_name):
clip_path = folder_paths.get_full_path_or_raise("clip_vision", clip_name)
clip_vision = comfy.clip_vision.load(clip_path)
if clip_vision is None:
raise RuntimeError("ERROR: clip vision file is invalid and does not contain a valid vision model.")
return (clip_vision,)
class CLIPVisionEncode:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_vision": ("CLIP_VISION",),
"image": ("IMAGE",),
"crop": (["center", "none"],)
}}
RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
FUNCTION = "encode"
CATEGORY = "conditioning"
def encode(self, clip_vision, image, crop):
crop_image = True
if crop != "center":
crop_image = False
output = clip_vision.encode_image(image, crop=crop_image)
return (output,)
class StyleModelLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}
RETURN_TYPES = ("STYLE_MODEL",)
FUNCTION = "load_style_model"
CATEGORY = "loaders"
def load_style_model(self, style_model_name):
style_model_path = folder_paths.get_full_path_or_raise("style_models", style_model_name)
style_model = comfy.sd.load_style_model(style_model_path)
return (style_model,)
class StyleModelApply:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"style_model": ("STYLE_MODEL", ),
"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}),
"strength_type": (["multiply", "attn_bias"], ),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "apply_stylemodel"
CATEGORY = "conditioning/style_model"
def apply_stylemodel(self, conditioning, style_model, clip_vision_output, strength, strength_type):
cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
if strength_type == "multiply":
cond *= strength
n = cond.shape[1]
c_out = []
for t in conditioning:
(txt, keys) = t
keys = keys.copy()
            # even if the strength is 1.0 (i.e., no change), if there's already a mask, we have to add to it
if "attention_mask" in keys or (strength_type == "attn_bias" and strength != 1.0):
# math.log raises an error if the argument is zero
# torch.log returns -inf, which is what we want
attn_bias = torch.log(torch.Tensor([strength if strength_type == "attn_bias" else 1.0]))
# get the size of the mask image
mask_ref_size = keys.get("attention_mask_img_shape", (1, 1))
n_ref = mask_ref_size[0] * mask_ref_size[1]
n_txt = txt.shape[1]
# grab the existing mask
mask = keys.get("attention_mask", None)
# create a default mask if it doesn't exist
if mask is None:
mask = torch.zeros((txt.shape[0], n_txt + n_ref, n_txt + n_ref), dtype=torch.float16)
# convert the mask dtype, because it might be boolean
# we want it to be interpreted as a bias
if mask.dtype == torch.bool:
# log(True) = log(1) = 0
# log(False) = log(0) = -inf
mask = torch.log(mask.to(dtype=torch.float16))
# now we make the mask bigger to add space for our new tokens
new_mask = torch.zeros((txt.shape[0], n_txt + n + n_ref, n_txt + n + n_ref), dtype=torch.float16)
                # copy over the old mask, in quadrants
new_mask[:, :n_txt, :n_txt] = mask[:, :n_txt, :n_txt]
new_mask[:, :n_txt, n_txt+n:] = mask[:, :n_txt, n_txt:]
new_mask[:, n_txt+n:, :n_txt] = mask[:, n_txt:, :n_txt]
new_mask[:, n_txt+n:, n_txt+n:] = mask[:, n_txt:, n_txt:]
# now fill in the attention bias to our redux tokens
new_mask[:, :n_txt, n_txt:n_txt+n] = attn_bias
new_mask[:, n_txt+n:, n_txt:n_txt+n] = attn_bias
keys["attention_mask"] = new_mask.to(txt.device)
keys["attention_mask_img_shape"] = mask_ref_size
c_out.append([torch.cat((txt, cond), dim=1), keys])
return (c_out,)
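
# Layout of the expanded attention mask built above, with the n redux tokens
# spliced between the n_txt text tokens and the n_ref reference-image tokens
# (rows attend to columns; attn_bias = log(strength)):
#
#            txt          redux        ref
#   txt   [ old t->t ][ attn_bias ][ old t->r ]
#   redux [    0     ][     0     ][    0     ]
#   ref   [ old r->t ][ attn_bias ][ old r->r ]
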
class unCLIPConditioning:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning": ("CONDITIONING", ),
"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
"strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
"noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "apply_adm"
CATEGORY = "conditioning"
def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
if strength == 0:
return (conditioning, )
c = node_helpers.conditioning_set_values(conditioning, {"unclip_conditioning": [{"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}]}, append=True)
return (c, )
class GLIGENLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}
RETURN_TYPES = ("GLIGEN",)
FUNCTION = "load_gligen"
CATEGORY = "loaders"
def load_gligen(self, gligen_name):
gligen_path = folder_paths.get_full_path_or_raise("gligen", gligen_name)
gligen = comfy.sd.load_gligen(gligen_path)
return (gligen,)
class GLIGENTextBoxApply:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning_to": ("CONDITIONING", ),
"clip": ("CLIP", ),
"gligen_textbox_model": ("GLIGEN", ),
"text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning/gligen"
def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
c = []
cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
for t in conditioning_to:
n = [t[0], t[1].copy()]
position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
prev = []
if "gligen" in n[1]:
prev = n[1]['gligen'][2]
n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
c.append(n)
return (c, )
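# Illustrative note: GLIGEN boxes are stored in latent units (pixels // 8), so a
# hypothetical box with x=64, y=0, width=256, height=128 is recorded as
# (cond_pooled, 16, 32, 0, 8), following the (pooled, h, w, y, x) order above.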
class EmptyLatentImage:
def __init__(self):
self.device = comfy.model_management.intermediate_device()
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The width of the latent images in pixels."}),
"height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The height of the latent images in pixels."}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})
}
}
RETURN_TYPES = ("LATENT",)
OUTPUT_TOOLTIPS = ("The empty latent image batch.",)
FUNCTION = "generate"
CATEGORY = "latent"
DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling."
def generate(self, width, height, batch_size=1):
latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
return ({"samples":latent}, )
class LatentFromBatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
"length": ("INT", {"default": 1, "min": 1, "max": 64}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "frombatch"
CATEGORY = "latent/batch"
def frombatch(self, samples, batch_index, length):
s = samples.copy()
s_in = samples["samples"]
batch_index = min(s_in.shape[0] - 1, batch_index)
length = min(s_in.shape[0] - batch_index, length)
s["samples"] = s_in[batch_index:batch_index + length].clone()
if "noise_mask" in samples:
masks = samples["noise_mask"]
if masks.shape[0] == 1:
s["noise_mask"] = masks.clone()
else:
if masks.shape[0] < s_in.shape[0]:
masks = masks.repeat(math.ceil(s_in.shape[0] / masks.shape[0]), 1, 1, 1)[:s_in.shape[0]]
s["noise_mask"] = masks[batch_index:batch_index + length].clone()
if "batch_index" not in s:
s["batch_index"] = [x for x in range(batch_index, batch_index+length)]
else:
s["batch_index"] = samples["batch_index"][batch_index:batch_index + length]
return (s,)
class RepeatLatentBatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"amount": ("INT", {"default": 1, "min": 1, "max": 64}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "repeat"
CATEGORY = "latent/batch"
def repeat(self, samples, amount):
s = samples.copy()
s_in = samples["samples"]
s["samples"] = s_in.repeat((amount,) + ((1,) * (s_in.ndim - 1)))
if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
masks = samples["noise_mask"]
if masks.shape[0] < s_in.shape[0]:
masks = masks.repeat((math.ceil(s_in.shape[0] / masks.shape[0]),) + ((1,) * (masks.ndim - 1)))[:s_in.shape[0]]
s["noise_mask"] = samples["noise_mask"].repeat((amount,) + ((1,) * (samples["noise_mask"].ndim - 1)))
if "batch_index" in s:
offset = max(s["batch_index"]) - min(s["batch_index"]) + 1
s["batch_index"] = s["batch_index"] + [x + (i * offset) for i in range(1, amount) for x in s["batch_index"]]
return (s,)
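# Illustrative example of the batch_index bookkeeping above: with
# s["batch_index"] == [0, 1] and amount == 3, offset == max - min + 1 == 2 and
# the repeated batch is tagged [0, 1, 2, 3, 4, 5].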
class LatentUpscale:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
crop_methods = ["disabled", "center"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
"width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"crop": (s.crop_methods,)}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "upscale"
CATEGORY = "latent"
def upscale(self, samples, upscale_method, width, height, crop):
if width == 0 and height == 0:
s = samples
else:
s = samples.copy()
if width == 0:
height = max(64, height)
width = max(64, round(samples["samples"].shape[-1] * height / samples["samples"].shape[-2]))
elif height == 0:
width = max(64, width)
height = max(64, round(samples["samples"].shape[-2] * width / samples["samples"].shape[-1]))
else:
width = max(64, width)
height = max(64, height)
s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop)
return (s,)
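# Illustrative example (sizes assumed): for a 512x256 px latent (64x32 cells),
# width=0 with height=1024 keeps the aspect ratio, computing
# width = max(64, round(32 * 1024 / 64)) == 512, so the latent is resized to
# 1024x512 px (128x64 cells); width=0 and height=0 returns the input unchanged.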
class LatentUpscaleBy:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
"scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "upscale"
CATEGORY = "latent"
def upscale(self, samples, upscale_method, scale_by):
s = samples.copy()
width = round(samples["samples"].shape[-1] * scale_by)
height = round(samples["samples"].shape[-2] * scale_by)
s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
return (s,)
class LatentRotate:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "rotate"
CATEGORY = "latent/transform"
def rotate(self, samples, rotation):
s = samples.copy()
rotate_by = 0
if rotation.startswith("90"):
rotate_by = 1
elif rotation.startswith("180"):
rotate_by = 2
elif rotation.startswith("270"):
rotate_by = 3
s["samples"] = torch.rot90(samples["samples"], k=rotate_by, dims=[3, 2])
return (s,)
class LatentFlip:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "flip"
CATEGORY = "latent/transform"
def flip(self, samples, flip_method):
s = samples.copy()
if flip_method.startswith("x"):
s["samples"] = torch.flip(samples["samples"], dims=[2])
elif flip_method.startswith("y"):
s["samples"] = torch.flip(samples["samples"], dims=[3])
return (s,)
class LatentComposite:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples_to": ("LATENT",),
"samples_from": ("LATENT",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "composite"
CATEGORY = "latent"
def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
x = x // 8
y = y // 8
feather = feather // 8
samples_out = samples_to.copy()
s = samples_to["samples"].clone()
samples_to = samples_to["samples"]
samples_from = samples_from["samples"]
if feather == 0:
s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
else:
samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x]
mask = torch.ones_like(samples_from)
for t in range(feather):
if y != 0:
mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))
if y + samples_from.shape[2] < samples_to.shape[2]:
mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
if x != 0:
mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
if x + samples_from.shape[3] < samples_to.shape[3]:
mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
rev_mask = torch.ones_like(mask) - mask
s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask
samples_out["samples"] = s
return (samples_out,)
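# Illustrative example of the feather ramp above (values assumed): feather=32 px
# is 4 latent cells, so the 4 rows/columns of the paste region closest to an
# interior edge are weighted 0.25, 0.50, 0.75 and 1.00, linearly crossfading
# samples_from into samples_to; edges flush with the canvas border are not
# feathered.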
class LatentBlend:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"samples1": ("LATENT",),
"samples2": ("LATENT",),
"blend_factor": ("FLOAT", {
"default": 0.5,
"min": 0,
"max": 1,
"step": 0.01
}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "blend"
CATEGORY = "_for_testing"
def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"):
samples_out = samples1.copy()
samples1 = samples1["samples"]
samples2 = samples2["samples"]
        if samples1.shape != samples2.shape:
            # latents are already in NCHW layout, which is what common_upscale
            # expects; the previous bare permute() calls here were no-ops (their
            # results were discarded), so samples2 is simply resized to match
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')
samples_blended = self.blend_mode(samples1, samples2, blend_mode)
samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)
samples_out["samples"] = samples_blended
return (samples_out,)
def blend_mode(self, img1, img2, mode):
if mode == "normal":
return img2
else:
raise ValueError(f"Unsupported blend mode: {mode}")
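# Note on the math above: the result is
#   samples1 * blend_factor + blend_mode(samples1, samples2) * (1 - blend_factor)
# and "normal" mode returns samples2 unchanged, so blend_factor=1.0 keeps
# samples1 while blend_factor=0.0 keeps samples2.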
class LatentCrop:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "crop"
CATEGORY = "latent/transform"
def crop(self, samples, width, height, x, y):
s = samples.copy()
samples = samples['samples']
x = x // 8
y = y // 8
        # clamp x/y so at least a 64px (8 latent cell) region remains to crop
if x > (samples.shape[3] - 8):
x = samples.shape[3] - 8
if y > (samples.shape[2] - 8):
y = samples.shape[2] - 8
new_height = height // 8
new_width = width // 8
to_x = new_width + x
to_y = new_height + y
s['samples'] = samples[:,:,y:to_y, x:to_x]
return (s,)
class SetLatentNoiseMask:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"mask": ("MASK",),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "set_mask"
CATEGORY = "latent/inpaint"
def set_mask(self, samples, mask):
s = samples.copy()
s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
return (s,)
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
latent_image = latent["samples"]
latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image)
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
batch_inds = latent["batch_index"] if "batch_index" in latent else None
noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
noise_mask = None
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
callback = latent_preview.prepare_callback(model, steps)
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
out = latent.copy()
out["samples"] = samples
return (out, )
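# Minimal usage sketch (argument values are placeholders, not defaults): every
# KSampler-style node ultimately funnels into this helper, e.g.
#   out, = common_ksampler(model, seed=0, steps=20, cfg=8.0,
#                          sampler_name="euler", scheduler="normal",
#                          positive=positive, negative=negative, latent=latent)
# where valid sampler/scheduler names come from comfy.samplers.KSampler.SAMPLERS
# and comfy.samplers.KSampler.SCHEDULERS.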
class KSampler:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL", {"tooltip": "The model used for denoising the input latent."}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True, "tooltip": "The random seed used for creating the noise."}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process."}),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01, "tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."}),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"tooltip": "The scheduler controls how noise is gradually removed to form the image."}),
"positive": ("CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to include in the image."}),
"negative": ("CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to exclude from the image."}),
"latent_image": ("LATENT", {"tooltip": "The latent image to denoise."}),
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."}),
}
}
RETURN_TYPES = ("LATENT",)
OUTPUT_TOOLTIPS = ("The denoised latent.",)
FUNCTION = "sample"
CATEGORY = "sampling"
DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image."
def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
class KSamplerAdvanced:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"model": ("MODEL",),
"add_noise": (["enable", "disable"], ),
"noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"latent_image": ("LATENT", ),
"start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
"end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
"return_with_leftover_noise": (["disable", "enable"], ),
}
}
RETURN_TYPES = ("LATENT",)
FUNCTION = "sample"
CATEGORY = "sampling"
def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
force_full_denoise = True
if return_with_leftover_noise == "enable":
force_full_denoise = False
disable_noise = False
if add_noise == "disable":
disable_noise = True
return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
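# Illustrative two-stage pattern (step counts assumed): stop the first pass early
# with leftover noise, then hand off to a second pass that adds no new noise:
#   first:  steps=25, start_at_step=0,  end_at_step=20, return_with_leftover_noise="enable"
#   second: steps=25, start_at_step=20, end_at_step=10000, add_noise="disable"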
class SaveImage:
def __init__(self):
self.output_dir = folder_paths.get_output_directory()
self.type = "output"
self.prefix_append = ""
self.compress_level = 4
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"images": ("IMAGE", {"tooltip": "The images to save."}),
"filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."})
},
"hidden": {
"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"
},
}
RETURN_TYPES = ()
FUNCTION = "save_images"
OUTPUT_NODE = True
CATEGORY = "image"
DESCRIPTION = "Saves the input images to your ComfyUI output directory."
def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
results = list()
for (batch_number, image) in enumerate(images):
i = 255. * image.cpu().numpy()
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
metadata = None
if not args.disable_metadata:
metadata = PngInfo()
if prompt is not None:
metadata.add_text("prompt", json.dumps(prompt))
if extra_pnginfo is not None:
for x in extra_pnginfo:
metadata.add_text(x, json.dumps(extra_pnginfo[x]))
filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
file = f"{filename_with_batch_num}_{counter:05}_.png"
img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
results.append({
"filename": file,
"subfolder": subfolder,
"type": self.type
})
counter += 1
return { "ui": { "images": results } }
class PreviewImage(SaveImage):
def __init__(self):
self.output_dir = folder_paths.get_temp_directory()
self.type = "temp"
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
self.compress_level = 1
@classmethod
def INPUT_TYPES(s):
return {"required":
{"images": ("IMAGE", ), },
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
class LoadImage:
@classmethod
def INPUT_TYPES(s):
input_dir = folder_paths.get_input_directory()
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
files = folder_paths.filter_files_content_types(files, ["image"])
return {"required":
{"image": (sorted(files), {"image_upload": True})},
}
CATEGORY = "image"
RETURN_TYPES = ("IMAGE", "MASK")
FUNCTION = "load_image"
def load_image(self, image):
image_path = folder_paths.get_annotated_filepath(image)
img = node_helpers.pillow(Image.open, image_path)
output_images = []
output_masks = []
w, h = None, None
excluded_formats = ['MPO']
for i in ImageSequence.Iterator(img):
i = node_helpers.pillow(ImageOps.exif_transpose, i)
if i.mode == 'I':
i = i.point(lambda i: i * (1 / 255))
image = i.convert("RGB")
if len(output_images) == 0:
w = image.size[0]
h = image.size[1]
if image.size[0] != w or image.size[1] != h:
continue
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
elif i.mode == 'P' and 'transparency' in i.info:
mask = np.array(i.convert('RGBA').getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
output_images.append(image)
output_masks.append(mask.unsqueeze(0))
if len(output_images) > 1 and img.format not in excluded_formats:
output_image = torch.cat(output_images, dim=0)
output_mask = torch.cat(output_masks, dim=0)
else:
output_image = output_images[0]
output_mask = output_masks[0]
return (output_image, output_mask)
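    # Note on the mask convention above: alpha is inverted (mask = 1.0 - alpha),
    # so fully opaque pixels become 0.0 and fully transparent pixels become 1.0;
    # frames without usable alpha get a 64x64 all-zero placeholder mask.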
@classmethod
def IS_CHANGED(s, image):
image_path = folder_paths.get_annotated_filepath(image)
m = hashlib.sha256()
with open(image_path, 'rb') as f:
m.update(f.read())
return m.digest().hex()
@classmethod
def VALIDATE_INPUTS(s, image):
if not folder_paths.exists_annotated_filepath(image):
return "Invalid image file: {}".format(image)
return True
class LoadImageMask:
_color_channels = ["alpha", "red", "green", "blue"]
@classmethod
def INPUT_TYPES(s):
input_dir = folder_paths.get_input_directory()
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
return {"required":
{"image": (sorted(files), {"image_upload": True}),
"channel": (s._color_channels, ), }
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "load_image"
def load_image(self, image, channel):
image_path = folder_paths.get_annotated_filepath(image)
i = node_helpers.pillow(Image.open, image_path)
i = node_helpers.pillow(ImageOps.exif_transpose, i)
if i.getbands() != ("R", "G", "B", "A"):
if i.mode == 'I':
i = i.point(lambda i: i * (1 / 255))
i = i.convert("RGBA")
mask = None
c = channel[0].upper()
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
return (mask.unsqueeze(0),)
@classmethod
def IS_CHANGED(s, image, channel):
image_path = folder_paths.get_annotated_filepath(image)
m = hashlib.sha256()
with open(image_path, 'rb') as f:
m.update(f.read())
return m.digest().hex()
@classmethod
def VALIDATE_INPUTS(s, image):
if not folder_paths.exists_annotated_filepath(image):
return "Invalid image file: {}".format(image)
return True
class LoadImageOutput(LoadImage):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("COMBO", {
"image_upload": True,
"image_folder": "output",
"remote": {
"route": "/internal/files/output",
"refresh_button": True,
"control_after_refresh": "first",
},
}),
}
}
DESCRIPTION = "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration."
EXPERIMENTAL = True
FUNCTION = "load_image"
class ImageScale:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
crop_methods = ["disabled", "center"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
"width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"crop": (s.crop_methods,)}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "upscale"
CATEGORY = "image/upscaling"
def upscale(self, image, upscale_method, width, height, crop):
if width == 0 and height == 0:
s = image
else:
samples = image.movedim(-1,1)
if width == 0:
width = max(1, round(samples.shape[3] * height / samples.shape[2]))
elif height == 0:
height = max(1, round(samples.shape[2] * width / samples.shape[3]))
s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
s = s.movedim(1,-1)
return (s,)
class ImageScaleBy:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
"scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "upscale"
CATEGORY = "image/upscaling"
def upscale(self, image, upscale_method, scale_by):
samples = image.movedim(-1,1)
width = round(samples.shape[3] * scale_by)
height = round(samples.shape[2] * scale_by)
s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
s = s.movedim(1,-1)
return (s,)
class ImageInvert:
@classmethod
def INPUT_TYPES(s):
return {"required": { "image": ("IMAGE",)}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "invert"
CATEGORY = "image"
def invert(self, image):
s = 1.0 - image
return (s,)
class ImageBatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "image1": ("IMAGE",), "image2": ("IMAGE",)}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "batch"
CATEGORY = "image"
def batch(self, image1, image2):
if image1.shape[-1] != image2.shape[-1]:
if image1.shape[-1] > image2.shape[-1]:
image2 = torch.nn.functional.pad(image2, (0,1), mode='constant', value=1.0)
else:
image1 = torch.nn.functional.pad(image1, (0,1), mode='constant', value=1.0)
if image1.shape[1:] != image2.shape[1:]:
image2 = comfy.utils.common_upscale(image2.movedim(-1,1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1,-1)
s = torch.cat((image1, image2), dim=0)
return (s,)
class EmptyImage:
def __init__(self, device="cpu"):
self.device = device
@classmethod
def INPUT_TYPES(s):
return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
"color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "generate"
CATEGORY = "image"
def generate(self, width, height, batch_size=1, color=0):
r = torch.full([batch_size, height, width, 1], ((color >> 16) & 0xFF) / 0xFF)
g = torch.full([batch_size, height, width, 1], ((color >> 8) & 0xFF) / 0xFF)
b = torch.full([batch_size, height, width, 1], ((color) & 0xFF) / 0xFF)
return (torch.cat((r, g, b), dim=-1), )
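# Illustrative example of the channel unpacking above: color=0xFF8000 gives
# r = 255/255 = 1.0, g = 128/255 ≈ 0.502, b = 0/255 = 0.0 for every pixel of the
# [batch_size, height, width, 3] output.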
class ImagePadForOutpaint:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
}
}
RETURN_TYPES = ("IMAGE", "MASK")
FUNCTION = "expand_image"
CATEGORY = "image"
def expand_image(self, image, left, top, right, bottom, feathering):
d1, d2, d3, d4 = image.size()
new_image = torch.ones(
(d1, d2 + top + bottom, d3 + left + right, d4),
dtype=torch.float32,
) * 0.5
new_image[:, top:top + d2, left:left + d3, :] = image
mask = torch.ones(
(d2 + top + bottom, d3 + left + right),
dtype=torch.float32,
)
t = torch.zeros(
(d2, d3),
dtype=torch.float32
)
if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
for i in range(d2):
for j in range(d3):
dt = i if top != 0 else d2
db = d2 - i if bottom != 0 else d2
dl = j if left != 0 else d3
dr = d3 - j if right != 0 else d3
d = min(dt, db, dl, dr)
if d >= feathering:
continue
v = (feathering - d) / feathering
t[i, j] = v * v
mask[top:top + d2, left:left + d3] = t
return (new_image, mask.unsqueeze(0))
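# Note on the feathering above: inside the original image area the mask follows
# a quadratic falloff v = ((feathering - d) / feathering) ** 2, where d is the
# distance to the nearest padded edge; the padded border itself stays at 1.0
# (fully regenerated) and pixels at least `feathering` away remain 0.0.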
NODE_CLASS_MAPPINGS = {
"KSampler": KSampler,
"CheckpointLoaderSimple": CheckpointLoaderSimple,
"CLIPTextEncode": CLIPTextEncode,
"CLIPSetLastLayer": CLIPSetLastLayer,
"VAEDecode": VAEDecode,
"VAEEncode": VAEEncode,
"VAEEncodeForInpaint": VAEEncodeForInpaint,
"VAELoader": VAELoader,
"EmptyLatentImage": EmptyLatentImage,
"LatentUpscale": LatentUpscale,
"LatentUpscaleBy": LatentUpscaleBy,
"LatentFromBatch": LatentFromBatch,
"RepeatLatentBatch": RepeatLatentBatch,
"SaveImage": SaveImage,
"PreviewImage": PreviewImage,
"LoadImage": LoadImage,
"LoadImageMask": LoadImageMask,
"LoadImageOutput": LoadImageOutput,
"ImageScale": ImageScale,
"ImageScaleBy": ImageScaleBy,
"ImageInvert": ImageInvert,
"ImageBatch": ImageBatch,
"ImagePadForOutpaint": ImagePadForOutpaint,
"EmptyImage": EmptyImage,
"ConditioningAverage": ConditioningAverage ,
"ConditioningCombine": ConditioningCombine,
"ConditioningConcat": ConditioningConcat,
"ConditioningSetArea": ConditioningSetArea,
"ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
"ConditioningSetAreaStrength": ConditioningSetAreaStrength,
"ConditioningSetMask": ConditioningSetMask,
"KSamplerAdvanced": KSamplerAdvanced,
"SetLatentNoiseMask": SetLatentNoiseMask,
"LatentComposite": LatentComposite,
"LatentBlend": LatentBlend,
"LatentRotate": LatentRotate,
"LatentFlip": LatentFlip,
"LatentCrop": LatentCrop,
"LoraLoader": LoraLoader,
"CLIPLoader": CLIPLoader,
"UNETLoader": UNETLoader,
"DualCLIPLoader": DualCLIPLoader,
"CLIPVisionEncode": CLIPVisionEncode,
"StyleModelApply": StyleModelApply,
"unCLIPConditioning": unCLIPConditioning,
"ControlNetApply": ControlNetApply,
"ControlNetApplyAdvanced": ControlNetApplyAdvanced,
"ControlNetLoader": ControlNetLoader,
"DiffControlNetLoader": DiffControlNetLoader,
"StyleModelLoader": StyleModelLoader,
"CLIPVisionLoader": CLIPVisionLoader,
"VAEDecodeTiled": VAEDecodeTiled,
"VAEEncodeTiled": VAEEncodeTiled,
"unCLIPCheckpointLoader": unCLIPCheckpointLoader,
"GLIGENLoader": GLIGENLoader,
"GLIGENTextBoxApply": GLIGENTextBoxApply,
"InpaintModelConditioning": InpaintModelConditioning,
"CheckpointLoader": CheckpointLoader,
"DiffusersLoader": DiffusersLoader,
"LoadLatent": LoadLatent,
"SaveLatent": SaveLatent,
"ConditioningZeroOut": ConditioningZeroOut,
"ConditioningSetTimestepRange": ConditioningSetTimestepRange,
"LoraLoaderModelOnly": LoraLoaderModelOnly,
"LoadLora": LoadLora,
"LoadLoraModelOnly": LoadLoraModelOnly,
"LoadLoraWeights": LoadLoraWeights,
"ApplyLoraWeights": ApplyLoraWeights,
"ApplyLoraWeightsModelOnly": ApplyLoraWeightsModelOnly,
}
NODE_DISPLAY_NAME_MAPPINGS = {
# Sampling
"KSampler": "KSampler",
"KSamplerAdvanced": "KSampler (Advanced)",
# Loaders
"CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
"CheckpointLoaderSimple": "Load Checkpoint",
"VAELoader": "Load VAE",
"LoraLoader": "Load LoRA",
"CLIPLoader": "Load CLIP",
"ControlNetLoader": "Load ControlNet Model",
"DiffControlNetLoader": "Load ControlNet Model (diff)",
"StyleModelLoader": "Load Style Model",
"CLIPVisionLoader": "Load CLIP Vision",
"UNETLoader": "Load Diffusion Model",
"LoadLora": "Load LoRA Expand",
"LoadLoraModelOnly": "Load LoRA Expand (Model Only)",
"LoadLoraWeights": "Load LoRA Weights",
"ApplyLoraWeights": "Apply LoRA Weights",
"ApplyLoraWeightsModelOnly": "Apply LoRA Weights (Model Only)",
# Conditioning
"CLIPVisionEncode": "CLIP Vision Encode",
"StyleModelApply": "Apply Style Model",
"CLIPTextEncode": "CLIP Text Encode (Prompt)",
"CLIPSetLastLayer": "CLIP Set Last Layer",
"ConditioningCombine": "Conditioning (Combine)",
"ConditioningAverage ": "Conditioning (Average)",
"ConditioningConcat": "Conditioning (Concat)",
"ConditioningSetArea": "Conditioning (Set Area)",
"ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
"ConditioningSetMask": "Conditioning (Set Mask)",
"ControlNetApply": "Apply ControlNet (OLD)",
"ControlNetApplyAdvanced": "Apply ControlNet",
# Latent
"VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
"SetLatentNoiseMask": "Set Latent Noise Mask",
"VAEDecode": "VAE Decode",
"VAEEncode": "VAE Encode",
"LatentRotate": "Rotate Latent",
"LatentFlip": "Flip Latent",
"LatentCrop": "Crop Latent",
"EmptyLatentImage": "Empty Latent Image",
"LatentUpscale": "Upscale Latent",
"LatentUpscaleBy": "Upscale Latent By",
"LatentComposite": "Latent Composite",
"LatentBlend": "Latent Blend",
"LatentFromBatch" : "Latent From Batch",
"RepeatLatentBatch": "Repeat Latent Batch",
# Image
"SaveImage": "Save Image",
"PreviewImage": "Preview Image",
"LoadImage": "Load Image",
"LoadImageMask": "Load Image (as Mask)",
"LoadImageOutput": "Load Image (from Outputs)",
"ImageScale": "Upscale Image",
"ImageScaleBy": "Upscale Image By",
"ImageInvert": "Invert Image",
"ImagePadForOutpaint": "Pad Image for Outpainting",
"ImageBatch": "Batch Images",
"ImageCrop": "Image Crop",
"ImageStitch": "Image Stitch",
"ImageBlend": "Image Blend",
"ImageBlur": "Image Blur",
"ImageQuantize": "Image Quantize",
"ImageSharpen": "Image Sharpen",
"ImageScaleToTotalPixels": "Scale Image to Total Pixels",
"GetImageSize": "Get Image Size",
# _for_testing
"VAEDecodeTiled": "VAE Decode (Tiled)",
"VAEEncodeTiled": "VAE Encode (Tiled)",
}
EXTENSION_WEB_DIRS = {}
# Dictionary of successfully loaded module names and associated directories.
LOADED_MODULE_DIRS = {}
def get_module_name(module_path: str) -> str:
"""
Returns the module name based on the given module path.
Examples:
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node.py") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__.py") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/__init__/") -> "my_custom_node"
get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node.disabled") -> "custom_nodes
Args:
module_path (str): The path of the module.
Returns:
str: The module name.
"""
base_path = os.path.basename(module_path)
if os.path.isfile(module_path):
base_path = os.path.splitext(base_path)[0]
return base_path
async def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes") -> bool:
module_name = get_module_name(module_path)
if os.path.isfile(module_path):
sp = os.path.splitext(module_path)
module_name = sp[0]
sys_module_name = module_name
elif os.path.isdir(module_path):
sys_module_name = module_path.replace(".", "_x_")
try:
logging.debug("Trying to load custom node {}".format(module_path))
if os.path.isfile(module_path):
module_spec = importlib.util.spec_from_file_location(sys_module_name, module_path)
module_dir = os.path.split(module_path)[0]
else:
module_spec = importlib.util.spec_from_file_location(sys_module_name, os.path.join(module_path, "__init__.py"))
module_dir = module_path
module = importlib.util.module_from_spec(module_spec)
sys.modules[sys_module_name] = module
module_spec.loader.exec_module(module)
LOADED_MODULE_DIRS[module_name] = os.path.abspath(module_dir)
try:
from comfy_config import config_parser
project_config = config_parser.extract_node_configuration(module_path)
web_dir_name = project_config.tool_comfy.web
if web_dir_name:
web_dir_path = os.path.join(module_path, web_dir_name)
if os.path.isdir(web_dir_path):
project_name = project_config.project.name
EXTENSION_WEB_DIRS[project_name] = web_dir_path
logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name))
except Exception as e:
logging.warning(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}")
if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
if os.path.isdir(web_dir):
EXTENSION_WEB_DIRS[module_name] = web_dir
# V1 node definition
if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
for name, node_cls in module.NODE_CLASS_MAPPINGS.items():
if name not in ignore:
NODE_CLASS_MAPPINGS[name] = node_cls
node_cls.RELATIVE_PYTHON_MODULE = "{}.{}".format(module_parent, get_module_name(module_path))
if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
return True
# V3 Extension Definition
elif hasattr(module, "comfy_entrypoint"):
entrypoint = getattr(module, "comfy_entrypoint")
if not callable(entrypoint):
logging.warning(f"comfy_entrypoint in {module_path} is not callable, skipping.")
return False
try:
if inspect.iscoroutinefunction(entrypoint):
extension = await entrypoint()
else:
extension = entrypoint()
if not isinstance(extension, ComfyExtension):
logging.warning(f"comfy_entrypoint in {module_path} did not return a ComfyExtension, skipping.")
return False
node_list = await extension.get_node_list()
if not isinstance(node_list, list):
logging.warning(f"comfy_entrypoint in {module_path} did not return a list of nodes, skipping.")
return False
for node_cls in node_list:
node_cls: io.ComfyNode
schema = node_cls.GET_SCHEMA()
if schema.node_id not in ignore:
NODE_CLASS_MAPPINGS[schema.node_id] = node_cls
node_cls.RELATIVE_PYTHON_MODULE = "{}.{}".format(module_parent, get_module_name(module_path))
if schema.display_name is not None:
NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name
return True
except Exception as e:
logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}")
return False
else:
logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).")
return False
except Exception as e:
logging.warning(traceback.format_exc())
logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
return False
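# Minimal sketch of a V1 custom node module this loader accepts (all names are
# illustrative, not a real extension):
#
#   # custom_nodes/my_custom_node.py
#   class MyNode:
#       @classmethod
#       def INPUT_TYPES(s):
#           return {"required": {"image": ("IMAGE",)}}
#       RETURN_TYPES = ("IMAGE",)
#       FUNCTION = "run"
#       CATEGORY = "image"
#
#       def run(self, image):
#           return (image,)
#
#   NODE_CLASS_MAPPINGS = {"MyNode": MyNode}
#   NODE_DISPLAY_NAME_MAPPINGS = {"MyNode": "My Node"}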
async def init_external_custom_nodes():
"""
Initializes the external custom nodes.
This function loads custom nodes from the specified folder paths and imports them into the application.
It measures the import times for each custom node and logs the results.
Returns:
None
"""
base_node_names = set(NODE_CLASS_MAPPINGS.keys())
node_paths = folder_paths.get_folder_paths("custom_nodes")
node_import_times = []
for custom_node_path in node_paths:
possible_modules = os.listdir(os.path.realpath(custom_node_path))
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
for possible_module in possible_modules:
module_path = os.path.join(custom_node_path, possible_module)
if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
if module_path.endswith(".disabled"): continue
if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
continue
time_before = time.perf_counter()
success = await load_custom_node(module_path, base_node_names, module_parent="custom_nodes")
node_import_times.append((time.perf_counter() - time_before, module_path, success))
if len(node_import_times) > 0:
logging.info("\nImport times for custom nodes:")
for n in sorted(node_import_times):
if n[2]:
import_message = ""
else:
import_message = " (IMPORT FAILED)"
logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
logging.info("")
async def init_builtin_extra_nodes():
"""
Initializes the built-in extra nodes in ComfyUI.
This function loads the extra node files located in the "comfy_extras" directory and imports them into ComfyUI.
If any of the extra node files fail to import, a warning message is logged.
Returns:
None
"""
extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
extras_files = [
"nodes_latent.py",
"nodes_hypernetwork.py",
"nodes_upscale_model.py",
"nodes_post_processing.py",
"nodes_mask.py",
"nodes_compositing.py",
"nodes_rebatch.py",
"nodes_model_merging.py",
"nodes_tomesd.py",
"nodes_clip_sdxl.py",
"nodes_canny.py",
"nodes_freelunch.py",
"nodes_custom_sampler.py",
"nodes_hypertile.py",
"nodes_model_advanced.py",
"nodes_model_downscale.py",
"nodes_images.py",
"nodes_video_model.py",
"nodes_train.py",
"nodes_sag.py",
"nodes_perpneg.py",
"nodes_stable3d.py",
"nodes_sdupscale.py",
"nodes_photomaker.py",
"nodes_pixart.py",
"nodes_cond.py",
"nodes_morphology.py",
"nodes_stable_cascade.py",
"nodes_differential_diffusion.py",
"nodes_ip2p.py",
"nodes_model_merging_model_specific.py",
"nodes_pag.py",
"nodes_align_your_steps.py",
"nodes_attention_multiply.py",
"nodes_advanced_samplers.py",
"nodes_webcam.py",
"nodes_audio.py",
"nodes_sd3.py",
"nodes_gits.py",
"nodes_controlnet.py",
"nodes_hunyuan.py",
"nodes_eps.py",
"nodes_flux.py",
"nodes_lora_extract.py",
"nodes_torch_compile.py",
"nodes_mochi.py",
"nodes_slg.py",
"nodes_mahiro.py",
"nodes_lt.py",
"nodes_hooks.py",
"nodes_load_3d.py",
"nodes_cosmos.py",
"nodes_video.py",
"nodes_lumina2.py",
"nodes_wan.py",
"nodes_lotus.py",
"nodes_hunyuan3d.py",
"nodes_primitive.py",
"nodes_cfg.py",
"nodes_optimalsteps.py",
"nodes_hidream.py",
"nodes_fresca.py",
"nodes_apg.py",
"nodes_preview_any.py",
"nodes_ace.py",
"nodes_string.py",
"nodes_camera_trajectory.py",
"nodes_edit_model.py",
"nodes_tcfg.py",
"nodes_context_windows.py",
"nodes_qwen.py",
"nodes_chroma_radiance.py",
"nodes_model_patch.py",
"nodes_easycache.py",
"nodes_audio_encoder.py",
"nodes_rope.py",
"nodes_nop.py",
    ]
    import_failed = []
    for node_file in extras_files:
        if not await load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras"):
            import_failed.append(node_file)
    return import_failed
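
# Illustrative usage sketch (an assumption, not part of this module): the
# loaders here collect import failures instead of raising, so a single broken
# extras file does not abort startup and callers can report stragglers:
#
#     import asyncio
#     failed = asyncio.run(init_builtin_extra_nodes())
#     if failed:
#         print("comfy_extras files that failed to import:", failed)
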
async def init_builtin_api_nodes():
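    """Load the built-in API node modules that live under comfy_api_nodes.

    Returns the list of files that failed to import; if the canary module
    itself cannot be loaded, the whole list is reported as failed.
    """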
    api_nodes_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_api_nodes")
    api_nodes_files = [
"nodes_ideogram.py",
"nodes_openai.py",
"nodes_minimax.py",
"nodes_veo2.py",
"nodes_kling.py",
"nodes_bfl.py",
"nodes_bytedance.py",
"nodes_ltxv.py",
"nodes_luma.py",
"nodes_recraft.py",
"nodes_pixverse.py",
"nodes_stability.py",
"nodes_pika.py",
"nodes_runway.py",
"nodes_sora.py",
"nodes_topaz.py",
"nodes_tripo.py",
"nodes_moonvalley.py",
"nodes_rodin.py",
"nodes_gemini.py",
"nodes_vidu.py",
"nodes_wan.py",
    ]
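    # canary.py is loaded first as a gate (plausibly a dependency/environment
    # check; its contents are not shown here). If it fails to import, every
    # API node file is reported as failed rather than attempted one by one.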
    if not await load_custom_node(os.path.join(api_nodes_dir, "canary.py"), module_parent="comfy_api_nodes"):
        return api_nodes_files

    import_failed = []
    for node_file in api_nodes_files:
        if not await load_custom_node(os.path.join(api_nodes_dir, node_file), module_parent="comfy_api_nodes"):
            import_failed.append(node_file)
    return import_failed

async def init_public_apis():
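    """Register every supported public API version.

    Each entry in supported_versions is expected to expose a VERSION
    attribute; getattr(v, "VERSION") raises AttributeError if one is missing.
    """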
    register_versions([
        ComfyAPIWithVersion(
            version=getattr(v, "VERSION"),
            api_class=v
        ) for v in supported_versions
    ])

async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
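    """Initialize public APIs, built-in extras, API nodes and custom nodes.

    Only comfy_extras import failures are returned to the caller; API node
    failures are logged below but not propagated.
    """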
    await init_public_apis()

    import_failed = await init_builtin_extra_nodes()

    import_failed_api = []
    if init_api_nodes:
        import_failed_api = await init_builtin_api_nodes()

    if init_custom_nodes:
        await init_external_custom_nodes()
    else:
        logging.info("Skipping loading of custom nodes")

    if len(import_failed_api) > 0:
        logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
        for node in import_failed_api:
            logging.warning("IMPORT FAILED: {}".format(node))
        logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
        if args.windows_standalone_build:
            logging.warning("Please run the update script: update/update_comfyui.bat")
        else:
            logging.warning("Please do a: pip install -r requirements.txt")
        logging.warning("")

    if len(import_failed) > 0:
logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
for node in import_failed:
logging.warning("IMPORT FAILED: {}".format(node))
logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
if args.windows_standalone_build:
logging.warning("Please run the update script: update/update_comfyui.bat")
else:
logging.warning("Please do a: pip install -r requirements.txt")
logging.warning("")

    return import_failed
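
# Minimal startup sketch (assumptions: the caller owns the asyncio event loop
# and the hypothetical startup() wrapper below; ComfyUI's real entry point may
# differ):
#
#     import asyncio
#
#     async def startup():
#         failed = await init_extra_nodes(init_custom_nodes=True, init_api_nodes=True)
#         if failed:
#             logging.warning("comfy_extras files that failed to import: %s", failed)
#
#     asyncio.run(startup())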