2023-09-13 23:38:20 +08:00
import comfy . options
comfy . options . enable_args_parsing ( )
2023-07-11 14:33:21 +08:00
import os
import importlib . util
import folder_paths
2023-07-14 01:01:45 +08:00
import time
2024-07-02 05:54:03 +08:00
from comfy . cli_args import args
2024-08-31 00:46:37 +08:00
from app . logger import setup_logger
2024-12-14 07:21:32 +08:00
import itertools
import utils . extra_config
import logging
2025-04-13 06:58:20 +08:00
import sys
2025-07-11 02:46:19 +08:00
from comfy_execution . progress import get_progress_state
from comfy_execution . utils import get_executing_context
from comfy_api import feature_flags
2024-08-31 00:46:37 +08:00
2024-12-08 03:51:20 +08:00
if __name__ == "__main__":
    #NOTE: These do not do anything on core ComfyUI, they are for custom nodes.
    # Opt out of telemetry/tracking for libraries (e.g. Hugging Face Hub)
    # that honor these variables, before any custom node imports them.
    os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1'
    os.environ['DO_NOT_TRACK'] = '1'

    # Configure log level and destination from the CLI arguments.
    setup_logger(log_level=args.verbose, use_stdout=args.log_stdout)
2024-07-02 05:54:03 +08:00
2024-12-14 07:21:32 +08:00
def apply_custom_paths():
    """Register extra model-path configs and apply the output/input/user
    directory overrides passed on the command line."""
    # Load the bundled extra_model_paths.yaml if it exists, then every config
    # file passed via --extra-model-paths-config.
    bundled_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(bundled_config):
        utils.extra_config.load_extra_path_config(bundled_config)

    if args.extra_model_paths_config:
        for config_path in itertools.chain.from_iterable(args.extra_model_paths_config):
            utils.extra_config.load_extra_path_config(config_path)

    # --output-directory, --input-directory, --user-directory
    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

        # These are the default folders that checkpoints, clip and vae models
        # will be saved to when using CheckpointSave, etc.. nodes
        for subfolder in ("checkpoints", "clip", "vae", "diffusion_models", "loras"):
            folder_paths.add_model_folder_path(subfolder, os.path.join(folder_paths.get_output_directory(), subfolder))

    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    if args.user_directory:
        user_dir = os.path.abspath(args.user_directory)
        logging.info(f"Setting user directory to: {user_dir}")
        folder_paths.set_user_directory(user_dir)
2023-07-11 14:33:21 +08:00
def execute_prestartup_script():
    """Run each enabled custom node package's prestartup_script.py.

    Runs before the heavy imports in this file so packages can patch the
    environment early. Honors --disable-all-custom-nodes together with
    --whitelist-custom-nodes, and logs a timing summary at the end.
    """
    if args.disable_all_custom_nodes and len(args.whitelist_custom_nodes) == 0:
        return

    def execute_script(script_path):
        # Import the script as its own module; exceptions are logged and
        # reported as a failure instead of aborting startup.
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            logging.error(f"Failed to execute startup-script: {script_path} / {e}")
        return False

    # Fix: accumulate timings across ALL custom_nodes directories. Previously
    # the list was re-created per directory, so only the last directory's
    # timings were reported, and the name was undefined (NameError) when
    # there were no directories at all.
    node_prestartup_times = []
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            # Fix: compare the entry name to "__pycache__" — the joined path
            # could never equal the bare directory name.
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or possible_module == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
                    logging.info(f"Prestartup Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
                    continue
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_prestartup_times) > 0:
        logging.info("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
        logging.info("")
2023-07-11 14:33:21 +08:00
2024-12-14 07:21:32 +08:00
# Apply path overrides and run custom-node prestartup scripts before the
# heavyweight imports below.
apply_custom_paths()
execute_prestartup_script()


# Main code
2023-04-06 08:32:59 +08:00
import asyncio
2023-03-14 03:34:05 +08:00
import shutil
2023-01-03 14:53:32 +08:00
import threading
2023-06-14 01:36:47 +08:00
import gc
2023-04-07 07:06:39 +08:00
2023-01-03 14:53:32 +08:00
2023-02-17 02:19:26 +08:00
if os.name == "nt":
    # Windows only: set mimalloc's purge delay to 0 so freed memory is
    # returned to the OS immediately — presumably to lower resident memory;
    # see mimalloc's MIMALLOC_PURGE_DELAY option (TODO confirm intent).
    os.environ['MIMALLOC_PURGE_DELAY'] = '0'
2023-02-17 02:19:26 +08:00
2023-02-08 11:12:56 +08:00
if __name__ == "__main__":
    # Enable experimental AOTriton support for ROCm builds of PyTorch.
    os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1'

    if args.default_device is not None:
        # Reorder the visible devices so the requested one becomes device 0,
        # keeping the rest available. NOTE(review): assumes at most 32 devices.
        default_dev = args.default_device
        devices = list(range(32))
        devices.remove(default_dev)
        devices.insert(0, default_dev)
        devices = ','.join(map(str, devices))
        os.environ['CUDA_VISIBLE_DEVICES'] = str(devices)
        os.environ['HIP_VISIBLE_DEVICES'] = str(devices)

    if args.cuda_device is not None:
        # Restrict visibility to a single device across CUDA, ROCm and Ascend.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
        os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.oneapi_device_selector is not None:
        os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
        logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector))

    if args.deterministic:
        # Respect a user-provided cuBLAS workspace config; otherwise set the
        # value cuBLAS requires for deterministic behavior.
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    # Presumably configures the CUDA allocator and so must run before torch
    # is imported — see cuda_malloc.py to confirm.
    import cuda_malloc

# All the environment variables above must be set before torch is imported,
# hence this sanity check.
if 'torch' in sys.modules:
    logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
2023-07-17 23:00:14 +08:00
import comfy . utils
2023-03-30 11:28:21 +08:00
2023-03-13 23:36:48 +08:00
import execution
2023-04-06 08:32:59 +08:00
import server
2025-07-11 02:46:19 +08:00
from protocol import BinaryEventTypes
2024-07-05 09:43:23 +08:00
import nodes
2023-06-14 01:36:47 +08:00
import comfy . model_management
2025-01-26 04:03:57 +08:00
import comfyui_version
2025-03-12 19:13:40 +08:00
import app . logger
2025-04-27 08:52:56 +08:00
import hook_breaker_ac10a0
2023-03-13 23:36:48 +08:00
2023-08-14 00:37:53 +08:00
def cuda_malloc_warning():
    """Warn when the active device appears on the cuda_malloc blacklist
    while the cudaMallocAsync backend is in use."""
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    blacklisted = "cudaMallocAsync" in device_name and any(entry in device_name for entry in cuda_malloc.blacklist)
    if blacklisted:
        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
2023-08-14 00:37:53 +08:00
2024-12-24 19:38:52 +08:00
def prompt_worker(q, server_instance):
    """Worker loop that pulls queued prompts, executes them, and manages GC.

    Runs forever on a daemon thread (started from start_comfyui).  `q` is the
    server's prompt queue; `server_instance` receives progress/"executing"
    notifications.
    """
    current_time: float = 0.0

    # Choose the executor cache strategy from CLI flags, checked in priority
    # order: --cache-lru, then --cache-ram, then --cache-none.
    cache_type = execution.CacheType.CLASSIC
    if args.cache_lru > 0:
        cache_type = execution.CacheType.LRU
    elif args.cache_ram > 0:
        cache_type = execution.CacheType.RAM_PRESSURE
    elif args.cache_none:
        cache_type = execution.CacheType.NONE

    e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_args={"lru": args.cache_lru, "ram": args.cache_ram})
    last_gc_collect = 0
    need_gc = False
    gc_collect_interval = 10.0  # seconds between forced gc passes

    while True:
        timeout = 1000.0
        if need_gc:
            # Wake early enough to run the pending gc pass even when idle.
            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server_instance.last_prompt_id = prompt_id

            # item[5] carries sensitive values that must not be persisted;
            # merge them into extra_data only for the duration of execution.
            sensitive = item[5]
            extra_data = item[3].copy()
            for k in sensitive:
                extra_data[k] = sensitive[k]
            e.execute(item[2], prompt_id, extra_data, item[4])
            need_gc = True

            # Strip the sensitive slot (index 5) from the item before
            # task_done records it in history.
            remove_sensitive = lambda prompt: prompt[:5] + prompt[6:]
            q.task_done(item_id,
                        e.history_result,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages), process_item=remove_sensitive)
            if server_instance.client_id is not None:
                # Tell the client that no node is executing for this prompt
                # anymore.
                server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time

            # Log Time in a more readable way after 10 minutes
            if execution_time > 600:
                execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
                logging.info(f"Prompt executed in {execution_time}")
            else:
                logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        # "unload_models" defaults to free_memory's value, so a free_memory
        # request implies unloading models as well.
        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            # Rate-limit full gc passes to once per gc_collect_interval.
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False

        # Undo any function hooks custom nodes may have installed during this
        # iteration. NOTE(review): placement inside the loop reconstructed
        # from a garbled dump — confirm exact indentation against upstream.
        hook_breaker_ac10a0.restore_functions()
2023-06-07 21:15:38 +08:00
2024-12-24 19:38:52 +08:00
async def run(server_instance, address='', port=8188, verbose=True, call_on_start=None):
    """Serve on every comma-separated address in `address` (all on `port`)
    while concurrently pumping the server's publish loop."""
    addresses = [(single_address, port) for single_address in address.split(",")]
    await asyncio.gather(
        server_instance.start_multi_address(addresses, call_on_start, verbose),
        server_instance.publish_loop(),
    )
2023-01-03 14:53:32 +08:00
2024-12-24 19:38:52 +08:00
def hijack_progress(server_instance):
    """Install a global progress-bar hook that relays updates (and optional
    preview images) to connected clients via `server_instance`."""
    def hook(value, total, preview_image, prompt_id=None, node_id=None):
        # Resolve ids in priority order: explicit argument, then the
        # currently-executing context, then the server's last-known values.
        executing_context = get_executing_context()
        if prompt_id is None and executing_context is not None:
            prompt_id = executing_context.prompt_id
        if node_id is None and executing_context is not None:
            node_id = executing_context.node_id

        # Raising from inside the hook aborts processing when the user has
        # requested an interrupt.
        comfy.model_management.throw_exception_if_processing_interrupted()

        if prompt_id is None:
            prompt_id = server_instance.last_prompt_id
        if node_id is None:
            node_id = server_instance.last_node_id
        progress = {"value": value, "max": total, "prompt_id": prompt_id, "node": node_id}
        get_progress_state().update_progress(node_id, value, total, preview_image)

        server_instance.send_sync("progress", progress, server_instance.client_id)
        if preview_image is not None:
            # Only send old method if client doesn't support preview metadata
            if not feature_flags.supports_feature(
                server_instance.sockets_metadata,
                server_instance.client_id,
                "supports_preview_metadata",
            ):
                server_instance.send_sync(
                    BinaryEventTypes.UNENCODED_PREVIEW_IMAGE,
                    preview_image,
                    server_instance.client_id,
                )

    comfy.utils.set_progress_bar_global_hook(hook)
2023-01-03 14:53:32 +08:00
2023-06-07 21:15:38 +08:00
2023-03-14 03:34:05 +08:00
def cleanup_temp():
    """Remove ComfyUI's temp directory and all its contents, if present."""
    temp_dir = folder_paths.get_temp_directory()
    if not os.path.exists(temp_dir):
        return
    # Best-effort removal: errors (e.g. files held open) are ignored.
    shutil.rmtree(temp_dir, ignore_errors=True)
2023-03-14 03:34:05 +08:00
2023-06-07 21:15:38 +08:00
2025-06-12 04:43:39 +08:00
def setup_database():
    """Best-effort database initialization.

    Skipped when the optional database dependencies are unavailable; any
    failure (including the import itself) is logged instead of raised.
    """
    try:
        from app.database.db import dependencies_available, init_db

        if dependencies_available():
            init_db()
    except Exception as e:
        logging.error(f"Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: {e}")
2024-12-24 19:38:52 +08:00
def start_comfyui(asyncio_loop=None):
    """
    Starts the ComfyUI server using the provided asyncio event loop or creates a new one.
    Returns the event loop, server instance, and a function to start the server asynchronously.
    """
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    # Clear leftovers from a previous run before anything writes to temp.
    cleanup_temp()

    if args.windows_standalone_build:
        # Best-effort: refresh the standalone build's updater; failures are
        # deliberately ignored.
        try:
            import new_updater
            new_updater.update_windows_updater()
        except:
            pass

    if not asyncio_loop:
        asyncio_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(asyncio_loop)
    prompt_server = server.PromptServer(asyncio_loop)

    # Snapshot patchable functions before custom nodes load, and restore them
    # afterwards so node imports can't permanently hijack them.
    hook_breaker_ac10a0.save_functions()
    asyncio_loop.run_until_complete(nodes.init_extra_nodes(
        # Custom nodes still load when a whitelist is given, even with
        # --disable-all-custom-nodes (the whitelist filters inside).
        init_custom_nodes=(not args.disable_all_custom_nodes) or len(args.whitelist_custom_nodes) > 0,
        init_api_nodes=not args.disable_api_nodes
    ))
    hook_breaker_ac10a0.restore_functions()

    cuda_malloc_warning()

    setup_database()

    prompt_server.add_routes()
    hijack_progress(prompt_server)

    # Prompt execution happens on a daemon worker thread so the event loop
    # stays free for serving.
    threading.Thread(target=prompt_worker, daemon=True, args=(prompt_server.prompt_queue, prompt_server,)).start()

    # CI smoke test: everything above initialized without error, so exit.
    if args.quick_test_for_ci:
        exit(0)

    os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            # Open the UI in the default browser once the server is listening.
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            # Bracket IPv6 literals for the URL.
            if ':' in address:
                address = "[{}]".format(address)
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server

    async def start_all():
        await prompt_server.setup()
        await run(prompt_server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start)

    # Returning these so that other code can integrate with the ComfyUI loop and server
    return asyncio_loop, prompt_server, start_all
if __name__ == "__main__":
    # Running directly, just start ComfyUI.
    logging.info("Python version: {}".format(sys.version))
    logging.info("ComfyUI version: {}".format(comfyui_version.__version__))

    if sys.version_info.major == 3 and sys.version_info.minor < 10:
        logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 3.12 and above is recommended.")

    event_loop, _, start_all_func = start_comfyui()
    try:
        # Create the coroutine first so startup warnings print before the
        # loop blocks on serving.
        x = start_all_func()
        app.logger.print_startup_warnings()
        event_loop.run_until_complete(x)
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    # Remove the temp directory on shutdown.
    cleanup_temp()