feat(api-nodes): network client v2: async ops, cancellation, downloads, refactor (#10390)

* feat(api-nodes): implement new API client for V3 nodes

* converted WAN nodes to use new client; polishing

* fix(auth): do not leak authentication for absolute URLs

* convert BFL API nodes to use new API client; remove deprecated BFL nodes

* converted Google Veo nodes

* fix(Veo3.1 model): take into account "generate_audio" parameter

Author: Alexander Piskun
Date: 2025-10-24 08:37:16 +03:00
Committed by: GitHub
Parent: 24188b3141
Commit: 388b306a2b
29 changed files with 2935 additions and 2298 deletions
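
For context, here is a minimal sketch of the v2 client pattern that the diff below converts the BFL nodes to: submit the request with sync_op, poll the returned polling_url with poll_op, then download the result with download_url_to_image_tensor. The call signatures and endpoint path mirror the changed code; the example node class, its inputs, and the omitted schema/auth plumbing are hypothetical simplifications and are not part of this commit.

from comfy_api.latest import IO
from comfy_api_nodes.apis.bfl_api import (
    BFLFluxProGenerateRequest,
    BFLFluxProGenerateResponse,
    BFLFluxStatusResponse,
    BFLStatus,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_image_tensor,
    poll_op,
    sync_op,
)


class ExampleBFLNode(IO.ComfyNode):
    """Hypothetical node showing the submit -> poll -> download flow (schema omitted)."""

    @classmethod
    async def execute(cls, prompt: str, width: int = 1024, height: int = 768, seed: int = 0) -> IO.NodeOutput:
        # 1. Submit the generation request. Authentication is handled by the client,
        #    replacing the old SynchronousOperation + auth_kwargs boilerplate.
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bfl/flux-pro-1.1/generate", method="POST"),
            response_model=BFLFluxProGenerateResponse,
            data=BFLFluxProGenerateRequest(
                prompt=prompt,
                prompt_upsampling=False,
                width=width,
                height=height,
                seed=seed,
                image_prompt=None,
            ),
        )
        # 2. Poll the returned polling_url until a terminal status is reached;
        #    this replaces the hand-rolled aiohttp loop in _poll_until_generated.
        response = await poll_op(
            cls,
            ApiEndpoint(initial_response.polling_url),
            response_model=BFLFluxStatusResponse,
            status_extractor=lambda r: r.status,
            progress_extractor=lambda r: r.progress,
            completed_statuses=[BFLStatus.ready],
            failed_statuses=[
                BFLStatus.request_moderated,
                BFLStatus.content_moderated,
                BFLStatus.error,
                BFLStatus.task_not_found,
            ],
            queued_statuses=[],
        )
        # 3. Download the generated image from the URL in the poll result.
        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))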

@@ -1,136 +1,43 @@
import asyncio
import io
from inspect import cleandoc
from typing import Union, Optional
from typing import Optional
import torch
from typing_extensions import override
from comfy_api.latest import ComfyExtension, IO
from comfy_api.latest import IO, ComfyExtension
from comfy_api_nodes.apinode_utils import (
resize_mask_to_image,
validate_aspect_ratio,
)
from comfy_api_nodes.apis.bfl_api import (
BFLStatus,
BFLFluxExpandImageRequest,
BFLFluxFillImageRequest,
BFLFluxCannyImageRequest,
BFLFluxDepthImageRequest,
BFLFluxProGenerateRequest,
BFLFluxKontextProGenerateRequest,
BFLFluxProUltraGenerateRequest,
BFLFluxProGenerateRequest,
BFLFluxProGenerateResponse,
BFLFluxProUltraGenerateRequest,
BFLFluxStatusResponse,
BFLStatus,
)
from comfy_api_nodes.apis.client import (
from comfy_api_nodes.util import (
ApiEndpoint,
HttpMethod,
SynchronousOperation,
)
from comfy_api_nodes.apinode_utils import (
downscale_image_tensor,
validate_aspect_ratio,
process_image_response,
resize_mask_to_image,
download_url_to_image_tensor,
poll_op,
sync_op,
tensor_to_base64_string,
validate_string,
)
import numpy as np
from PIL import Image
import aiohttp
import torch
import base64
import time
from server import PromptServer
def convert_mask_to_image(mask: torch.Tensor):
"""
Make mask have the expected amount of dims (4) and channels (3) to be recognized as an image.
"""
mask = mask.unsqueeze(-1)
mask = torch.cat([mask]*3, dim=-1)
mask = torch.cat([mask] * 3, dim=-1)
return mask
async def handle_bfl_synchronous_operation(
operation: SynchronousOperation,
timeout_bfl_calls=360,
node_id: Union[str, None] = None,
):
response_api: BFLFluxProGenerateResponse = await operation.execute()
return await _poll_until_generated(
response_api.polling_url, timeout=timeout_bfl_calls, node_id=node_id
)
async def _poll_until_generated(
polling_url: str, timeout=360, node_id: Union[str, None] = None
):
# used bfl-comfy-nodes to verify code implementation:
# https://github.com/black-forest-labs/bfl-comfy-nodes/tree/main
start_time = time.time()
retries_404 = 0
max_retries_404 = 5
retry_404_seconds = 2
retry_202_seconds = 2
retry_pending_seconds = 1
async with aiohttp.ClientSession() as session:
# NOTE: should True loop be replaced with checking if workflow has been interrupted?
while True:
if node_id:
time_elapsed = time.time() - start_time
PromptServer.instance.send_progress_text(
f"Generating ({time_elapsed:.0f}s)", node_id
)
async with session.get(polling_url) as response:
if response.status == 200:
result = await response.json()
if result["status"] == BFLStatus.ready:
img_url = result["result"]["sample"]
if node_id:
PromptServer.instance.send_progress_text(
f"Result URL: {img_url}", node_id
)
async with session.get(img_url) as img_resp:
return process_image_response(await img_resp.content.read())
elif result["status"] in [
BFLStatus.request_moderated,
BFLStatus.content_moderated,
]:
status = result["status"]
raise Exception(
f"BFL API did not return an image due to: {status}."
)
elif result["status"] == BFLStatus.error:
raise Exception(f"BFL API encountered an error: {result}.")
elif result["status"] == BFLStatus.pending:
await asyncio.sleep(retry_pending_seconds)
continue
elif response.status == 404:
if retries_404 < max_retries_404:
retries_404 += 1
await asyncio.sleep(retry_404_seconds)
continue
raise Exception(
f"BFL API could not find task after {max_retries_404} tries."
)
elif response.status == 202:
await asyncio.sleep(retry_202_seconds)
elif time.time() - start_time > timeout:
raise Exception(
f"BFL API experienced a timeout; could not return request under {timeout} seconds."
)
else:
raise Exception(f"BFL API encountered an error: {response.json()}")
def convert_image_to_base64(image: torch.Tensor):
scaled_image = downscale_image_tensor(image, total_pixels=2048 * 2048)
# remove batch dimension if present
if len(scaled_image.shape) > 3:
scaled_image = scaled_image[0]
image_np = (scaled_image.numpy() * 255).astype(np.uint8)
img = Image.fromarray(image_np)
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format="PNG")
return base64.b64encode(img_byte_arr.getvalue()).decode()
class FluxProUltraImageNode(IO.ComfyNode):
"""
Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.
@@ -158,7 +65,9 @@ class FluxProUltraImageNode(IO.ComfyNode):
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
tooltip="Whether to perform upsampling on the prompt. "
"If active, automatically modifies the prompt for more creative generation, "
"but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Int.Input(
"seed",
@@ -220,22 +129,19 @@ class FluxProUltraImageNode(IO.ComfyNode):
cls,
prompt: str,
aspect_ratio: str,
prompt_upsampling=False,
raw=False,
seed=0,
image_prompt=None,
image_prompt_strength=0.1,
prompt_upsampling: bool = False,
raw: bool = False,
seed: int = 0,
image_prompt: Optional[torch.Tensor] = None,
image_prompt_strength: float = 0.1,
) -> IO.NodeOutput:
if image_prompt is None:
validate_string(prompt, strip_whitespace=False)
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path="/proxy/bfl/flux-pro-1.1-ultra/generate",
method=HttpMethod.POST,
request_model=BFLFluxProUltraGenerateRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxProUltraGenerateRequest(
initial_response = await sync_op(
cls,
ApiEndpoint(path="/proxy/bfl/flux-pro-1.1-ultra/generate", method="POST"),
response_model=BFLFluxProGenerateResponse,
data=BFLFluxProUltraGenerateRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
seed=seed,
@@ -247,22 +153,26 @@ class FluxProUltraImageNode(IO.ComfyNode):
maximum_ratio_str=cls.MAXIMUM_RATIO_STR,
),
raw=raw,
image_prompt=(
image_prompt
if image_prompt is None
else convert_image_to_base64(image_prompt)
),
image_prompt_strength=(
None if image_prompt is None else round(image_prompt_strength, 2)
),
image_prompt=(image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)),
image_prompt_strength=(None if image_prompt is None else round(image_prompt_strength, 2)),
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
response = await poll_op(
cls,
ApiEndpoint(initial_response.polling_url),
response_model=BFLFluxStatusResponse,
status_extractor=lambda r: r.status,
progress_extractor=lambda r: r.progress,
completed_statuses=[BFLStatus.ready],
failed_statuses=[
BFLStatus.request_moderated,
BFLStatus.content_moderated,
BFLStatus.error,
BFLStatus.task_not_found,
],
queued_statuses=[],
)
return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class FluxKontextProImageNode(IO.ComfyNode):
@@ -347,7 +257,7 @@ class FluxKontextProImageNode(IO.ComfyNode):
aspect_ratio: str,
guidance: float,
steps: int,
input_image: Optional[torch.Tensor]=None,
input_image: Optional[torch.Tensor] = None,
seed=0,
prompt_upsampling=False,
) -> IO.NodeOutput:
@@ -360,33 +270,36 @@ class FluxKontextProImageNode(IO.ComfyNode):
)
if input_image is None:
validate_string(prompt, strip_whitespace=False)
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path=cls.BFL_PATH,
method=HttpMethod.POST,
request_model=BFLFluxKontextProGenerateRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxKontextProGenerateRequest(
initial_response = await sync_op(
cls,
ApiEndpoint(path=cls.BFL_PATH, method="POST"),
response_model=BFLFluxProGenerateResponse,
data=BFLFluxKontextProGenerateRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
guidance=round(guidance, 1),
steps=steps,
seed=seed,
aspect_ratio=aspect_ratio,
input_image=(
input_image
if input_image is None
else convert_image_to_base64(input_image)
)
input_image=(input_image if input_image is None else tensor_to_base64_string(input_image)),
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
response = await poll_op(
cls,
ApiEndpoint(initial_response.polling_url),
response_model=BFLFluxStatusResponse,
status_extractor=lambda r: r.status,
progress_extractor=lambda r: r.progress,
completed_statuses=[BFLStatus.ready],
failed_statuses=[
BFLStatus.request_moderated,
BFLStatus.content_moderated,
BFLStatus.error,
BFLStatus.task_not_found,
],
queued_statuses=[],
)
return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class FluxKontextMaxImageNode(FluxKontextProImageNode):
@@ -422,7 +335,9 @@ class FluxProImageNode(IO.ComfyNode):
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
tooltip="Whether to perform upsampling on the prompt. "
"If active, automatically modifies the prompt for more creative generation, "
"but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Int.Input(
"width",
@@ -481,20 +396,15 @@ class FluxProImageNode(IO.ComfyNode):
image_prompt=None,
# image_prompt_strength=0.1,
) -> IO.NodeOutput:
image_prompt = (
image_prompt
if image_prompt is None
else convert_image_to_base64(image_prompt)
)
operation = SynchronousOperation(
endpoint=ApiEndpoint(
image_prompt = image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/bfl/flux-pro-1.1/generate",
method=HttpMethod.POST,
request_model=BFLFluxProGenerateRequest,
response_model=BFLFluxProGenerateResponse,
method="POST",
),
request=BFLFluxProGenerateRequest(
response_model=BFLFluxProGenerateResponse,
data=BFLFluxProGenerateRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
width=width,
@@ -502,13 +412,23 @@ class FluxProImageNode(IO.ComfyNode):
seed=seed,
image_prompt=image_prompt,
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
response = await poll_op(
cls,
ApiEndpoint(initial_response.polling_url),
response_model=BFLFluxStatusResponse,
status_extractor=lambda r: r.status,
progress_extractor=lambda r: r.progress,
completed_statuses=[BFLStatus.ready],
failed_statuses=[
BFLStatus.request_moderated,
BFLStatus.content_moderated,
BFLStatus.error,
BFLStatus.task_not_found,
],
queued_statuses=[],
)
return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class FluxProExpandNode(IO.ComfyNode):
@@ -534,7 +454,9 @@ class FluxProExpandNode(IO.ComfyNode):
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
tooltip="Whether to perform upsampling on the prompt. "
"If active, automatically modifies the prompt for more creative generation, "
"but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Int.Input(
"top",
@@ -610,16 +532,11 @@ class FluxProExpandNode(IO.ComfyNode):
guidance: float,
seed=0,
) -> IO.NodeOutput:
image = convert_image_to_base64(image)
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path="/proxy/bfl/flux-pro-1.0-expand/generate",
method=HttpMethod.POST,
request_model=BFLFluxExpandImageRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxExpandImageRequest(
initial_response = await sync_op(
cls,
ApiEndpoint(path="/proxy/bfl/flux-pro-1.0-expand/generate", method="POST"),
response_model=BFLFluxProGenerateResponse,
data=BFLFluxExpandImageRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
top=top,
@@ -629,16 +546,25 @@ class FluxProExpandNode(IO.ComfyNode):
steps=steps,
guidance=guidance,
seed=seed,
image=image,
image=tensor_to_base64_string(image),
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
response = await poll_op(
cls,
ApiEndpoint(initial_response.polling_url),
response_model=BFLFluxStatusResponse,
status_extractor=lambda r: r.status,
progress_extractor=lambda r: r.progress,
completed_statuses=[BFLStatus.ready],
failed_statuses=[
BFLStatus.request_moderated,
BFLStatus.content_moderated,
BFLStatus.error,
BFLStatus.task_not_found,
],
queued_statuses=[],
)
return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class FluxProFillNode(IO.ComfyNode):
@@ -665,7 +591,9 @@ class FluxProFillNode(IO.ComfyNode):
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
tooltip="Whether to perform upsampling on the prompt. "
"If active, automatically modifies the prompt for more creative generation, "
"but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Float.Input(
"guidance",
@@ -712,272 +640,37 @@ class FluxProFillNode(IO.ComfyNode):
) -> IO.NodeOutput:
# prepare mask
mask = resize_mask_to_image(mask, image)
mask = convert_image_to_base64(convert_mask_to_image(mask))
# make sure image will have alpha channel removed
image = convert_image_to_base64(image[:, :, :, :3])
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path="/proxy/bfl/flux-pro-1.0-fill/generate",
method=HttpMethod.POST,
request_model=BFLFluxFillImageRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxFillImageRequest(
mask = tensor_to_base64_string(convert_mask_to_image(mask))
initial_response = await sync_op(
cls,
ApiEndpoint(path="/proxy/bfl/flux-pro-1.0-fill/generate", method="POST"),
response_model=BFLFluxProGenerateResponse,
data=BFLFluxFillImageRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
steps=steps,
guidance=guidance,
seed=seed,
image=image,
image=tensor_to_base64_string(image[:, :, :, :3]), # make sure image will have alpha channel removed
mask=mask,
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
class FluxProCannyNode(IO.ComfyNode):
"""
Generate image using a control image (canny).
"""
@classmethod
def define_schema(cls) -> IO.Schema:
return IO.Schema(
node_id="FluxProCannyNode",
display_name="Flux.1 Canny Control Image",
category="api node/image/BFL",
description=cleandoc(cls.__doc__ or ""),
inputs=[
IO.Image.Input("control_image"),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt for the image generation",
),
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Float.Input(
"canny_low_threshold",
default=0.1,
min=0.01,
max=0.99,
step=0.01,
tooltip="Low threshold for Canny edge detection; ignored if skip_processing is True",
),
IO.Float.Input(
"canny_high_threshold",
default=0.4,
min=0.01,
max=0.99,
step=0.01,
tooltip="High threshold for Canny edge detection; ignored if skip_processing is True",
),
IO.Boolean.Input(
"skip_preprocessing",
default=False,
tooltip="Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.",
),
IO.Float.Input(
"guidance",
default=30,
min=1,
max=100,
tooltip="Guidance strength for the image generation process",
),
IO.Int.Input(
"steps",
default=50,
min=15,
max=50,
tooltip="Number of steps for the image generation process",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="The random seed used for creating the noise.",
),
response = await poll_op(
cls,
ApiEndpoint(initial_response.polling_url),
response_model=BFLFluxStatusResponse,
status_extractor=lambda r: r.status,
progress_extractor=lambda r: r.progress,
completed_statuses=[BFLStatus.ready],
failed_statuses=[
BFLStatus.request_moderated,
BFLStatus.content_moderated,
BFLStatus.error,
BFLStatus.task_not_found,
],
outputs=[IO.Image.Output()],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
queued_statuses=[],
)
@classmethod
async def execute(
cls,
control_image: torch.Tensor,
prompt: str,
prompt_upsampling: bool,
canny_low_threshold: float,
canny_high_threshold: float,
skip_preprocessing: bool,
steps: int,
guidance: float,
seed=0,
) -> IO.NodeOutput:
control_image = convert_image_to_base64(control_image[:, :, :, :3])
preprocessed_image = None
# scale canny threshold between 0-500, to match BFL's API
def scale_value(value: float, min_val=0, max_val=500):
return min_val + value * (max_val - min_val)
canny_low_threshold = int(round(scale_value(canny_low_threshold)))
canny_high_threshold = int(round(scale_value(canny_high_threshold)))
if skip_preprocessing:
preprocessed_image = control_image
control_image = None
canny_low_threshold = None
canny_high_threshold = None
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path="/proxy/bfl/flux-pro-1.0-canny/generate",
method=HttpMethod.POST,
request_model=BFLFluxCannyImageRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxCannyImageRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
steps=steps,
guidance=guidance,
seed=seed,
control_image=control_image,
canny_low_threshold=canny_low_threshold,
canny_high_threshold=canny_high_threshold,
preprocessed_image=preprocessed_image,
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
class FluxProDepthNode(IO.ComfyNode):
"""
Generate image using a control image (depth).
"""
@classmethod
def define_schema(cls) -> IO.Schema:
return IO.Schema(
node_id="FluxProDepthNode",
display_name="Flux.1 Depth Control Image",
category="api node/image/BFL",
description=cleandoc(cls.__doc__ or ""),
inputs=[
IO.Image.Input("control_image"),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt for the image generation",
),
IO.Boolean.Input(
"prompt_upsampling",
default=False,
tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
),
IO.Boolean.Input(
"skip_preprocessing",
default=False,
tooltip="Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.",
),
IO.Float.Input(
"guidance",
default=15,
min=1,
max=100,
tooltip="Guidance strength for the image generation process",
),
IO.Int.Input(
"steps",
default=50,
min=15,
max=50,
tooltip="Number of steps for the image generation process",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="The random seed used for creating the noise.",
),
],
outputs=[IO.Image.Output()],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
)
@classmethod
async def execute(
cls,
control_image: torch.Tensor,
prompt: str,
prompt_upsampling: bool,
skip_preprocessing: bool,
steps: int,
guidance: float,
seed=0,
) -> IO.NodeOutput:
control_image = convert_image_to_base64(control_image[:,:,:,:3])
preprocessed_image = None
if skip_preprocessing:
preprocessed_image = control_image
control_image = None
operation = SynchronousOperation(
endpoint=ApiEndpoint(
path="/proxy/bfl/flux-pro-1.0-depth/generate",
method=HttpMethod.POST,
request_model=BFLFluxDepthImageRequest,
response_model=BFLFluxProGenerateResponse,
),
request=BFLFluxDepthImageRequest(
prompt=prompt,
prompt_upsampling=prompt_upsampling,
steps=steps,
guidance=guidance,
seed=seed,
control_image=control_image,
preprocessed_image=preprocessed_image,
),
auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org,
},
)
output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
return IO.NodeOutput(output_image)
return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class BFLExtension(ComfyExtension):
@@ -990,8 +683,6 @@ class BFLExtension(ComfyExtension):
FluxKontextMaxImageNode,
FluxProExpandNode,
FluxProFillNode,
FluxProCannyNode,
FluxProDepthNode,
]