Spaces: Running on Zero

Commit: add deps

This view is limited to 50 files because the commit contains too many changes.
- .gradio/certificate.pem +31 -0
- __init__.py +0 -0
- app.py +29 -15
- custom_nodes/ComfyUI-KJNodes-main/.gitignore +11 -0
- custom_nodes/ComfyUI-KJNodes-main/LICENSE +674 -0
- custom_nodes/ComfyUI-KJNodes-main/README.md +65 -0
- custom_nodes/ComfyUI-KJNodes-main/__init__.py +227 -0
- custom_nodes/ComfyUI-KJNodes-main/config.json +3 -0
- custom_nodes/ComfyUI-KJNodes-main/custom_dimensions_example.json +22 -0
- custom_nodes/ComfyUI-KJNodes-main/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png +0 -0
- custom_nodes/ComfyUI-KJNodes-main/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png +0 -0
- custom_nodes/ComfyUI-KJNodes-main/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json +1188 -0
- custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors +3 -0
- custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors +3 -0
- custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors +3 -0
- custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors +3 -0
- custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_loras.txt +4 -0
- custom_nodes/ComfyUI-KJNodes-main/kjweb_async/marked.min.js +6 -0
- custom_nodes/ComfyUI-KJNodes-main/kjweb_async/protovis.min.js +0 -0
- custom_nodes/ComfyUI-KJNodes-main/kjweb_async/purify.min.js +3 -0
- custom_nodes/ComfyUI-KJNodes-main/kjweb_async/svg-path-properties.min.js +2 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/audioscheduler_nodes.py +251 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/batchcrop_nodes.py +757 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/curve_nodes.py +1561 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/image_nodes.py +0 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/intrinsic_lora_nodes.py +115 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/mask_nodes.py +1397 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/model_optimization_nodes.py +1179 -0
- custom_nodes/ComfyUI-KJNodes-main/nodes/nodes.py +0 -0
- custom_nodes/ComfyUI-KJNodes-main/pyproject.toml +15 -0
- custom_nodes/ComfyUI-KJNodes-main/requirements.txt +7 -0
- custom_nodes/ComfyUI-KJNodes-main/utility/fluid.py +67 -0
- custom_nodes/ComfyUI-KJNodes-main/utility/magictex.py +95 -0
- custom_nodes/ComfyUI-KJNodes-main/utility/numerical.py +25 -0
- custom_nodes/ComfyUI-KJNodes-main/utility/utility.py +39 -0
- custom_nodes/ComfyUI-KJNodes-main/web/green.png +0 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/appearance.js +23 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/browserstatus.js +55 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/contextmenu.js +147 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/fast_preview.js +95 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/help_popup.js +326 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/jsnodes.js +374 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/point_editor.js +736 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/setgetnodes.js +564 -0
- custom_nodes/ComfyUI-KJNodes-main/web/js/spline_editor.js +866 -0
- custom_nodes/ComfyUI-KJNodes-main/web/red.png +0 -0
- custom_nodes/ComfyUI-essentials-main/.gitignore +6 -0
- custom_nodes/ComfyUI-essentials-main/LICENSE +21 -0
- custom_nodes/ComfyUI-essentials-main/README.md +49 -0
- custom_nodes/ComfyUI-essentials-main/__init__.py +36 -0
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
__init__.py
ADDED
File without changes
app.py
CHANGED
@@ -1,15 +1,32 @@
 import os
 import sys
+
+sys.path.insert(0, os.path.dirname(__file__))
+
 from typing import Any, Mapping, Sequence, Union
 
 import gradio as gr
+import spaces
 import torch
 from huggingface_hub import hf_hub_download
+
 from nodes import NODE_CLASS_MAPPINGS
-from comfy import model_management
 
-
-
+hf_hub_download(
+    repo_id="uwg/upscaler",
+    filename="ESRGAN/4x_NMKD-Siax_200k.pth",
+    local_dir="models/upscale_models",
+)
+hf_hub_download(
+    repo_id="ezioruan/inswapper_128.onnx",
+    filename="inswapper_128.onnx",
+    local_dir="models/insightface",
+)
+hf_hub_download(
+    repo_id="ziixzz/codeformer-v0.1.0.pth",
+    filename="codeformer-v0.1.0.pth",
+    local_dir="models/facerestore_models",
+)
 
 
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
@@ -76,11 +93,8 @@ def add_extra_model_paths() -> None:
     """
     Parse the optional extra_model_paths.yaml file and add the parsed paths to the sys.path.
     """
-    try:
-        from main import load_extra_path_config
-    except ImportError:
-        print("Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead.")
-        from utils.extra_config import load_extra_path_config
+    from utils.extra_config import load_extra_path_config
+
    extra_model_paths = find_path("extra_model_paths.yaml")
 
    if extra_model_paths is not None:
@@ -100,9 +114,11 @@ def import_custom_nodes() -> None:
    creates a PromptQueue, and initializes the custom nodes.
    """
    import asyncio
+
    import execution
-    from nodes import init_extra_nodes
    import server
+    from nodes import init_extra_nodes
+
    # Creating a new event loop and setting it as the default loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
@@ -115,6 +131,7 @@
    init_extra_nodes()
 
 
+@spaces.GPU(duration=360)
 def advance_blur(input_image):
    import_custom_nodes()
    with torch.inference_mode():
@@ -136,7 +153,7 @@
 
        upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
        upscale_model = upscalemodelloader.load_model(
-            model_name="4x_NMKD-Siax_200k.pth"
+            model_name="ESRGAN/4x_NMKD-Siax_200k.pth"
        )
 
        reactorbuildfacemodel = NODE_CLASS_MAPPINGS["ReActorBuildFaceModel"]()
@@ -214,7 +231,7 @@ if __name__ == "__main__":
        with gr.Column():
            input_image = gr.Image(label="Input Image", type="filepath")
            generate_btn = gr.Button("Generate")
-
+
        with gr.Column():
            # The output image
            output_image = gr.Image(label="Generated Image")
@@ -222,9 +239,6 @@
    # When clicking the button, it will trigger the `generate_image` function, with the respective inputs
    # and the output an image
    generate_btn.click(
-        fn=advance_blur,
-        inputs=[input_image],
-        outputs=[output_image]
+        fn=advance_blur, inputs=[input_image], outputs=[output_image]
    )
    app.launch(share=True)
-
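Read together, the app.py changes follow the usual ZeroGPU recipe: model weights are fetched with hf_hub_download at import time (on CPU, so no GPU quota is consumed), and only the inference function is wrapped in @spaces.GPU, which requests a GPU for at most `duration` seconds per call. The sketch below is illustrative rather than the Space's literal code; the repo_id/filename mirror the commit, but the function body is a hypothetical stand-in for the ReActor/CodeFormer/upscaler node calls in app.py.

# Minimal sketch of the ZeroGPU pattern this commit adopts (assumptions noted).
import spaces
import torch
from huggingface_hub import hf_hub_download

# Runs once at startup, on CPU -- downloading weights consumes no GPU time.
upscaler_path = hf_hub_download(
    repo_id="uwg/upscaler",
    filename="ESRGAN/4x_NMKD-Siax_200k.pth",
    local_dir="models/upscale_models",
)

@spaces.GPU(duration=360)  # borrow a GPU for at most 360 s per invocation
def advance_blur(input_image):
    with torch.inference_mode():  # no autograd bookkeeping during inference
        ...  # hypothetical stand-in: build face model, swap, restore, upscale
        return input_image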
custom_nodes/ComfyUI-KJNodes-main/.gitignore
ADDED
@@ -0,0 +1,11 @@
+__pycache__
+/venv
+*.code-workspace
+.history
+.vscode
+*.ckpt
+*.pth
+types
+models
+jsconfig.json
+custom_dimensions.json
custom_nodes/ComfyUI-KJNodes-main/LICENSE
ADDED
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+Version 3, 29 June 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+[remainder of the file is the standard, unmodified GNU GPL v3 text; 674 lines in total]
custom_nodes/ComfyUI-KJNodes-main/README.md
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+# KJNodes for ComfyUI
+
+Various quality-of-life and masking-related nodes and scripts for ComfyUI, made by combining the functionality of existing nodes.
+
+I know I'm bad at documentation, especially for this project, which has grown from random practice nodes into... too many lines in one file.
+I have, however, started adding descriptions to the nodes themselves: there's a small ? you can click for info on what the node does.
+This is still a work in progress, like everything else.
+
+# Installation
+1. Clone this repo into the `custom_nodes` folder.
+2. Install dependencies: `pip install -r requirements.txt`,
+   or, if you use the portable install, run this in the ComfyUI_windows_portable folder:
+
+   `python_embeded\python.exe -m pip install -r ComfyUI\custom_nodes\ComfyUI-KJNodes\requirements.txt`
+
+
+## Javascript
+
+### browserstatus.js
+Sets the favicon to a green circle when nothing is processing and to red while processing, and shows the progress percentage and the length of your queue.
+Off by default; it needs to be enabled from the options, and it overrides the Custom-Scripts favicon when enabled.
+
+## Nodes:
+
+### Set/Get
+
+Javascript nodes to set and get constants, to reduce unnecessary connection lines. They take in and return anything; these are purely visual nodes.
+The right-click menu of these nodes now has an option to visualize the paths, as well as an option to jump to the corresponding node on the other end.
+
+**Known limitations**:
+- Will not work with any node that dynamically sets its outputs, such as reroute or another Set/Get node
+- Will not work when directly connected to a bypassed node
+- Other possible conflicts with javascript-based nodes.
+
+### ColorToMask
+
+RGB color value to mask; works with batches and AnimateDiff.
+
+### ConditioningMultiCombine
+
+Combines any number of conditionings; saves space.
+
+### ConditioningSetMaskAndCombine
+
+Masks and combines two sets of conditionings; saves space.
+
+### GrowMaskWithBlur
+
+Grows or shrinks a mask (with negative values), with an option to invert the input; returns both the mask and the inverted mask. It can additionally blur the mask; blurring is a slow operation, especially with big batches.
+
+### RoundMask
+
+
+
+### WidgetToString
+Outputs the value of a widget on any node as a string.
+
+
+Enable node ID display from the Manager menu to get the ID of the node you want to read a widget from:
+
+
+Use the node ID of the target node, and add the name of the widget to read from:
+
+
+Recreating or reloading the target node will change its ID, and the WidgetToString node will no longer be able to find it until you update the node ID value with the new ID.
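
The nodes described in this README all follow ComfyUI's standard custom-node interface: a class exposing `INPUT_TYPES`, `RETURN_TYPES`, and the name of the method to call. A minimal sketch of that interface (the `EchoString` node below is a hypothetical example, not part of KJNodes):

```python
# Minimal sketch of the ComfyUI custom-node interface that nodes in this
# pack implement. EchoString is a hypothetical example, not a KJNodes node.
class EchoString:
    @classmethod
    def INPUT_TYPES(cls):
        # "required" maps input names to (type, options) pairs
        return {"required": {"text": ("STRING", {"default": "hello"})}}

    RETURN_TYPES = ("STRING",)   # one output slot of type STRING
    FUNCTION = "run"             # method ComfyUI calls when the node executes
    CATEGORY = "examples"

    def run(self, text):
        return (text,)           # outputs are always returned as a tuple
```

Registering such classes with ComfyUI is exactly what the `__init__.py` added below does for the whole pack.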
custom_nodes/ComfyUI-KJNodes-main/__init__.py
ADDED
@@ -0,0 +1,227 @@
+from .nodes.nodes import *
+from .nodes.curve_nodes import *
+from .nodes.batchcrop_nodes import *
+from .nodes.audioscheduler_nodes import *
+from .nodes.image_nodes import *
+from .nodes.intrinsic_lora_nodes import *
+from .nodes.mask_nodes import *
+from .nodes.model_optimization_nodes import *
+NODE_CONFIG = {
+    #constants
+    "BOOLConstant": {"class": BOOLConstant, "name": "BOOL Constant"},
+    "INTConstant": {"class": INTConstant, "name": "INT Constant"},
+    "FloatConstant": {"class": FloatConstant, "name": "Float Constant"},
+    "StringConstant": {"class": StringConstant, "name": "String Constant"},
+    "StringConstantMultiline": {"class": StringConstantMultiline, "name": "String Constant Multiline"},
+    #conditioning
+    "ConditioningMultiCombine": {"class": ConditioningMultiCombine, "name": "Conditioning Multi Combine"},
+    "ConditioningSetMaskAndCombine": {"class": ConditioningSetMaskAndCombine, "name": "ConditioningSetMaskAndCombine"},
+    "ConditioningSetMaskAndCombine3": {"class": ConditioningSetMaskAndCombine3, "name": "ConditioningSetMaskAndCombine3"},
+    "ConditioningSetMaskAndCombine4": {"class": ConditioningSetMaskAndCombine4, "name": "ConditioningSetMaskAndCombine4"},
+    "ConditioningSetMaskAndCombine5": {"class": ConditioningSetMaskAndCombine5, "name": "ConditioningSetMaskAndCombine5"},
+    "CondPassThrough": {"class": CondPassThrough},
+    #masking
+    "DownloadAndLoadCLIPSeg": {"class": DownloadAndLoadCLIPSeg, "name": "(Down)load CLIPSeg"},
+    "BatchCLIPSeg": {"class": BatchCLIPSeg, "name": "Batch CLIPSeg"},
+    "ColorToMask": {"class": ColorToMask, "name": "Color To Mask"},
+    "CreateGradientMask": {"class": CreateGradientMask, "name": "Create Gradient Mask"},
+    "CreateTextMask": {"class": CreateTextMask, "name": "Create Text Mask"},
+    "CreateAudioMask": {"class": CreateAudioMask, "name": "Create Audio Mask"},
+    "CreateFadeMask": {"class": CreateFadeMask, "name": "Create Fade Mask"},
+    "CreateFadeMaskAdvanced": {"class": CreateFadeMaskAdvanced, "name": "Create Fade Mask Advanced"},
+    "CreateFluidMask": {"class": CreateFluidMask, "name": "Create Fluid Mask"},
+    "CreateShapeMask": {"class": CreateShapeMask, "name": "Create Shape Mask"},
+    "CreateVoronoiMask": {"class": CreateVoronoiMask, "name": "Create Voronoi Mask"},
+    "CreateMagicMask": {"class": CreateMagicMask, "name": "Create Magic Mask"},
+    "GetMaskSizeAndCount": {"class": GetMaskSizeAndCount, "name": "Get Mask Size & Count"},
+    "GrowMaskWithBlur": {"class": GrowMaskWithBlur, "name": "Grow Mask With Blur"},
+    "MaskBatchMulti": {"class": MaskBatchMulti, "name": "Mask Batch Multi"},
+    "OffsetMask": {"class": OffsetMask, "name": "Offset Mask"},
+    "RemapMaskRange": {"class": RemapMaskRange, "name": "Remap Mask Range"},
+    "ResizeMask": {"class": ResizeMask, "name": "Resize Mask"},
+    "RoundMask": {"class": RoundMask, "name": "Round Mask"},
+    "SeparateMasks": {"class": SeparateMasks, "name": "Separate Masks"},
+    #images
+    "AddLabel": {"class": AddLabel, "name": "Add Label"},
+    "ColorMatch": {"class": ColorMatch, "name": "Color Match"},
+    "ImageTensorList": {"class": ImageTensorList, "name": "Image Tensor List"},
+    "CrossFadeImages": {"class": CrossFadeImages, "name": "Cross Fade Images"},
+    "CrossFadeImagesMulti": {"class": CrossFadeImagesMulti, "name": "Cross Fade Images Multi"},
+    "GetImagesFromBatchIndexed": {"class": GetImagesFromBatchIndexed, "name": "Get Images From Batch Indexed"},
+    "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image or Mask Range From Batch"},
+    "GetLatentRangeFromBatch": {"class": GetLatentRangeFromBatch, "name": "Get Latent Range From Batch"},
+    "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"},
+    "FastPreview": {"class": FastPreview, "name": "Fast Preview"},
+    "ImageAndMaskPreview": {"class": ImageAndMaskPreview},
+    "ImageAddMulti": {"class": ImageAddMulti, "name": "Image Add Multi"},
+    "ImageBatchMulti": {"class": ImageBatchMulti, "name": "Image Batch Multi"},
+    "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving},
+    "ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"},
+    "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"},
+    "ImageConcatFromBatch": {"class": ImageConcatFromBatch, "name": "Image Concatenate From Batch"},
+    "ImageConcatMulti": {"class": ImageConcatMulti, "name": "Image Concatenate Multi"},
+    "ImageCropByMask": {"class": ImageCropByMask, "name": "Image Crop By Mask"},
+    "ImageCropByMaskAndResize": {"class": ImageCropByMaskAndResize, "name": "Image Crop By Mask And Resize"},
+    "ImageCropByMaskBatch": {"class": ImageCropByMaskBatch, "name": "Image Crop By Mask Batch"},
+    "ImageUncropByMask": {"class": ImageUncropByMask, "name": "Image Uncrop By Mask"},
+    "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"},
+    "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"},
+    "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"},
+    "ImageGridtoBatch": {"class": ImageGridtoBatch, "name": "Image Grid To Batch"},
+    "ImageNoiseAugmentation": {"class": ImageNoiseAugmentation, "name": "Image Noise Augmentation"},
+    "ImageNormalize_Neg1_To_1": {"class": ImageNormalize_Neg1_To_1, "name": "Image Normalize -1 to 1"},
+    "ImagePass": {"class": ImagePass},
+    "ImagePadKJ": {"class": ImagePadKJ, "name": "ImagePad KJ"},
+    "ImagePadForOutpaintMasked": {"class": ImagePadForOutpaintMasked, "name": "Image Pad For Outpaint Masked"},
+    "ImagePadForOutpaintTargetSize": {"class": ImagePadForOutpaintTargetSize, "name": "Image Pad For Outpaint Target Size"},
+    "ImagePrepForICLora": {"class": ImagePrepForICLora, "name": "Image Prep For ICLora"},
+    "ImageResizeKJ": {"class": ImageResizeKJ, "name": "Resize Image"},
+    "ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"},
+    "InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"},
+    "InsertLatentToIndexed": {"class": InsertLatentToIndex, "name": "Insert Latent To Index"},
+    "LoadAndResizeImage": {"class": LoadAndResizeImage, "name": "Load & Resize Image"},
+    "LoadImagesFromFolderKJ": {"class": LoadImagesFromFolderKJ, "name": "Load Images From Folder (KJ)"},
+    "MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"},
+    "PreviewAnimation": {"class": PreviewAnimation, "name": "Preview Animation"},
+    "RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"},
+    "ReverseImageBatch": {"class": ReverseImageBatch, "name": "Reverse Image Batch"},
+    "ReplaceImagesInBatch": {"class": ReplaceImagesInBatch, "name": "Replace Images In Batch"},
+    "SaveImageWithAlpha": {"class": SaveImageWithAlpha, "name": "Save Image With Alpha"},
+    "SaveImageKJ": {"class": SaveImageKJ, "name": "Save Image KJ"},
+    "ShuffleImageBatch": {"class": ShuffleImageBatch, "name": "Shuffle Image Batch"},
+    "SplitImageChannels": {"class": SplitImageChannels, "name": "Split Image Channels"},
+    "TransitionImagesMulti": {"class": TransitionImagesMulti, "name": "Transition Images Multi"},
+    "TransitionImagesInBatch": {"class": TransitionImagesInBatch, "name": "Transition Images In Batch"},
+    #batch cropping
+    "BatchCropFromMask": {"class": BatchCropFromMask, "name": "Batch Crop From Mask"},
+    "BatchCropFromMaskAdvanced": {"class": BatchCropFromMaskAdvanced, "name": "Batch Crop From Mask Advanced"},
+    "FilterZeroMasksAndCorrespondingImages": {"class": FilterZeroMasksAndCorrespondingImages},
+    "InsertImageBatchByIndexes": {"class": InsertImageBatchByIndexes, "name": "Insert Image Batch By Indexes"},
+    "BatchUncrop": {"class": BatchUncrop, "name": "Batch Uncrop"},
+    "BatchUncropAdvanced": {"class": BatchUncropAdvanced, "name": "Batch Uncrop Advanced"},
+    "SplitBboxes": {"class": SplitBboxes, "name": "Split Bboxes"},
+    "BboxToInt": {"class": BboxToInt, "name": "Bbox To Int"},
+    "BboxVisualize": {"class": BboxVisualize, "name": "Bbox Visualize"},
+    #noise
+    "GenerateNoise": {"class": GenerateNoise, "name": "Generate Noise"},
+    "FlipSigmasAdjusted": {"class": FlipSigmasAdjusted, "name": "Flip Sigmas Adjusted"},
+    "InjectNoiseToLatent": {"class": InjectNoiseToLatent, "name": "Inject Noise To Latent"},
+    "CustomSigmas": {"class": CustomSigmas, "name": "Custom Sigmas"},
+    #utility
+    "StringToFloatList": {"class": StringToFloatList, "name": "String to Float List"},
+    "WidgetToString": {"class": WidgetToString, "name": "Widget To String"},
+    "SaveStringKJ": {"class": SaveStringKJ, "name": "Save String KJ"},
+    "DummyOut": {"class": DummyOut, "name": "Dummy Out"},
+    "GetLatentsFromBatchIndexed": {"class": GetLatentsFromBatchIndexed, "name": "Get Latents From Batch Indexed"},
+    "ScaleBatchPromptSchedule": {"class": ScaleBatchPromptSchedule, "name": "Scale Batch Prompt Schedule"},
+    "CameraPoseVisualizer": {"class": CameraPoseVisualizer, "name": "Camera Pose Visualizer"},
+    "AppendStringsToList": {"class": AppendStringsToList, "name": "Append Strings To List"},
+    "JoinStrings": {"class": JoinStrings, "name": "Join Strings"},
+    "JoinStringMulti": {"class": JoinStringMulti, "name": "Join String Multi"},
+    "SomethingToString": {"class": SomethingToString, "name": "Something To String"},
+    "Sleep": {"class": Sleep, "name": "Sleep"},
+    "VRAM_Debug": {"class": VRAM_Debug, "name": "VRAM Debug"},
+    "EmptyLatentImagePresets": {"class": EmptyLatentImagePresets, "name": "Empty Latent Image Presets"},
+    "EmptyLatentImageCustomPresets": {"class": EmptyLatentImageCustomPresets, "name": "Empty Latent Image Custom Presets"},
+    "ModelPassThrough": {"class": ModelPassThrough, "name": "ModelPass"},
+    "ModelSaveKJ": {"class": ModelSaveKJ, "name": "Model Save KJ"},
+    "SetShakkerLabsUnionControlNetType": {"class": SetShakkerLabsUnionControlNetType, "name": "Set Shakker Labs Union ControlNet Type"},
+    "StyleModelApplyAdvanced": {"class": StyleModelApplyAdvanced, "name": "Style Model Apply Advanced"},
+    #audioscheduler stuff
+    "NormalizedAmplitudeToMask": {"class": NormalizedAmplitudeToMask},
+    "NormalizedAmplitudeToFloatList": {"class": NormalizedAmplitudeToFloatList},
+    "OffsetMaskByNormalizedAmplitude": {"class": OffsetMaskByNormalizedAmplitude},
+    "ImageTransformByNormalizedAmplitude": {"class": ImageTransformByNormalizedAmplitude},
+    "AudioConcatenate": {"class": AudioConcatenate},
+    #curve nodes
+    "SplineEditor": {"class": SplineEditor, "name": "Spline Editor"},
+    "CreateShapeImageOnPath": {"class": CreateShapeImageOnPath, "name": "Create Shape Image On Path"},
+    "CreateShapeMaskOnPath": {"class": CreateShapeMaskOnPath, "name": "Create Shape Mask On Path"},
+    "CreateTextOnPath": {"class": CreateTextOnPath, "name": "Create Text On Path"},
+    "CreateGradientFromCoords": {"class": CreateGradientFromCoords, "name": "Create Gradient From Coords"},
+    "CutAndDragOnPath": {"class": CutAndDragOnPath, "name": "Cut And Drag On Path"},
+    "GradientToFloat": {"class": GradientToFloat, "name": "Gradient To Float"},
+    "WeightScheduleExtend": {"class": WeightScheduleExtend, "name": "Weight Schedule Extend"},
+    "MaskOrImageToWeight": {"class": MaskOrImageToWeight, "name": "Mask Or Image To Weight"},
+    "WeightScheduleConvert": {"class": WeightScheduleConvert, "name": "Weight Schedule Convert"},
+    "FloatToMask": {"class": FloatToMask, "name": "Float To Mask"},
+    "FloatToSigmas": {"class": FloatToSigmas, "name": "Float To Sigmas"},
+    "SigmasToFloat": {"class": SigmasToFloat, "name": "Sigmas To Float"},
+    "PlotCoordinates": {"class": PlotCoordinates, "name": "Plot Coordinates"},
+    "InterpolateCoords": {"class": InterpolateCoords, "name": "Interpolate Coords"},
+    "PointsEditor": {"class": PointsEditor, "name": "Points Editor"},
+    #experimental
+    "StabilityAPI_SD3": {"class": StabilityAPI_SD3, "name": "Stability API SD3"},
+    "SoundReactive": {"class": SoundReactive, "name": "Sound Reactive"},
+    "StableZero123_BatchSchedule": {"class": StableZero123_BatchSchedule, "name": "Stable Zero123 Batch Schedule"},
+    "SV3D_BatchSchedule": {"class": SV3D_BatchSchedule, "name": "SV3D Batch Schedule"},
+    "LoadResAdapterNormalization": {"class": LoadResAdapterNormalization},
+    "Superprompt": {"class": Superprompt, "name": "Superprompt"},
+    "GLIGENTextBoxApplyBatchCoords": {"class": GLIGENTextBoxApplyBatchCoords},
+    "Intrinsic_lora_sampling": {"class": Intrinsic_lora_sampling, "name": "Intrinsic Lora Sampling"},
+    "CheckpointPerturbWeights": {"class": CheckpointPerturbWeights, "name": "CheckpointPerturbWeights"},
+    "Screencap_mss": {"class": Screencap_mss, "name": "Screencap mss"},
+    "WebcamCaptureCV2": {"class": WebcamCaptureCV2, "name": "Webcam Capture CV2"},
+    "DifferentialDiffusionAdvanced": {"class": DifferentialDiffusionAdvanced, "name": "Differential Diffusion Advanced"},
+    "FluxBlockLoraLoader": {"class": FluxBlockLoraLoader, "name": "Flux Block Lora Loader"},
+    "FluxBlockLoraSelect": {"class": FluxBlockLoraSelect, "name": "Flux Block Lora Select"},
+    "HunyuanVideoBlockLoraSelect": {"class": HunyuanVideoBlockLoraSelect, "name": "Hunyuan Video Block Lora Select"},
+    "CustomControlNetWeightsFluxFromList": {"class": CustomControlNetWeightsFluxFromList, "name": "Custom ControlNet Weights Flux From List"},
+    "CheckpointLoaderKJ": {"class": CheckpointLoaderKJ, "name": "CheckpointLoaderKJ"},
+    "DiffusionModelLoaderKJ": {"class": DiffusionModelLoaderKJ, "name": "Diffusion Model Loader KJ"},
+    "TorchCompileModelFluxAdvanced": {"class": TorchCompileModelFluxAdvanced, "name": "TorchCompileModelFluxAdvanced"},
+    "TorchCompileModelHyVideo": {"class": TorchCompileModelHyVideo, "name": "TorchCompileModelHyVideo"},
+    "TorchCompileVAE": {"class": TorchCompileVAE, "name": "TorchCompileVAE"},
+    "TorchCompileControlNet": {"class": TorchCompileControlNet, "name": "TorchCompileControlNet"},
+    "PatchModelPatcherOrder": {"class": PatchModelPatcherOrder, "name": "Patch Model Patcher Order"},
+    "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
+    "TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"},
+    "TorchCompileModelWanVideo": {"class": TorchCompileModelWanVideo, "name": "TorchCompileModelWanVideo"},
+    "PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Patch Sage Attention KJ"},
+    "LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"},
+    "VAELoaderKJ": {"class": VAELoaderKJ, "name": "VAELoader KJ"},
+    "ScheduledCFGGuidance": {"class": ScheduledCFGGuidance, "name": "Scheduled CFG Guidance"},
+    "ApplyRifleXRoPE_HunuyanVideo": {"class": ApplyRifleXRoPE_HunuyanVideo, "name": "Apply RifleXRoPE HunuyanVideo"},
+    "ApplyRifleXRoPE_WanVideo": {"class": ApplyRifleXRoPE_WanVideo, "name": "Apply RifleXRoPE WanVideo"},
+    "WanVideoTeaCacheKJ": {"class": WanVideoTeaCacheKJ, "name": "WanVideo Tea Cache (native)"},
+    "WanVideoEnhanceAVideoKJ": {"class": WanVideoEnhanceAVideoKJ, "name": "WanVideo Enhance A Video (native)"},
+    "SkipLayerGuidanceWanVideo": {"class": SkipLayerGuidanceWanVideo, "name": "Skip Layer Guidance WanVideo"},
+    "TimerNodeKJ": {"class": TimerNodeKJ, "name": "Timer Node KJ"},
+    "HunyuanVideoEncodeKeyframesToCond": {"class": HunyuanVideoEncodeKeyframesToCond, "name": "HunyuanVideo Encode Keyframes To Cond"},
+
+    #instance diffusion
+    "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
+    "AppendInstanceDiffusionTracking": {"class": AppendInstanceDiffusionTracking},
+    "DrawInstanceDiffusionTracking": {"class": DrawInstanceDiffusionTracking},
+}
+
+def generate_node_mappings(node_config):
+    # Build ComfyUI's two required registries from the single config dict:
+    # node key -> class, and node key -> human-readable display name
+    # (falling back to the class name when no "name" is given).
+    node_class_mappings = {}
+    node_display_name_mappings = {}
+
+    for node_name, node_info in node_config.items():
+        node_class_mappings[node_name] = node_info["class"]
+        node_display_name_mappings[node_name] = node_info.get("name", node_info["class"].__name__)
+
+    return node_class_mappings, node_display_name_mappings
+
+NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG)
+
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
+
+WEB_DIRECTORY = "./web"
+
+from aiohttp import web
+from server import PromptServer
+from pathlib import Path
+
+if hasattr(PromptServer, "instance"):
+    try:
+        # NOTE: we add an extra static path to avoid the comfy mechanism
+        # that loads every script in web.
+        PromptServer.instance.app.add_routes(
+            [web.static("/kjweb_async", (Path(__file__).parent.absolute() / "kjweb_async").as_posix())]
+        )
+    except Exception:
+        pass
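
The `NODE_CONFIG` / `generate_node_mappings` pattern above keeps the class registry and the display-name registry in one place. A standalone sketch of how the expansion behaves, using hypothetical stand-in classes:

```python
# Standalone illustration of the NODE_CONFIG -> mappings expansion above.
# NodeA and NodeB are hypothetical stand-ins for real node classes.
class NodeA: pass
class NodeB: pass

NODE_CONFIG = {
    "NodeA": {"class": NodeA, "name": "Node A (pretty name)"},
    "NodeB": {"class": NodeB},  # no "name": display name falls back to the class name
}

def generate_node_mappings(node_config):
    node_class_mappings = {k: v["class"] for k, v in node_config.items()}
    node_display_name_mappings = {k: v.get("name", v["class"].__name__)
                                  for k, v in node_config.items()}
    return node_class_mappings, node_display_name_mappings

classes, names = generate_node_mappings(NODE_CONFIG)
print(names)  # {'NodeA': 'Node A (pretty name)', 'NodeB': 'NodeB'}
```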
custom_nodes/ComfyUI-KJNodes-main/config.json
ADDED
@@ -0,0 +1,3 @@
+{
+    "sai_api_key": "your_api_key_here"
+}
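
The placeholder `sai_api_key` above is presumably read back by the `StabilityAPI_SD3` node registered in `__init__.py`; the exact lookup is not shown in this diff, so the sketch below is an assumption about how such a config would be loaded:

```python
import json
from pathlib import Path

# Hypothetical loader for the Stability API key stored in config.json;
# the real consumer (presumably StabilityAPI_SD3) may read it differently.
config_path = Path(__file__).parent / "config.json"
api_key = json.loads(config_path.read_text())["sai_api_key"]
if api_key == "your_api_key_here":
    raise ValueError("Set your Stability API key in config.json first")
```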
custom_nodes/ComfyUI-KJNodes-main/custom_dimensions_example.json
ADDED
@@ -0,0 +1,22 @@
+[
+    { "label": "SD", "value": "512x512" },
+    { "label": "HD", "value": "768x768" },
+    { "label": "Full HD", "value": "1024x1024" },
+    { "label": "4k", "value": "2048x2048" },
+    { "label": "SVD", "value": "1024x576" }
+]
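
Each preset's `value` above encodes dimensions as a `"WIDTHxHEIGHT"` string, so a consumer such as `EmptyLatentImageCustomPresets` would have to split it into integers. A sketch of that parsing (the consuming node's exact behavior is an assumption):

```python
import json

# Hypothetical parser for the custom-dimensions format above: each entry's
# "value" is a "WIDTHxHEIGHT" string split into two ints.
with open("custom_dimensions_example.json") as f:
    presets = json.load(f)

for preset in presets:
    width, height = (int(v) for v in preset["value"].split("x"))
    print(f'{preset["label"]}: {width} x {height}')
# SD: 512 x 512 ... SVD: 1024 x 576
```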
custom_nodes/ComfyUI-KJNodes-main/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png
ADDED
custom_nodes/ComfyUI-KJNodes-main/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png
ADDED
custom_nodes/ComfyUI-KJNodes-main/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json
ADDED
@@ -0,0 +1,1188 @@
+{
+  "last_node_id": 86,
+  "last_link_id": 144,
+  "nodes": [
+    {"id": 62, "type": "FluxGuidance", "pos": [-630, -170], "size": [317.4000244140625, 58], "flags": {}, "order": 13, "mode": 0,
+     "inputs": [{"name": "conditioning", "type": "CONDITIONING", "link": 82}],
+     "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [83], "slot_index": 0}],
+     "properties": {"Node name for S&R": "FluxGuidance"}, "widgets_values": [6]},
+    {"id": 51, "type": "KSamplerSelect", "pos": [-610, -480], "size": [315, 58], "flags": {}, "order": 0, "mode": 0,
+     "inputs": [], "outputs": [{"name": "SAMPLER", "type": "SAMPLER", "links": [61]}],
+     "properties": {"Node name for S&R": "KSamplerSelect"}, "widgets_values": ["euler"]},
+    {"id": 57, "type": "VAEDecodeTiled", "pos": [-200, 90], "size": [315, 150], "flags": {}, "order": 20, "mode": 0,
+     "inputs": [{"name": "samples", "type": "LATENT", "link": 142}, {"name": "vae", "type": "VAE", "link": 74}],
+     "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [105], "slot_index": 0}],
+     "properties": {"Node name for S&R": "VAEDecodeTiled"}, "widgets_values": [128, 64, 64, 8]},
+    {"id": 65, "type": "LoadImage", "pos": [-2212.498779296875, -632.4085083007812], "size": [315, 314], "flags": {}, "order": 1, "mode": 0,
+     "inputs": [], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [86], "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null}],
+     "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["Mona-Lisa-oil-wood-panel-Leonardo-da.webp", "image"]},
+    {"id": 64, "type": "VAEEncode", "pos": [-1336.7884521484375, -492.5806884765625], "size": [210, 46], "flags": {}, "order": 14, "mode": 0,
+     "inputs": [{"name": "pixels", "type": "IMAGE", "link": 144}, {"name": "vae", "type": "VAE", "link": 88}],
+     "outputs": [{"name": "LATENT", "type": "LATENT", "links": [137], "slot_index": 0}],
+     "properties": {"Node name for S&R": "VAEEncode"}, "widgets_values": []},
+    {"id": 44, "type": "UNETLoader", "pos": [-2373.55029296875, -193.91510009765625], "size": [459.56060791015625, 82], "flags": {}, "order": 2, "mode": 0,
+     "inputs": [], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [135], "slot_index": 0}],
+     "properties": {"Node name for S&R": "UNETLoader"}, "widgets_values": ["hyvideo\\hunyuan_video_720_fp8_e4m3fn.safetensors", "fp8_e4m3fn_fast"]},
+    {"id": 49, "type": "VAELoader", "pos": [-1876.39306640625, -35.19633865356445], "size": [433.7603454589844, 58.71116256713867], "flags": {}, "order": 3, "mode": 0,
+     "inputs": [], "outputs": [{"name": "VAE", "type": "VAE", "links": [74, 88], "slot_index": 0}],
+     "properties": {"Node name for S&R": "VAELoader"}, "widgets_values": ["hyvid\\hunyuan_video_vae_bf16.safetensors"]},
+    {"id": 47, "type": "DualCLIPLoader", "pos": [-2284.893798828125, 150.4042205810547], "size": [343.3958435058594, 106.86042785644531], "flags": {}, "order": 4, "mode": 0,
+     "inputs": [], "outputs": [{"name": "CLIP", "type": "CLIP", "links": [56], "slot_index": 0}],
+     "properties": {"Node name for S&R": "DualCLIPLoader"}, "widgets_values": ["clip_l.safetensors", "llava_llama3_fp16.safetensors", "hunyuan_video", "default"]},
+    {"id": 45, "type": "CLIPTextEncode", "pos": [-1839.1649169921875, 143.5203094482422], "size": [400, 200], "flags": {}, "order": 8, "mode": 0,
+     "inputs": [{"name": "clip", "type": "CLIP", "link": 56}],
+     "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [69, 82], "slot_index": 0}],
+     "properties": {"Node name for S&R": "CLIPTextEncode"}, "widgets_values": ["woman puts on sunglasses"]},
+    {"id": 53, "type": "EmptyHunyuanLatentVideo", "pos": [-1120, 90], "size": [315, 130], "flags": {}, "order": 10, "mode": 0,
+     "inputs": [{"name": "width", "type": "INT", "link": 89, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": 90, "widget": {"name": "height"}}],
+     "outputs": [{"name": "LATENT", "type": "LATENT", "links": [119], "slot_index": 0}],
+     "properties": {"Node name for S&R": "EmptyHunyuanLatentVideo"}, "widgets_values": [960, 544, 65, 1]},
+    {"id": 55, "type": "ConditioningZeroOut", "pos": [-910, 300], "size": [251.14309692382812, 26], "flags": {"collapsed": true}, "order": 12, "mode": 0,
+     "inputs": [{"name": "conditioning", "type": "CONDITIONING", "link": 69}],
+     "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [70], "slot_index": 0}],
+     "properties": {"Node name for S&R": "ConditioningZeroOut"}, "widgets_values": []},
+    {"id": 52, "type": "BasicScheduler", "pos": [-600, -350], "size": [315, 106], "flags": {}, "order": 17, "mode": 0,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 78}],
+     "outputs": [{"name": "SIGMAS", "type": "SIGMAS", "links": [62], "slot_index": 0}],
+     "properties": {"Node name for S&R": "BasicScheduler"}, "widgets_values": ["simple", 20, 1]},
+    {"id": 42, "type": "SamplerCustom", "pos": [-640, 10], "size": [355.20001220703125, 467.4666748046875], "flags": {}, "order": 18, "mode": 0,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 77}, {"name": "positive", "type": "CONDITIONING", "link": 83}, {"name": "negative", "type": "CONDITIONING", "link": 70}, {"name": "sampler", "type": "SAMPLER", "link": 61}, {"name": "sigmas", "type": "SIGMAS", "link": 62}, {"name": "latent_image", "type": "LATENT", "link": 119}],
+     "outputs": [{"name": "output", "type": "LATENT", "links": null}, {"name": "denoised_output", "type": "LATENT", "links": [141], "slot_index": 1}],
+     "properties": {"Node name for S&R": "SamplerCustom"}, "widgets_values": [true, 6, "fixed", 1, null]},
+    {"id": 84, "type": "GetLatentRangeFromBatch", "pos": [-240, -100], "size": [340.20001220703125, 82], "flags": {}, "order": 19, "mode": 0,
+     "inputs": [{"name": "latents", "type": "LATENT", "link": 141}],
+     "outputs": [{"name": "LATENT", "type": "LATENT", "links": [142], "slot_index": 0}],
+     "properties": {"Node name for S&R": "GetLatentRangeFromBatch"}, "widgets_values": [1, -1]},
+    {"id": 50, "type": "VHS_VideoCombine", "pos": [165.77645874023438, -619.0606079101562], "size": [1112.6898193359375, 1076.4598388671875], "flags": {}, "order": 21, "mode": 0,
+     "inputs": [{"name": "images", "type": "IMAGE", "link": 105}, {"name": "audio", "type": "AUDIO", "link": null, "shape": 7}, {"name": "meta_batch", "type": "VHS_BatchManager", "link": null, "shape": 7}, {"name": "vae", "type": "VAE", "link": null, "shape": 7}],
+     "outputs": [{"name": "Filenames", "type": "VHS_FILENAMES", "links": null}],
+     "properties": {"Node name for S&R": "VHS_VideoCombine"},
+     "widgets_values": {"frame_rate": 24, "loop_count": 0, "filename_prefix": "hyvidcomfy", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 19, "save_metadata": true, "trim_to_audio": false, "pingpong": false, "save_output": false,
+       "videopreview": {"hidden": false, "paused": false, "params": {"filename": "hyvidcomfy_00001.mp4", "subfolder": "", "type": "temp", "format": "video/h264-mp4", "frame_rate": 24, "workflow": "hyvidcomfy_00001.png", "fullpath": "N:\\AI\\ComfyUI\\temp\\hyvidcomfy_00001.mp4"}, "muted": false}}},
+    {"id": 54, "type": "ModelSamplingSD3", "pos": [-1079.9112548828125, -146.69448852539062], "size": [315, 58], "flags": {}, "order": 16, "mode": 0,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 117}],
+     "outputs": [{"name": "MODEL", "type": "MODEL", "links": [77, 78], "slot_index": 0}],
+     "properties": {"Node name for S&R": "ModelSamplingSD3"}, "widgets_values": [9]},
+    {"id": 80, "type": "PathchSageAttentionKJ", "pos": [-2273.926513671875, -36.720542907714844], "size": [315, 58], "flags": {}, "order": 7, "mode": 4,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 135}],
+     "outputs": [{"name": "MODEL", "type": "MODEL", "links": [136], "slot_index": 0}],
+     "properties": {"Node name for S&R": "PathchSageAttentionKJ"}, "widgets_values": ["auto"]},
+    {"id": 85, "type": "Note", "pos": [-1838.572265625, -302.1575927734375], "size": [408.4594421386719, 58], "flags": {}, "order": 5, "mode": 0,
+     "inputs": [], "outputs": [], "properties": {},
+     "widgets_values": ["https://huggingface.co/Kijai/Leapfusion-image2vid-comfy/blob/main/leapfusion_img2vid544p_comfy.safetensors"],
+     "color": "#432", "bgcolor": "#653"},
+    {"id": 74, "type": "LeapfusionHunyuanI2VPatcher", "pos": [-1059.552978515625, -459.34674072265625], "size": [277.3238525390625, 150], "flags": {}, "order": 15, "mode": 0,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 123}, {"name": "latent", "type": "LATENT", "link": 137}],
+     "outputs": [{"name": "MODEL", "type": "MODEL", "links": [117], "slot_index": 0}],
+     "properties": {"Node name for S&R": "LeapfusionHunyuanI2VPatcher"}, "widgets_values": [0, 0, 1, 0.8]},
+    {"id": 59, "type": "LoraLoaderModelOnly", "pos": [-1870.3748779296875, -194.6091766357422], "size": [442.8438720703125, 82], "flags": {}, "order": 11, "mode": 0,
+     "inputs": [{"name": "model", "type": "MODEL", "link": 136}],
+     "outputs": [{"name": "MODEL", "type": "MODEL", "links": [123], "slot_index": 0}],
+     "properties": {"Node name for S&R": "LoraLoaderModelOnly"}, "widgets_values": ["hyvid\\musubi-tuner\\img2vid544p.safetensors", 1]},
+    {"id": 66, "type": "ImageResizeKJ", "pos": [-1821.1531982421875, -632.925048828125], "size": [315, 266], "flags": {}, "order": 6, "mode": 0,
+     "inputs": [{"name": "image", "type": "IMAGE", "link": 86}, {"name": "get_image_size", "type": "IMAGE", "link": null, "shape": 7}, {"name": "width_input", "type": "INT", "link": null, "widget": {"name": "width_input"}, "shape": 7}, {"name": "height_input", "type": "INT", "link": null, "widget": {"name": "height_input"}, "shape": 7}],
+     "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [143], "slot_index": 0}, {"name": "width", "type": "INT", "links": [89], "slot_index": 1}, {"name": "height", "type": "INT", "links": [90], "slot_index": 2}],
+     "properties": {"Node name for S&R": "ImageResizeKJ"}, "widgets_values": [960, 640, "lanczos", false, 2, 0, 0, "center"]},
+    {"id": 86, "type": "ImageNoiseAugmentation", "pos": [-1361.111572265625, -667.0104370117188], "size": [315, 106], "flags": {}, "order": 9, "mode": 0,
+     "inputs": [{"name": "image", "type": "IMAGE", "link": 143}],
+     "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [144], "slot_index": 0}],
+     "properties": {"Node name for S&R": "ImageNoiseAugmentation"}, "widgets_values": [0.05, 123, "fixed"]}
+  ],
+  "links": [
+    [56, 47, 0, 45, 0, "CLIP"],
+    [61, 51, 0, 42, 3, "SAMPLER"],
+    [62, 52, 0, 42, 4, "SIGMAS"],
+    [69, 45, 0, 55, 0, "CONDITIONING"],
+    [70, 55, 0, 42, 2, "CONDITIONING"],
+    [74, 49, 0, 57, 1, "VAE"],
+    [77, 54, 0, 42, 0, "MODEL"],
+    [78, 54, 0, 52, 0, "MODEL"],
+    [82, 45, 0, 62, 0, "CONDITIONING"],
+    [83, 62, 0, 42, 1, "CONDITIONING"],
+    [86, 65, 0, 66, 0, "IMAGE"],
+    [88, 49, 0, 64, 1, "VAE"],
+    [89, 66, 1, 53, 0, "INT"],
+    [90, 66, 2, 53, 1, "INT"],
+    [105, 57, 0, 50, 0, "IMAGE"],
+    [117, 74, 0, 54, 0, "MODEL"],
+    [119, 53, 0, 42, 5, "LATENT"],
+    [123, 59, 0, 74, 0, "MODEL"],
+    [135, 44, 0, 80, 0, "MODEL"],
+    [136, 80, 0, 59, 0, "MODEL"],
+    [137, 64, 0, 74, 1, "LATENT"],
+    [141, 42, 1, 84, 0, "LATENT"],
+    [142, 84, 0, 57, 0, "LATENT"],
+    [143, 66, 0, 86, 0, "IMAGE"],
+    [144, 86, 0, 64, 0, "IMAGE"]
+  ],
+  "groups": [],
+  "config": {},
+  "extra": {
+    "ds": {"scale": 0.740024994425854, "offset": [2525.036093151529, 802.59123935694]},
+    "node_versions": {"comfy-core": "0.3.13", "ComfyUI-KJNodes": "a8aeef670b3f288303f956bf94385cb87978ea93", "ComfyUI-VideoHelperSuite": "c47b10ca1798b4925ff5a5f07d80c51ca80a837d"},
+    "VHS_latentpreview": true,
+    "VHS_latentpreviewrate": 0
+  },
+  "version": 0.4
+}
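
The workflow format above is a flat node list plus a `links` table; each `links` entry reads `[link_id, source_node_id, source_slot, target_node_id, target_slot, data_type]`, which can be cross-checked against the `link` and `links` fields on each node's inputs and outputs. A sketch that prints this workflow's edges:

```python
import json

# Sketch: print the edges of the workflow graph above by joining the
# "links" table with the node id -> type lookup from "nodes".
with open("leapfusion_hunyuuanvideo_i2v_native_testing.json") as f:
    wf = json.load(f)

node_types = {node["id"]: node["type"] for node in wf["nodes"]}
for link_id, src, src_slot, dst, dst_slot, dtype in wf["links"]:
    print(f"{node_types[src]}[{src_slot}] --{dtype}--> "
          f"{node_types[dst]}[{dst_slot}]  (link {link_id})")
```

Reading the edges this way makes the pipeline visible: LoadImage -> ImageResizeKJ -> ImageNoiseAugmentation -> VAEEncode feeds the encoded latent into the LeapfusionHunyuanI2VPatcher, whose patched model drives SamplerCustom, and the sampled latents go through GetLatentRangeFromBatch and VAEDecodeTiled into VHS_VideoCombine.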
custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d897f04ff2bb452e29a8f2a3c5c3cd5c55e95f314242cd645fbbe24a5ac59961
+size 6416109
custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f199d6bf3180fe7271073c3769dcb764b40f35f41b30fcb183ae5bf4b6a9997f
+size 6416109
custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02934db0a0b92a9cdda402e42548560beda7d31b268e561dbc6815551e876268
+size 6416109
custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:635e998063a10211633edd3e4b1676201822cd67f790ec71dba5f32d8b625c8b
+size 6416109
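
The four `.safetensors` entries above are Git LFS pointer files rather than the weights themselves: three plain `key value` lines giving the spec version, the SHA-256 of the real blob, and its size in bytes. A sketch of parsing one such pointer:

```python
# Sketch: parse a Git LFS pointer file like the ones above into a dict.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"version": fields["version"],
            "sha256": fields["oid"].removeprefix("sha256:"),
            "size_bytes": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:635e998063a10211633edd3e4b1676201822cd67f790ec71dba5f32d8b625c8b
size 6416109"""
print(parse_lfs_pointer(pointer))  # sha256 digest and a ~6.4 MB size
```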
custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_loras.txt
ADDED
@@ -0,0 +1,4 @@
+source for the loras:
+https://github.com/duxiaodan/intrinsic-lora
+
+Renamed and converted to .safetensors
custom_nodes/ComfyUI-KJNodes-main/kjweb_async/marked.min.js
ADDED
@@ -0,0 +1,6 @@
+/**
+ * marked v12.0.1 - a markdown parser
+ * Copyright (c) 2011-2024, Christopher Jeffrey. (MIT Licensed)
+ * https://github.com/markedjs/marked
+ */
+
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length<t;)n.push("");for(;s<n.length;s++)n[s]=n[s].trim().replace(/\\\|/g,"|");return n}function x(e,t,n){const s=e.length;if(0===s)return"";let r=0;for(;r<s;){const i=e.charAt(s-r-1);if(i!==t||n){if(i===t||!n)break;r++}else r++}return e.slice(0,s-r)}function b(e,t,n,s){const r=t.href,i=t.title?c(t.title):null,l=e[1].replace(/\\([\[\]])/g,"$1");if("!"!==e[0].charAt(0)){s.state.inLink=!0;const e={type:"link",raw:n,href:r,title:i,text:l,tokens:s.inlineTokens(l)};return s.state.inLink=!1,e}return{type:"image",raw:n,href:r,title:i,text:c(l)}}class w{options;rules;lexer;constructor(t){this.options=t||e.defaults}space(e){const t=this.rules.block.newline.exec(e);if(t&&t[0].length>0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return 
this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e<r.items.length;e++)if(this.lexer.state.top=!1,r.items[e].tokens=this.lexer.blockTokens(r.items[e].text,[]),!r.loose){const t=r.items[e].tokens.filter((e=>"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e<r.items.length;e++)r.items[e].loose=!0;return r}}html(e){const t=this.rules.block.html.exec(e);if(t){return{type:"html",block:!0,raw:t[0],pre:"pre"===t[1]||"script"===t[1]||"style"===t[1],text:t[0]}}}def(e){const t=this.rules.block.def.exec(e);if(t){const e=t[1].toLowerCase().replace(/\s+/g," "),n=t[2]?t[2].replace(/^<(.*)>$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const 
e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^<a /i.test(t[0])?this.lexer.state.inLink=!0:this.lexer.state.inLink&&/^<\/a>/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^</.test(e)){if(!/>$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s<e.length;s++)if("\\"===e[s])s++;else if(e[s]===t[0])n++;else if(e[s]===t[1]&&(n--,n<0))return s;return-1}(t[2],"()");if(e>-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^</.test(n)&&(n=this.options.pedantic&&!/>$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let 
e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).replace(/blockCode/g,/ {4}/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/<!--(?:-?>|[\s\S]*?(?:-->|$))/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|<![A-Z][\\s\\S]*?(?:>\\n*|$)|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),q={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},Z=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! 
*\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...q,table:Z,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",Z).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...q,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)|<tag(?:\"[^\"]*\"|'[^']*'|\\s[^'\"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}\\p{S}",C=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),M=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),O=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^</[a-zA-Z][\\w:-]*\\s*>|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^<![a-zA-Z]+\\s[\\s\\S]*?>|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,
br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:M,emStrongRDelimAst:O,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:C,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,url:f},V={...K,link:k(/^!?\[(label)\]\((.*?)\)/).replace("label",F).getRegex(),reflink:k(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace("label",F).getRegex()},W={...K,escape:k(Q).replace("])","~|])").getRegex(),url:k(/^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,"i").replace("email",/[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/).getRegex(),_backpedal:/(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,del:/^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,text:/^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/},Y={...W,br:k(v).replace("{2,}","*").getRegex(),text:k(W.text).replace("\\b_","\\b_| {2,}\\n").replace(/\{2,\}/g,"*").getRegex()},ee={normal:q,gfm:L,pedantic:P},te={normal:K,gfm:W,breaks:Y,pedantic:V};class ne{tokens;options;state;tokenizer;inlineQueue;constructor(t){this.tokens=[],this.tokens.links=Object.create(null),this.options=t||e.defaults,this.options.tokenizer=this.options.tokenizer||new w,this.tokenizer=this.options.tokenizer,this.tokenizer.options=this.options,this.tokenizer.lexer=this,this.inlineQueue=[],this.state={inLink:!1,inRawBlock:!1,top:!0};const n={block:ee.normal,inline:te.normal};this.options.pedantic?(n.block=ee.pedantic,n.inline=te.pedantic):this.options.gfm&&(n.block=ee.gfm,this.options.breaks?n.inline=te.breaks:n.inline=te.gfm),this.tokenizer.rules=n}static get rules(){return{block:ee,inline:te}}static lex(e,t){return new ne(t).lex(e)}static lexInline(e,t){return new ne(t).inlineTokens(e)}lex(e){e=e.replace(/\r\n|\r/g,"\n"),this.blockTokens(e,this.tokens);for(let e=0;e<this.inlineQueue.length;e++){const t=this.inlineQueue[e];this.inlineTokens(t.src,t.tokens)}return this.inlineQueue=[],this.tokens}blockTokens(e,t=[]){let n,s,r,i;for(e=this.options.pedantic?e.replace(/\t/g," ").replace(/^ +$/gm,""):e.replace(/^( *)(\t+)/gm,((e,t,n)=>t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?'<pre><code class="language-'+c(s)+'">'+(n?e:c(e,!0))+"</code></pre>\n":"<pre><code>"+(n?e:c(e,!0))+"</code></pre>\n"}blockquote(e){return`<blockquote>\n${e}</blockquote>\n`}html(e,t){return e}heading(e,t,n){return`<h${t}>${e}</h${t}>\n`}hr(){return"<hr>\n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+"</"+s+">\n"}listitem(e,t,n){return`<li>${e}</li>\n`}checkbox(e){return"<input "+(e?'checked="" ':"")+'disabled="" type="checkbox">'}paragraph(e){return`<p>${e}</p>\n`}table(e,t){return t&&(t=`<tbody>${t}</tbody>`),"<table>\n<thead>\n"+e+"</thead>\n"+t+"</table>\n"}tablerow(e){return`<tr>\n${e}</tr>\n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`</${n}>\n`}strong(e){return`<strong>${e}</strong>`}em(e){return`<em>${e}</em>`}codespan(e){return`<code>${e}</code>`}br(){return"<br>"}del(e){return`<del>${e}</del>`}link(e,t,n){const s=g(e);if(null===s)return n;let r='<a href="'+(e=s)+'"';return t&&(r+=' title="'+t+'"'),r+=">"+n+"</a>",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=`<img src="${e=s}" alt="${n}"`;return t&&(r+=` title="${t}"`),r+=">",r}text(e){return e}}class re{strong(e){return e}em(e){return e}codespan(e){return e}del(e){return e}html(e){return e}text(e){return e}link(e,t,n){return""+n}image(e,t,n){return""+n}br(){return""}}class ie{options;renderer;textRenderer;constructor(t){this.options=t||e.defaults,this.options.renderer=this.options.renderer||new se,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new re}static parse(e,t){return new ie(t).parse(e)}static parseInline(e,t){return new ie(t).parseInline(e)}parse(e,t=!0){let n="";for(let s=0;s<e.length;s++){const r=e[s];if(this.options.extensions&&this.options.extensions.renderers&&this.options.extensions.renderers[r.type]){const e=r,t=this.options.extensions.renderers[e.type].call({parser:this},e);if(!1!==t||!["space","hr","heading","code","table","blockquote","list","html","paragraph","text"].includes(e.type)){n+=t||"";continue}}switch(r.type){case"space":continue;case"hr":n+=this.renderer.hr();continue;case"heading":{const e=r;n+=this.renderer.heading(this.parseInline(e.tokens),e.depth,p(this.parseInline(e.tokens,this.textRenderer)));continue}case"code":{const e=r;n+=this.renderer.code(e.text,e.lang,!!e.escaped);continue}case"table":{const e=r;let t="",s="";for(let t=0;t<e.header.length;t++)s+=this.renderer.tablecell(this.parseInline(e.header[t].tokens),{header:!0,align:e.align[t]});t+=this.renderer.tablerow(s);let i="";for(let t=0;t<e.rows.length;t++){const n=e.rows[t];s="";for(let t=0;t<n.length;t++)s+=this.renderer.tablecell(this.parseInline(n[t].tokens),{header:!1,align:e.align[t]});i+=this.renderer.tablerow(s)}n+=this.renderer.table(t,i);continue}case"blockquote":{const e=r,t=this.parse(e.tokens);n+=this.renderer.blockquote(t);continue}case"list":{const e=r,t=e.ordered,s=e.start,i=e.loose;let l="";for(let 
t=0;t<e.items.length;t++){const n=e.items[t],s=n.checked,r=n.task;let o="";if(n.task){const e=this.renderer.checkbox(!!s);i?n.tokens.length>0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" "+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1<e.length&&"text"===e[s+1].type;)i=e[++s],l+="\n"+(i.tokens?this.parseInline(i.tokens):i.text);n+=t?this.renderer.paragraph(l):l;continue}default:{const e='Token with "'+r.type+'" type was not found.';if(this.options.silent)return console.error(e),"";throw new Error(e)}}}return n}parseInline(e,t){t=t||this.renderer;let n="";for(let s=0;s<e.length;s++){const r=e[s];if(this.options.extensions&&this.options.extensions.renderers&&this.options.extensions.renderers[r.type]){const e=this.options.extensions.renderers[r.type].call({parser:this},r);if(!1!==e||!["escape","html","link","image","strong","em","codespan","br","del","text"].includes(r.type)){n+=e||"";continue}}switch(r.type){case"escape":{const e=r;n+=t.text(e.text);break}case"html":{const e=r;n+=t.html(e.text);break}case"link":{const e=r;n+=t.link(e.href,e.title,this.parseInline(e.tokens,t));break}case"image":{const e=r;n+=t.image(e.href,e.title,e.text);break}case"strong":{const e=r;n+=t.strong(this.parseInline(e.tokens,t));break}case"em":{const e=r;n+=t.em(this.parseInline(e.tokens,t));break}case"codespan":{const e=r;n+=t.codespan(e.text);break}case"br":n+=t.br();break;case"del":{const e=r;n+=t.del(this.parseInline(e.tokens,t));break}case"text":{const e=r;n+=t.text(e.text);break}default:{const e='Token with "'+r.type+'" type was not found.';if(this.options.silent)return console.error(e),"";throw new Error(e)}}}return n}}class le{options;constructor(t){this.options=t||e.defaults}static passThroughHooks=new Set(["preprocess","postprocess","processAllTokens"]);preprocess(e){return e}postprocess(e){return e}processAllTokens(e){return e}}class oe{defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};options=this.setOptions;parse=this.#e(ne.lex,ie.parse);parseInline=this.#e(ne.lexInline,ie.parseInline);Parser=ie;Renderer=se;TextRenderer=re;Lexer=ne;Tokenizer=w;Hooks=le;constructor(...e){this.use(...e)}walkTokens(e,t){let n=[];for(const s of e)switch(n=n.concat(t.call(this,s)),s.type){case"table":{const e=s;for(const s of e.header)n=n.concat(this.walkTokens(s.tokens,t));for(const s of e.rows)for(const e of s)n=n.concat(this.walkTokens(e.tokens,t));break}case"list":{const e=s;n=n.concat(this.walkTokens(e.items,t));break}default:{const e=s;this.defaults.extensions?.childTokens?.[e.type]?this.defaults.extensions.childTokens[e.type].forEach((s=>{const r=e[s].flat(1/0);n=n.concat(this.walkTokens(r,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const 
n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. 
The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="<p>An error occurred:</p><pre>"+c(n.message+"",!0)+"</pre>";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke}));
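For orientation, the minified bundle above is the standard UMD build of marked, which attaches a global `marked` object when loaded without a module system; the exported surface is visible at the end of the bundle (marked, parse, parseInline, use, setOptions, Lexer, Parser, ...). A minimal usage sketch, with an arbitrary example string:

// Render a markdown string to HTML; option names match the defaults
// object declared at the top of the bundle (async, breaks, gfm, ...).
const html = marked.parse("# Hello\n\nSome *markdown* text.");
// -> "<h1>Hello</h1>\n<p>Some <em>markdown</em> text.</p>\n"
marked.setOptions({ gfm: true, breaks: true }); // change defaults globally

Note that marked does not sanitize its output, so untrusted input is normally passed through a sanitizer such as the DOMPurify bundle below.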
custom_nodes/ComfyUI-KJNodes-main/kjweb_async/protovis.min.js
ADDED
The diff for this file is too large to render.
See raw diff
custom_nodes/ComfyUI-KJNodes-main/kjweb_async/purify.min.js
ADDED
@@ -0,0 +1,3 @@
/*! @license DOMPurify 3.0.11 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.11/LICENSE */
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;i||(i=function(e){return e}),a||(a=function(e){return e}),c||(c=function(e,t,n){return e.apply(t,n)}),s||(s=function(e,t){return new e(...t)});const u=b(Array.prototype.forEach),m=b(Array.prototype.pop),p=b(Array.prototype.push),f=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(Object.prototype.hasOwnProperty),A=b(RegExp.prototype.test),_=(N=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n<e;n++)t[n]=arguments[n];return s(N,t)});var N;function b(e){return function(t){for(var n=arguments.length,o=new Array(n>1?n-1:0),r=1;r<n;r++)o[r-1]=arguments[r];return c(e,t,o)}}function S(e,o){let r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:f;t&&t(e,null);let i=o.length;for(;i--;){let t=o[i];if("string"==typeof t){const e=r(t);e!==t&&(n(o)||(o[i]=e),t=e)}e[t]=!0}return e}function R(e){for(let t=0;t<e.length;t++){E(e,t)||(e[t]=null)}return e}function w(t){const n=l(null);for(const[o,r]of e(t)){E(t,o)&&(Array.isArray(r)?n[o]=R(r):r&&"object"==typeof r&&r.constructor===Object?n[o]=w(r):n[o]=r)}return n}function L(e,t){for(;null!==e;){const n=r(e,t);if(n){if(n.get)return b(n.get);if("function"==typeof n.value)return b(n.value)}e=o(e)}return function(){return null}}const D=i(["a","abbr","acronym","address","area","article","aside","audio","b","bdi","bdo","big","blink","blockquote","body","br","button","canvas","caption","center","cite","code","col","colgroup","content","data","datalist","dd","decorator","del","details","dfn","dialog","dir","div","dl","dt","element","em","fieldset","figcaption","figure","font","footer","form","h1","h2","h3","h4","h5","h6","head","header","hgroup","hr","html","i","img","input","ins","kbd","label","legend","li","main","map","mark","marquee","menu","menuitem","meter","nav","nobr","ol","optgroup","option","output","p","picture","pre","progress","q","rp","rt","ruby","s","samp","section","select","shadow","small","source","spacer","span","strike","strong","style","sub","summary","sup","table","tbody","td","template","textarea","tfoot","th","thead","time","tr","track","tt","u","ul","var","video","wbr"]),C=i(["svg","a","altglyph","altglyphdef","altglyphitem","animatecolor","animatemotion","animatetransform","circle","clippath","defs","desc","ellipse","filter","font","g","glyph","glyphref","hkern","image","line","lineargradient","marker","mask","metadata","mpath","path","pattern","polygon","polyline","radialgradient","rect","stop","style","switch","symbol","text","textpath","title","tref","tspan","view","vkern"]),O=i(["feBlend","feColorMatrix","feComponentTransfer","feComposite","feConvolveMatrix","feDiffuseLighting","feDisplacementMap","feDistantLight","feDropShadow","feFlood","feFuncA","feFuncB","feFuncG","feFuncR","feGaussianBlur","feImage","feMerge","feMergeNode","feMorphology","feOffset","fePointLight","feSpecularLighting","feSpotLight","feTile","feTurbulence"]),x=i(["animate","color-profile","cursor","discard","font-face","font-face-format","font-face-name","font-face-src","font-face-uri","foreigno
bject","hatch","hatchpath","mesh","meshgradient","meshpatch","meshrow","missing-glyph","script","set","solidcolor","unknown","use"]),v=i(["math","menclose","merror","mfenced","mfrac","mglyph","mi","mlabeledtr","mmultiscripts","mn","mo","mover","mpadded","mphantom","mroot","mrow","ms","mspace","msqrt","mstyle","msub","msup","msubsup","mtable","mtd","mtext","mtr","munder","munderover","mprescripts"]),k=i(["maction","maligngroup","malignmark","mlongdiv","mscarries","mscarry","msgroup","mstack","msline","msrow","semantics","annotation","annotation-xml","mprescripts","none"]),M=i(["#text"]),I=i(["accept","action","align","alt","autocapitalize","autocomplete","autopictureinpicture","autoplay","background","bgcolor","border","capture","cellpadding","cellspacing","checked","cite","class","clear","color","cols","colspan","controls","controlslist","coords","crossorigin","datetime","decoding","default","dir","disabled","disablepictureinpicture","disableremoteplayback","download","draggable","enctype","enterkeyhint","face","for","headers","height","hidden","high","href","hreflang","id","inputmode","integrity","ismap","kind","label","lang","list","loading","loop","low","max","maxlength","media","method","min","minlength","multiple","muted","name","nonce","noshade","novalidate","nowrap","open","optimum","pattern","placeholder","playsinline","poster","preload","pubdate","radiogroup","readonly","rel","required","rev","reversed","role","rows","rowspan","spellcheck","scope","selected","shape","size","sizes","span","srclang","start","src","srcset","step","style","summary","tabindex","title","translate","type","usemap","valign","value","width","wrap","xmlns","slot"]),U=i(["accent-height","accumulate","additive","alignment-baseline","ascent","attributename","attributetype","azimuth","basefrequency","baseline-shift","begin","bias","by","class","clip","clippathunits","clip-path","clip-rule","color","color-interpolation","color-interpolation-filters","color-profile","color-rendering","cx","cy","d","dx","dy","diffuseconstant","direction","display","divisor","dur","edgemode","elevation","end","fill","fill-opacity","fill-rule","filter","filterunits","flood-color","flood-opacity","font-family","font-size","font-size-adjust","font-stretch","font-style","font-variant","font-weight","fx","fy","g1","g2","glyph-name","glyphref","gradientunits","gradienttransform","height","href","id","image-rendering","in","in2","k","k1","k2","k3","k4","kerning","keypoints","keysplines","keytimes","lang","lengthadjust","letter-spacing","kernelmatrix","kernelunitlength","lighting-color","local","marker-end","marker-mid","marker-start","markerheight","markerunits","markerwidth","maskcontentunits","maskunits","max","mask","media","method","mode","min","name","numoctaves","offset","operator","opacity","order","orient","orientation","origin","overflow","paint-order","path","pathlength","patterncontentunits","patterntransform","patternunits","points","preservealpha","preserveaspectratio","primitiveunits","r","rx","ry","radius","refx","refy","repeatcount","repeatdur","restart","result","rotate","scale","seed","shape-rendering","specularconstant","specularexponent","spreadmethod","startoffset","stddeviation","stitchtiles","stop-color","stop-opacity","stroke-dasharray","stroke-dashoffset","stroke-linecap","stroke-linejoin","stroke-miterlimit","stroke-opacity","stroke","stroke-width","style","surfacescale","systemlanguage","tabindex","targetx","targety","transform","transform-origin","text-anchor","text-decoration","text-rendering","textlength","typ
e","u1","u2","unicode","values","viewbox","visibility","version","vert-adv-y","vert-origin-x","vert-origin-y","width","word-spacing","wrap","writing-mode","xchannelselector","ychannelselector","x","x1","x2","xmlns","y","y1","y2","z","zoomandpan"]),P=i(["accent","accentunder","align","bevelled","close","columnsalign","columnlines","columnspan","denomalign","depth","dir","display","displaystyle","encoding","fence","frame","height","href","id","largeop","length","linethickness","lspace","lquote","mathbackground","mathcolor","mathsize","mathvariant","maxsize","minsize","movablelimits","notation","numalign","open","rowalign","rowlines","rowspacing","rowspan","rspace","rquote","scriptlevel","scriptminsize","scriptsizemultiplier","selection","separator","separators","stretchy","subscriptshift","supscriptshift","symmetric","voffset","width","xmlns"]),F=i(["xlink:href","xml:id","xlink:title","xml:space","xmlns:xlink"]),H=a(/\{\{[\w\W]*|[\w\W]*\}\}/gm),z=a(/<%[\w\W]*|[\w\W]*%>/gm),B=a(/\${[\w\W]*}/gm),W=a(/^data-[\-\w.\u00B7-\uFFFF]/),G=a(/^aria-[\-\w]+$/),Y=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),j=a(/^(?:\w+script|data):/i),X=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q=a(/^html$/i),$=a(/^[a-z][.\w]*(-[.\w]+)+$/i);var K=Object.freeze({__proto__:null,MUSTACHE_EXPR:H,ERB_EXPR:z,TMPLIT_EXPR:B,DATA_ATTR:W,ARIA_ATTR:G,IS_ALLOWED_URI:Y,IS_SCRIPT_OR_DATA:j,ATTR_WHITESPACE:X,DOCTYPE_NAME:q,CUSTOM_ELEMENT:$});const V=function(){return"undefined"==typeof window?null:window},Z=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var J=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:V();const o=e=>t(e);if(o.version="3.0.11",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;let{document:r}=n;const a=r,c=a.currentScript,{DocumentFragment:s,HTMLTemplateElement:N,Node:b,Element:R,NodeFilter:H,NamedNodeMap:z=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:B,DOMParser:W,trustedTypes:G}=n,j=R.prototype,X=L(j,"cloneNode"),$=L(j,"nextSibling"),J=L(j,"childNodes"),Q=L(j,"parentNode");if("function"==typeof N){const e=r.createElement("template");e.content&&e.content.ownerDocument&&(r=e.content.ownerDocument)}let ee,te="";const{implementation:ne,createNodeIterator:oe,createDocumentFragment:re,getElementsByTagName:ie}=r,{importNode:ae}=a;let le={};o.isSupported="function"==typeof e&&"function"==typeof Q&&ne&&void 0!==ne.createHTMLDocument;const{MUSTACHE_EXPR:ce,ERB_EXPR:se,TMPLIT_EXPR:ue,DATA_ATTR:me,ARIA_ATTR:pe,IS_SCRIPT_OR_DATA:fe,ATTR_WHITESPACE:de,CUSTOM_ELEMENT:he}=K;let{IS_ALLOWED_URI:ge}=K,Te=null;const ye=S({},[...D,...C,...O,...v,...M]);let Ee=null;const Ae=S({},[...I,...U,...P,...F]);let _e=Object.seal(l(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ne=null,be=null,Se=!0,Re=!0,we=!1,Le=!0,De=!1,Ce=!0,Oe=!1,xe=!1,ve=!1,ke=!1,Me=!1,Ie=!1,Ue=!0,Pe=!1;const Fe="user-content-";let He=!0,ze=!1,Be={},We=null;const 
Ge=S({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let Ye=null;const je=S({},["audio","video","img","source","image","track"]);let Xe=null;const qe=S({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),$e="http://www.w3.org/1998/Math/MathML",Ke="http://www.w3.org/2000/svg",Ve="http://www.w3.org/1999/xhtml";let Ze=Ve,Je=!1,Qe=null;const et=S({},[$e,Ke,Ve],d);let tt=null;const nt=["application/xhtml+xml","text/html"],ot="text/html";let rt=null,it=null;const at=r.createElement("form"),lt=function(e){return e instanceof RegExp||e instanceof Function},ct=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!it||it!==e){if(e&&"object"==typeof e||(e={}),e=w(e),tt=-1===nt.indexOf(e.PARSER_MEDIA_TYPE)?ot:e.PARSER_MEDIA_TYPE,rt="application/xhtml+xml"===tt?d:f,Te=E(e,"ALLOWED_TAGS")?S({},e.ALLOWED_TAGS,rt):ye,Ee=E(e,"ALLOWED_ATTR")?S({},e.ALLOWED_ATTR,rt):Ae,Qe=E(e,"ALLOWED_NAMESPACES")?S({},e.ALLOWED_NAMESPACES,d):et,Xe=E(e,"ADD_URI_SAFE_ATTR")?S(w(qe),e.ADD_URI_SAFE_ATTR,rt):qe,Ye=E(e,"ADD_DATA_URI_TAGS")?S(w(je),e.ADD_DATA_URI_TAGS,rt):je,We=E(e,"FORBID_CONTENTS")?S({},e.FORBID_CONTENTS,rt):Ge,Ne=E(e,"FORBID_TAGS")?S({},e.FORBID_TAGS,rt):{},be=E(e,"FORBID_ATTR")?S({},e.FORBID_ATTR,rt):{},Be=!!E(e,"USE_PROFILES")&&e.USE_PROFILES,Se=!1!==e.ALLOW_ARIA_ATTR,Re=!1!==e.ALLOW_DATA_ATTR,we=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Le=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,De=e.SAFE_FOR_TEMPLATES||!1,Ce=!1!==e.SAFE_FOR_XML,Oe=e.WHOLE_DOCUMENT||!1,ke=e.RETURN_DOM||!1,Me=e.RETURN_DOM_FRAGMENT||!1,Ie=e.RETURN_TRUSTED_TYPE||!1,ve=e.FORCE_BODY||!1,Ue=!1!==e.SANITIZE_DOM,Pe=e.SANITIZE_NAMED_PROPS||!1,He=!1!==e.KEEP_CONTENT,ze=e.IN_PLACE||!1,ge=e.ALLOWED_URI_REGEXP||Y,Ze=e.NAMESPACE||Ve,_e=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(_e.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(_e.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(_e.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),De&&(Re=!1),Me&&(ke=!0),Be&&(Te=S({},M),Ee=[],!0===Be.html&&(S(Te,D),S(Ee,I)),!0===Be.svg&&(S(Te,C),S(Ee,U),S(Ee,F)),!0===Be.svgFilters&&(S(Te,O),S(Ee,U),S(Ee,F)),!0===Be.mathMl&&(S(Te,v),S(Ee,P),S(Ee,F))),e.ADD_TAGS&&(Te===ye&&(Te=w(Te)),S(Te,e.ADD_TAGS,rt)),e.ADD_ATTR&&(Ee===Ae&&(Ee=w(Ee)),S(Ee,e.ADD_ATTR,rt)),e.ADD_URI_SAFE_ATTR&&S(Xe,e.ADD_URI_SAFE_ATTR,rt),e.FORBID_CONTENTS&&(We===Ge&&(We=w(We)),S(We,e.FORBID_CONTENTS,rt)),He&&(Te["#text"]=!0),Oe&&S(Te,["html","head","body"]),Te.table&&(S(Te,["tbody"]),delete Ne.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');ee=e.TRUSTED_TYPES_POLICY,te=ee.createHTML("")}else void 0===ee&&(ee=Z(G,c)),null!==ee&&"string"==typeof 
te&&(te=ee.createHTML(""));i&&i(e),it=e}},st=S({},["mi","mo","mn","ms","mtext"]),ut=S({},["foreignobject","desc","title","annotation-xml"]),mt=S({},["title","style","font","a","script"]),pt=S({},[...C,...O,...x]),ft=S({},[...v,...k]),dt=function(e){let t=Q(e);t&&t.tagName||(t={namespaceURI:Ze,tagName:"template"});const n=f(e.tagName),o=f(t.tagName);return!!Qe[e.namespaceURI]&&(e.namespaceURI===Ke?t.namespaceURI===Ve?"svg"===n:t.namespaceURI===$e?"svg"===n&&("annotation-xml"===o||st[o]):Boolean(pt[n]):e.namespaceURI===$e?t.namespaceURI===Ve?"math"===n:t.namespaceURI===Ke?"math"===n&&ut[o]:Boolean(ft[n]):e.namespaceURI===Ve?!(t.namespaceURI===Ke&&!ut[o])&&(!(t.namespaceURI===$e&&!st[o])&&(!ft[n]&&(mt[n]||!pt[n]))):!("application/xhtml+xml"!==tt||!Qe[e.namespaceURI]))},ht=function(e){p(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},gt=function(e,t){try{p(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){p(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!Ee[e])if(ke||Me)try{ht(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},Tt=function(e){let t=null,n=null;if(ve)e="<remove></remove>"+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===tt&&Ze===Ve&&(e='<html xmlns="http://www.w3.org/1999/xhtml"><head></head><body>'+e+"</body></html>");const o=ee?ee.createHTML(e):e;if(Ze===Ve)try{t=(new W).parseFromString(o,tt)}catch(e){}if(!t||!t.documentElement){t=ne.createDocument(Ze,"template",null);try{t.documentElement.innerHTML=Je?te:o}catch(e){}}const i=t.body||t.documentElement;return e&&n&&i.insertBefore(r.createTextNode(n),i.childNodes[0]||null),Ze===Ve?ie.call(t,Oe?"html":"body")[0]:Oe?t.documentElement:i},yt=function(e){return oe.call(e.ownerDocument||e,e,H.SHOW_ELEMENT|H.SHOW_COMMENT|H.SHOW_TEXT|H.SHOW_PROCESSING_INSTRUCTION|H.SHOW_CDATA_SECTION,null)},Et=function(e){return e instanceof B&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof z)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},At=function(e){return"function"==typeof b&&e instanceof b},_t=function(e,t,n){le[e]&&u(le[e],(e=>{e.call(o,t,n,it)}))},Nt=function(e){let t=null;if(_t("beforeSanitizeElements",e,null),Et(e))return ht(e),!0;const n=rt(e.nodeName);if(_t("uponSanitizeElement",e,{tagName:n,allowedTags:Te}),e.hasChildNodes()&&!At(e.firstElementChild)&&A(/<[/\w]/g,e.innerHTML)&&A(/<[/\w]/g,e.textContent))return ht(e),!0;if(7===e.nodeType)return ht(e),!0;if(Ce&&8===e.nodeType&&A(/<[/\w]/g,e.data))return ht(e),!0;if(!Te[n]||Ne[n]){if(!Ne[n]&&St(n)){if(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n))return!1;if(_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))return!1}if(He&&!We[n]){const t=Q(e)||e.parentNode,n=J(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(X(n[o],!0),$(e))}}return ht(e),!0}return e instanceof R&&!dt(e)?(ht(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!A(/<\/no(script|embed|frames)/i,e.innerHTML)?(De&&3===e.nodeType&&(t=e.textContent,u([ce,se,ue],(e=>{t=g(t,e," ")})),e.textContent!==t&&(p(o.removed,{element:e.cloneNode()}),e.textContent=t)),_t("afterSanitizeElements",e,null),!1):(ht(e),!0)},bt=function(e,t,n){if(Ue&&("id"===t||"name"===t)&&(n in r||n in at))return!1;if(Re&&!be[t]&&A(me,t));else if(Se&&A(pe,t));else if(!Ee[t]||be[t]){if(!(St(e)&&(_e.tagNameCheck instanceof 
RegExp&&A(_e.tagNameCheck,e)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(e))&&(_e.attributeNameCheck instanceof RegExp&&A(_e.attributeNameCheck,t)||_e.attributeNameCheck instanceof Function&&_e.attributeNameCheck(t))||"is"===t&&_e.allowCustomizedBuiltInElements&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))))return!1}else if(Xe[t]);else if(A(ge,g(n,de,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!Ye[e]){if(we&&!A(fe,g(n,de,"")));else if(n)return!1}else;return!0},St=function(e){return"annotation-xml"!==e&&h(e,he)},Rt=function(e){_t("beforeSanitizeAttributes",e,null);const{attributes:t}=e;if(!t)return;const n={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:Ee};let r=t.length;for(;r--;){const i=t[r],{name:a,namespaceURI:l,value:c}=i,s=rt(a);let p="value"===a?c:y(c);if(n.attrName=s,n.attrValue=p,n.keepAttr=!0,n.forceKeepAttr=void 0,_t("uponSanitizeAttribute",e,n),p=n.attrValue,n.forceKeepAttr)continue;if(gt(a,e),!n.keepAttr)continue;if(!Le&&A(/\/>/i,p)){gt(a,e);continue}De&&u([ce,se,ue],(e=>{p=g(p,e," ")}));const f=rt(e.nodeName);if(bt(f,s,p)){if(!Pe||"id"!==s&&"name"!==s||(gt(a,e),p=Fe+p),ee&&"object"==typeof G&&"function"==typeof G.getAttributeType)if(l);else switch(G.getAttributeType(f,s)){case"TrustedHTML":p=ee.createHTML(p);break;case"TrustedScriptURL":p=ee.createScriptURL(p)}try{l?e.setAttributeNS(l,a,p):e.setAttribute(a,p),m(o.removed)}catch(e){}}}_t("afterSanitizeAttributes",e,null)},wt=function e(t){let n=null;const o=yt(t);for(_t("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)_t("uponSanitizeShadowNode",n,null),Nt(n)||(n.content instanceof s&&e(n.content),Rt(n));_t("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=null,r=null,i=null,l=null;if(Je=!e,Je&&(e="\x3c!--\x3e"),"string"!=typeof e&&!At(e)){if("function"!=typeof e.toString)throw _("toString is not a function");if("string"!=typeof(e=e.toString()))throw _("dirty is not a string, aborting")}if(!o.isSupported)return e;if(xe||ct(t),o.removed=[],"string"==typeof e&&(ze=!1),ze){if(e.nodeName){const t=rt(e.nodeName);if(!Te[t]||Ne[t])throw _("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof b)n=Tt("\x3c!----\x3e"),r=n.ownerDocument.importNode(e,!0),1===r.nodeType&&"BODY"===r.nodeName||"HTML"===r.nodeName?n=r:n.appendChild(r);else{if(!ke&&!De&&!Oe&&-1===e.indexOf("<"))return ee&&Ie?ee.createHTML(e):e;if(n=Tt(e),!n)return ke?null:Ie?te:""}n&&ve&&ht(n.firstChild);const c=yt(ze?e:n);for(;i=c.nextNode();)Nt(i)||(i.content instanceof s&&wt(i.content),Rt(i));if(ze)return e;if(ke){if(Me)for(l=re.call(n.ownerDocument);n.firstChild;)l.appendChild(n.firstChild);else l=n;return(Ee.shadowroot||Ee.shadowrootmode)&&(l=ae.call(a,l,!0)),l}let m=Oe?n.outerHTML:n.innerHTML;return Oe&&Te["!doctype"]&&n.ownerDocument&&n.ownerDocument.doctype&&n.ownerDocument.doctype.name&&A(q,n.ownerDocument.doctype.name)&&(m="<!DOCTYPE "+n.ownerDocument.doctype.name+">\n"+m),De&&u([ce,se,ue],(e=>{m=g(m,e," ")})),ee&&Ie?ee.createHTML(m):m},o.setConfig=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};ct(e),xe=!0},o.clearConfig=function(){it=null,xe=!1},o.isValidAttribute=function(e,t,n){it||ct({});const o=rt(e),r=rt(t);return bt(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(le[e]=le[e]||[],p(le[e],t))},o.removeHook=function(e){if(le[e])return 
m(le[e])},o.removeHooks=function(e){le[e]&&(le[e]=[])},o.removeAllHooks=function(){le={}},o}();return J}));
//# sourceMappingURL=purify.min.js.map
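This is the stock DOMPurify 3.0.11 UMD build, exposing a global `DOMPurify`. A minimal sketch of the usual pairing with the marked.min.js bundle above: render markdown, then sanitize before injecting into the DOM. The allow-lists, the input variable, and the target element here are illustrative assumptions, not taken from this repository:

// ALLOWED_TAGS / ALLOWED_ATTR are config keys handled by the bundle's
// option parser; anything outside these lists is stripped.
const dirty = marked.parse(untrustedMarkdown); // untrustedMarkdown: hypothetical input
const clean = DOMPurify.sanitize(dirty, {
  ALLOWED_TAGS: ["h1", "h2", "h3", "p", "a", "em", "strong", "code", "pre", "ul", "ol", "li"],
  ALLOWED_ATTR: ["href", "title"],
});
container.innerHTML = clean; // container: hypothetical element; safe after sanitizing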
custom_nodes/ComfyUI-KJNodes-main/kjweb_async/svg-path-properties.min.js
ADDED
@@ -0,0 +1,2 @@
// http://geoexamples.com/path-properties/ v1.2.0 Copyright 2023 Roger Veciana i Rovira
!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof globalThis?globalThis:t||self).svgPathProperties={})}(this,(function(t){"use strict";function n(t,n){for(var e=0;e<n.length;e++){var i=n[e];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,s(i.key),i)}}function e(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),Object.defineProperty(t,"prototype",{writable:!1}),t}function i(t,n,e){return(n=s(n))in t?Object.defineProperty(t,n,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[n]=e,t}function r(t){return function(t){if(Array.isArray(t))return h(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,n){if(!t)return;if("string"==typeof t)return h(t,n);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return h(t,n)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function h(t,n){(null==n||n>t.length)&&(n=t.length);for(var e=0,i=new Array(n);e<n;e++)i[e]=t[e];return i}function s(t){var n=function(t,n){if("object"!=typeof t||null===t)return t;var e=t[Symbol.toPrimitive];if(void 0!==e){var i=e.call(t,n||"default");if("object"!=typeof i)return i;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===n?String:Number)(t)}(t,"string");return"symbol"==typeof n?n:String(n)}var a={a:7,c:6,h:1,l:2,m:2,q:4,s:4,t:2,v:1,z:0},o=/([astvzqmhlc])([^astvzqmhlc]*)/gi,g=/-?[0-9]*\.?[0-9]+(?:e[-+]?\d+)?/gi,u=function(t){var n=t.match(g);return n?n.map(Number):[]},l=e((function(t,n,e,r){var h=this;i(this,"x0",void 0),i(this,"x1",void 0),i(this,"y0",void 0),i(this,"y1",void 0),i(this,"getTotalLength",(function(){return Math.sqrt(Math.pow(h.x0-h.x1,2)+Math.pow(h.y0-h.y1,2))})),i(this,"getPointAtLength",(function(t){var n=t/Math.sqrt(Math.pow(h.x0-h.x1,2)+Math.pow(h.y0-h.y1,2));n=Number.isNaN(n)?1:n;var e=(h.x1-h.x0)*n,i=(h.y1-h.y0)*n;return{x:h.x0+e,y:h.y0+i}})),i(this,"getTangentAtLength",(function(t){var n=Math.sqrt((h.x1-h.x0)*(h.x1-h.x0)+(h.y1-h.y0)*(h.y1-h.y0));return{x:(h.x1-h.x0)/n,y:(h.y1-h.y0)/n}})),i(this,"getPropertiesAtLength",(function(t){var n=h.getPointAtLength(t),e=h.getTangentAtLength(t);return{x:n.x,y:n.y,tangentX:e.x,tangentY:e.y}})),this.x0=t,this.x1=n,this.y0=e,this.y1=r})),c=e((function(t,n,e,r,h,s,a,o,g){var u=this;i(this,"x0",void 0),i(this,"y0",void 0),i(this,"rx",void 0),i(this,"ry",void 0),i(this,"xAxisRotate",void 0),i(this,"LargeArcFlag",void 0),i(this,"SweepFlag",void 0),i(this,"x1",void 0),i(this,"y1",void 0),i(this,"length",void 0),i(this,"getTotalLength",(function(){return u.length})),i(this,"getPointAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n=f({x:u.x0,y:u.y0},u.rx,u.ry,u.xAxisRotate,u.LargeArcFlag,u.SweepFlag,{x:u.x1,y:u.y1},t/u.length);return{x:n.x,y:n.y}})),i(this,"getTangentAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n,e=.05,i=u.getPointAtLength(t);t<0?t=0:t>u.length&&(t=u.length);var r=(n=t<u.length-e?u.getPointAtLength(t+e):u.getPointAtLength(t-e)).x-i.x,h=n.y-i.y,s=Math.sqrt(r*r+h*h);return t<u.length-e?{x:-r/s,y:-h/s}:{x:r/s,y:h/s}})),i(this,"getPropertiesAtLength",(function(t){var 
n=u.getTangentAtLength(t),e=u.getPointAtLength(t);return{x:e.x,y:e.y,tangentX:n.x,tangentY:n.y}})),this.x0=t,this.y0=n,this.rx=e,this.ry=r,this.xAxisRotate=h,this.LargeArcFlag=s,this.SweepFlag=a,this.x1=o,this.y1=g;var l=y(300,(function(i){return f({x:t,y:n},e,r,h,s,a,{x:o,y:g},i)}));this.length=l.arcLength})),f=function(t,n,e,i,r,h,s,a){n=Math.abs(n),e=Math.abs(e),i=p(i,360);var o=x(i);if(t.x===s.x&&t.y===s.y)return{x:t.x,y:t.y,ellipticalArcAngle:0};if(0===n||0===e)return{x:0,y:0,ellipticalArcAngle:0};var g=(t.x-s.x)/2,u=(t.y-s.y)/2,l={x:Math.cos(o)*g+Math.sin(o)*u,y:-Math.sin(o)*g+Math.cos(o)*u},c=Math.pow(l.x,2)/Math.pow(n,2)+Math.pow(l.y,2)/Math.pow(e,2);c>1&&(n=Math.sqrt(c)*n,e=Math.sqrt(c)*e);var f=(Math.pow(n,2)*Math.pow(e,2)-Math.pow(n,2)*Math.pow(l.y,2)-Math.pow(e,2)*Math.pow(l.x,2))/(Math.pow(n,2)*Math.pow(l.y,2)+Math.pow(e,2)*Math.pow(l.x,2));f=f<0?0:f;var y=(r!==h?1:-1)*Math.sqrt(f),v=y*(n*l.y/e),M=y*(-e*l.x/n),L={x:Math.cos(o)*v-Math.sin(o)*M+(t.x+s.x)/2,y:Math.sin(o)*v+Math.cos(o)*M+(t.y+s.y)/2},d={x:(l.x-v)/n,y:(l.y-M)/e},A=w({x:1,y:0},d),b=w(d,{x:(-l.x-v)/n,y:(-l.y-M)/e});!h&&b>0?b-=2*Math.PI:h&&b<0&&(b+=2*Math.PI);var P=A+(b%=2*Math.PI)*a,m=n*Math.cos(P),T=e*Math.sin(P);return{x:Math.cos(o)*m-Math.sin(o)*T+L.x,y:Math.sin(o)*m+Math.cos(o)*T+L.y,ellipticalArcStartAngle:A,ellipticalArcEndAngle:A+b,ellipticalArcAngle:P,ellipticalArcCenter:L,resultantRx:n,resultantRy:e}},y=function(t,n){t=t||500;for(var e,i=0,r=[],h=[],s=n(0),a=0;a<t;a++){var o=M(a*(1/t),0,1);e=n(o),i+=v(s,e),h.push([s,e]),r.push({t:o,arcLength:i}),s=e}return e=n(1),h.push([s,e]),i+=v(s,e),r.push({t:1,arcLength:i}),{arcLength:i,arcLengthMap:r,approximationLines:h}},p=function(t,n){return(t%n+n)%n},x=function(t){return t*(Math.PI/180)},v=function(t,n){return Math.sqrt(Math.pow(n.x-t.x,2)+Math.pow(n.y-t.y,2))},M=function(t,n,e){return Math.min(Math.max(t,n),e)},w=function(t,n){var 
e=t.x*n.x+t.y*n.y,i=Math.sqrt((Math.pow(t.x,2)+Math.pow(t.y,2))*(Math.pow(n.x,2)+Math.pow(n.y,2)));return(t.x*n.y-t.y*n.x<0?-1:1)*Math.acos(e/i)},L=[[],[],[-.5773502691896257,.5773502691896257],[0,-.7745966692414834,.7745966692414834],[-.33998104358485626,.33998104358485626,-.8611363115940526,.8611363115940526],[0,-.5384693101056831,.5384693101056831,-.906179845938664,.906179845938664],[.6612093864662645,-.6612093864662645,-.2386191860831969,.2386191860831969,-.932469514203152,.932469514203152],[0,.4058451513773972,-.4058451513773972,-.7415311855993945,.7415311855993945,-.9491079123427585,.9491079123427585],[-.1834346424956498,.1834346424956498,-.525532409916329,.525532409916329,-.7966664774136267,.7966664774136267,-.9602898564975363,.9602898564975363],[0,-.8360311073266358,.8360311073266358,-.9681602395076261,.9681602395076261,-.3242534234038089,.3242534234038089,-.6133714327005904,.6133714327005904],[-.14887433898163122,.14887433898163122,-.4333953941292472,.4333953941292472,-.6794095682990244,.6794095682990244,-.8650633666889845,.8650633666889845,-.9739065285171717,.9739065285171717],[0,-.26954315595234496,.26954315595234496,-.5190961292068118,.5190961292068118,-.7301520055740494,.7301520055740494,-.8870625997680953,.8870625997680953,-.978228658146057,.978228658146057],[-.1252334085114689,.1252334085114689,-.3678314989981802,.3678314989981802,-.5873179542866175,.5873179542866175,-.7699026741943047,.7699026741943047,-.9041172563704749,.9041172563704749,-.9815606342467192,.9815606342467192],[0,-.2304583159551348,.2304583159551348,-.44849275103644687,.44849275103644687,-.6423493394403402,.6423493394403402,-.8015780907333099,.8015780907333099,-.9175983992229779,.9175983992229779,-.9841830547185881,.9841830547185881],[-.10805494870734367,.10805494870734367,-.31911236892788974,.31911236892788974,-.5152486363581541,.5152486363581541,-.6872929048116855,.6872929048116855,-.827201315069765,.827201315069765,-.9284348836635735,.9284348836635735,-.9862838086968123,.9862838086968123],[0,-.20119409399743451,.20119409399743451,-.3941513470775634,.3941513470775634,-.5709721726085388,.5709721726085388,-.7244177313601701,.7244177313601701,-.8482065834104272,.8482065834104272,-.937273392400706,.937273392400706,-.9879925180204854,.9879925180204854],[-.09501250983763744,.09501250983763744,-.2816035507792589,.2816035507792589,-.45801677765722737,.45801677765722737,-.6178762444026438,.6178762444026438,-.755404408355003,.755404408355003,-.8656312023878318,.8656312023878318,-.9445750230732326,.9445750230732326,-.9894009349916499,.9894009349916499],[0,-.17848418149584785,.17848418149584785,-.3512317634538763,.3512317634538763,-.5126905370864769,.5126905370864769,-.6576711592166907,.6576711592166907,-.7815140038968014,.7815140038968014,-.8802391537269859,.8802391537269859,-.9506755217687678,.9506755217687678,-.9905754753144174,.9905754753144174],[-.0847750130417353,.0847750130417353,-.2518862256915055,.2518862256915055,-.41175116146284263,.41175116146284263,-.5597708310739475,.5597708310739475,-.6916870430603532,.6916870430603532,-.8037049589725231,.8037049589725231,-.8926024664975557,.8926024664975557,-.9558239495713977,.9558239495713977,-.9915651684209309,.9915651684209309],[0,-.16035864564022537,.16035864564022537,-.31656409996362983,.31656409996362983,-.46457074137596094,.46457074137596094,-.600545304661681,.600545304661681,-.7209661773352294,.7209661773352294,-.8227146565371428,.8227146565371428,-.9031559036148179,.9031559036148179,-.96020815213483,.96020815213483,-.9924068438435844,.9924068438435844],[-.07652
652113349734,.07652652113349734,-.22778585114164507,.22778585114164507,-.37370608871541955,.37370608871541955,-.5108670019508271,.5108670019508271,-.636053680726515,.636053680726515,-.7463319064601508,.7463319064601508,-.8391169718222188,.8391169718222188,-.912234428251326,.912234428251326,-.9639719272779138,.9639719272779138,-.9931285991850949,.9931285991850949],[0,-.1455618541608951,.1455618541608951,-.2880213168024011,.2880213168024011,-.4243421202074388,.4243421202074388,-.5516188358872198,.5516188358872198,-.6671388041974123,.6671388041974123,-.7684399634756779,.7684399634756779,-.8533633645833173,.8533633645833173,-.9200993341504008,.9200993341504008,-.9672268385663063,.9672268385663063,-.9937521706203895,.9937521706203895],[-.06973927331972223,.06973927331972223,-.20786042668822127,.20786042668822127,-.34193582089208424,.34193582089208424,-.469355837986757,.469355837986757,-.5876404035069116,.5876404035069116,-.6944872631866827,.6944872631866827,-.7878168059792081,.7878168059792081,-.8658125777203002,.8658125777203002,-.926956772187174,.926956772187174,-.9700604978354287,.9700604978354287,-.9942945854823992,.9942945854823992],[0,-.1332568242984661,.1332568242984661,-.26413568097034495,.26413568097034495,-.3903010380302908,.3903010380302908,-.5095014778460075,.5095014778460075,-.6196098757636461,.6196098757636461,-.7186613631319502,.7186613631319502,-.8048884016188399,.8048884016188399,-.8767523582704416,.8767523582704416,-.9329710868260161,.9329710868260161,-.9725424712181152,.9725424712181152,-.9947693349975522,.9947693349975522],[-.06405689286260563,.06405689286260563,-.1911188674736163,.1911188674736163,-.3150426796961634,.3150426796961634,-.4337935076260451,.4337935076260451,-.5454214713888396,.5454214713888396,-.6480936519369755,.6480936519369755,-.7401241915785544,.7401241915785544,-.820001985973903,.820001985973903,-.8864155270044011,.8864155270044011,-.9382745520027328,.9382745520027328,-.9747285559713095,.9747285559713095,-.9951872199970213,.9951872199970213]],d=[[],[],[1,1],[.8888888888888888,.5555555555555556,.5555555555555556],[.6521451548625461,.6521451548625461,.34785484513745385,.34785484513745385],[.5688888888888889,.47862867049936647,.47862867049936647,.23692688505618908,.23692688505618908],[.3607615730481386,.3607615730481386,.46791393457269104,.46791393457269104,.17132449237917036,.17132449237917036],[.4179591836734694,.3818300505051189,.3818300505051189,.27970539148927664,.27970539148927664,.1294849661688697,.1294849661688697],[.362683783378362,.362683783378362,.31370664587788727,.31370664587788727,.22238103445337448,.22238103445337448,.10122853629037626,.10122853629037626],[.3302393550012598,.1806481606948574,.1806481606948574,.08127438836157441,.08127438836157441,.31234707704000286,.31234707704000286,.26061069640293544,.26061069640293544],[.29552422471475287,.29552422471475287,.26926671930999635,.26926671930999635,.21908636251598204,.21908636251598204,.1494513491505806,.1494513491505806,.06667134430868814,.06667134430868814],[.2729250867779006,.26280454451024665,.26280454451024665,.23319376459199048,.23319376459199048,.18629021092773426,.18629021092773426,.1255803694649046,.1255803694649046,.05566856711617366,.05566856711617366],[.24914704581340277,.24914704581340277,.2334925365383548,.2334925365383548,.20316742672306592,.20316742672306592,.16007832854334622,.16007832854334622,.10693932599531843,.10693932599531843,.04717533638651183,.04717533638651183],[.2325515532308739,.22628318026289723,.22628318026289723,.2078160475368885,.2078160475368885,.17814598076194574
,.17814598076194574,.13887351021978725,.13887351021978725,.09212149983772845,.09212149983772845,.04048400476531588,.04048400476531588],[.2152638534631578,.2152638534631578,.2051984637212956,.2051984637212956,.18553839747793782,.18553839747793782,.15720316715819355,.15720316715819355,.12151857068790319,.12151857068790319,.08015808715976021,.08015808715976021,.03511946033175186,.03511946033175186],[.2025782419255613,.19843148532711158,.19843148532711158,.1861610000155622,.1861610000155622,.16626920581699392,.16626920581699392,.13957067792615432,.13957067792615432,.10715922046717194,.10715922046717194,.07036604748810812,.07036604748810812,.03075324199611727,.03075324199611727],[.1894506104550685,.1894506104550685,.18260341504492358,.18260341504492358,.16915651939500254,.16915651939500254,.14959598881657674,.14959598881657674,.12462897125553388,.12462897125553388,.09515851168249279,.09515851168249279,.062253523938647894,.062253523938647894,.027152459411754096,.027152459411754096],[.17944647035620653,.17656270536699264,.17656270536699264,.16800410215645004,.16800410215645004,.15404576107681028,.15404576107681028,.13513636846852548,.13513636846852548,.11188384719340397,.11188384719340397,.08503614831717918,.08503614831717918,.0554595293739872,.0554595293739872,.02414830286854793,.02414830286854793],[.1691423829631436,.1691423829631436,.16427648374583273,.16427648374583273,.15468467512626524,.15468467512626524,.14064291467065065,.14064291467065065,.12255520671147846,.12255520671147846,.10094204410628717,.10094204410628717,.07642573025488905,.07642573025488905,.0497145488949698,.0497145488949698,.02161601352648331,.02161601352648331],[.1610544498487837,.15896884339395434,.15896884339395434,.15276604206585967,.15276604206585967,.1426067021736066,.1426067021736066,.12875396253933621,.12875396253933621,.11156664554733399,.11156664554733399,.09149002162245,.09149002162245,.06904454273764123,.06904454273764123,.0448142267656996,.0448142267656996,.019461788229726478,.019461788229726478],[.15275338713072584,.15275338713072584,.14917298647260374,.14917298647260374,.14209610931838204,.14209610931838204,.13168863844917664,.13168863844917664,.11819453196151841,.11819453196151841,.10193011981724044,.10193011981724044,.08327674157670475,.08327674157670475,.06267204833410907,.06267204833410907,.04060142980038694,.04060142980038694,.017614007139152118,.017614007139152118],[.14608113364969041,.14452440398997005,.14452440398997005,.13988739479107315,.13988739479107315,.13226893863333747,.13226893863333747,.12183141605372853,.12183141605372853,.10879729916714838,.10879729916714838,.09344442345603386,.09344442345603386,.0761001136283793,.0761001136283793,.057134425426857205,.057134425426857205,.036953789770852494,.036953789770852494,.016017228257774335,.016017228257774335],[.13925187285563198,.13925187285563198,.13654149834601517,.13654149834601517,.13117350478706238,.13117350478706238,.12325237681051242,.12325237681051242,.11293229608053922,.11293229608053922,.10041414444288096,.10041414444288096,.08594160621706773,.08594160621706773,.06979646842452049,.06979646842452049,.052293335152683286,.052293335152683286,.03377490158481415,.03377490158481415,.0146279952982722,.0146279952982722],[.13365457218610619,.1324620394046966,.1324620394046966,.12890572218808216,.12890572218808216,.12304908430672953,.12304908430672953,.11499664022241136,.11499664022241136,.10489209146454141,.10489209146454141,.09291576606003515,.09291576606003515,.07928141177671895,.07928141177671895,.06423242140852585,.06423242140852585,.048037671731084
67,.04803767173108467,.030988005856979445,.030988005856979445,.013411859487141771,.013411859487141771],[.12793819534675216,.12793819534675216,.1258374563468283,.1258374563468283,.12167047292780339,.12167047292780339,.1155056680537256,.1155056680537256,.10744427011596563,.10744427011596563,.09761865210411388,.09761865210411388,.08619016153195327,.08619016153195327,.0733464814110803,.0733464814110803,.05929858491543678,.05929858491543678,.04427743881741981,.04427743881741981,.028531388628933663,.028531388628933663,.0123412297999872,.0123412297999872]],A=[[1],[1,1],[1,2,1],[1,3,3,1]],b=function(t,n,e){return{x:(1-e)*(1-e)*(1-e)*t[0]+3*(1-e)*(1-e)*e*t[1]+3*(1-e)*e*e*t[2]+e*e*e*t[3],y:(1-e)*(1-e)*(1-e)*n[0]+3*(1-e)*(1-e)*e*n[1]+3*(1-e)*e*e*n[2]+e*e*e*n[3]}},P=function(t,n,e){return T([3*(t[1]-t[0]),3*(t[2]-t[1]),3*(t[3]-t[2])],[3*(n[1]-n[0]),3*(n[2]-n[1]),3*(n[3]-n[2])],e)},m=function(t,n,e){var i,r,h;i=e/2,r=0;for(var s=0;s<20;s++)h=i*L[20][s]+i,r+=d[20][s]*S(t,n,h);return i*r},T=function(t,n,e){return{x:(1-e)*(1-e)*t[0]+2*(1-e)*e*t[1]+e*e*t[2],y:(1-e)*(1-e)*n[0]+2*(1-e)*e*n[1]+e*e*n[2]}},q=function(t,n,e){void 0===e&&(e=1);var i=t[0]-2*t[1]+t[2],r=n[0]-2*n[1]+n[2],h=2*t[1]-2*t[0],s=2*n[1]-2*n[0],a=4*(i*i+r*r),o=4*(i*h+r*s),g=h*h+s*s;if(0===a)return e*Math.sqrt(Math.pow(t[2]-t[0],2)+Math.pow(n[2]-n[0],2));var u=o/(2*a),l=e+u,c=g/a-u*u,f=l*l+c>0?Math.sqrt(l*l+c):0,y=u*u+c>0?Math.sqrt(u*u+c):0,p=u+Math.sqrt(u*u+c)!==0&&(l+f)/(u+y)!=0?c*Math.log(Math.abs((l+f)/(u+y))):0;return Math.sqrt(a)/2*(l*f-u*y+p)},_=function(t,n,e){return{x:2*(1-e)*(t[1]-t[0])+2*e*(t[2]-t[1]),y:2*(1-e)*(n[1]-n[0])+2*e*(n[2]-n[1])}};function S(t,n,e){var i=N(1,e,t),r=N(1,e,n),h=i*i+r*r;return Math.sqrt(h)}var N=function t(n,e,i){var r,h,s=i.length-1;if(0===s)return 0;if(0===n){h=0;for(var a=0;a<=s;a++)h+=A[s][a]*Math.pow(1-e,s-a)*Math.pow(e,a)*i[a];return h}r=new Array(s);for(var o=0;o<s;o++)r[o]=s*(i[o+1]-i[o]);return t(n-1,e,r)},C=function(t,n,e){for(var i=1,r=t/n,h=(t-e(r))/n,s=0;i>.001;){var a=e(r+h),o=Math.abs(t-a)/n;if(o<i)i=o,r+=h;else{var g=e(r-h),u=Math.abs(t-g)/n;u<i?(i=u,r-=h):h/=2}if(++s>500)break}return r},j=e((function(t,n,e,r,h,s,a,o){var g=this;i(this,"a",void 0),i(this,"b",void 0),i(this,"c",void 0),i(this,"d",void 0),i(this,"length",void 0),i(this,"getArcLength",void 0),i(this,"getPoint",void 0),i(this,"getDerivative",void 0),i(this,"getTotalLength",(function(){return g.length})),i(this,"getPointAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)}));return g.getPoint(n,e,i)})),i(this,"getTangentAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)})),r=g.getDerivative(n,e,i),h=Math.sqrt(r.x*r.x+r.y*r.y);return h>0?{x:r.x/h,y:r.y/h}:{x:0,y:0}})),i(this,"getPropertiesAtLength",(function(t){var n,e=[g.a.x,g.b.x,g.c.x,g.d.x],i=[g.a.y,g.b.y,g.c.y,g.d.y],r=C(t,g.length,(function(t){return g.getArcLength(e,i,t)})),h=g.getDerivative(e,i,r),s=Math.sqrt(h.x*h.x+h.y*h.y);n=s>0?{x:h.x/s,y:h.y/s}:{x:0,y:0};var a=g.getPoint(e,i,r);return{x:a.x,y:a.y,tangentX:n.x,tangentY:n.y}})),i(this,"getC",(function(){return g.c})),i(this,"getD",(function(){return g.d})),this.a={x:t,y:n},this.b={x:e,y:r},this.c={x:h,y:s},void 0!==a&&void 
0!==o?(this.getArcLength=m,this.getPoint=b,this.getDerivative=P,this.d={x:a,y:o}):(this.getArcLength=q,this.getPoint=T,this.getDerivative=_,this.d={x:0,y:0}),this.length=this.getArcLength([this.a.x,this.b.x,this.c.x,this.d.x],[this.a.y,this.b.y,this.c.y,this.d.y],1)})),O=e((function(t){var n=this;i(this,"length",0),i(this,"partial_lengths",[]),i(this,"functions",[]),i(this,"initial_point",null),i(this,"getPartAtLength",(function(t){t<0?t=0:t>n.length&&(t=n.length);for(var e=n.partial_lengths.length-1;n.partial_lengths[e]>=t&&e>0;)e--;return e++,{fraction:t-n.partial_lengths[e-1],i:e}})),i(this,"getTotalLength",(function(){return n.length})),i(this,"getPointAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPointAtLength(e.fraction);if(n.initial_point)return n.initial_point;throw new Error("Wrong function at this part.")})),i(this,"getTangentAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getTangentAtLength(e.fraction);if(n.initial_point)return{x:0,y:0};throw new Error("Wrong function at this part.")})),i(this,"getPropertiesAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPropertiesAtLength(e.fraction);if(n.initial_point)return{x:n.initial_point.x,y:n.initial_point.y,tangentX:0,tangentY:0};throw new Error("Wrong function at this part.")})),i(this,"getParts",(function(){for(var t=[],e=0;e<n.functions.length;e++)if(null!==n.functions[e]){n.functions[e]=n.functions[e];var i={start:n.functions[e].getPointAtLength(0),end:n.functions[e].getPointAtLength(n.partial_lengths[e]-n.partial_lengths[e-1]),length:n.partial_lengths[e]-n.partial_lengths[e-1],getPointAtLength:n.functions[e].getPointAtLength,getTangentAtLength:n.functions[e].getTangentAtLength,getPropertiesAtLength:n.functions[e].getPropertiesAtLength};t.push(i)}return t}));for(var e,h=Array.isArray(t)?t:function(t){var n=(t&&t.length>0?t:"M0,0").match(o);if(!n)throw new Error("No path elements found in string ".concat(t));return n.reduce((function(t,n){var e=n.charAt(0),i=e.toLowerCase(),h=u(n.substring(1));if("m"===i&&h.length>2&&(t.push([e].concat(r(h.splice(0,2)))),i="l",e="m"===e?"l":"L"),"a"===i.toLowerCase()&&(5===h.length||6===h.length)){var s=n.substring(1).trim().split(" ");h=[Number(s[0]),Number(s[1]),Number(s[2]),Number(s[3].charAt(0)),Number(s[3].charAt(1)),Number(s[3].substring(2)),Number(s[4])]}for(;h.length>=0;){if(h.length===a[i]){t.push([e].concat(r(h.splice(0,a[i]))));break}if(h.length<a[i])throw new Error('Malformed path data: "'.concat(e,'" must have ').concat(a[i]," elements and has ").concat(h.length,": ").concat(n));t.push([e].concat(r(h.splice(0,a[i]))))}return t}),[])}(t),s=[0,0],g=[0,0],f=[0,0],y=0;y<h.length;y++){if("M"===h[y][0])f=[(s=[h[y][1],h[y][2]])[0],s[1]],this.functions.push(null),0===y&&(this.initial_point={x:h[y][1],y:h[y][2]});else if("m"===h[y][0])f=[(s=[h[y][1]+s[0],h[y][2]+s[1]])[0],s[1]],this.functions.push(null);else if("L"===h[y][0])this.length+=Math.sqrt(Math.pow(s[0]-h[y][1],2)+Math.pow(s[1]-h[y][2],2)),this.functions.push(new l(s[0],h[y][1],s[1],h[y][2])),s=[h[y][1],h[y][2]];else if("l"===h[y][0])this.length+=Math.sqrt(Math.pow(h[y][1],2)+Math.pow(h[y][2],2)),this.functions.push(new l(s[0],h[y][1]+s[0],s[1],h[y][2]+s[1])),s=[h[y][1]+s[0],h[y][2]+s[1]];else if("H"===h[y][0])this.length+=Math.abs(s[0]-h[y][1]),this.functions.push(new l(s[0],h[y][1],s[1],s[1])),s[0]=h[y][1];else if("h"===h[y][0])this.length+=Math.abs(h[y][1]),this.functions.push(new 
l(s[0],s[0]+h[y][1],s[1],s[1])),s[0]=h[y][1]+s[0];else if("V"===h[y][0])this.length+=Math.abs(s[1]-h[y][1]),this.functions.push(new l(s[0],s[0],s[1],h[y][1])),s[1]=h[y][1];else if("v"===h[y][0])this.length+=Math.abs(h[y][1]),this.functions.push(new l(s[0],s[0],s[1],s[1]+h[y][1])),s[1]=h[y][1]+s[1];else if("z"===h[y][0]||"Z"===h[y][0])this.length+=Math.sqrt(Math.pow(f[0]-s[0],2)+Math.pow(f[1]-s[1],2)),this.functions.push(new l(s[0],f[0],s[1],f[1])),s=[f[0],f[1]];else if("C"===h[y][0])e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],h[y][5],h[y][6]),this.length+=e.getTotalLength(),s=[h[y][5],h[y][6]],this.functions.push(e);else if("c"===h[y][0])(e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],s[0]+h[y][5],s[1]+h[y][6])).getTotalLength()>0?(this.length+=e.getTotalLength(),this.functions.push(e),s=[h[y][5]+s[0],h[y][6]+s[1]]):this.functions.push(new l(s[0],s[0],s[1],s[1]));else if("S"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var p=e.getC();e=new j(s[0],s[1],2*s[0]-p.x,2*s[1]-p.y,h[y][1],h[y][2],h[y][3],h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3],h[y][4]],this.functions.push(e))}else if("s"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var x=e.getC(),v=e.getD();e=new j(s[0],s[1],s[0]+v.x-x.x,s[1]+v.y-x.y,s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3]+s[0],h[y][4]+s[1]],this.functions.push(e))}else if("Q"===h[y][0]){if(s[0]==h[y][1]&&s[1]==h[y][2]){var M=new l(h[y][1],h[y][3],h[y][2],h[y][4]);this.length+=M.getTotalLength(),this.functions.push(M)}else e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);s=[h[y][3],h[y][4]],g=[h[y][1],h[y][2]]}else if("q"===h[y][0]){if(0!=h[y][1]||0!=h[y][2])e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var w=new l(s[0]+h[y][1],s[0]+h[y][3],s[1]+h[y][2],s[1]+h[y][4]);this.length+=w.getTotalLength(),this.functions.push(w)}g=[s[0]+h[y][1],s[1]+h[y][2]],s=[h[y][3]+s[0],h[y][4]+s[1]]}else if("T"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],h[y][1],h[y][2],void 0,void 0),this.functions.push(e),this.length+=e.getTotalLength();else{var L=new l(s[0],h[y][1],s[1],h[y][2]);this.functions.push(L),this.length+=L.getTotalLength()}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1],h[y][2]]}else if("t"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],s[0]+h[y][1],s[1]+h[y][2],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var d=new l(s[0],s[0]+h[y][1],s[1],s[1]+h[y][2]);this.length+=d.getTotalLength(),this.functions.push(d)}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1]+s[0],h[y][2]+s[1]]}else if("A"===h[y][0]){var A=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],h[y][6],h[y][7]);this.length+=A.getTotalLength(),s=[h[y][6],h[y][7]],this.functions.push(A)}else if("a"===h[y][0]){var b=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],s[0]+h[y][6],s[1]+h[y][7]);this.length+=b.getTotalLength(),s=[s[0]+h[y][6],s[1]+h[y][7]],this.functions.push(b)}this.partial_lengths.push(this.length)}})),E=e((function(t){var n=this;if(i(this,"inst",void 0),i(this,"getTotalLength",(function(){return 
n.inst.getTotalLength()})),i(this,"getPointAtLength",(function(t){return n.inst.getPointAtLength(t)})),i(this,"getTangentAtLength",(function(t){return n.inst.getTangentAtLength(t)})),i(this,"getPropertiesAtLength",(function(t){return n.inst.getPropertiesAtLength(t)})),i(this,"getParts",(function(){return n.inst.getParts()})),this.inst=new O(t),!(this instanceof E))return new E(t)}));t.svgPathProperties=E}));
custom_nodes/ComfyUI-KJNodes-main/nodes/audioscheduler_nodes.py
ADDED
@@ -0,0 +1,251 @@
# to be used with https://github.com/a1lazydog/ComfyUI-AudioScheduler
import torch
from torchvision.transforms import functional as TF
from PIL import Image, ImageDraw
import numpy as np
from ..utility.utility import pil2tensor
from nodes import MAX_RESOLUTION

class NormalizedAmplitudeToMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "normalized_amp": ("NORMALIZED_AMPLITUDE",),
                    "width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                    "height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                    "frame_offset": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
                    "location_x": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                    "location_y": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                    "size": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
                    "shape": (
                        [
                            'none',
                            'circle',
                            'square',
                            'triangle',
                        ],
                        {
                            "default": 'none'
                        }),
                    "color": (
                        [
                            'white',
                            'amplitude',
                        ],
                        {
                            "default": 'amplitude'
                        }),
                },}

    CATEGORY = "KJNodes/audio"
    RETURN_TYPES = ("MASK",)
    FUNCTION = "convert"
    DESCRIPTION = """
Works as a bridge to the AudioScheduler nodes:
https://github.com/a1lazydog/ComfyUI-AudioScheduler
Creates masks based on the normalized amplitude.
"""

    def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color):
        # Ensure normalized_amp is an array and within the range [0, 1]
        normalized_amp = np.clip(normalized_amp, 0.0, 1.0)

        # Offset the amplitude values by rolling the array
        normalized_amp = np.roll(normalized_amp, frame_offset)

        # Initialize an empty list to hold the image tensors
        out = []
        # Iterate over each amplitude value to create an image
        for amp in normalized_amp:
            # Scale the amplitude value to cover the full range of grayscale values
            if color == 'amplitude':
                grayscale_value = int(amp * 255)
            elif color == 'white':
                grayscale_value = 255
            # Convert the grayscale value to an RGB format
            gray_color = (grayscale_value, grayscale_value, grayscale_value)
            finalsize = size * amp

            if shape == 'none':
                shapeimage = Image.new("RGB", (width, height), gray_color)
            else:
                shapeimage = Image.new("RGB", (width, height), "black")

            draw = ImageDraw.Draw(shapeimage)
            if shape == 'circle' or shape == 'square':
                # Define the bounding box for the shape
                left_up_point = (location_x - finalsize, location_y - finalsize)
                right_down_point = (location_x + finalsize, location_y + finalsize)
                two_points = [left_up_point, right_down_point]

                if shape == 'circle':
                    draw.ellipse(two_points, fill=gray_color)
                elif shape == 'square':
                    draw.rectangle(two_points, fill=gray_color)

            elif shape == 'triangle':
                # Define the points for the triangle
                left_up_point = (location_x - finalsize, location_y + finalsize)  # bottom left
                right_down_point = (location_x + finalsize, location_y + finalsize)  # bottom right
                top_point = (location_x, location_y)  # top point
                draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color)

            shapeimage = pil2tensor(shapeimage)
            mask = shapeimage[:, :, :, 0]
            out.append(mask)

        return (torch.cat(out, dim=0),)

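# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal, self-contained version of what NormalizedAmplitudeToMask does per
# frame: each amplitude in [0, 1] drives the size and brightness of a drawn
# shape, and one channel of the rasterized frame becomes the mask.
def _demo_amplitude_to_circle_masks():
    amps = np.clip(np.array([0.1, 0.5, 1.0]), 0.0, 1.0)
    masks = []
    for amp in amps:
        img = Image.new("RGB", (64, 64), "black")
        draw = ImageDraw.Draw(img)
        r = 24 * amp                  # radius scales with the amplitude
        v = int(amp * 255)            # brightness scales with the amplitude
        draw.ellipse([(32 - r, 32 - r), (32 + r, 32 + r)], fill=(v, v, v))
        frame = torch.from_numpy(np.array(img).astype(np.float32) / 255.0)
        masks.append(frame[:, :, 0])  # one channel becomes the mask frame
    return torch.stack(masks, dim=0)  # (3, 64, 64) mask batch
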
class NormalizedAmplitudeToFloatList:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "normalized_amp": ("NORMALIZED_AMPLITUDE",),
                },}

    CATEGORY = "KJNodes/audio"
    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "convert"
    DESCRIPTION = """
Works as a bridge to the AudioScheduler nodes:
https://github.com/a1lazydog/ComfyUI-AudioScheduler
Creates a list of floats from the normalized amplitude.
"""

    def convert(self, normalized_amp):
        # Ensure normalized_amp is an array and within the range [0, 1]
        normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
        return (normalized_amp.tolist(),)

class OffsetMaskByNormalizedAmplitude:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "normalized_amp": ("NORMALIZED_AMPLITUDE",),
                "mask": ("MASK",),
                "x": ("INT", {"default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                "y": ("INT", {"default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                "rotate": ("BOOLEAN", {"default": False}),
                "angle_multiplier": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number"}),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "offset"
    CATEGORY = "KJNodes/audio"
    DESCRIPTION = """
Works as a bridge to the AudioScheduler nodes:
https://github.com/a1lazydog/ComfyUI-AudioScheduler
Offsets masks based on the normalized amplitude.
"""

    def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp):
        offsetmask = mask.clone()
        # Ensure normalized_amp is an array and within the range [0, 1]
        normalized_amp = np.clip(normalized_amp, 0.0, 1.0)

        batch_size, height, width = mask.shape

        if rotate:
            for i in range(batch_size):
                rotation_angle = int(normalized_amp[i] * (360 * angle_multiplier))
                offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0)
        if x != 0 or y != 0:
            for i in range(batch_size):
                offset_amp = normalized_amp[i] * 10
                shift_x = min(x * offset_amp, width - 1)
                shift_y = min(y * offset_amp, height - 1)
                if shift_x != 0:
                    offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1)
                if shift_y != 0:
                    offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0)

        return (offsetmask,)

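# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The offset above is a wrap-around shift: torch.roll moves mask content past
# one edge back in on the other, with the shift proportional to the per-frame
# amplitude (times 10, matching the node's convention).
def _demo_amplitude_roll():
    amps = np.clip(np.array([0.0, 0.5, 1.0]), 0.0, 1.0)
    masks = torch.zeros(3, 32, 32)
    masks[:, 12:20, 12:20] = 1.0      # the same square blob in every frame
    out = masks.clone()
    for i in range(masks.shape[0]):
        shift = int(amps[i] * 10)     # 1 px of x-shift per 0.1 amplitude
        out[i] = torch.roll(out[i], shifts=shift, dims=1)
    return out
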
class ImageTransformByNormalizedAmplitude:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "normalized_amp": ("NORMALIZED_AMPLITUDE",),
                    "zoom_scale": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number"}),
                    "x_offset": ("INT", {"default": 0, "min": (1 - MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                    "y_offset": ("INT", {"default": 0, "min": (1 - MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                    "cumulative": ("BOOLEAN", {"default": False}),
                    "image": ("IMAGE",),
                }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "amptransform"
    CATEGORY = "KJNodes/audio"
    DESCRIPTION = """
Works as a bridge to the AudioScheduler nodes:
https://github.com/a1lazydog/ComfyUI-AudioScheduler
Transforms the image based on the normalized amplitude.
"""

    def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset):
        # Ensure normalized_amp is an array and within the range [0, 1]
        normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
        transformed_images = []

        # Initialize the cumulative zoom factor
        prev_amp = 0.0

        for i in range(image.shape[0]):
            img = image[i]  # Get the i-th image in the batch
            amp = normalized_amp[i]  # Get the corresponding amplitude value

            # Incrementally increase the cumulative zoom factor
            if cumulative:
                prev_amp += amp
                amp += prev_amp

            # Convert the image tensor from HxWxC to CxHxW format expected by torchvision
            img = img.permute(2, 0, 1)

            # Convert the PyTorch tensor to a PIL image for processing
            pil_img = TF.to_pil_image(img)

            # Calculate the crop size based on the amplitude
            width, height = pil_img.size
            crop_size = int(min(width, height) * (1 - amp * zoom_scale))
            crop_size = max(crop_size, 1)

            # Calculate the crop box coordinates (centered crop)
            left = (width - crop_size) // 2
            top = (height - crop_size) // 2
            right = (width + crop_size) // 2
            bottom = (height + crop_size) // 2

            # Crop and resize back to the original size
            cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size)
            resized_img = TF.resize(cropped_img, (height, width))

            # Convert back to a tensor in CxHxW format
            tensor_img = TF.to_tensor(resized_img)

            # Convert the tensor back to HxWxC format
            tensor_img = tensor_img.permute(1, 2, 0)

            # Offset the image based on the amplitude
            offset_amp = amp * 10  # offset magnitude scales with the amplitude
            # img is CxHxW at this point: shape[2] is the width, shape[1] the height
            shift_x = min(x_offset * offset_amp, img.shape[2] - 1)  # shift in the x direction
            shift_y = min(y_offset * offset_amp, img.shape[1] - 1)  # shift in the y direction

            # Apply the offset to the image tensor
            if shift_x != 0:
                tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1)
            if shift_y != 0:
                tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0)

            # Add to the list
            transformed_images.append(tensor_img)

        # Stack all transformed images into a batch
        transformed_batch = torch.stack(transformed_images)

        return (transformed_batch,)
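# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The zoom step above in one function: a larger amplitude shrinks the centered
# crop box, and resizing the crop back to the original size zooms in. Assumes
# frame_hwc is a float HxWxC tensor with values in [0, 1].
def _demo_amplitude_zoom(frame_hwc, amp, zoom_scale=0.5):
    img = TF.to_pil_image(frame_hwc.permute(2, 0, 1))       # HWC -> CHW -> PIL
    w, h = img.size
    crop = max(int(min(w, h) * (1 - amp * zoom_scale)), 1)  # smaller box = more zoom
    left, top = (w - crop) // 2, (h - crop) // 2
    zoomed = TF.resize(TF.crop(img, top, left, crop, crop), (h, w))
    return TF.to_tensor(zoomed).permute(1, 2, 0)            # back to HWC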
custom_nodes/ComfyUI-KJNodes-main/nodes/batchcrop_nodes.py
ADDED
@@ -0,0 +1,757 @@
from ..utility.utility import tensor2pil, pil2tensor
from PIL import Image, ImageDraw, ImageFilter
import numpy as np
import torch
from torchvision.transforms import Resize, CenterCrop, InterpolationMode
import math

# based on nodes from mtb https://github.com/melMass/comfy_mtb

def bbox_to_region(bbox, target_size=None):
    bbox = bbox_check(bbox, target_size)
    return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3])

def bbox_check(bbox, target_size=None):
    if not target_size:
        return bbox

    new_bbox = (
        bbox[0],
        bbox[1],
        min(target_size[0] - bbox[0], bbox[2]),
        min(target_size[1] - bbox[1], bbox[3]),
    )
    return new_bbox

class BatchCropFromMask:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "original_images": ("IMAGE",),
                "masks": ("MASK",),
                "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}),
                "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = (
        "IMAGE",
        "IMAGE",
        "BBOX",
        "INT",
        "INT",
    )
    RETURN_NAMES = (
        "original_images",
        "cropped_images",
        "bboxes",
        "width",
        "height",
    )
    FUNCTION = "crop"
    CATEGORY = "KJNodes/masking"

    def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha):
        if alpha == 0:
            return prev_bbox_size
        return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size)

    def smooth_center(self, prev_center, curr_center, alpha=0.5):
        if alpha == 0:
            return prev_center
        return (
            round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]),
            round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])
        )

    def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha):

        bounding_boxes = []
        cropped_images = []

        self.max_bbox_width = 0
        self.max_bbox_height = 0

        # First, calculate the maximum bounding box size across all masks
        curr_max_bbox_width = 0
        curr_max_bbox_height = 0
        for mask in masks:
            _mask = tensor2pil(mask)[0]
            non_zero_indices = np.nonzero(np.array(_mask))
            min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
            min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
            width = max_x - min_x
            height = max_y - min_y
            curr_max_bbox_width = max(curr_max_bbox_width, width)
            curr_max_bbox_height = max(curr_max_bbox_height, height)

        # Smooth the changes in the bounding box size
        self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha)
        self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha)

        # Apply the crop size multiplier
        self.max_bbox_width = round(self.max_bbox_width * crop_size_mult)
        self.max_bbox_height = round(self.max_bbox_height * crop_size_mult)
        bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height

        # Then, for each mask and corresponding image...
        for i, (mask, img) in enumerate(zip(masks, original_images)):
            _mask = tensor2pil(mask)[0]
            non_zero_indices = np.nonzero(np.array(_mask))
            min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
            min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])

            # Calculate center of bounding box
            center_x = np.mean(non_zero_indices[1])
            center_y = np.mean(non_zero_indices[0])
            curr_center = (round(center_x), round(center_y))

            # If this is the first frame, initialize prev_center with curr_center
            if not hasattr(self, 'prev_center'):
                self.prev_center = curr_center

            # Smooth the changes in the center coordinates from the second frame onwards
            if i > 0:
                center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha)
            else:
                center = curr_center

            # Update prev_center for the next frame
            self.prev_center = center

            # Create bounding box using max_bbox_width and max_bbox_height
            half_box_width = round(self.max_bbox_width / 2)
            half_box_height = round(self.max_bbox_height / 2)
            min_x = max(0, center[0] - half_box_width)
            max_x = min(img.shape[1], center[0] + half_box_width)
            min_y = max(0, center[1] - half_box_height)
            max_y = min(img.shape[0], center[1] + half_box_height)

            # Append bounding box coordinates
            bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y))

            # Crop the image from the bounding box
            cropped_img = img[min_y:max_y, min_x:max_x, :]

            # Calculate the new dimensions while maintaining the aspect ratio
            new_height = min(cropped_img.shape[0], self.max_bbox_height)
            new_width = round(new_height * bbox_aspect_ratio)

            # Resize the image
            resize_transform = Resize((new_height, new_width))
            resized_img = resize_transform(cropped_img.permute(2, 0, 1))

            # Perform the center crop to the desired size
            crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width))  # swap the order here if necessary
            cropped_resized_img = crop_transform(resized_img)

            cropped_images.append(cropped_resized_img.permute(1, 2, 0))

        cropped_out = torch.stack(cropped_images, dim=0)

        return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, )

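# --- Illustrative sketch (editor's addition, not part of the original file) ---
# bbox_smooth_alpha above is a plain exponential moving average,
# new = alpha * current + (1 - alpha) * previous, which damps frame-to-frame
# jitter in the tracked bounding box. The same rule on a bare list of sizes:
def _demo_bbox_ema(sizes, alpha=0.5):
    smoothed, prev = [], sizes[0]
    for s in sizes:
        prev = round(alpha * s + (1 - alpha) * prev)
        smoothed.append(prev)
    return smoothed

# _demo_bbox_ema([100, 140, 90, 130]) -> [100, 120, 105, 118]
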
class BatchUncrop:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "original_images": ("IMAGE",),
                "cropped_images": ("IMAGE",),
                "bboxes": ("BBOX",),
                "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}),
                "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "border_top": ("BOOLEAN", {"default": True}),
                "border_bottom": ("BOOLEAN", {"default": True}),
                "border_left": ("BOOLEAN", {"default": True}),
                "border_right": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "uncrop"

    CATEGORY = "KJNodes/masking"

    def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right):
        def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right):
            draw = ImageDraw.Draw(image)
            width, height = image.size
            if border_top:
                draw.rectangle((0, 0, width, border_width), fill=border_color)
            if border_bottom:
                draw.rectangle((0, height - border_width, width, height), fill=border_color)
            if border_left:
                draw.rectangle((0, 0, border_width, height), fill=border_color)
            if border_right:
                draw.rectangle((width - border_width, 0, width, height), fill=border_color)
            return image

        if len(original_images) != len(cropped_images):
            raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")

        # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
        if len(bboxes) > len(original_images):
            print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
            bboxes = bboxes[:len(original_images)]
        elif len(bboxes) < len(original_images):
            raise ValueError("There should be at least as many bboxes as there are original and cropped images")

        input_images = tensor2pil(original_images)
        crop_imgs = tensor2pil(cropped_images)

        out_images = []
        for i in range(len(input_images)):
            img = input_images[i]
            crop = crop_imgs[i]
            bbox = bboxes[i]

            # uncrop the image based on the bounding box
            bb_x, bb_y, bb_width, bb_height = bbox

            paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)

            # scale factors
            scale_x = crop_rescale
            scale_y = crop_rescale

            # scaled paste_region
            paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y))

            # rescale the crop image to fit the paste_region
            crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1])))
            crop_img = crop.convert("RGB")

            if border_blending > 1.0:
                border_blending = 1.0
            elif border_blending < 0.0:
                border_blending = 0.0

            blend_ratio = (max(crop_img.size) / 2) * float(border_blending)

            blend = img.convert("RGBA")
            mask = Image.new("L", img.size, 0)

            mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255)
            mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right)

            mask.paste(mask_block, paste_region)
            blend.paste(crop_img, paste_region)

            mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
            mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))

            blend.putalpha(mask)
            img = Image.alpha_composite(img.convert("RGBA"), blend)
            out_images.append(img.convert("RGB"))

        return (pil2tensor(out_images),)

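# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The border blending above, reduced to its core: paste the crop through a
# blurred rectangular mask so the seam fades instead of cutting hard. Assumes
# crop_img has already been resized to match region (left, top, right, bottom).
def _demo_feathered_paste(base_img, crop_img, region, feather=8):
    mask = Image.new("L", base_img.size, 0)
    block = Image.new("L", (region[2] - region[0], region[3] - region[1]), 255)
    mask.paste(block, region)
    mask = mask.filter(ImageFilter.GaussianBlur(radius=feather))  # soften the seam
    overlay = base_img.convert("RGBA")
    overlay.paste(crop_img.convert("RGB"), region)
    overlay.putalpha(mask)  # the blurred mask becomes the alpha channel
    return Image.alpha_composite(base_img.convert("RGBA"), overlay).convert("RGB")
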
class BatchCropFromMaskAdvanced:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "original_images": ("IMAGE",),
                "masks": ("MASK",),
                "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = (
        "IMAGE",
        "IMAGE",
        "MASK",
        "IMAGE",
        "MASK",
        "BBOX",
        "BBOX",
        "INT",
        "INT",
    )
    RETURN_NAMES = (
        "original_images",
        "cropped_images",
        "cropped_masks",
        "combined_crop_image",
        "combined_crop_masks",
        "bboxes",
        "combined_bounding_box",
        "bbox_width",
        "bbox_height",
    )
    FUNCTION = "crop"
    CATEGORY = "KJNodes/masking"

    def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha):
        return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size)

    def smooth_center(self, prev_center, curr_center, alpha=0.5):
        return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]),
                round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]))

    def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha):
        bounding_boxes = []
        combined_bounding_box = []
        cropped_images = []
        cropped_masks = []
        cropped_masks_out = []
        combined_crop_out = []
        combined_cropped_images = []
        combined_cropped_masks = []

        def calculate_bbox(mask):
            non_zero_indices = np.nonzero(np.array(mask))

            # handle empty masks
            min_x, max_x, min_y, max_y = 0, 0, 0, 0
            if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0:
                min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
                min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])

            width = max_x - min_x
            height = max_y - min_y
            bbox_size = max(width, height)
            return min_x, max_x, min_y, max_y, bbox_size

        combined_mask = torch.max(masks, dim=0)[0]
        _mask = tensor2pil(combined_mask)[0]
        new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask)
        center_x = (new_min_x + new_max_x) / 2
        center_y = (new_min_y + new_max_y) / 2
        half_box_size = round(combined_bbox_size // 2)
        new_min_x = max(0, round(center_x - half_box_size))
        new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size))
        new_min_y = max(0, round(center_y - half_box_size))
        new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size))

        combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y))

        self.max_bbox_size = 0

        # First, calculate the maximum bounding box size across all masks
        curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks)
        # Smooth the changes in the bounding box size
        self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha)
        # Apply the crop size multiplier
        self.max_bbox_size = round(self.max_bbox_size * crop_size_mult)
        # Make sure max_bbox_size is divisible by 16; if not, round it upwards so it is
        self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16

        if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]:
            # max_bbox_size can only be as big as our input's width or height, and it has to be even
            self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2

        # Then, for each mask and corresponding image...
        for i, (mask, img) in enumerate(zip(masks, original_images)):
            _mask = tensor2pil(mask)[0]
            non_zero_indices = np.nonzero(np.array(_mask))

            # check for empty masks
            if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0:
                min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
                min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])

                # Calculate center of bounding box
                center_x = np.mean(non_zero_indices[1])
                center_y = np.mean(non_zero_indices[0])
                curr_center = (round(center_x), round(center_y))

                # If this is the first frame, initialize prev_center with curr_center
                if not hasattr(self, 'prev_center'):
                    self.prev_center = curr_center

                # Smooth the changes in the center coordinates from the second frame onwards
                if i > 0:
                    center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha)
                else:
                    center = curr_center

                # Update prev_center for the next frame
                self.prev_center = center

                # Create bounding box using max_bbox_size
                half_box_size = self.max_bbox_size // 2
                min_x = max(0, center[0] - half_box_size)
                max_x = min(img.shape[1], center[0] + half_box_size)
                min_y = max(0, center[1] - half_box_size)
                max_y = min(img.shape[0], center[1] + half_box_size)

                # Append bounding box coordinates
                bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y))

                # Crop the image from the bounding box
                cropped_img = img[min_y:max_y, min_x:max_x, :]
                cropped_mask = mask[min_y:max_y, min_x:max_x]

                # Resize the cropped image to a fixed size
                new_size = max(cropped_img.shape[0], cropped_img.shape[1])
                resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1]))
                resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)
                resized_img = resize_transform(cropped_img.permute(2, 0, 1))
                # Perform the center crop to the desired size
                # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions.
                crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2])))

                cropped_resized_img = crop_transform(resized_img)
                cropped_images.append(cropped_resized_img.permute(1, 2, 0))

                cropped_resized_mask = crop_transform(resized_mask)
                cropped_masks.append(cropped_resized_mask)

                combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :]
                combined_cropped_images.append(combined_cropped_img)

                combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x]
                combined_cropped_masks.append(combined_cropped_mask)
            else:
                bounding_boxes.append((0, 0, img.shape[1], img.shape[0]))
                cropped_images.append(img)
                cropped_masks.append(mask)
                combined_cropped_images.append(img)
                combined_cropped_masks.append(mask)

        cropped_out = torch.stack(cropped_images, dim=0)
        combined_crop_out = torch.stack(combined_cropped_images, dim=0)
        cropped_masks_out = torch.stack(cropped_masks, dim=0)
        combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0)

        return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size)

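# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The advanced crop above snaps its square bbox up to a multiple of 16 (a size
# that survives the usual latent-space downscaling), then falls back to an
# even size that fits if the image is smaller. That rounding in isolation:
def _demo_snap_bbox_size(size, img_h, img_w):
    size = math.ceil(size / 16) * 16      # round up to a multiple of 16
    if size > img_h or size > img_w:
        size = math.floor(min(img_h, img_w) / 2) * 2  # largest even size that fits
    return size

# _demo_snap_bbox_size(200, 512, 512) -> 208
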
class FilterZeroMasksAndCorrespondingImages:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "masks": ("MASK",),
            },
            "optional": {
                "original_images": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",)
    RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",)
    FUNCTION = "filter"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Filters out all empty (i.e. all-zero) masks in masks.
If original_images is provided, the corresponding images are filtered out by the same indexes.

original_images (optional): if provided, must have the same length as masks.
"""

    def filter(self, masks, original_images=None):
        non_zero_masks = []
        non_zero_mask_images = []
        zero_mask_images = []
        zero_mask_images_indexes = []

        masks_num = len(masks)
        also_process_images = False
        if original_images is not None:
            imgs_num = len(original_images)
            if len(original_images) == masks_num:
                also_process_images = True
            else:
                print(f"[WARNING] ignoring input original_images: the number of original_images ({imgs_num}) is not equal to the number of masks ({masks_num})")

        for i in range(masks_num):
            non_zero_num = np.count_nonzero(np.array(masks[i]))
            if non_zero_num > 0:
                non_zero_masks.append(masks[i])
                if also_process_images:
                    non_zero_mask_images.append(original_images[i])
            else:
                zero_mask_images.append(original_images[i])
                zero_mask_images_indexes.append(i)

        non_zero_masks_out = torch.stack(non_zero_masks, dim=0)
        non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None

        if also_process_images:
            non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0)
            if len(zero_mask_images) > 0:
                zero_mask_images_out = torch.stack(zero_mask_images, dim=0)
                zero_mask_images_out_indexes = zero_mask_images_indexes

        return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes)

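# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The filter-and-remember pattern above on bare tensors: keep the non-empty
# masks and record the indexes of the dropped frames so they can be restored
# later (see InsertImageBatchByIndexes below).
def _demo_filter_empty(masks):
    kept, dropped_idx = [], []
    for i, m in enumerate(masks):
        if torch.count_nonzero(m) > 0:
            kept.append(m)
        else:
            dropped_idx.append(i)
    return torch.stack(kept, dim=0), dropped_idx  # assumes at least one non-empty mask
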
class InsertImageBatchByIndexes:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "images_to_insert": ("IMAGE",),
                "insert_indexes": ("INDEXES",),
            },
        }

    RETURN_TYPES = ("IMAGE", )
    RETURN_NAMES = ("images_after_insert", )
    FUNCTION = "insert"
    CATEGORY = "KJNodes/image"
    DESCRIPTION = """
This node is designed to be used with the FilterZeroMasksAndCorrespondingImages node.
It inserts images_to_insert into images according to insert_indexes.

Returns:
    images_after_insert: the updated images, restored to their original sequence order
"""

    def insert(self, images, images_to_insert, insert_indexes):
        images_after_insert = images

        if images_to_insert is not None and insert_indexes is not None:
            images_to_insert_num = len(images_to_insert)
            insert_indexes_num = len(insert_indexes)
            if images_to_insert_num == insert_indexes_num:
                images_after_insert = []

                i_images = 0
                for i in range(len(images) + images_to_insert_num):
                    if i in insert_indexes:
                        images_after_insert.append(images_to_insert[insert_indexes.index(i)])
                    else:
                        images_after_insert.append(images[i_images])
                        i_images += 1

                images_after_insert = torch.stack(images_after_insert, dim=0)
            else:
                print(f"[WARNING] skipping this node: the number of images_to_insert ({images_to_insert_num}) is not equal to the number of insert_indexes ({insert_indexes_num})")

        return (images_after_insert, )

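# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The merge above in isolation: walk the combined length and pull each frame
# from either the insert list or the filtered batch, restoring the original
# order. With frames = [A, C], inserts = [B] and indexes = [1]: A, B, C.
def _demo_insert_by_indexes(frames, inserts, indexes):
    out, j = [], 0
    for i in range(len(frames) + len(inserts)):
        if i in indexes:
            out.append(inserts[indexes.index(i)])
        else:
            out.append(frames[j])
            j += 1
    return torch.stack(out, dim=0)
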
class BatchUncropAdvanced:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "original_images": ("IMAGE",),
                "cropped_images": ("IMAGE",),
                "cropped_masks": ("MASK",),
                "combined_crop_mask": ("MASK",),
                "bboxes": ("BBOX",),
                "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}),
                "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "use_combined_mask": ("BOOLEAN", {"default": False}),
                "use_square_mask": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                "combined_bounding_box": ("BBOX", {"default": None}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "uncrop"
    CATEGORY = "KJNodes/masking"

    def uncrop(self, original_images, cropped_images, cropped_masks, combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box=None):

        def inset_border(image, border_width=20, border_color=(0)):
            width, height = image.size
            bordered_image = Image.new(image.mode, (width, height), border_color)
            bordered_image.paste(image, (0, 0))
            draw = ImageDraw.Draw(bordered_image)
            draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width)
            return bordered_image

        if len(original_images) != len(cropped_images):
            raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")

        # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
        if len(bboxes) > len(original_images):
            print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
            bboxes = bboxes[:len(original_images)]
        elif len(bboxes) < len(original_images):
            raise ValueError("There should be at least as many bboxes as there are original and cropped images")

        crop_imgs = tensor2pil(cropped_images)
        input_images = tensor2pil(original_images)
        out_images = []

        for i in range(len(input_images)):
            img = input_images[i]
            crop = crop_imgs[i]
            bbox = bboxes[i]

            if use_combined_mask:
                bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0]
                paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
                mask = combined_crop_mask[i]
            else:
                bb_x, bb_y, bb_width, bb_height = bbox
                paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
                mask = cropped_masks[i]

            # scale paste_region
            scale_x = scale_y = crop_rescale
            paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y))

            # rescale the crop image to fit the paste_region
            crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1])))
            crop_img = crop.convert("RGB")

            # border blending
            if border_blending > 1.0:
                border_blending = 1.0
            elif border_blending < 0.0:
                border_blending = 0.0

            blend_ratio = (max(crop_img.size) / 2) * float(border_blending)
            blend = img.convert("RGBA")

            if use_square_mask:
                mask = Image.new("L", img.size, 0)
                mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255)
                mask_block = inset_border(mask_block, round(blend_ratio / 2), (0))
                mask.paste(mask_block, paste_region)
            else:
                original_mask = tensor2pil(mask)[0]
                original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]))
                mask = Image.new("L", img.size, 0)
                mask.paste(original_mask, paste_region)

            mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
            mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))

            blend.paste(crop_img, paste_region)
            blend.putalpha(mask)

            img = Image.alpha_composite(img.convert("RGBA"), blend)
            out_images.append(img.convert("RGB"))
635 |
+
|
636 |
+
return (pil2tensor(out_images),)
|
637 |
+
|
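# --- Editor's example (not part of the upstream file): how the square blend mask
# above is built. `border_blending` is a fraction of the crop's half-size; the
# dimensions below are assumptions for illustration.
def _example_square_blend_mask():
    from PIL import Image, ImageDraw, ImageFilter
    paste_w, paste_h, border_blending = 128, 96, 0.25
    blend_ratio = (max(paste_w, paste_h) / 2) * border_blending  # 16 px here
    block = Image.new("L", (paste_w, paste_h), 255)
    draw = ImageDraw.Draw(block)
    # black inset frame, so blurring fades the edges toward the background
    draw.rectangle((0, 0, paste_w - 1, paste_h - 1), outline=0, width=round(blend_ratio / 2))
    block = block.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
    block = block.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))
    return block  # use as the alpha channel when compositing the crop back
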
class SplitBboxes:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "bboxes": ("BBOX",),
                "index": ("INT", {"default": 0, "min": 0, "max": 99999999, "step": 1}),
            },
        }

    RETURN_TYPES = ("BBOX", "BBOX",)
    RETURN_NAMES = ("bboxes_a", "bboxes_b",)
    FUNCTION = "splitbbox"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Splits the specified bbox list at the given index into two lists.
"""

    def splitbbox(self, bboxes, index):
        bboxes_a = bboxes[:index]  # sub-list from the start of bboxes up to (but not including) the index
        bboxes_b = bboxes[index:]  # sub-list from the index to the end of bboxes

        return (bboxes_a, bboxes_b,)

class BboxToInt:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "bboxes": ("BBOX",),
                "index": ("INT", {"default": 0, "min": 0, "max": 99999999, "step": 1}),
            },
        }

    RETURN_TYPES = ("INT", "INT", "INT", "INT", "INT", "INT",)
    RETURN_NAMES = ("x_min", "y_min", "width", "height", "center_x", "center_y",)
    FUNCTION = "bboxtoint"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Returns the bounding box at the selected index as integers,
including its center point.
"""
    def bboxtoint(self, bboxes, index):
        x_min, y_min, width, height = bboxes[index]
        center_x = int(x_min + width / 2)
        center_y = int(y_min + height / 2)

        return (x_min, y_min, width, height, center_x, center_y,)

class BboxVisualize:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "bboxes": ("BBOX",),
                "line_width": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("images",)
    FUNCTION = "visualizebbox"
    DESCRIPTION = """
Visualizes the specified bbox on the image.
"""

    CATEGORY = "KJNodes/masking"

    def visualizebbox(self, bboxes, images, line_width):
        image_list = []
        for image, bbox in zip(images, bboxes):
            x_min, y_min, width, height = bbox

            # Ensure bbox coordinates are integers
            x_min = int(x_min)
            y_min = int(y_min)
            width = int(width)
            height = int(height)

            # Permute the image dimensions to [C, H, W]
            image = image.permute(2, 0, 1)

            # Clone the image to draw bounding boxes
            img_with_bbox = image.clone()

            # Define the color for the bbox, e.g., red
            color = torch.tensor([1, 0, 0], dtype=torch.float32)

            # Ensure the color tensor matches the image channels
            if color.shape[0] != img_with_bbox.shape[0]:
                color = color.unsqueeze(1).expand(-1, line_width)

            # Draw lines for each side of the bbox with the specified line width
            for lw in range(line_width):
                # Top horizontal line
                if y_min + lw < img_with_bbox.shape[1]:
                    img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None]

                # Bottom horizontal line
                if y_min + height - lw < img_with_bbox.shape[1]:
                    img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None]

                # Left vertical line
                if x_min + lw < img_with_bbox.shape[2]:
                    img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None]

                # Right vertical line
                if x_min + width - lw < img_with_bbox.shape[2]:
                    img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None]

            # Permute the image dimensions back to [1, H, W, C]
            img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0)
            image_list.append(img_with_bbox)

        return (torch.cat(image_list, dim=0),)
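
# --- Editor's example (not part of the upstream file): exercising BboxVisualize
# with dummy data. The tensor layout [B, H, W, C] with floats in 0..1 matches
# ComfyUI's IMAGE convention; the values here are made up for illustration.
def _example_visualize_bbox():
    import torch
    images = torch.zeros(1, 256, 256, 3)
    bboxes = [(64, 64, 128, 96)]               # (x_min, y_min, width, height)
    (out,) = BboxVisualize().visualizebbox(bboxes, images, line_width=2)
    print(out.shape)                           # torch.Size([1, 256, 256, 3])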
custom_nodes/ComfyUI-KJNodes-main/nodes/curve_nodes.py
ADDED
@@ -0,0 +1,1561 @@
import torch
from torchvision import transforms
import json
from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageFilter, ImageChops
import numpy as np
from ..utility.utility import pil2tensor, tensor2pil
import folder_paths
import io
import base64

from comfy.utils import common_upscale

def plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, prompt):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot    # imported explicitly for the module-style pyplot calls below
    import matplotlib.patches
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    text_color = '#999999'
    bg_color = '#353535'
    matplotlib.pyplot.rcParams['text.color'] = text_color
    fig, ax = matplotlib.pyplot.subplots(figsize=(width/100, height/100), dpi=100)
    fig.patch.set_facecolor(bg_color)
    ax.set_facecolor(bg_color)
    ax.grid(color=text_color, linestyle='-', linewidth=0.5)
    ax.set_xlabel('x', color=text_color)
    ax.set_ylabel('y', color=text_color)
    for text in ax.get_xticklabels() + ax.get_yticklabels():
        text.set_color(text_color)
    ax.set_title('position for: ' + prompt)
    ax.set_xlabel('X Coordinate')
    ax.set_ylabel('Y Coordinate')
    #ax.legend().remove()
    ax.set_xlim(0, width)   # Set the x-axis to match the input latent width
    ax.set_ylim(height, 0)  # Set the y-axis to match the input latent height, with (0,0) at top-left
    # Adjust the margins of the subplot
    matplotlib.pyplot.subplots_adjust(left=0.08, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2)

    cmap = matplotlib.pyplot.get_cmap('rainbow')
    image_batch = []
    canvas = FigureCanvas(fig)
    width, height = fig.get_size_inches() * fig.get_dpi()
    # Draw a box at each coordinate, re-rendering the canvas into a frame after each one
    for i, ((x, y), size) in enumerate(zip(coordinates, size_multiplier)):
        color_index = i / (len(coordinates) - 1)
        color = cmap(color_index)
        draw_height = bbox_height * size
        draw_width = bbox_width * size
        rect = matplotlib.patches.Rectangle((x - draw_width/2, y - draw_height/2), draw_width, draw_height,
                                            linewidth=1, edgecolor=color, facecolor='none', alpha=0.5)
        ax.add_patch(rect)

        # Check if there is a next coordinate to draw an arrow to
        if i < len(coordinates) - 1:
            x1, y1 = coordinates[i]
            x2, y2 = coordinates[i + 1]
            ax.annotate("", xy=(x2, y2), xytext=(x1, y1),
                        arrowprops=dict(arrowstyle="->",
                                        linestyle="-",
                                        lw=1,
                                        color=color,
                                        mutation_scale=20))
        canvas.draw()
        image_np = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3).copy()
        image_tensor = torch.from_numpy(image_np).float() / 255.0
        image_tensor = image_tensor.unsqueeze(0)
        image_batch.append(image_tensor)

    matplotlib.pyplot.close(fig)
    image_batch_tensor = torch.cat(image_batch, dim=0)

    return image_batch_tensor

class PlotCoordinates:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "coordinates": ("STRING", {"forceInput": True}),
                    "text": ("STRING", {"default": 'title', "multiline": False}),
                    "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                    "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                    "bbox_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
                    "bbox_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
                },
                "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})},
        }
    RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
    RETURN_NAMES = ("images", "width", "height", "bbox_width", "bbox_height",)
    FUNCTION = "append"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = """
Plots coordinates to a sequence of images using Matplotlib.
"""

    def append(self, coordinates, text, width, height, bbox_width, bbox_height, size_multiplier=[1.0]):
        coordinates = json.loads(coordinates.replace("'", '"'))
        coordinates = [(coord['x'], coord['y']) for coord in coordinates]
        batch_size = len(coordinates)
        if not size_multiplier or len(size_multiplier) != batch_size:
            size_multiplier = [0] * batch_size
        else:
            size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]

        plot_image_tensor = plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, text)

        return (plot_image_tensor, width, height, bbox_width, bbox_height)

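# --- Editor's example (not part of the upstream file): the coordinate string that
# the nodes in this file parse. Single quotes are tolerated because the code
# rewrites them to double quotes before json.loads. Values are illustrative.
def _example_coordinate_string():
    import json
    coords_str = "[{'x': 64, 'y': 64}, {'x': 128, 'y': 96}, {'x': 192, 'y': 160}]"
    coords = json.loads(coords_str.replace("'", '"'))
    points = [(c['x'], c['y']) for c in coords]
    print(points)  # [(64, 64), (128, 96), (192, 160)]
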
class SplineEditor:

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "points_store": ("STRING", {"multiline": False}),
                "coordinates": ("STRING", {"multiline": False}),
                "mask_width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                "mask_height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                "points_to_sample": ("INT", {"default": 16, "min": 2, "max": 1000, "step": 1}),
                "sampling_method": (
                [
                    'path',
                    'time',
                    'controlpoints'
                ],
                {
                    "default": 'time'
                }),
                "interpolation": (
                [
                    'cardinal',
                    'monotone',
                    'basis',
                    'linear',
                    'step-before',
                    'step-after',
                    'polar',
                    'polar-reverse',
                ],
                {
                    "default": 'cardinal'
                }),
                "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "repeat_output": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                "float_output_type": (
                [
                    'list',
                    'pandas series',
                    'tensor',
                ],
                {
                    "default": 'list'
                }),
            },
            "optional": {
                "min_value": ("FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}),
                "max_value": ("FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}),
                "bg_image": ("IMAGE", ),
            }
        }

    RETURN_TYPES = ("MASK", "STRING", "FLOAT", "INT", "STRING",)
    RETURN_NAMES = ("mask", "coord_str", "float", "count", "normalized_str",)
    FUNCTION = "splinedata"
    CATEGORY = "KJNodes/weights"
    DESCRIPTION = """
# WORK IN PROGRESS
Do not count on this as part of your workflow yet;
it probably contains lots of bugs, and stability is not
guaranteed!!

## Graphical editor to create values for various
## schedules and/or mask batches.

**Shift + click** to add a control point at the end.
**Ctrl + click** to add a control point (subdivide) between two points.
**Right click on a point** to delete it.
Note that you can't delete the start/end points.

Right click on the canvas for a context menu.
These are purely visual options and don't affect the output:
 - Toggle handles visibility
 - Display sample points: display the points to be returned.

The **points_to_sample** value sets the number of samples
returned from the **drawn spline itself**. This is independent of the
actual control points, so the interpolation type matters.
sampling_method:
 - time: samples along the time axis, used for schedules
 - path: samples along the path itself, useful for coordinates

output types:
 - mask batch
        example compatible nodes: anything that takes masks
 - list of floats
        example compatible nodes: IPAdapter weights
 - pandas series
        example compatible nodes: anything that takes Fizz'
        nodes Batch Value Schedule
 - torch tensor
        example compatible nodes: unknown
"""

    def splinedata(self, mask_width, mask_height, coordinates, float_output_type, interpolation,
                   points_to_sample, sampling_method, points_store, tension, repeat_output,
                   min_value=0.0, max_value=1.0, bg_image=None):

        coordinates = json.loads(coordinates)
        normalized = []
        normalized_y_values = []
        for coord in coordinates:
            coord['x'] = int(round(coord['x']))
            coord['y'] = int(round(coord['y']))
            norm_x = (1.0 - (coord['x'] / mask_height) - 0.0) * (max_value - min_value) + min_value
            norm_y = (1.0 - (coord['y'] / mask_height) - 0.0) * (max_value - min_value) + min_value
            normalized_y_values.append(norm_y)
            normalized.append({'x':norm_x, 'y':norm_y})
        if float_output_type == 'list':
            out_floats = normalized_y_values * repeat_output
        elif float_output_type == 'pandas series':
            try:
                import pandas as pd
            except ImportError:
                raise Exception("SplineEditor: pandas is not installed. Please install pandas to use this output_type")
            out_floats = pd.Series(normalized_y_values * repeat_output)
        elif float_output_type == 'tensor':
            out_floats = torch.tensor(normalized_y_values * repeat_output, dtype=torch.float32)
        # Create a color map for grayscale intensities
        color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32)

        # Create image tensors for each normalized y value
        mask_tensors = [color_map(y) for y in normalized_y_values]
        masks_out = torch.stack(mask_tensors)
        masks_out = masks_out.repeat(repeat_output, 1, 1, 1)
        masks_out = masks_out.mean(dim=-1)
        if bg_image is None:
            return (masks_out, json.dumps(coordinates), out_floats, len(out_floats), json.dumps(normalized))
        else:
            transform = transforms.ToPILImage()
            image = transform(bg_image[0].permute(2, 0, 1))
            buffered = io.BytesIO()
            image.save(buffered, format="JPEG", quality=75)

            # Encode the image bytes to a Base64 string for the frontend preview
            img_bytes = buffered.getvalue()
            img_base64 = base64.b64encode(img_bytes).decode('utf-8')
            return {
                "ui": {"bg_image": [img_base64]},
                "result": (masks_out, json.dumps(coordinates), out_floats, len(out_floats), json.dumps(normalized))
            }

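# --- Editor's example (not part of the upstream file): the canvas-to-weight
# mapping used by splinedata above. Canvas y grows downward, so a point at the
# top of a 512 px editor maps to max_value. Values are illustrative.
def _example_canvas_to_weight():
    mask_height, min_value, max_value = 512, 0.0, 1.0
    for y in (0, 256, 512):
        norm_y = (1.0 - (y / mask_height)) * (max_value - min_value) + min_value
        print(y, "->", norm_y)   # 0 -> 1.0, 256 -> 0.5, 512 -> 0.0
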
class CreateShapeMaskOnPath:

    RETURN_TYPES = ("MASK", "MASK",)
    RETURN_NAMES = ("mask", "mask_inverted",)
    FUNCTION = "createshapemask"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Creates a mask or batch of masks with the specified shape.
Locations are center locations.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "shape": (
                [   'circle',
                    'square',
                    'triangle',
                ],
                {
                    "default": 'circle'
                }),
                "coordinates": ("STRING", {"forceInput": True}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "shape_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
                "shape_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
            },
            "optional": {
                "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
            }
        }

    def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape, size_multiplier=[1.0]):
        # Define the number of images in the batch
        coordinates = coordinates.replace("'", '"')
        coordinates = json.loads(coordinates)

        batch_size = len(coordinates)
        out = []
        color = "white"
        if not size_multiplier or len(size_multiplier) != batch_size:
            size_multiplier = [0] * batch_size
        else:
            size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
        for i, coord in enumerate(coordinates):
            image = Image.new("RGB", (frame_width, frame_height), "black")
            draw = ImageDraw.Draw(image)

            # Calculate the size for this frame and ensure it's not less than 0
            current_width = max(0, shape_width + i * size_multiplier[i])
            current_height = max(0, shape_height + i * size_multiplier[i])

            location_x = coord['x']
            location_y = coord['y']

            if shape == 'circle' or shape == 'square':
                # Define the bounding box for the shape
                left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
                right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
                two_points = [left_up_point, right_down_point]

                if shape == 'circle':
                    draw.ellipse(two_points, fill=color)
                elif shape == 'square':
                    draw.rectangle(two_points, fill=color)

            elif shape == 'triangle':
                # Define the points for the triangle
                left_up_point = (location_x - current_width // 2, location_y + current_height // 2)   # bottom left
                right_down_point = (location_x + current_width // 2, location_y + current_height // 2)  # bottom right
                top_point = (location_x, location_y - current_height // 2)  # top point
                draw.polygon([top_point, left_up_point, right_down_point], fill=color)

            image = pil2tensor(image)
            mask = image[:, :, :, 0]
            out.append(mask)
        outstack = torch.cat(out, dim=0)
        return (outstack, 1.0 - outstack,)

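# --- Editor's example (not part of the upstream file): the list-recycling idiom
# the nodes above use to stretch a short size_multiplier list to the batch size.
def _example_recycle_multipliers():
    size_multiplier, batch_size = [1.0, 1.5], 5
    tiled = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
    print(tiled)  # [1.0, 1.5, 1.0, 1.5, 1.0]
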
class CreateShapeImageOnPath:

    RETURN_TYPES = ("IMAGE", "MASK",)
    RETURN_NAMES = ("image", "mask", )
    FUNCTION = "createshapemask"
    CATEGORY = "KJNodes/image"
    DESCRIPTION = """
Creates an image or batch of images with the specified shape.
Locations are center locations.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "shape": (
                [   'circle',
                    'square',
                    'triangle',
                ],
                {
                    "default": 'circle'
                }),
                "coordinates": ("STRING", {"forceInput": True}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "shape_width": ("INT", {"default": 128, "min": 2, "max": 4096, "step": 1}),
                "shape_height": ("INT", {"default": 128, "min": 2, "max": 4096, "step": 1}),
                "shape_color": ("STRING", {"default": 'white'}),
                "bg_color": ("STRING", {"default": 'black'}),
                "blur_radius": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}),
                "intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}),
            },
            "optional": {
                "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
                "trailing": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            }
        }

    def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape_color,
                        bg_color, blur_radius, shape, intensity, size_multiplier=[1.0], accumulate=False, trailing=1.0):
        # coordinates can arrive as a single JSON string, or as a short list of JSON strings
        if len(coordinates) < 10:
            coords_list = []
            for coords in coordinates:
                coords = json.loads(coords.replace("'", '"'))
                coords_list.append(coords)
        else:
            coords = json.loads(coordinates.replace("'", '"'))
            coords_list = [coords]

        batch_size = len(coords_list[0])
        images_list = []
        masks_list = []

        if not size_multiplier or len(size_multiplier) != batch_size:
            size_multiplier = [0] * batch_size
        else:
            size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]

        previous_output = None

        for i in range(batch_size):
            image = Image.new("RGB", (frame_width, frame_height), bg_color)
            draw = ImageDraw.Draw(image)

            # Calculate the size for this frame and ensure it's not less than 0
            current_width = max(0, shape_width + i * size_multiplier[i])
            current_height = max(0, shape_height + i * size_multiplier[i])

            for coords in coords_list:
                location_x = coords[i]['x']
                location_y = coords[i]['y']

                if shape == 'circle' or shape == 'square':
                    # Define the bounding box for the shape
                    left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
                    right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
                    two_points = [left_up_point, right_down_point]

                    if shape == 'circle':
                        draw.ellipse(two_points, fill=shape_color)
                    elif shape == 'square':
                        draw.rectangle(two_points, fill=shape_color)

                elif shape == 'triangle':
                    # Define the points for the triangle
                    left_up_point = (location_x - current_width // 2, location_y + current_height // 2)   # bottom left
                    right_down_point = (location_x + current_width // 2, location_y + current_height // 2)  # bottom right
                    top_point = (location_x, location_y - current_height // 2)  # top point
                    draw.polygon([top_point, left_up_point, right_down_point], fill=shape_color)

            if blur_radius != 0:
                image = image.filter(ImageFilter.GaussianBlur(blur_radius))

            image = pil2tensor(image)
            if trailing != 1.0 and previous_output is not None:
                # Blend the decayed previous output into the current frame
                image += trailing * previous_output
                image = image / image.max()
            previous_output = image
            image = image * intensity
            mask = image[:, :, :, 0]
            masks_list.append(mask)
            images_list.append(image)
        out_images = torch.cat(images_list, dim=0).cpu().float()
        out_masks = torch.cat(masks_list, dim=0)
        return (out_images, out_masks)

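# --- Editor's example (not part of the upstream file): the motion-trail trick used
# above, reduced to 1-D. Each frame keeps a `trailing`-scaled copy of the previous
# frame, then renormalizes, so older positions fade out gradually.
def _example_trailing_decay():
    import torch
    frames = [torch.tensor([0., 1., 0.]), torch.tensor([0., 0., 1.])]
    trailing, previous = 0.5, None
    for f in frames:
        if previous is not None:
            f = f + trailing * previous
            f = f / f.max()
        previous = f
        print(f)   # the second frame shows a faded echo of the first
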
class CreateTextOnPath:

    RETURN_TYPES = ("IMAGE", "MASK", "MASK",)
    RETURN_NAMES = ("image", "mask", "mask_inverted",)
    FUNCTION = "createtextmask"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Creates a mask or batch of masks with the specified text.
Locations are center locations.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coordinates": ("STRING", {"forceInput": True}),
                "text": ("STRING", {"default": 'text', "multiline": True}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
                "font_size": ("INT", {"default": 42}),
                "alignment": (
                [   'left',
                    'center',
                    'right'
                ],
                {"default": 'center'}
                ),
                "text_color": ("STRING", {"default": 'white'}),
            },
            "optional": {
                "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
            }
        }

    def createtextmask(self, coordinates, frame_width, frame_height, font, font_size, text, text_color, alignment, size_multiplier=[1.0]):
        coordinates = coordinates.replace("'", '"')
        coordinates = json.loads(coordinates)

        batch_size = len(coordinates)
        mask_list = []
        image_list = []
        color = text_color
        font_path = folder_paths.get_full_path("kjnodes_fonts", font)

        if len(size_multiplier) != batch_size:
            size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]

        for i, coord in enumerate(coordinates):
            image = Image.new("RGB", (frame_width, frame_height), "black")
            draw = ImageDraw.Draw(image)
            lines = text.split('\n')  # Split the text into lines
            # Apply the size multiplier to the font size for this iteration
            current_font_size = int(font_size * size_multiplier[i])
            current_font = ImageFont.truetype(font_path, current_font_size)
            line_heights = [current_font.getbbox(line)[3] for line in lines]  # List of line heights
            total_text_height = sum(line_heights)  # Total height of the text block

            # Calculate the starting Y position to center the block of text
            start_y = coord['y'] - total_text_height // 2
            for j, line in enumerate(lines):
                text_width, text_height = current_font.getbbox(line)[2], line_heights[j]
                if alignment == 'left':
                    location_x = coord['x']
                elif alignment == 'center':
                    location_x = int(coord['x'] - text_width // 2)
                elif alignment == 'right':
                    location_x = int(coord['x'] - text_width)

                location_y = int(start_y + sum(line_heights[:j]))
                text_position = (location_x, location_y)
                # Draw the text; fall back if font features are unsupported (no libraqm)
                try:
                    draw.text(text_position, line, fill=color, font=current_font, features=['-liga'])
                except Exception:
                    draw.text(text_position, line, fill=color, font=current_font)

            image = pil2tensor(image)
            non_black_pixels = (image > 0).any(dim=-1)
            mask = non_black_pixels.to(image.dtype)
            mask_list.append(mask)
            image_list.append(image)

        out_images = torch.cat(image_list, dim=0).cpu().float()
        out_masks = torch.cat(mask_list, dim=0)
        return (out_images, out_masks, 1.0 - out_masks,)

class CreateGradientFromCoords:

    RETURN_TYPES = ("IMAGE", )
    RETURN_NAMES = ("image", )
    FUNCTION = "generate"
    CATEGORY = "KJNodes/image"
    DESCRIPTION = """
Creates a gradient image from coordinates.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coordinates": ("STRING", {"forceInput": True}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "start_color": ("STRING", {"default": 'white'}),
                "end_color": ("STRING", {"default": 'black'}),
                "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}),
            },
        }

    def generate(self, coordinates, frame_width, frame_height, start_color, end_color, multiplier):
        # Parse the coordinates
        coordinates = json.loads(coordinates.replace("'", '"'))

        # Create an image
        image = Image.new("RGB", (frame_width, frame_height))
        draw = ImageDraw.Draw(image)

        # Extract start and end points for the gradient
        start_coord = coordinates[0]
        end_coord = coordinates[1]

        start_color = ImageColor.getrgb(start_color)
        end_color = ImageColor.getrgb(end_color)

        # Calculate the gradient direction (vector)
        gradient_direction = (end_coord['x'] - start_coord['x'], end_coord['y'] - start_coord['y'])
        gradient_length = (gradient_direction[0] ** 2 + gradient_direction[1] ** 2) ** 0.5

        # Iterate over each pixel in the image
        for y in range(frame_height):
            for x in range(frame_width):
                # Calculate the projection of the point onto the gradient line
                point_vector = (x - start_coord['x'], y - start_coord['y'])
                projection = (point_vector[0] * gradient_direction[0] + point_vector[1] * gradient_direction[1]) / gradient_length
                projection = max(min(projection, gradient_length), 0)  # Clamp the projection value

                # Calculate the blend factor for the current pixel
                blend = projection * multiplier / gradient_length

                # Determine the color of the current pixel
                color = (
                    int(start_color[0] + (end_color[0] - start_color[0]) * blend),
                    int(start_color[1] + (end_color[1] - start_color[1]) * blend),
                    int(start_color[2] + (end_color[2] - start_color[2]) * blend)
                )

                # Set the pixel color
                draw.point((x, y), fill=color)

        # Convert the PIL image to a tensor
        image_tensor = pil2tensor(image)

        return (image_tensor,)

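# --- Editor's note: the per-pixel loop above makes O(W*H) Python calls and gets
# slow at large sizes. A NumPy-vectorized equivalent of the same projection math,
# as a sketch (not upstream code); all parameter values are illustrative.
def _example_gradient_vectorized(frame_width=512, frame_height=512,
                                 start=(0, 0), end=(511, 511),
                                 start_color=(255, 255, 255), end_color=(0, 0, 0),
                                 multiplier=1.0):
    import numpy as np
    dx, dy = end[0] - start[0], end[1] - start[1]
    length = (dx * dx + dy * dy) ** 0.5
    ys, xs = np.mgrid[0:frame_height, 0:frame_width]
    proj = ((xs - start[0]) * dx + (ys - start[1]) * dy) / length
    blend = np.clip(proj, 0, length)[..., None] * multiplier / length   # [H, W, 1]
    c0 = np.array(start_color, dtype=np.float32)
    c1 = np.array(end_color, dtype=np.float32)
    return (c0 + (c1 - c0) * blend).astype(np.uint8)                    # [H, W, 3]
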
class GradientToFloat:

    RETURN_TYPES = ("FLOAT", "FLOAT",)
    RETURN_NAMES = ("float_x", "float_y", )
    FUNCTION = "sample"
    CATEGORY = "KJNodes/image"
    DESCRIPTION = """
Calculates a list of floats from an image.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE", ),
                "steps": ("INT", {"default": 10, "min": 2, "max": 10000, "step": 1}),
            },
        }

    def sample(self, image, steps):
        # Assuming image is a tensor with shape [B, H, W, C]
        B, H, W, C = image.shape

        # Sample along the width axis (W)
        w_intervals = torch.linspace(0, W - 1, steps=steps, dtype=torch.int64)
        # Sampling from the first batch item and the first channel
        w_sampled = image[0, :, w_intervals, 0]

        # Sample along the height axis (H)
        h_intervals = torch.linspace(0, H - 1, steps=steps, dtype=torch.int64)
        # Sampling from the first batch item and the first channel
        h_sampled = image[0, h_intervals, :, 0]

        # Take the mean across the height for width sampling, and across the width for height sampling
        w_values = w_sampled.mean(dim=0).tolist()
        h_values = h_sampled.mean(dim=1).tolist()

        return (w_values, h_values)

class MaskOrImageToWeight:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "output_type": (
                [
                    'list',
                    'pandas series',
                    'tensor',
                    'string'
                ],
                {
                    "default": 'list'
                }),
            },
            "optional": {
                "images": ("IMAGE",),
                "masks": ("MASK",),
            },

        }
    RETURN_TYPES = ("FLOAT", "STRING",)
    FUNCTION = "execute"
    CATEGORY = "KJNodes/weights"
    DESCRIPTION = """
Gets the mean values from a mask or image batch
and returns them as the selected output type.
"""

    def execute(self, output_type, images=None, masks=None):
        mean_values = []
        if masks is not None and images is None:
            for mask in masks:
                mean_values.append(mask.mean().item())
        elif masks is None and images is not None:
            for image in images:
                mean_values.append(image.mean().item())
        elif masks is not None and images is not None:
            raise Exception("MaskOrImageToWeight: Use either mask or image input only.")

        # Convert mean_values to the specified output_type
        if output_type == 'list':
            out = mean_values
        elif output_type == 'pandas series':
            try:
                import pandas as pd
            except ImportError:
                raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type")
            out = pd.Series(mean_values)
        elif output_type == 'tensor':
            out = torch.tensor(mean_values, dtype=torch.float32)
        else:
            # 'string' (and any other value) falls back to the plain list here;
            # the stringified values are always returned as the second output anyway
            out = mean_values
        return (out, [str(value) for value in mean_values],)

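# --- Editor's example (not part of the upstream file): turning a fading mask batch
# into per-frame weights, the typical use of MaskOrImageToWeight. Dummy data only.
def _example_masks_to_weights():
    import torch
    masks = torch.stack([torch.full((64, 64), v) for v in (1.0, 0.5, 0.0)])
    weights, strings = MaskOrImageToWeight().execute('list', masks=masks)
    print(weights)  # [1.0, 0.5, 0.0] -- usable as e.g. IPAdapter weights
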
class WeightScheduleConvert:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_values": ("FLOAT", {"default": 0.0, "forceInput": True}),
                "output_type": (
                [
                    'match_input',
                    'list',
                    'pandas series',
                    'tensor',
                ],
                {
                    "default": 'list'
                }),
                "invert": ("BOOLEAN", {"default": False}),
                "repeat": ("INT", {"default": 1, "min": 1, "max": 255, "step": 1}),
            },
            "optional": {
                "remap_to_frames": ("INT", {"default": 0}),
                "interpolation_curve": ("FLOAT", {"forceInput": True}),
                "remap_values": ("BOOLEAN", {"default": False}),
                "remap_min": ("FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}),
                "remap_max": ("FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}),
            },

        }
    RETURN_TYPES = ("FLOAT", "STRING", "INT",)
    FUNCTION = "execute"
    CATEGORY = "KJNodes/weights"
    DESCRIPTION = """
Converts different value lists/series to another type.
"""

    def detect_input_type(self, input_values):
        import pandas as pd
        if isinstance(input_values, list):
            return 'list'
        elif isinstance(input_values, pd.Series):
            return 'pandas series'
        elif isinstance(input_values, torch.Tensor):
            return 'tensor'
        else:
            raise ValueError("Unsupported input type")

    def execute(self, input_values, output_type, invert, repeat, remap_to_frames=0, interpolation_curve=None, remap_min=0.0, remap_max=1.0, remap_values=False):
        import pandas as pd
        input_type = self.detect_input_type(input_values)

        if input_type == 'pandas series':
            float_values = input_values.tolist()
        elif input_type == 'tensor':
            float_values = input_values
        else:
            float_values = input_values

        if invert:
            float_values = [1 - value for value in float_values]

        if interpolation_curve is not None:
            interpolated_pattern = []
            orig_float_values = float_values
            for value in interpolation_curve:
                min_val = min(orig_float_values)
                max_val = max(orig_float_values)
                # Normalize the values to [0, 1]
                normalized_values = [(value - min_val) / (max_val - min_val) for value in orig_float_values]
                # Interpolate the normalized values to the new frame count
                remapped_float_values = np.interp(np.linspace(0, 1, int(remap_to_frames * value)), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist()
                interpolated_pattern.extend(remapped_float_values)
            float_values = interpolated_pattern
        else:
            # Remap float_values to match target_frame_amount
            if remap_to_frames > 0 and remap_to_frames != len(float_values):
                min_val = min(float_values)
                max_val = max(float_values)
                # Normalize the values to [0, 1]
                normalized_values = [(value - min_val) / (max_val - min_val) for value in float_values]
                # Interpolate the normalized values to the new frame count
                float_values = np.interp(np.linspace(0, 1, remap_to_frames), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist()

            float_values = float_values * repeat
            if remap_values:
                float_values = self.remap_values(float_values, remap_min, remap_max)

        if output_type == 'list':
            out = float_values,
        elif output_type == 'pandas series':
            out = pd.Series(float_values),
        elif output_type == 'tensor':
            if input_type == 'pandas series':
                out = torch.tensor(float_values.values, dtype=torch.float32),
            else:
                out = torch.tensor(float_values, dtype=torch.float32),
        elif output_type == 'match_input':
            out = float_values,
        return (out, [str(value) for value in float_values], [int(value) for value in float_values])

    def remap_values(self, values, target_min, target_max):
        # Determine the current range
        current_min = min(values)
        current_max = max(values)
        current_range = current_max - current_min

        # Determine the target range
        target_range = target_max - target_min

        # Perform the linear interpolation for each value
        remapped_values = [(value - current_min) / current_range * target_range + target_min for value in values]

        return remapped_values

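# --- Editor's example (not part of the upstream file): the normalize-then-interp
# remapping WeightScheduleConvert uses to stretch a schedule to a new frame count.
def _example_remap_to_frames():
    import numpy as np
    values, remap_to_frames = [0.0, 1.0, 0.0], 7
    lo, hi = min(values), max(values)
    norm = [(v - lo) / (hi - lo) for v in values]
    stretched = np.interp(np.linspace(0, 1, remap_to_frames),
                          np.linspace(0, 1, len(norm)), norm).tolist()
    print(stretched)  # 7 values ramping 0 -> 1 -> 0
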
class FloatToMask:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_values": ("FLOAT", {"forceInput": True, "default": 0}),
                "width": ("INT", {"default": 100, "min": 1}),
                "height": ("INT", {"default": 100, "min": 1}),
            },
        }
    RETURN_TYPES = ("MASK",)
    FUNCTION = "execute"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Generates a batch of masks based on the input float values.
The batch size is determined by the length of the input float values.
Each mask is generated with the specified width and height.
"""

    def execute(self, input_values, width, height):
        import pandas as pd
        # Ensure input_values is a flat list
        if isinstance(input_values, (float, int)):
            input_values = [input_values]
        elif isinstance(input_values, pd.Series):
            input_values = input_values.tolist()
        elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values):
            input_values = [item for sublist in input_values for item in sublist]

        # Generate a batch of masks based on the input_values
        masks = []
        for value in input_values:
            # value is a float between 0 and 1 representing the mask's intensity
            mask = torch.ones((height, width), dtype=torch.float32) * value
            masks.append(mask)
        masks_out = torch.stack(masks, dim=0)

        return (masks_out,)

class WeightScheduleExtend:

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_values_1": ("FLOAT", {"default": 0.0, "forceInput": True}),
                "input_values_2": ("FLOAT", {"default": 0.0, "forceInput": True}),
                "output_type": (
                [
                    'match_input',
                    'list',
                    'pandas series',
                    'tensor',
                ],
                {
                    "default": 'match_input'
                }),
            },

        }
    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "execute"
    CATEGORY = "KJNodes/weights"
    DESCRIPTION = """
Extends, and converts if needed, different value lists/series.
"""

    def detect_input_type(self, input_values):
        import pandas as pd
        if isinstance(input_values, list):
            return 'list'
        elif isinstance(input_values, pd.Series):
            return 'pandas series'
        elif isinstance(input_values, torch.Tensor):
            return 'tensor'
        else:
            raise ValueError("Unsupported input type")

    def execute(self, input_values_1, input_values_2, output_type):
        import pandas as pd
        input_type_1 = self.detect_input_type(input_values_1)
        input_type_2 = self.detect_input_type(input_values_2)
        # Convert input_values_2 to the same format as input_values_1 if they do not match
        if input_type_1 != input_type_2:
            print("Converting input_values_2 to the same format as input_values_1")
            if input_type_1 == 'pandas series':
                # Convert input_values_2 to a pandas Series
                float_values_2 = pd.Series(input_values_2)
            elif input_type_1 == 'tensor':
                # Convert input_values_2 to a tensor
                float_values_2 = torch.tensor(input_values_2, dtype=torch.float32)
            else:
                # input_values_1 is a list; convert input_values_2 to a list as well
                # (this branch was missing upstream and would raise a NameError)
                float_values_2 = list(input_values_2)
        else:
            print("Input types match, no conversion needed")
            # If the types match, no conversion is needed
            float_values_2 = input_values_2

        float_values = input_values_1 + float_values_2

        if output_type == 'list':
            return float_values,
        elif output_type == 'pandas series':
            return pd.Series(float_values),
        elif output_type == 'tensor':
            if input_type_1 == 'pandas series':
                return torch.tensor(float_values.values, dtype=torch.float32),
            else:
                return torch.tensor(float_values, dtype=torch.float32),
        elif output_type == 'match_input':
            return float_values,
        else:
            raise ValueError(f"Unsupported output_type: {output_type}")

class FloatToSigmas:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {
                     "float_list": ("FLOAT", {"default": 0.0, "forceInput": True}),
                    }
               }
    RETURN_TYPES = ("SIGMAS",)
    RETURN_NAMES = ("SIGMAS",)
    CATEGORY = "KJNodes/noise"
    FUNCTION = "customsigmas"
    DESCRIPTION = """
Creates a sigmas tensor from a list of float values.
"""
    def customsigmas(self, float_list):
        return torch.tensor(float_list, dtype=torch.float32),

class SigmasToFloat:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {
                     "sigmas": ("SIGMAS",),
                    }
               }
    RETURN_TYPES = ("FLOAT",)
    RETURN_NAMES = ("float",)
    CATEGORY = "KJNodes/noise"
    FUNCTION = "customsigmas"
    DESCRIPTION = """
Creates a float list from a sigmas tensor.
"""
    def customsigmas(self, sigmas):
        return sigmas.tolist(),

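# --- Editor's example (not part of the upstream file): a hand-rolled sigma
# schedule round-tripped through the two nodes above. Values are illustrative;
# real schedules come from the model's sampling configuration.
def _example_custom_sigmas():
    sigmas, = FloatToSigmas().customsigmas([14.6, 7.0, 3.1, 1.2, 0.4, 0.0])
    floats, = SigmasToFloat().customsigmas(sigmas)
    print(sigmas.dtype, floats)  # torch.float32, the original list back
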
class GLIGENTextBoxApplyBatchCoords:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ),
                             "latents": ("LATENT", ),
                             "clip": ("CLIP", ),
                             "gligen_textbox_model": ("GLIGEN", ),
                             "coordinates": ("STRING", {"forceInput": True}),
                             "text": ("STRING", {"multiline": True}),
                             "width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
                             "height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
                             },
                "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})},
               }
    RETURN_TYPES = ("CONDITIONING", "IMAGE", )
    RETURN_NAMES = ("conditioning", "coord_preview", )
    FUNCTION = "append"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = """
This node allows scheduling GLIGEN text box positions in a batch,
to be used with AnimateDiff-Evolved. Intended to pair with the
Spline Editor node.

The GLIGEN model can be downloaded through the Manager's "Install Models" menu,
or directly from here:
https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main

Inputs:
- **latents** input is used to calculate the batch size
- **clip** is your standard text encoder, use the same one as for the main prompt
- **gligen_textbox_model** connects to the GLIGEN Loader
- **coordinates** takes a json string of points, directly compatible
with the spline editor node.
- **text** is the part of the prompt to set the position for
- **width** and **height** are the size of the GLIGEN bounding box

Outputs:
- **conditioning** goes between the CLIP text encode node and the sampler
- **coord_preview** is an optional preview of the coordinates and
bounding boxes.

"""

    def append(self, latents, coordinates, conditioning_to, clip, gligen_textbox_model, text, width, height, size_multiplier=[1.0]):
        coordinates = json.loads(coordinates.replace("'", '"'))
        coordinates = [(coord['x'], coord['y']) for coord in coordinates]

        batch_size = sum(tensor.size(0) for tensor in latents.values())
        if len(coordinates) != batch_size:
            print("GLIGENTextBoxApplyBatchCoords WARNING: The number of coordinates does not match the number of latents")

        c = []
        _, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)

        for t in conditioning_to:
            n = [t[0], t[1].copy()]

            position_params_batch = [[] for _ in range(batch_size)]  # a list of empty lists, one per batch item
            if len(size_multiplier) != batch_size:
                size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]

            for i in range(batch_size):
                x_position, y_position = coordinates[i]
                position_param = (cond_pooled, int((height // 8) * size_multiplier[i]), int((width // 8) * size_multiplier[i]), (y_position - height // 2) // 8, (x_position - width // 2) // 8)
                position_params_batch[i].append(position_param)  # Append position_param to the correct sublist

            prev = []
            if "gligen" in n[1]:
                prev = n[1]['gligen'][2]
            else:
                prev = [[] for _ in range(batch_size)]
            # Concatenate prev and position_params_batch, ensuring both are lists of lists
            # and each sublist corresponds to a batch item
            combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)]
            n[1]['gligen'] = ("position_batched", gligen_textbox_model, combined_position_params)
            c.append(n)

        image_height = latents['samples'].shape[-2] * 8
        image_width = latents['samples'].shape[-1] * 8
        plot_image_tensor = plot_coordinates_to_tensor(coordinates, image_height, image_width, height, width, size_multiplier, text)

        return (c, plot_image_tensor,)

1039 |
+
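# Illustrative sketch, not part of the original file: the **coordinates**
# string parsed by append() is a JSON list of point dicts, one per latent in
# the batch; single quotes are tolerated because the node swaps them for
# double quotes before json.loads. A hypothetical 4-frame input:
#
#   coordinates = "[{'x': 64, 'y': 64}, {'x': 96, 'y': 64}, {'x': 128, 'y': 96}, {'x': 160, 'y': 128}]"
#
# This is the same format the spline editor node emits.
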
class CreateInstanceDiffusionTracking:

    RETURN_TYPES = ("TRACKING", "STRING", "INT", "INT", "INT", "INT",)
    RETURN_NAMES = ("tracking", "prompt", "width", "height", "bbox_width", "bbox_height",)
    FUNCTION = "tracking"
    CATEGORY = "KJNodes/InstanceDiffusion"
    DESCRIPTION = """
Creates tracking data to be used with InstanceDiffusion:
https://github.com/logtd/ComfyUI-InstanceDiffusion

InstanceDiffusion prompt format:
"class_id.class_name": "prompt",
for example:
"1.head": "((head))",
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coordinates": ("STRING", {"forceInput": True}),
                "width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "bbox_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "bbox_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "class_name": ("STRING", {"default": "class_name"}),
                "class_id": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                "prompt": ("STRING", {"default": "prompt", "multiline": True}),
            },
            "optional": {
                "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
                "fit_in_frame": ("BOOLEAN", {"default": True}),
            }
        }

    def tracking(self, coordinates, class_name, class_id, width, height, bbox_width, bbox_height, prompt, size_multiplier=[1.0], fit_in_frame=True):
        # Parse the coordinates JSON; the batch size is the number of points
        coordinates = coordinates.replace("'", '"')
        coordinates = json.loads(coordinates)

        tracked = {}
        tracked[class_name] = {}
        batch_size = len(coordinates)
        # Initialize a list to hold the coordinates for the current ID
        id_coordinates = []
        if not size_multiplier or len(size_multiplier) != batch_size:
            size_multiplier = [1.0] * batch_size  # default to no scaling when the multiplier list is missing or mismatched
        else:
            size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
        for i, coord in enumerate(coordinates):
            x = coord['x']
            y = coord['y']
            adjusted_bbox_width = bbox_width * size_multiplier[i]
            adjusted_bbox_height = bbox_height * size_multiplier[i]
            # Calculate the top left and bottom right coordinates
            top_left_x = x - adjusted_bbox_width // 2
            top_left_y = y - adjusted_bbox_height // 2
            bottom_right_x = x + adjusted_bbox_width // 2
            bottom_right_y = y + adjusted_bbox_height // 2

            if fit_in_frame:
                # Clip the coordinates to the frame boundaries
                top_left_x = max(0, top_left_x)
                top_left_y = max(0, top_left_y)
                bottom_right_x = min(width, bottom_right_x)
                bottom_right_y = min(height, bottom_right_y)
                # Ensure width and height are positive
                adjusted_bbox_width = max(1, bottom_right_x - top_left_x)
                adjusted_bbox_height = max(1, bottom_right_y - top_left_y)

                # Update the coordinates with the new width and height
                bottom_right_x = top_left_x + adjusted_bbox_width
                bottom_right_y = top_left_y + adjusted_bbox_height

            # Append the top left and bottom right coordinates to the list for the current ID
            id_coordinates.append([top_left_x, top_left_y, bottom_right_x, bottom_right_y, width, height])

        class_id = int(class_id)
        # Assign the list of coordinates to the specified ID within the class_id dictionary
        tracked[class_name][class_id] = id_coordinates

        prompt_string = ""
        for class_name, class_data in tracked.items():
            for class_id in class_data.keys():
                class_id_str = str(class_id)
                # Use the incoming prompt for each class name and ID
                prompt_string += f'"{class_id_str}.{class_name}": "({prompt})",\n'

        # Remove the last comma and newline
        prompt_string = prompt_string.rstrip(",\n")

        return (tracked, prompt_string, width, height, bbox_width, bbox_height)

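# Illustrative sketch, not part of the original file: for a hypothetical call
# with class_name="head", class_id=1 and two input coordinates, tracking()
# returns a dict shaped roughly like
#
#   {"head": {1: [[x1, y1, x2, y2, width, height],
#                 [x1, y1, x2, y2, width, height]]}}
#
# and a prompt_string of '"1.head": "(<prompt>)"', matching the
# InstanceDiffusion prompt format shown in the DESCRIPTION above.
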
class AppendInstanceDiffusionTracking:

    RETURN_TYPES = ("TRACKING", "STRING",)
    RETURN_NAMES = ("tracking", "prompt",)
    FUNCTION = "append"
    CATEGORY = "KJNodes/InstanceDiffusion"
    DESCRIPTION = """
Appends tracking data to be used with InstanceDiffusion:
https://github.com/logtd/ComfyUI-InstanceDiffusion

"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "tracking_1": ("TRACKING", {"forceInput": True}),
                "tracking_2": ("TRACKING", {"forceInput": True}),
            },
            "optional": {
                "prompt_1": ("STRING", {"default": "", "forceInput": True}),
                "prompt_2": ("STRING", {"default": "", "forceInput": True}),
            }
        }

    def append(self, tracking_1, tracking_2, prompt_1="", prompt_2=""):
        tracking_copy = tracking_1.copy()
        # Merge tracking_2 into a copy of tracking_1
        for class_name, class_data in tracking_2.items():
            if class_name not in tracking_copy:
                tracking_copy[class_name] = class_data
            else:
                # If the class name exists, merge the class data from tracking_2 into tracking_copy
                # This will add new class IDs under the same class name without raising an error
                tracking_copy[class_name].update(class_data)
        prompt_string = prompt_1 + "," + prompt_2
        return (tracking_copy, prompt_string)

class InterpolateCoords:

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("coordinates",)
    FUNCTION = "interpolate"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = """
Interpolates coordinates based on a curve.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coordinates": ("STRING", {"forceInput": True}),
                "interpolation_curve": ("FLOAT", {"forceInput": True}),
            },
        }

    def interpolate(self, coordinates, interpolation_curve):
        # Parse the JSON string to get the list of coordinates
        coordinates = json.loads(coordinates.replace("'", '"'))

        # Convert the list of dictionaries to a list of (x, y) tuples for easier processing
        coordinates = [(coord['x'], coord['y']) for coord in coordinates]

        # Calculate the total length of the original path
        path_length = sum(np.linalg.norm(np.array(coordinates[i]) - np.array(coordinates[i-1]))
                          for i in range(1, len(coordinates)))

        # Initialize variables for interpolation
        interpolated_coords = []
        current_length = 0
        current_index = 0

        # Iterate over the normalized curve
        for normalized_length in interpolation_curve:
            target_length = normalized_length * path_length  # Convert to the original scale
            while current_index < len(coordinates) - 1:
                segment_start, segment_end = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1])
                segment_length = np.linalg.norm(segment_end - segment_start)
                if current_length + segment_length >= target_length:
                    break
                current_length += segment_length
                current_index += 1

            # Interpolate between the last two points
            if current_index < len(coordinates) - 1:
                p1, p2 = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1])
                segment_length = np.linalg.norm(p2 - p1)
                if segment_length > 0:
                    t = (target_length - current_length) / segment_length
                    interpolated_point = p1 + t * (p2 - p1)
                    interpolated_coords.append(interpolated_point.tolist())
                else:
                    interpolated_coords.append(p1.tolist())
            else:
                # If the target_length is at or beyond the end of the path, add the last coordinate
                interpolated_coords.append(coordinates[-1])

        # Convert back to string format if necessary
        interpolated_coords_str = "[" + ", ".join([f"{{'x': {round(coord[0])}, 'y': {round(coord[1])}}}" for coord in interpolated_coords]) + "]"
        print(interpolated_coords_str)

        return (interpolated_coords_str,)

class DrawInstanceDiffusionTracking:

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image", )
    FUNCTION = "draw"
    CATEGORY = "KJNodes/InstanceDiffusion"
    DESCRIPTION = """
Draws the tracking data from the
CreateInstanceDiffusionTracking node.

"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE", ),
                "tracking": ("TRACKING", {"forceInput": True}),
                "box_line_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}),
                "draw_text": ("BOOLEAN", {"default": True}),
                "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
                "font_size": ("INT", {"default": 20}),
            },
        }

    def draw(self, image, tracking, box_line_width, draw_text, font, font_size):
        import matplotlib.cm as cm

        modified_images = []

        colormap = cm.get_cmap('rainbow', len(tracking))
        if draw_text:
            font_path = folder_paths.get_full_path("kjnodes_fonts", font)
            font = ImageFont.truetype(font_path, font_size)

        # Iterate over each image in the batch
        for i in range(image.shape[0]):
            # Extract the current image and convert it to a PIL image
            current_image = image[i, :, :, :].permute(2, 0, 1)
            pil_image = transforms.ToPILImage()(current_image)

            draw = ImageDraw.Draw(pil_image)

            # Iterate over the bounding boxes for the current image
            for j, (class_name, class_data) in enumerate(tracking.items()):
                for class_id, bbox_list in class_data.items():
                    # Check if the current index is within the bounds of the bbox_list
                    if i < len(bbox_list):
                        bbox = bbox_list[i]
                        # Ensure bbox is a list or tuple before unpacking
                        if isinstance(bbox, (list, tuple)):
                            x1, y1, x2, y2, _, _ = bbox
                            # Convert coordinates to integers
                            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                            # Generate a color from the rainbow colormap
                            color = tuple(int(255 * x) for x in colormap(j / len(tracking)))[:3]
                            # Draw the bounding box on the image with the generated color
                            draw.rectangle([x1, y1, x2, y2], outline=color, width=box_line_width)
                            if draw_text:
                                # Draw the class name and ID as text above the box with the generated color
                                text = f"{class_id}.{class_name}"
                                # Calculate the width and height of the text
                                _, _, text_width, text_height = draw.textbbox((0, 0), text=text, font=font)
                                # Position the text above the top-left corner of the box
                                text_position = (x1, y1 - text_height)
                                draw.text(text_position, text, fill=color, font=font)
                        else:
                            print(f"Unexpected data type for bbox: {type(bbox)}")

            # Convert the drawn image back to a torch tensor and adjust back to (H, W, C)
            modified_image_tensor = transforms.ToTensor()(pil_image).permute(1, 2, 0)
            modified_images.append(modified_image_tensor)

        # Stack the modified images back into a batch
        image_tensor_batch = torch.stack(modified_images).cpu().float()

        return image_tensor_batch,

class PointsEditor:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "points_store": ("STRING", {"multiline": False}),
                "coordinates": ("STRING", {"multiline": False}),
                "neg_coordinates": ("STRING", {"multiline": False}),
                "bbox_store": ("STRING", {"multiline": False}),
                "bboxes": ("STRING", {"multiline": False}),
                "bbox_format": (
                    [
                        'xyxy',
                        'xywh',
                    ],
                ),
                "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
                "normalize": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "bg_image": ("IMAGE", ),
            },
        }

    RETURN_TYPES = ("STRING", "STRING", "BBOX", "MASK", "IMAGE")
    RETURN_NAMES = ("positive_coords", "negative_coords", "bbox", "bbox_mask", "cropped_image")
    FUNCTION = "pointdata"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = """
# WORK IN PROGRESS
Do not count on this as part of your workflow yet;
it probably contains lots of bugs and stability is not
guaranteed!!

## Graphical editor to create coordinates

**Shift + click** to add a positive (green) point.
**Shift + right click** to add a negative (red) point.
**Ctrl + click** to draw a box.
**Right click on a point** to delete it.
Note that you can't delete from the start/end of the points array.

To add an image, select the node and copy/paste or drag in the image,
or use the bg_image input on queue (first frame of the batch).

**THE IMAGE IS SAVED TO THE NODE AND WORKFLOW METADATA**
You can clear the image from the context menu by right-clicking on the canvas.

"""

    def pointdata(self, points_store, bbox_store, width, height, coordinates, neg_coordinates, normalize, bboxes, bbox_format="xyxy", bg_image=None):
        coordinates = json.loads(coordinates)
        pos_coordinates = []
        for coord in coordinates:
            coord['x'] = int(round(coord['x']))
            coord['y'] = int(round(coord['y']))
            if normalize:
                norm_x = coord['x'] / width
                norm_y = coord['y'] / height
                pos_coordinates.append({'x': norm_x, 'y': norm_y})
            else:
                pos_coordinates.append({'x': coord['x'], 'y': coord['y']})

        if neg_coordinates:
            coordinates = json.loads(neg_coordinates)
            neg_coordinates = []
            for coord in coordinates:
                coord['x'] = int(round(coord['x']))
                coord['y'] = int(round(coord['y']))
                if normalize:
                    norm_x = coord['x'] / width
                    norm_y = coord['y'] / height
                    neg_coordinates.append({'x': norm_x, 'y': norm_y})
                else:
                    neg_coordinates.append({'x': coord['x'], 'y': coord['y']})

        # Create a blank mask
        mask = np.zeros((height, width), dtype=np.uint8)
        bboxes = json.loads(bboxes)
        print(bboxes)
        valid_bboxes = []
        for bbox in bboxes:
            if (bbox.get("startX") is None or
                bbox.get("startY") is None or
                bbox.get("endX") is None or
                bbox.get("endY") is None):
                continue  # Skip this bounding box if any value is None
            else:
                # Ensure that endX and endY are greater than startX and startY
                x_min = min(int(bbox["startX"]), int(bbox["endX"]))
                y_min = min(int(bbox["startY"]), int(bbox["endY"]))
                x_max = max(int(bbox["startX"]), int(bbox["endX"]))
                y_max = max(int(bbox["startY"]), int(bbox["endY"]))

                valid_bboxes.append((x_min, y_min, x_max, y_max))

        bboxes_xyxy = []
        for bbox in valid_bboxes:
            x_min, y_min, x_max, y_max = bbox
            bboxes_xyxy.append((x_min, y_min, x_max, y_max))
            mask[y_min:y_max, x_min:x_max] = 1  # Fill the bounding box area with 1s

        if bbox_format == "xywh":
            bboxes_xywh = []
            for bbox in valid_bboxes:
                x_min, y_min, x_max, y_max = bbox
                width = x_max - x_min
                height = y_max - y_min
                bboxes_xywh.append((x_min, y_min, width, height))
            bboxes = bboxes_xywh
        else:
            bboxes = bboxes_xyxy

        mask_tensor = torch.from_numpy(mask)
        mask_tensor = mask_tensor.unsqueeze(0).float().cpu()

        if bg_image is not None and len(valid_bboxes) > 0:
            x_min, y_min, x_max, y_max = bboxes[0]
            cropped_image = bg_image[:, y_min:y_max, x_min:x_max, :]

        elif bg_image is not None:
            cropped_image = bg_image

        if bg_image is None:
            return (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor)
        else:
            transform = transforms.ToPILImage()
            image = transform(bg_image[0].permute(2, 0, 1))
            buffered = io.BytesIO()
            image.save(buffered, format="JPEG", quality=75)

            # Encode the image bytes to a Base64 string
            img_bytes = buffered.getvalue()
            img_base64 = base64.b64encode(img_bytes).decode('utf-8')

            return {
                "ui": {"bg_image": [img_base64]},
                "result": (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor, cropped_image)
            }

class CutAndDragOnPath:
    RETURN_TYPES = ("IMAGE", "MASK",)
    RETURN_NAMES = ("image", "mask", )
    FUNCTION = "cutanddrag"
    CATEGORY = "KJNodes/image"
    DESCRIPTION = """
Cuts the masked area from the image and drags it along the path. If inpaint is enabled and no bg_image is provided, the cut area is filled using the cv2 TELEA algorithm.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "coordinates": ("STRING", {"forceInput": True}),
                "mask": ("MASK",),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "inpaint": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                "bg_image": ("IMAGE",),
            }
        }

    def cutanddrag(self, image, coordinates, mask, frame_width, frame_height, inpaint, bg_image=None):
        # Parse coordinates
        if len(coordinates) < 10:
            coords_list = []
            for coords in coordinates:
                coords = json.loads(coords.replace("'", '"'))
                coords_list.append(coords)
        else:
            coords = json.loads(coordinates.replace("'", '"'))
            coords_list = [coords]

        batch_size = len(coords_list[0])
        images_list = []
        masks_list = []

        # Convert input image and mask to PIL
        input_image = tensor2pil(image)[0]
        input_mask = tensor2pil(mask)[0]

        # Find masked region bounds
        mask_array = np.array(input_mask)
        y_indices, x_indices = np.where(mask_array > 0)
        if len(x_indices) == 0 or len(y_indices) == 0:
            return (image, mask)

        x_min, x_max = x_indices.min(), x_indices.max()
        y_min, y_max = y_indices.min(), y_indices.max()

        # Cut out the masked region
        cut_width = x_max - x_min
        cut_height = y_max - y_min
        cut_image = input_image.crop((x_min, y_min, x_max, y_max))
        cut_mask = input_mask.crop((x_min, y_min, x_max, y_max))

        # Create inpainted background
        if bg_image is None:
            background = input_image.copy()
            # Inpaint the cut area
            if inpaint:
                import cv2
                border = 5  # Create a small border around the cut area for better inpainting
                fill_mask = Image.new("L", background.size, 0)
                draw = ImageDraw.Draw(fill_mask)
                draw.rectangle([x_min-border, y_min-border, x_max+border, y_max+border], fill=255)
                background = cv2.inpaint(
                    np.array(background),
                    np.array(fill_mask),
                    inpaintRadius=3,
                    flags=cv2.INPAINT_TELEA
                )
                background = Image.fromarray(background)
        else:
            background = tensor2pil(bg_image)[0]

        # Create a batch of images with the cut region at different positions
        for i in range(batch_size):
            # Create new image
            new_image = background.copy()
            new_mask = Image.new("L", (frame_width, frame_height), 0)

            # Get target position from coordinates
            for coords in coords_list:
                target_x = int(coords[i]['x'] - cut_width/2)
                target_y = int(coords[i]['y'] - cut_height/2)

                # Paste cut region at new position
                new_image.paste(cut_image, (target_x, target_y), cut_mask)
                new_mask.paste(cut_mask, (target_x, target_y))

            # Convert to tensor and append
            image_tensor = pil2tensor(new_image)
            mask_tensor = pil2tensor(new_mask)

            images_list.append(image_tensor)
            masks_list.append(mask_tensor)

        # Stack tensors into batches
        out_images = torch.cat(images_list, dim=0).cpu().float()
        out_masks = torch.cat(masks_list, dim=0)

        return (out_images, out_masks)
custom_nodes/ComfyUI-KJNodes-main/nodes/image_nodes.py
ADDED
The diff for this file is too large to render. See raw diff.
custom_nodes/ComfyUI-KJNodes-main/nodes/intrinsic_lora_nodes.py
ADDED
@@ -0,0 +1,115 @@
import folder_paths
import os
import torch
import torch.nn.functional as F
from comfy.utils import ProgressBar, load_torch_file
import comfy.sample
import comfy.model_sampling  # used below for the pass-through sampling classes
import comfy.sd  # used below for load_lora_for_models
from nodes import CLIPTextEncode

script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
folder_paths.add_model_folder_path("intrinsic_loras", os.path.join(script_directory, "intrinsic_loras"))

class Intrinsic_lora_sampling:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                "lora_name": (folder_paths.get_filename_list("intrinsic_loras"), ),
                "task": (
                    [
                        'depth map',
                        'surface normals',
                        'albedo',
                        'shading',
                    ],
                    {
                        "default": 'depth map'
                    }),
                "text": ("STRING", {"multiline": True, "default": ""}),
                "clip": ("CLIP", ),
                "vae": ("VAE", ),
                "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}),
                },
                "optional": {
                    "image": ("IMAGE",),
                    "optional_latent": ("LATENT",),
                },
        }

    RETURN_TYPES = ("IMAGE", "LATENT",)
    FUNCTION = "onestepsample"
    CATEGORY = "KJNodes"
    DESCRIPTION = """
Sampler to use the intrinsic LoRAs:
https://github.com/duxiaodan/intrinsic-lora
These LoRAs are tiny and thus included
with this node pack.
"""

    def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None):
        pbar = ProgressBar(3)

        if optional_latent is None:
            image_list = []
            for start_idx in range(0, image.shape[0], per_batch):
                sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch])
                image_list.append(vae.encode(sub_pixels[:,:,:,:3]))
            sample = torch.cat(image_list, dim=0)
        else:
            sample = optional_latent["samples"]
        noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu")
        prompt = task + "," + text
        positive, = CLIPTextEncode.encode(self, clip, prompt)
        negative = positive  # negative shouldn't do anything in this scenario

        pbar.update(1)

        # Custom model sampling to pass the latent through as-is
        class X0_PassThrough(comfy.model_sampling.EPS):
            def calculate_denoised(self, sigma, model_output, model_input):
                return model_output
            def calculate_input(self, sigma, noise):
                return noise
        sampling_base = comfy.model_sampling.ModelSamplingDiscrete
        sampling_type = X0_PassThrough

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass
        model_sampling = ModelSamplingAdvanced(model.model.model_config)

        # Load the LoRA
        model_clone = model.clone()
        lora_path = folder_paths.get_full_path("intrinsic_loras", lora_name)
        lora = load_torch_file(lora_path, safe_load=True)
        self.loaded_lora = (lora_path, lora)

        model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0]

        model_clone_with_lora.add_object_patch("model_sampling", model_sampling)

        samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample,
                                                  denoise=1.0, disable_noise=True, start_step=0, last_step=1,
                                                  force_full_denoise=True, noise_mask=None, callback=None, disable_pbar=True, seed=None)}
        pbar.update(1)

        decoded = []
        for start_idx in range(0, samples["samples"].shape[0], per_batch):
            decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch]))
        image_out = torch.cat(decoded, dim=0)

        pbar.update(1)

        if task == 'depth map':
            imax = image_out.max()
            imin = image_out.min()
            image_out = (image_out-imin)/(imax-imin)
            image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3)
        elif task == 'surface normals':
            image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5
            image_out = 1.0 - image_out
        else:
            image_out = image_out.clamp(-1., 1.)

        return (image_out, samples,)
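A note on the one-step trick above (illustrative, not part of the diff): X0_PassThrough overrides ComfyUI's EPS sampling so that calculate_input returns the latent untouched and calculate_denoised returns the raw model output, which means a single zero-noise "euler" step simply hands back the model's x0 prediction. A minimal standalone sketch of the same idea, with a hypothetical stand-in model:

import torch

def model(x, sigma):
    # Hypothetical denoiser that predicts x0 directly (placeholder math).
    return 0.5 * x

def one_step_x0(latent, sigma=1.0):
    noise = torch.zeros_like(latent)  # equivalent of disable_noise=True
    # With pass-through input/denoised hooks, one sampler step reduces to
    # a single forward call whose output is returned unchanged.
    return model(latent + noise, sigma)

x0 = one_step_x0(torch.randn(1, 4, 64, 64))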
custom_nodes/ComfyUI-KJNodes-main/nodes/mask_nodes.py
ADDED
@@ -0,0 +1,1397 @@
import torch
import torch.nn.functional as F
from torchvision.transforms import functional as TF
from PIL import Image, ImageDraw, ImageFilter, ImageFont
import scipy.ndimage
import numpy as np
from contextlib import nullcontext
import os

import model_management
from comfy.utils import ProgressBar
from comfy.utils import common_upscale
from nodes import MAX_RESOLUTION

import folder_paths

from ..utility.utility import tensor2pil, pil2tensor

script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

class BatchCLIPSeg:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):

        return {"required":
                    {
                        "images": ("IMAGE",),
                        "text": ("STRING", {"multiline": False}),
                        "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.001}),
                        "binary_mask": ("BOOLEAN", {"default": True}),
                        "combine_mask": ("BOOLEAN", {"default": False}),
                        "use_cuda": ("BOOLEAN", {"default": True}),
                    },
                "optional":
                    {
                        "blur_sigma": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                        "opt_model": ("CLIPSEGMODEL", ),
                        "prev_mask": ("MASK", {"default": None}),
                        "image_bg_level": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                        "invert": ("BOOLEAN", {"default": False}),
                    }
                }

    CATEGORY = "KJNodes/masking"
    RETURN_TYPES = ("MASK", "IMAGE", )
    RETURN_NAMES = ("Mask", "Image", )
    FUNCTION = "segment_image"
    DESCRIPTION = """
Segments an image or batch of images using CLIPSeg.
"""

    def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda, blur_sigma=0.0, opt_model=None, prev_mask=None, invert=False, image_bg_level=0.5):
        from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
        import torchvision.transforms as transforms
        offload_device = model_management.unet_offload_device()
        device = model_management.get_torch_device()
        if not use_cuda:
            device = torch.device("cpu")
        dtype = model_management.unet_dtype()

        if opt_model is None:
            checkpoint_path = os.path.join(folder_paths.models_dir, 'clip_seg', 'clipseg-rd64-refined-fp16')
            if not hasattr(self, "model"):
                try:
                    if not os.path.exists(checkpoint_path):
                        from huggingface_hub import snapshot_download
                        snapshot_download(repo_id="Kijai/clipseg-rd64-refined-fp16", local_dir=checkpoint_path, local_dir_use_symlinks=False)
                    self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)
                except:
                    checkpoint_path = "CIDAS/clipseg-rd64-refined"
                    self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)
            processor = CLIPSegProcessor.from_pretrained(checkpoint_path)

        else:
            self.model = opt_model['model']
            processor = opt_model['processor']

        self.model.to(dtype).to(device)

        B, H, W, C = images.shape
        images = images.to(device)

        autocast_condition = (dtype != torch.float32) and not model_management.is_device_mps(device)
        with torch.autocast(model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext():

            PIL_images = [Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) for image in images]
            prompt = [text] * len(images)
            input_prc = processor(text=prompt, images=PIL_images, return_tensors="pt")

            for key in input_prc:
                input_prc[key] = input_prc[key].to(device)
            outputs = self.model(**input_prc)

        mask_tensor = torch.sigmoid(outputs.logits)
        mask_tensor = (mask_tensor - mask_tensor.min()) / (mask_tensor.max() - mask_tensor.min())
        mask_tensor = torch.where(mask_tensor > (threshold), mask_tensor, torch.tensor(0, dtype=torch.float))
        print(mask_tensor.shape)
        if len(mask_tensor.shape) == 2:
            mask_tensor = mask_tensor.unsqueeze(0)
        mask_tensor = F.interpolate(mask_tensor.unsqueeze(1), size=(H, W), mode='nearest')
        mask_tensor = mask_tensor.squeeze(1)

        self.model.to(offload_device)

        if binary_mask:
            mask_tensor = (mask_tensor > 0).float()
        if blur_sigma > 0:
            kernel_size = int(6 * int(blur_sigma) + 1)
            blur = transforms.GaussianBlur(kernel_size=(kernel_size, kernel_size), sigma=(blur_sigma, blur_sigma))
            mask_tensor = blur(mask_tensor)

        if combine_mask:
            mask_tensor = torch.max(mask_tensor, dim=0)[0]
            mask_tensor = mask_tensor.unsqueeze(0).repeat(len(images), 1, 1)

        del outputs
        model_management.soft_empty_cache()

        if prev_mask is not None:
            if prev_mask.shape != mask_tensor.shape:
                prev_mask = F.interpolate(prev_mask.unsqueeze(1), size=(H, W), mode='nearest')
            mask_tensor = mask_tensor + prev_mask.to(device)
            mask_tensor = torch.clamp(mask_tensor, min=0.0, max=1.0)

        if invert:
            mask_tensor = 1 - mask_tensor

        image_tensor = images * mask_tensor.unsqueeze(-1) + (1 - mask_tensor.unsqueeze(-1)) * image_bg_level
        image_tensor = torch.clamp(image_tensor, min=0.0, max=1.0).cpu().float()

        mask_tensor = mask_tensor.cpu().float()

        return mask_tensor, image_tensor,

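# Illustrative note, not part of the original file: the Gaussian blur above
# sizes its kernel with the common "6 * sigma + 1" rule, which keeps the
# kernel odd and wide enough to cover roughly +/- 3 standard deviations:
#
#   blur_sigma = 2.0  ->  kernel_size = int(6 * int(2.0) + 1) = 13
#
# The inner int() means fractional sigmas are truncated before sizing.
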
class DownloadAndLoadCLIPSeg:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):

        return {"required":
                    {
                        "model": (
                            ['Kijai/clipseg-rd64-refined-fp16',
                             'CIDAS/clipseg-rd64-refined',
                            ],
                        ),
                    },
                }

    CATEGORY = "KJNodes/masking"
    RETURN_TYPES = ("CLIPSEGMODEL",)
    RETURN_NAMES = ("clipseg_model",)
    FUNCTION = "segment_image"
    DESCRIPTION = """
Downloads and loads a CLIPSeg model with huggingface_hub,
to ComfyUI/models/clip_seg
"""

    def segment_image(self, model):
        from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
        checkpoint_path = os.path.join(folder_paths.models_dir, 'clip_seg', os.path.basename(model))
        if not hasattr(self, "model"):
            if not os.path.exists(checkpoint_path):
                from huggingface_hub import snapshot_download
                snapshot_download(repo_id=model, local_dir=checkpoint_path, local_dir_use_symlinks=False)
            self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)

        processor = CLIPSegProcessor.from_pretrained(checkpoint_path)

        clipseg_model = {}
        clipseg_model['model'] = self.model
        clipseg_model['processor'] = processor

        return clipseg_model,

class CreateTextMask:

    RETURN_TYPES = ("IMAGE", "MASK",)
    FUNCTION = "createtextmask"
    CATEGORY = "KJNodes/text"
    DESCRIPTION = """
Creates a text image and mask.
Looks for fonts from this folder:
ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts

If start_rotation and/or end_rotation are different values,
creates an animation between them.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "invert": ("BOOLEAN", {"default": False}),
                "frames": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                "text_x": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}),
                "text_y": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}),
                "font_size": ("INT", {"default": 32, "min": 8, "max": 4096, "step": 1}),
                "font_color": ("STRING", {"default": "white"}),
                "text": ("STRING", {"default": "HELLO!", "multiline": True}),
                "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
                "width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "start_rotation": ("INT", {"default": 0, "min": 0, "max": 359, "step": 1}),
                "end_rotation": ("INT", {"default": 0, "min": -359, "max": 359, "step": 1}),
            },
        }

    def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation):
        # Define the number of images in the batch
        batch_size = frames
        out = []
        masks = []
        rotation = start_rotation
        if start_rotation != end_rotation:
            rotation_increment = (end_rotation - start_rotation) / (batch_size - 1)

        font_path = folder_paths.get_full_path("kjnodes_fonts", font)
        # Generate the text
        for i in range(batch_size):
            image = Image.new("RGB", (width, height), "black")
            draw = ImageDraw.Draw(image)
            font = ImageFont.truetype(font_path, font_size)

            # Split the text into words
            words = text.split()

            # Initialize variables for line creation
            lines = []
            current_line = []
            current_line_width = 0
            try:  # new Pillow
                # Iterate through words to create lines
                for word in words:
                    word_width = font.getbbox(word)[2]
                    if current_line_width + word_width <= width - 2 * text_x:
                        current_line.append(word)
                        current_line_width += word_width + font.getbbox(" ")[2]  # Add space width
                    else:
                        lines.append(" ".join(current_line))
                        current_line = [word]
                        current_line_width = word_width
            except:  # old Pillow
                for word in words:
                    word_width = font.getsize(word)[0]
                    if current_line_width + word_width <= width - 2 * text_x:
                        current_line.append(word)
                        current_line_width += word_width + font.getsize(" ")[0]  # Add space width
                    else:
                        lines.append(" ".join(current_line))
                        current_line = [word]
                        current_line_width = word_width

            # Add the last line if it's not empty
            if current_line:
                lines.append(" ".join(current_line))

            # Draw each line of text separately
            y_offset = text_y
            for line in lines:
                text_width = font.getlength(line)
                text_height = font_size
                text_center_x = text_x + text_width / 2
                text_center_y = y_offset + text_height / 2
                try:
                    draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga'])
                except:
                    draw.text((text_x, y_offset), line, font=font, fill=font_color)
                y_offset += text_height  # Move to the next line

            if start_rotation != end_rotation:
                image = image.rotate(rotation, center=(text_center_x, text_center_y))
                rotation += rotation_increment

            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            mask = image[:, :, :, 0]
            masks.append(mask)
            out.append(image)

        if invert:
            return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),)
        return (torch.cat(out, dim=0), torch.cat(masks, dim=0),)

class ColorToMask:

    RETURN_TYPES = ("MASK",)
    FUNCTION = "clip"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Converts the chosen RGB value to a mask.
With batch inputs, **per_batch**
controls the number of images processed at once.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
                "invert": ("BOOLEAN", {"default": False}),
                "red": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                "green": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                "blue": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                "threshold": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}),
                "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}),
            },
        }

    def clip(self, images, red, green, blue, threshold, invert, per_batch):

        color = torch.tensor([red, green, blue], dtype=torch.uint8)
        black = torch.tensor([0, 0, 0], dtype=torch.uint8)
        white = torch.tensor([255, 255, 255], dtype=torch.uint8)

        if invert:
            black, white = white, black

        steps = images.shape[0]
        pbar = ProgressBar(steps)
        tensors_out = []

        for start_idx in range(0, images.shape[0], per_batch):

            # Calculate color distances
            color_distances = torch.norm(images[start_idx:start_idx+per_batch] * 255 - color, dim=-1)

            # Create a mask based on the threshold
            mask = color_distances <= threshold

            # Apply the mask to create new images
            mask_out = torch.where(mask.unsqueeze(-1), white, black).float()
            mask_out = mask_out.mean(dim=-1)

            tensors_out.append(mask_out.cpu())
            batch_count = mask_out.shape[0]
            pbar.update(batch_count)

        tensors_out = torch.cat(tensors_out, dim=0)
        tensors_out = torch.clamp(tensors_out, min=0.0, max=1.0)
        return tensors_out,

class CreateFluidMask:

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "createfluidmask"
    CATEGORY = "KJNodes/masking/generate"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "invert": ("BOOLEAN", {"default": False}),
                "frames": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                "width": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
                "inflow_count": ("INT", {"default": 3, "min": 0, "max": 255, "step": 1}),
                "inflow_velocity": ("INT", {"default": 1, "min": 0, "max": 255, "step": 1}),
                "inflow_radius": ("INT", {"default": 8, "min": 0, "max": 255, "step": 1}),
                "inflow_padding": ("INT", {"default": 50, "min": 0, "max": 255, "step": 1}),
                "inflow_duration": ("INT", {"default": 60, "min": 0, "max": 255, "step": 1}),
            },
        }
    # Using code from https://github.com/GregTJ/stable-fluids
    def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration):
        from ..utility.fluid import Fluid
        try:
            from scipy.special import erf
        except:
            from scipy.spatial import erf
        out = []
        masks = []
        RESOLUTION = width, height
        DURATION = frames

        INFLOW_PADDING = inflow_padding
        INFLOW_DURATION = inflow_duration
        INFLOW_RADIUS = inflow_radius
        INFLOW_VELOCITY = inflow_velocity
        INFLOW_COUNT = inflow_count

        print('Generating fluid solver, this may take some time.')
        fluid = Fluid(RESOLUTION, 'dye')

        center = np.floor_divide(RESOLUTION, 2)
        r = np.min(center) - INFLOW_PADDING

        points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False)
        points = tuple(np.array((np.cos(p), np.sin(p))) for p in points)
        normals = tuple(-p for p in points)
        points = tuple(r * p + center for p in points)

        inflow_velocity = np.zeros_like(fluid.velocity)
        inflow_dye = np.zeros(fluid.shape)
        for p, n in zip(points, normals):
            mask = np.linalg.norm(fluid.indices - p[:, None, None], axis=0) <= INFLOW_RADIUS
            inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY
            inflow_dye[mask] = 1

        for f in range(DURATION):
            print(f'Computing frame {f + 1} of {DURATION}.')
            if f <= INFLOW_DURATION:
                fluid.velocity += inflow_velocity
                fluid.dye += inflow_dye

            curl = fluid.step()[1]
            # Using the error function to make the contrast a bit higher.
            # Any other sigmoid function e.g. smoothstep would work.
            curl = (erf(curl * 2) + 1) / 4

            color = np.dstack((curl, np.ones(fluid.shape), fluid.dye))
            color = (np.clip(color, 0, 1) * 255).astype('uint8')
            image = np.array(color).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            mask = image[:, :, :, 0]
            masks.append(mask)
            out.append(image)

        if invert:
            return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),)
        return (torch.cat(out, dim=0), torch.cat(masks, dim=0),)

class CreateAudioMask:

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "createaudiomask"
    CATEGORY = "KJNodes/deprecated"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "invert": ("BOOLEAN", {"default": False}),
                "frames": ("INT", {"default": 16, "min": 1, "max": 255, "step": 1}),
                "scale": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 2.0, "step": 0.01}),
                "audio_path": ("STRING", {"default": "audio.wav"}),
                "width": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
            },
        }

    def createaudiomask(self, frames, width, height, invert, audio_path, scale):
        try:
            import librosa
        except ImportError:
            raise Exception("Cannot import librosa. Install it with 'pip install librosa'")
        batch_size = frames
        out = []
        masks = []
        if audio_path == "audio.wav":  # I don't know why a relative path won't work otherwise...
            audio_path = os.path.join(script_directory, audio_path)
        audio, sr = librosa.load(audio_path)
        spectrogram = np.abs(librosa.stft(audio))

        for i in range(batch_size):
            image = Image.new("RGB", (width, height), "black")
            draw = ImageDraw.Draw(image)
            frame = spectrogram[:, i]
            circle_radius = int(height * np.mean(frame))
            circle_radius *= scale
            circle_center = (width // 2, height // 2)  # Calculate the center of the image

            draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius),
                          (circle_center[0] + circle_radius, circle_center[1] + circle_radius)],
                         fill='white')

            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            mask = image[:, :, :, 0]
            masks.append(mask)
            out.append(image)

        if invert:
            return (1.0 - torch.cat(out, dim=0),)
        return (torch.cat(out, dim=0), torch.cat(masks, dim=0),)

class CreateGradientMask:
|
486 |
+
|
487 |
+
RETURN_TYPES = ("MASK",)
|
488 |
+
FUNCTION = "createmask"
|
489 |
+
CATEGORY = "KJNodes/masking/generate"
|
490 |
+
|
491 |
+
@classmethod
|
492 |
+
def INPUT_TYPES(s):
|
493 |
+
return {
|
494 |
+
"required": {
|
495 |
+
"invert": ("BOOLEAN", {"default": False}),
|
496 |
+
"frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
|
497 |
+
"width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
|
498 |
+
"height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
|
499 |
+
},
|
500 |
+
}
|
501 |
+
def createmask(self, frames, width, height, invert):
|
502 |
+
# Define the number of images in the batch
|
503 |
+
batch_size = frames
|
504 |
+
out = []
|
505 |
+
# Create an empty array to store the image batch
|
506 |
+
image_batch = np.zeros((batch_size, height, width), dtype=np.float32)
|
507 |
+
# Generate the black to white gradient for each image
|
508 |
+
for i in range(batch_size):
|
509 |
+
gradient = np.linspace(1.0, 0.0, width, dtype=np.float32)
|
510 |
+
time = i / frames # Calculate the time variable
|
511 |
+
offset_gradient = gradient - time # Offset the gradient values based on time
|
512 |
+
image_batch[i] = offset_gradient.reshape(1, -1)
|
513 |
+
output = torch.from_numpy(image_batch)
|
514 |
+
mask = output
|
515 |
+
out.append(mask)
|
516 |
+
if invert:
|
517 |
+
return (1.0 - torch.cat(out, dim=0),)
|
518 |
+
return (torch.cat(out, dim=0),)
|
519 |
+
|
class CreateFadeMask:

    RETURN_TYPES = ("MASK",)
    FUNCTION = "createfademask"
    CATEGORY = "KJNodes/deprecated"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "invert": ("BOOLEAN", {"default": False}),
                "frames": ("INT", {"default": 2, "min": 2, "max": 10000, "step": 1}),
                "width": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 256, "min": 16, "max": 4096, "step": 1}),
                "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
                "start_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "midpoint_level": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "end_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "midpoint_frame": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}),
            },
        }

    def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame):
        def ease_in(t):
            return t * t

        def ease_out(t):
            return 1 - (1 - t) * (1 - t)

        def ease_in_out(t):
            return 3 * t * t - 2 * t * t * t

        batch_size = frames
        out = []
        image_batch = np.zeros((batch_size, height, width), dtype=np.float32)

        if midpoint_frame == 0:
            midpoint_frame = batch_size // 2

        for i in range(batch_size):
            if i <= midpoint_frame:
                t = i / midpoint_frame
                if interpolation == "ease_in":
                    t = ease_in(t)
                elif interpolation == "ease_out":
                    t = ease_out(t)
                elif interpolation == "ease_in_out":
                    t = ease_in_out(t)
                color = start_level - t * (start_level - midpoint_level)
            else:
                t = (i - midpoint_frame) / (batch_size - midpoint_frame)
                if interpolation == "ease_in":
                    t = ease_in(t)
                elif interpolation == "ease_out":
                    t = ease_out(t)
                elif interpolation == "ease_in_out":
                    t = ease_in_out(t)
                color = midpoint_level - t * (midpoint_level - end_level)

            color = np.clip(color, 0, 255)
            image = np.full((height, width), color, dtype=np.float32)
            image_batch[i] = image

        output = torch.from_numpy(image_batch)
        mask = output
        out.append(mask)

        if invert:
            return (1.0 - torch.cat(out, dim=0),)
        return (torch.cat(out, dim=0),)
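# --- Editor's sketch (illustrative, not part of the original node file) ---
# The three easing curves used above, evaluated at a few sample points;
# ease_in_out is the classic smoothstep 3t^2 - 2t^3.
def _example_easing_values():
    ease_in = lambda t: t * t
    ease_out = lambda t: 1 - (1 - t) * (1 - t)
    ease_in_out = lambda t: 3 * t * t - 2 * t * t * t
    # At t=0.5: ease_in -> 0.25, ease_out -> 0.75, ease_in_out -> 0.5
    return [(t, ease_in(t), ease_out(t), ease_in_out(t)) for t in (0.0, 0.25, 0.5, 1.0)]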
class CreateFadeMaskAdvanced:

    RETURN_TYPES = ("MASK",)
    FUNCTION = "createfademask"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Create a batch of masks interpolated between given frames and values.
Uses the same syntax as Fizz' BatchValueSchedule.
The first value is the frame index (note that this starts from 0, not 1)
and the second value inside the brackets is the float value of the mask in range 0.0 - 1.0.

For example the default values:
0:(0.0)
7:(1.0)
15:(0.0)

would create a mask batch of 16 frames, starting from black,
interpolating with the chosen curve to fully white at the 8th frame,
and interpolating from that to fully black at the 16th frame.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}),
                "invert": ("BOOLEAN", {"default": False}),
                "frames": ("INT", {"default": 16, "min": 2, "max": 10000, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": 4096, "step": 1}),
                "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
            },
        }

    def createfademask(self, frames, width, height, invert, points_string, interpolation):
        def ease_in(t):
            return t * t

        def ease_out(t):
            return 1 - (1 - t) * (1 - t)

        def ease_in_out(t):
            return 3 * t * t - 2 * t * t * t

        # Parse the input string into a list of tuples
        points = []
        points_string = points_string.rstrip(',\n')
        for point_str in points_string.split(','):
            frame_str, color_str = point_str.split(':')
            frame = int(frame_str.strip())
            color = float(color_str.strip()[1:-1])  # Remove parentheses around color
            points.append((frame, color))

        # Check if the last frame is already in the points
        if len(points) == 0 or points[-1][0] != frames - 1:
            # If not, add it with the color of the last specified frame
            points.append((frames - 1, points[-1][1] if points else 0))

        # Sort the points by frame number
        points.sort(key=lambda x: x[0])

        batch_size = frames
        out = []
        image_batch = np.zeros((batch_size, height, width), dtype=np.float32)

        # Index of the next point to interpolate towards
        next_point = 1

        for i in range(batch_size):
            while next_point < len(points) and i > points[next_point][0]:
                next_point += 1

            # Interpolate between the previous point and the next point
            prev_point = next_point - 1
            t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0])
            if interpolation == "ease_in":
                t = ease_in(t)
            elif interpolation == "ease_out":
                t = ease_out(t)
            elif interpolation == "ease_in_out":
                t = ease_in_out(t)
            elif interpolation == "linear":
                pass  # No need to modify `t` for linear interpolation

            color = points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1])
            color = np.clip(color, 0, 255)
            image = np.full((height, width), color, dtype=np.float32)
            image_batch[i] = image

        output = torch.from_numpy(image_batch)
        mask = output
        out.append(mask)

        if invert:
            return (1.0 - torch.cat(out, dim=0),)
        return (torch.cat(out, dim=0),)
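# --- Editor's sketch (illustrative, not part of the original node file) ---
# Minimal standalone version of the points_string parsing used above,
# assuming the same "frame:(value)" comma/newline syntax:
def _example_parse_points(points_string="0:(0.0),\n7:(1.0),\n15:(0.0)\n"):
    points = []
    for point_str in points_string.rstrip(',\n').split(','):
        frame_str, value_str = point_str.split(':')
        points.append((int(frame_str.strip()), float(value_str.strip()[1:-1])))
    return points  # -> [(0, 0.0), (7, 1.0), (15, 0.0)]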
class CreateMagicMask:

    RETURN_TYPES = ("MASK", "MASK",)
    RETURN_NAMES = ("mask", "mask_inverted",)
    FUNCTION = "createmagicmask"
    CATEGORY = "KJNodes/masking/generate"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "frames": ("INT", {"default": 16, "min": 2, "max": 4096, "step": 1}),
                "depth": ("INT", {"default": 12, "min": 1, "max": 500, "step": 1}),
                "distortion": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0, "step": 0.01}),
                "seed": ("INT", {"default": 123, "min": 0, "max": 99999999, "step": 1}),
                "transitions": ("INT", {"default": 1, "min": 1, "max": 20, "step": 1}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
            },
        }

    def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height):
        from ..utility.magictex import coordinate_grid, random_transform, magic
        import matplotlib.pyplot as plt
        rng = np.random.default_rng(seed)
        out = []
        coords = coordinate_grid((frame_width, frame_height))

        # Calculate the number of frames for each transition
        frames_per_transition = frames // transitions

        # Generate a base set of parameters
        base_params = {
            "coords": random_transform(coords, rng),
            "depth": depth,
            "distortion": distortion,
        }
        for t in range(transitions):
            # Generate a second set of parameters that is at most max_diff away from the base parameters
            params1 = base_params.copy()
            params2 = base_params.copy()

            params1['coords'] = random_transform(coords, rng)
            params2['coords'] = random_transform(coords, rng)

            for i in range(frames_per_transition):
                # Compute the interpolation factor
                alpha = i / frames_per_transition

                # Interpolate between the two sets of parameters
                params = params1.copy()
                params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords']

                tex = magic(**params)

                dpi = frame_width / 10
                fig = plt.figure(figsize=(10, 10), dpi=dpi)

                ax = fig.add_subplot(111)
                plt.subplots_adjust(left=0, right=1, bottom=0, top=1)

                ax.get_yaxis().set_ticks([])
                ax.get_xaxis().set_ticks([])
                ax.imshow(tex, aspect='auto')

                fig.canvas.draw()
                img = np.array(fig.canvas.renderer._renderer)

                plt.close(fig)

                pil_img = Image.fromarray(img).convert("L")
                mask = torch.tensor(np.array(pil_img)) / 255.0

                out.append(mask)

        return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
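# --- Editor's note (illustrative, not part of the original node file) ---
# fig.canvas.renderer._renderer above is a private Matplotlib attribute; on
# Agg-based backends the public buffer_rgba() call yields the same pixels:
def _example_figure_to_array():
    import numpy as np
    import matplotlib
    matplotlib.use("Agg")  # assumption: a non-interactive Agg backend
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(2, 2), dpi=50)
    fig.canvas.draw()
    img = np.asarray(fig.canvas.buffer_rgba())  # (H, W, 4) uint8
    plt.close(fig)
    return img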
class CreateShapeMask:

    RETURN_TYPES = ("MASK", "MASK",)
    RETURN_NAMES = ("mask", "mask_inverted",)
    FUNCTION = "createshapemask"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Creates a mask or batch of masks with the specified shape.
Locations are center locations.
Grow value is the amount to grow the shape on each frame, creating animated masks.
"""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "shape": (
                    ['circle',
                     'square',
                     'triangle',
                     ],
                    {
                        "default": 'circle'
                    }),
                "frames": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                "location_x": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "location_y": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "shape_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
                "shape_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
            },
        }

    def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape):
        # Define the number of images in the batch
        batch_size = frames
        out = []
        color = "white"
        for i in range(batch_size):
            image = Image.new("RGB", (frame_width, frame_height), "black")
            draw = ImageDraw.Draw(image)

            # Calculate the size for this frame and ensure it's not less than 0
            current_width = max(0, shape_width + i * grow)
            current_height = max(0, shape_height + i * grow)

            if shape == 'circle' or shape == 'square':
                # Define the bounding box for the shape
                left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
                right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
                two_points = [left_up_point, right_down_point]

                if shape == 'circle':
                    draw.ellipse(two_points, fill=color)
                elif shape == 'square':
                    draw.rectangle(two_points, fill=color)

            elif shape == 'triangle':
                # Define the points for the triangle
                left_up_point = (location_x - current_width // 2, location_y + current_height // 2)  # bottom left
                right_down_point = (location_x + current_width // 2, location_y + current_height // 2)  # bottom right
                top_point = (location_x, location_y - current_height // 2)  # top point
                draw.polygon([top_point, left_up_point, right_down_point], fill=color)

            image = pil2tensor(image)
            mask = image[:, :, :, 0]
            out.append(mask)
        outstack = torch.cat(out, dim=0)
        return (outstack, 1.0 - outstack,)
class CreateVoronoiMask:

    RETURN_TYPES = ("MASK", "MASK",)
    RETURN_NAMES = ("mask", "mask_inverted",)
    FUNCTION = "createvoronoi"
    CATEGORY = "KJNodes/masking/generate"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "frames": ("INT", {"default": 16, "min": 2, "max": 4096, "step": 1}),
                "num_points": ("INT", {"default": 15, "min": 1, "max": 4096, "step": 1}),
                "line_width": ("INT", {"default": 4, "min": 1, "max": 4096, "step": 1}),
                "speed": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
            },
        }

    def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height):
        from scipy.spatial import Voronoi
        # Define the number of images in the batch
        batch_size = frames
        out = []

        # Calculate aspect ratio
        aspect_ratio = frame_width / frame_height

        # Create start and end points for each point, considering the aspect ratio
        start_points = np.random.rand(num_points, 2)
        start_points[:, 0] *= aspect_ratio

        end_points = np.random.rand(num_points, 2)
        end_points[:, 0] *= aspect_ratio

        for i in range(batch_size):
            # Interpolate the points' positions based on the current frame
            t = (i * speed) / (batch_size - 1)  # normalize to [0, 1] over the frames
            t = np.clip(t, 0, 1)  # ensure t is in [0, 1]
            points = (1 - t) * start_points + t * end_points  # lerp

            # Adjust points for aspect ratio
            # (note: the x coordinates were already scaled when the start and
            # end points were created, so this applies the scale a second time)
            points[:, 0] *= aspect_ratio

            vor = Voronoi(points)

            # Create a blank image with a white background
            fig, ax = plt.subplots()
            plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
            ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1])  # adjust x limits
            ax.axis('off')
            ax.margins(0, 0)
            fig.set_size_inches(aspect_ratio * frame_height / 100, frame_height / 100)  # adjust figure size
            ax.fill_between([0, 1], [0, 1], color='white')

            # Plot each Voronoi ridge
            for simplex in vor.ridge_vertices:
                simplex = np.asarray(simplex)
                if np.all(simplex >= 0):
                    plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width)

            fig.canvas.draw()
            img = np.array(fig.canvas.renderer._renderer)

            plt.close(fig)

            pil_img = Image.fromarray(img).convert("L")
            mask = torch.tensor(np.array(pil_img)) / 255.0

            out.append(mask)

        return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
class GetMaskSizeAndCount:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "mask": ("MASK",),
        }}

    RETURN_TYPES = ("MASK", "INT", "INT", "INT",)
    RETURN_NAMES = ("mask", "width", "height", "count",)
    FUNCTION = "getsize"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Returns the width, height and batch size of the mask,
and passes it through unchanged.
"""

    def getsize(self, mask):
        width = mask.shape[2]
        height = mask.shape[1]
        count = mask.shape[0]
        return {
            "ui": {"text": [f"{count}x{width}x{height}"]},
            "result": (mask, width, height, count),
        }
class GrowMaskWithBlur:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
                "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "tapered_corners": ("BOOLEAN", {"default": True}),
                "flip_input": ("BOOLEAN", {"default": False}),
                "blur_radius": ("FLOAT", {
                    "default": 0.0,
                    "min": 0.0,
                    "max": 100,
                    "step": 0.1
                }),
                "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
            "optional": {
                "fill_holes": ("BOOLEAN", {"default": False}),
            },
        }

    CATEGORY = "KJNodes/masking"
    RETURN_TYPES = ("MASK", "MASK",)
    RETURN_NAMES = ("mask", "mask_inverted",)
    FUNCTION = "expand_mask"
    DESCRIPTION = """
# GrowMaskWithBlur
- mask: Input mask or mask batch
- expand: Expand or contract mask or mask batch by a given amount
- incremental_expandrate: increase expand rate by a given amount per frame
- tapered_corners: use tapered corners
- flip_input: flip input mask
- blur_radius: value higher than 0 will blur the mask
- lerp_alpha: alpha value for interpolation between frames
- decay_factor: decay value for interpolation between frames
- fill_holes: fill holes in the mask (slow)"""

    def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False):
        alpha = lerp_alpha
        decay = decay_factor
        if flip_input:
            mask = 1.0 - mask
        c = 0 if tapered_corners else 1
        kernel = np.array([[c, 1, c],
                           [1, 1, 1],
                           [c, 1, c]])
        growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu()
        out = []
        previous_output = None
        current_expand = expand
        for m in growmask:
            output = m.numpy().astype(np.float32)
            for _ in range(abs(round(current_expand))):
                if current_expand < 0:
                    output = scipy.ndimage.grey_erosion(output, footprint=kernel)
                else:
                    output = scipy.ndimage.grey_dilation(output, footprint=kernel)
            if current_expand < 0:
                current_expand -= abs(incremental_expandrate)
            else:
                current_expand += abs(incremental_expandrate)
            if fill_holes:
                binary_mask = output > 0
                output = scipy.ndimage.binary_fill_holes(binary_mask)
                # note: this leaves the filled mask in the 0-255 range rather
                # than the 0-1 range used elsewhere in this file
                output = output.astype(np.float32) * 255
            output = torch.from_numpy(output)
            if alpha < 1.0 and previous_output is not None:
                # Interpolate between the previous and current frame
                output = alpha * output + (1 - alpha) * previous_output
            if decay < 1.0 and previous_output is not None:
                # Add the decayed previous output to the current frame
                output += decay * previous_output
                output = output / output.max()
            previous_output = output
            out.append(output)

        if blur_radius != 0:
            # Convert the tensor list to PIL images, apply blur, and convert back
            for idx, tensor in enumerate(out):
                # Convert tensor to PIL image
                pil_image = tensor2pil(tensor.cpu().detach())[0]
                # Apply Gaussian blur
                pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius))
                # Convert back to tensor
                out[idx] = pil2tensor(pil_image)
            blurred = torch.cat(out, dim=0)
            return (blurred, 1.0 - blurred)
        else:
            return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
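# --- Editor's sketch (illustrative, not part of the original node file) ---
# The expand loop above applies scipy's grey dilation/erosion once per pixel
# of growth. On a single white pixel, one dilation with the tapered kernel
# (corners = 0) grows it into a plus shape:
def _example_grow_one_step():
    import numpy as np
    import scipy.ndimage
    kernel = np.array([[0, 1, 0],
                       [1, 1, 1],
                       [0, 1, 0]])
    m = np.zeros((5, 5), dtype=np.float32)
    m[2, 2] = 1.0
    return scipy.ndimage.grey_dilation(m, footprint=kernel)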
class MaskBatchMulti:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
                "mask_1": ("MASK", ),
                "mask_2": ("MASK", ),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("masks",)
    FUNCTION = "combine"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Creates a mask batch from multiple masks.
You can set how many inputs the node has
with the **inputcount** and clicking update.
"""

    def combine(self, inputcount, **kwargs):
        mask = kwargs["mask_1"]
        for c in range(1, inputcount):
            new_mask = kwargs[f"mask_{c + 1}"]
            if mask.shape[1:] != new_mask.shape[1:]:
                new_mask = F.interpolate(new_mask.unsqueeze(1), size=(mask.shape[1], mask.shape[2]), mode="bicubic").squeeze(1)
            mask = torch.cat((mask, new_mask), dim=0)
        return (mask,)
class OffsetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "x": ("INT", {"default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                "y": ("INT", {"default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number"}),
                "angle": ("INT", {"default": 0, "min": -360, "max": 360, "step": 1, "display": "number"}),
                "duplication_factor": ("INT", {"default": 1, "min": 1, "max": 1000, "step": 1, "display": "number"}),
                "roll": ("BOOLEAN", {"default": False}),
                "incremental": ("BOOLEAN", {"default": False}),
                "padding_mode": (
                    [
                        'empty',
                        'border',
                        'reflection',
                    ], {
                        "default": 'empty'
                    }),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "offset"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Offsets the mask by the specified amount.
- mask: Input mask or mask batch
- x: Horizontal offset
- y: Vertical offset
- angle: Angle in degrees
- roll: roll edge wrapping
- incremental: increase the offset per frame in a batch
- duplication_factor: Number of times to duplicate the mask to form a batch
- padding_mode: Padding mode for the exposed edge
"""

    def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"):
        # Map the widget's 'border'/'reflection' names onto F.pad's
        # 'replicate'/'reflect'; the checks below originally tested for the
        # F.pad names directly, so the widget values never matched.
        padding_mode = {"border": "replicate", "reflection": "reflect"}.get(padding_mode, padding_mode)

        # Create duplicates of the mask batch
        mask = mask.repeat(duplication_factor, 1, 1).clone()

        batch_size, height, width = mask.shape

        if angle != 0 and incremental:
            for i in range(batch_size):
                rotation_angle = angle * (i + 1)
                mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0)
        elif angle > 0:
            for i in range(batch_size):
                mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0)

        if roll:
            if incremental:
                for i in range(batch_size):
                    shift_x = min(x * (i + 1), width - 1)
                    shift_y = min(y * (i + 1), height - 1)
                    if shift_x != 0:
                        mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1)
                    if shift_y != 0:
                        mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0)
            else:
                shift_x = min(x, width - 1)
                shift_y = min(y, height - 1)
                if shift_x != 0:
                    mask = torch.roll(mask, shifts=shift_x, dims=2)
                if shift_y != 0:
                    mask = torch.roll(mask, shifts=shift_y, dims=1)
        else:
            for i in range(batch_size):
                if incremental:
                    temp_x = min(x * (i + 1), width - 1)
                    temp_y = min(y * (i + 1), height - 1)
                else:
                    temp_x = min(x, width - 1)
                    temp_y = min(y, height - 1)
                # For the replicate/reflect branches, F.pad needs extra batch
                # dims and the pad must go on the same side the 'empty' branch
                # inserts zeros; the original padded the wrong side on a 2D
                # tensor, which these branches now correct.
                if temp_x > 0:
                    if padding_mode == 'empty':
                        mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1)
                    elif padding_mode in ['replicate', 'reflect']:
                        mask[i] = F.pad(mask[i, :, :-temp_x].unsqueeze(0), (temp_x, 0), mode=padding_mode).squeeze(0)
                elif temp_x < 0:
                    if padding_mode == 'empty':
                        mask[i] = torch.cat([mask[i, :, :temp_x], torch.zeros((height, -temp_x))], dim=1)
                    elif padding_mode in ['replicate', 'reflect']:
                        mask[i] = F.pad(mask[i, :, -temp_x:].unsqueeze(0), (0, -temp_x), mode=padding_mode).squeeze(0)

                if temp_y > 0:
                    if padding_mode == 'empty':
                        mask[i] = torch.cat([torch.zeros((temp_y, width)), mask[i, :-temp_y, :]], dim=0)
                    elif padding_mode in ['replicate', 'reflect']:
                        mask[i] = F.pad(mask[i, :-temp_y, :].unsqueeze(0).unsqueeze(0), (0, 0, temp_y, 0), mode=padding_mode).squeeze(0).squeeze(0)
                elif temp_y < 0:
                    if padding_mode == 'empty':
                        mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0)
                    elif padding_mode in ['replicate', 'reflect']:
                        mask[i] = F.pad(mask[i, -temp_y:, :].unsqueeze(0).unsqueeze(0), (0, 0, 0, -temp_y), mode=padding_mode).squeeze(0).squeeze(0)

        return mask,
class RoundMask:
|
1163 |
+
@classmethod
|
1164 |
+
def INPUT_TYPES(s):
|
1165 |
+
return {"required": {
|
1166 |
+
"mask": ("MASK",),
|
1167 |
+
}}
|
1168 |
+
|
1169 |
+
RETURN_TYPES = ("MASK",)
|
1170 |
+
FUNCTION = "round"
|
1171 |
+
CATEGORY = "KJNodes/masking"
|
1172 |
+
DESCRIPTION = """
|
1173 |
+
Rounds the mask or batch of masks to a binary mask.
|
1174 |
+
<img src="https://github.com/kijai/ComfyUI-KJNodes/assets/40791699/52c85202-f74e-4b96-9dac-c8bda5ddcc40" width="300" height="250" alt="RoundMask example">
|
1175 |
+
|
1176 |
+
"""
|
1177 |
+
|
1178 |
+
def round(self, mask):
|
1179 |
+
mask = mask.round()
|
1180 |
+
return (mask,)
|
1181 |
+
|
1182 |
+
class ResizeMask:
|
1183 |
+
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
|
1184 |
+
@classmethod
|
1185 |
+
def INPUT_TYPES(s):
|
1186 |
+
return {
|
1187 |
+
"required": {
|
1188 |
+
"mask": ("MASK",),
|
1189 |
+
"width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
|
1190 |
+
"height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
|
1191 |
+
"keep_proportions": ("BOOLEAN", { "default": False }),
|
1192 |
+
"upscale_method": (s.upscale_methods,),
|
1193 |
+
"crop": (["disabled","center"],),
|
1194 |
+
}
|
1195 |
+
}
|
1196 |
+
|
1197 |
+
RETURN_TYPES = ("MASK", "INT", "INT",)
|
1198 |
+
RETURN_NAMES = ("mask", "width", "height",)
|
1199 |
+
FUNCTION = "resize"
|
1200 |
+
CATEGORY = "KJNodes/masking"
|
1201 |
+
DESCRIPTION = """
|
1202 |
+
Resizes the mask or batch of masks to the specified width and height.
|
1203 |
+
"""
|
1204 |
+
|
1205 |
+
def resize(self, mask, width, height, keep_proportions, upscale_method,crop):
|
1206 |
+
if keep_proportions:
|
1207 |
+
_, oh, ow = mask.shape
|
1208 |
+
width = ow if width == 0 else width
|
1209 |
+
height = oh if height == 0 else height
|
1210 |
+
ratio = min(width / ow, height / oh)
|
1211 |
+
width = round(ow*ratio)
|
1212 |
+
height = round(oh*ratio)
|
1213 |
+
outputs = mask.unsqueeze(1)
|
1214 |
+
outputs = common_upscale(outputs, width, height, upscale_method, crop)
|
1215 |
+
outputs = outputs.squeeze(1)
|
1216 |
+
|
1217 |
+
return(outputs, outputs.shape[2], outputs.shape[1],)
|
1218 |
+
|
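# --- Editor's sketch (illustrative, not part of the original node file) ---
# The keep_proportions branch above picks the smaller of the two scale
# factors so the resized mask fits inside the requested box:
def _example_keep_proportions(ow=640, oh=480, width=512, height=512):
    ratio = min(width / ow, height / oh)  # min(0.8, ~1.067) = 0.8
    return round(ow * ratio), round(oh * ratio)  # -> (512, 384)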
class RemapMaskRange:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "min": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 1.0, "step": 0.01}),
                "max": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "remap"
    CATEGORY = "KJNodes/masking"
    DESCRIPTION = """
Sets new min and max values for the mask.
"""

    def remap(self, mask, min, max):

        # Find the maximum value in the mask
        mask_max = torch.max(mask)

        # If the maximum mask value is zero, avoid division by zero by setting it to 1
        mask_max = mask_max if mask_max > 0 else 1

        # Scale the mask values to the new range defined by min and max
        # The highest pixel value in the mask will be scaled to max
        scaled_mask = (mask / mask_max) * (max - min) + min

        # Clamp the values to ensure they are within [0.0, 1.0]
        scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0)

        return (scaled_mask,)
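# --- Editor's sketch (illustrative, not part of the original node file) ---
# The remap above first normalizes by the mask's own maximum, then rescales
# into [min, max] and clamps. For a mask whose peak value is 0.5:
def _example_remap():
    import torch
    mask = torch.tensor([0.0, 0.25, 0.5])
    new_min, new_max = 0.2, 1.0
    scaled = (mask / mask.max()) * (new_max - new_min) + new_min
    return torch.clamp(scaled, 0.0, 1.0)  # -> [0.2, 0.6, 1.0]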
def get_mask_polygon(self, mask_np):
    """Helper function to get polygon points from a mask"""
    import cv2
    # Find contours
    contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if not contours:
        return None

    # Get the largest contour
    largest_contour = max(contours, key=cv2.contourArea)

    # Approximate polygon
    epsilon = 0.02 * cv2.arcLength(largest_contour, True)
    polygon = cv2.approxPolyDP(largest_contour, epsilon, True)

    return polygon.squeeze()

import cv2
class SeparateMasks:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK", ),
                "size_threshold_width": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "size_threshold_height": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "mode": (["convex_polygons", "area"],),
                "max_poly_points": ("INT", {"default": 8, "min": 3, "max": 32, "step": 1}),
            },
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "separate"
    CATEGORY = "KJNodes/masking"
    OUTPUT_NODE = True
    DESCRIPTION = "Separates a mask into multiple masks based on the size of the connected components."

    def polygon_to_mask(self, polygon, shape):
        mask = np.zeros((shape[0], shape[1]), dtype=np.uint8)  # Fixed shape handling

        if len(polygon.shape) == 2:  # Check if polygon points are valid
            polygon = polygon.astype(np.int32)
            cv2.fillPoly(mask, [polygon], 1)
        return mask

    def get_mask_polygon(self, mask_np, max_points):
        contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None

        largest_contour = max(contours, key=cv2.contourArea)
        hull = cv2.convexHull(largest_contour)

        # Initialize with a smaller epsilon for more points
        perimeter = cv2.arcLength(hull, True)
        epsilon = perimeter * 0.01  # Start smaller

        min_eps = perimeter * 0.001  # Much smaller minimum
        max_eps = perimeter * 0.2  # Smaller maximum

        best_approx = None
        best_diff = float('inf')
        max_iterations = 20

        #print(f"Target points: {max_points}, Perimeter: {perimeter}")

        for i in range(max_iterations):
            curr_eps = (min_eps + max_eps) / 2
            approx = cv2.approxPolyDP(hull, curr_eps, True)
            points_diff = len(approx) - max_points

            #print(f"Iteration {i}: points={len(approx)}, eps={curr_eps:.4f}")

            if abs(points_diff) < best_diff:
                best_approx = approx
                best_diff = abs(points_diff)

            if len(approx) > max_points:
                min_eps = curr_eps * 1.1  # More gradual adjustment
            elif len(approx) < max_points:
                max_eps = curr_eps * 0.9  # More gradual adjustment
            else:
                return approx.squeeze()

            if abs(max_eps - min_eps) < perimeter * 0.0001:  # Relative tolerance
                break

        # If we didn't find an exact match, return the best approximation
        return best_approx.squeeze() if best_approx is not None else hull.squeeze()

    def separate(self, mask: torch.Tensor, size_threshold_width: int, size_threshold_height: int, max_poly_points: int, mode: str):
        from scipy.ndimage import label, center_of_mass
        import numpy as np

        B, H, W = mask.shape
        separated = []

        mask = mask.round()

        for b in range(B):
            mask_np = mask[b].cpu().numpy().astype(np.uint8)
            structure = np.ones((3, 3), dtype=np.int8)
            labeled, ncomponents = label(mask_np, structure=structure)
            pbar = ProgressBar(ncomponents)

            for component in range(1, ncomponents + 1):
                component_mask_np = (labeled == component).astype(np.uint8)

                rows = np.any(component_mask_np, axis=1)
                cols = np.any(component_mask_np, axis=0)
                y_min, y_max = np.where(rows)[0][[0, -1]]
                x_min, x_max = np.where(cols)[0][[0, -1]]

                width = x_max - x_min + 1
                height = y_max - y_min + 1
                centroid_x = (x_min + x_max) / 2  # Calculate x centroid
                print(f"Component {component}: width={width}, height={height}, x_pos={centroid_x}")

                if width >= size_threshold_width and height >= size_threshold_height:
                    if mode != "area":
                        polygon = self.get_mask_polygon(component_mask_np, max_poly_points)
                        if polygon is not None:
                            poly_mask = self.polygon_to_mask(polygon, (H, W))
                            poly_mask = torch.tensor(poly_mask, device=mask.device)
                            separated.append((centroid_x, poly_mask))
                    else:
                        area_mask = torch.tensor(component_mask_np, device=mask.device)
                        separated.append((centroid_x, area_mask))
                pbar.update(1)

        if len(separated) > 0:
            # Sort by x position and extract only the masks
            separated.sort(key=lambda x: x[0])
            separated = [x[1] for x in separated]
            out_masks = torch.stack(separated, dim=0)
            return out_masks,
        else:
            return torch.empty((1, 64, 64), device=mask.device),
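# --- Editor's sketch (illustrative, not part of the original node file) ---
# SeparateMasks relies on scipy.ndimage.label to find connected components;
# with the all-ones 3x3 structure, diagonal neighbours join one component:
def _example_connected_components():
    import numpy as np
    from scipy.ndimage import label
    m = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]], dtype=np.uint8)
    labeled, n = label(m, structure=np.ones((3, 3), dtype=np.int8))
    return n  # -> 1 with 8-connectivity; the default 4-connectivity gives 3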
custom_nodes/ComfyUI-KJNodes-main/nodes/model_optimization_nodes.py
ADDED
@@ -0,0 +1,1179 @@
from comfy.ldm.modules import attention as comfy_attention
import logging
import comfy.model_patcher
import comfy.utils
import comfy.sd
import torch
import folder_paths
import comfy.model_management as mm
from comfy.cli_args import args

orig_attention = comfy_attention.optimized_attention
original_patch_model = comfy.model_patcher.ModelPatcher.patch_model
original_load_lora_for_models = comfy.sd.load_lora_for_models
+
class BaseLoaderKJ:
|
16 |
+
original_linear = None
|
17 |
+
cublas_patched = False
|
18 |
+
|
19 |
+
def _patch_modules(self, patch_cublaslinear, sage_attention):
|
20 |
+
from comfy.ops import disable_weight_init, CastWeightBiasOp, cast_bias_weight
|
21 |
+
|
22 |
+
if sage_attention != "disabled":
|
23 |
+
print("Patching comfy attention to use sageattn")
|
24 |
+
from sageattention import sageattn
|
25 |
+
def set_sage_func(sage_attention):
|
26 |
+
if sage_attention == "auto":
|
27 |
+
def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"):
|
28 |
+
return sageattn(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout)
|
29 |
+
return func
|
30 |
+
elif sage_attention == "sageattn_qk_int8_pv_fp16_cuda":
|
31 |
+
from sageattention import sageattn_qk_int8_pv_fp16_cuda
|
32 |
+
def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"):
|
33 |
+
return sageattn_qk_int8_pv_fp16_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32", tensor_layout=tensor_layout)
|
34 |
+
return func
|
35 |
+
elif sage_attention == "sageattn_qk_int8_pv_fp16_triton":
|
36 |
+
from sageattention import sageattn_qk_int8_pv_fp16_triton
|
37 |
+
def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"):
|
38 |
+
return sageattn_qk_int8_pv_fp16_triton(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout)
|
39 |
+
return func
|
40 |
+
elif sage_attention == "sageattn_qk_int8_pv_fp8_cuda":
|
41 |
+
from sageattention import sageattn_qk_int8_pv_fp8_cuda
|
42 |
+
def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"):
|
43 |
+
return sageattn_qk_int8_pv_fp8_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32+fp32", tensor_layout=tensor_layout)
|
44 |
+
return func
|
45 |
+
|
46 |
+
sage_func = set_sage_func(sage_attention)
|
47 |
+
|
48 |
+
@torch.compiler.disable()
|
49 |
+
def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
|
50 |
+
if skip_reshape:
|
51 |
+
b, _, _, dim_head = q.shape
|
52 |
+
tensor_layout="HND"
|
53 |
+
else:
|
54 |
+
b, _, dim_head = q.shape
|
55 |
+
dim_head //= heads
|
56 |
+
q, k, v = map(
|
57 |
+
lambda t: t.view(b, -1, heads, dim_head),
|
58 |
+
(q, k, v),
|
59 |
+
)
|
60 |
+
tensor_layout="NHD"
|
61 |
+
if mask is not None:
|
62 |
+
# add a batch dimension if there isn't already one
|
63 |
+
if mask.ndim == 2:
|
64 |
+
mask = mask.unsqueeze(0)
|
65 |
+
# add a heads dimension if there isn't already one
|
66 |
+
if mask.ndim == 3:
|
67 |
+
mask = mask.unsqueeze(1)
|
68 |
+
out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
|
69 |
+
if tensor_layout == "HND":
|
70 |
+
if not skip_output_reshape:
|
71 |
+
out = (
|
72 |
+
out.transpose(1, 2).reshape(b, -1, heads * dim_head)
|
73 |
+
)
|
74 |
+
else:
|
75 |
+
if skip_output_reshape:
|
76 |
+
out = out.transpose(1, 2)
|
77 |
+
else:
|
78 |
+
out = out.reshape(b, -1, heads * dim_head)
|
79 |
+
return out
|
80 |
+
|
81 |
+
comfy_attention.optimized_attention = attention_sage
|
82 |
+
comfy.ldm.hunyuan_video.model.optimized_attention = attention_sage
|
83 |
+
comfy.ldm.flux.math.optimized_attention = attention_sage
|
84 |
+
comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = attention_sage
|
85 |
+
comfy.ldm.cosmos.blocks.optimized_attention = attention_sage
|
86 |
+
comfy.ldm.wan.model.optimized_attention = attention_sage
|
87 |
+
|
88 |
+
else:
|
89 |
+
comfy_attention.optimized_attention = orig_attention
|
90 |
+
comfy.ldm.hunyuan_video.model.optimized_attention = orig_attention
|
91 |
+
comfy.ldm.flux.math.optimized_attention = orig_attention
|
92 |
+
comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = orig_attention
|
93 |
+
comfy.ldm.cosmos.blocks.optimized_attention = orig_attention
|
94 |
+
comfy.ldm.wan.model.optimized_attention = orig_attention
|
95 |
+
|
96 |
+
if patch_cublaslinear:
|
97 |
+
if not BaseLoaderKJ.cublas_patched:
|
98 |
+
BaseLoaderKJ.original_linear = disable_weight_init.Linear
|
99 |
+
try:
|
100 |
+
from cublas_ops import CublasLinear
|
101 |
+
except ImportError:
|
102 |
+
raise Exception("Can't import 'torch-cublas-hgemm', install it from here https://github.com/aredden/torch-cublas-hgemm")
|
103 |
+
|
104 |
+
class PatchedLinear(CublasLinear, CastWeightBiasOp):
|
105 |
+
def reset_parameters(self):
|
106 |
+
pass
|
107 |
+
|
108 |
+
def forward_comfy_cast_weights(self, input):
|
109 |
+
weight, bias = cast_bias_weight(self, input)
|
110 |
+
return torch.nn.functional.linear(input, weight, bias)
|
111 |
+
|
112 |
+
def forward(self, *args, **kwargs):
|
113 |
+
if self.comfy_cast_weights:
|
114 |
+
return self.forward_comfy_cast_weights(*args, **kwargs)
|
115 |
+
else:
|
116 |
+
return super().forward(*args, **kwargs)
|
117 |
+
|
118 |
+
disable_weight_init.Linear = PatchedLinear
|
119 |
+
BaseLoaderKJ.cublas_patched = True
|
120 |
+
else:
|
121 |
+
if BaseLoaderKJ.cublas_patched:
|
122 |
+
disable_weight_init.Linear = BaseLoaderKJ.original_linear
|
123 |
+
BaseLoaderKJ.cublas_patched = False
|
124 |
+
|
125 |
+
class PathchSageAttentionKJ(BaseLoaderKJ):
|
126 |
+
@classmethod
|
127 |
+
def INPUT_TYPES(s):
|
128 |
+
return {"required": {
|
129 |
+
"model": ("MODEL",),
|
130 |
+
"sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": False, "tooltip": "Global patch comfy attention to use sageattn, once patched to revert back to normal you would need to run this node again with disabled option."}),
|
131 |
+
}}
|
132 |
+
|
133 |
+
RETURN_TYPES = ("MODEL", )
|
134 |
+
FUNCTION = "patch"
|
135 |
+
DESCRIPTION = "Experimental node for patching attention mode. This doesn't use the model patching system and thus can't be disabled without running the node again with 'disabled' option."
|
136 |
+
EXPERIMENTAL = True
|
137 |
+
CATEGORY = "KJNodes/experimental"
|
138 |
+
|
139 |
+
def patch(self, model, sage_attention):
|
140 |
+
self._patch_modules(False, sage_attention)
|
141 |
+
return model,
|
142 |
+
|
class CheckpointLoaderKJ(BaseLoaderKJ):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}),
            "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}),
            # the original default was False, which is not one of the combo options
            "sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": "disabled", "tooltip": "Patch comfy attention to use sageattn."}),
        }}

    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "patch"
    OUTPUT_NODE = True
    DESCRIPTION = "Experimental node for patching torch.nn.Linear with CublasLinear."
    EXPERIMENTAL = True
    CATEGORY = "KJNodes/experimental"

    def patch(self, ckpt_name, patch_cublaslinear, sage_attention):
        self._patch_modules(patch_cublaslinear, sage_attention)
        from nodes import CheckpointLoaderSimple
        model, clip, vae = CheckpointLoaderSimple.load_checkpoint(self, ckpt_name)
        return model, clip, vae
class DiffusionModelLoaderKJ(BaseLoaderKJ):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model_name": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "The name of the checkpoint (model) to load."}),
            "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"],),
            "compute_dtype": (["default", "fp16", "bf16", "fp32"], {"default": "fp16", "tooltip": "The compute dtype to use for the model."}),
            "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}),
            # the original default was False, which is not one of the combo options
            "sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": "disabled", "tooltip": "Patch comfy attention to use sageattn."}),
            "enable_fp16_accumulation": ("BOOLEAN", {"default": False, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}),
        }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch_and_load"
    OUTPUT_NODE = True
    DESCRIPTION = "Node for patching torch.nn.Linear with CublasLinear."
    EXPERIMENTAL = True
    CATEGORY = "KJNodes/experimental"

    def patch_and_load(self, model_name, weight_dtype, compute_dtype, patch_cublaslinear, sage_attention, enable_fp16_accumulation):
        DTYPE_MAP = {
            "fp8_e4m3fn": torch.float8_e4m3fn,
            "fp8_e5m2": torch.float8_e5m2,
            "fp16": torch.float16,
            "bf16": torch.bfloat16,
            "fp32": torch.float32
        }
        model_options = {}
        if dtype := DTYPE_MAP.get(weight_dtype):
            model_options["dtype"] = dtype
            print(f"Setting {model_name} weight dtype to {dtype}")

        if weight_dtype == "fp8_e4m3fn_fast":
            model_options["dtype"] = torch.float8_e4m3fn
            model_options["fp8_optimizations"] = True

        if enable_fp16_accumulation:
            if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"):
                torch.backends.cuda.matmul.allow_fp16_accumulation = True
            else:
                raise RuntimeError("Failed to set fp16 accumulation, this currently requires a pytorch 2.7.0 nightly")
        else:
            if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"):
                torch.backends.cuda.matmul.allow_fp16_accumulation = False

        unet_path = folder_paths.get_full_path_or_raise("diffusion_models", model_name)
        model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options)
        if dtype := DTYPE_MAP.get(compute_dtype):
            model.set_model_compute_dtype(dtype)
            model.force_cast_weights = False
            print(f"Setting {model_name} compute dtype to {dtype}")
        self._patch_modules(patch_cublaslinear, sage_attention)

        return (model,)
def patched_patch_model(self, device_to=None, lowvram_model_memory=0, load_weights=True, force_patch_weights=False):
    with self.use_ejected():

        device_to = mm.get_torch_device()

        full_load_override = getattr(self.model, "full_load_override", "auto")
        if full_load_override in ["enabled", "disabled"]:
            full_load = full_load_override == "enabled"
        else:
            full_load = lowvram_model_memory == 0

        self.load(device_to, lowvram_model_memory=lowvram_model_memory, force_patch_weights=force_patch_weights, full_load=full_load)

        for k in self.object_patches:
            old = comfy.utils.set_attr(self.model, k, self.object_patches[k])
            if k not in self.object_patches_backup:
                self.object_patches_backup[k] = old

    self.inject_model()
    return self.model

def patched_load_lora_for_models(model, clip, lora, strength_model, strength_clip):

    patch_keys = list(model.object_patches_backup.keys())
    for k in patch_keys:
        #print("backing up object patch: ", k)
        comfy.utils.set_attr(model.model, k, model.object_patches_backup[k])

    key_map = {}
    if model is not None:
        key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
    if clip is not None:
        key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)

    lora = comfy.lora_convert.convert_lora(lora)
    loaded = comfy.lora.load_lora(lora, key_map)
    #print(temp_object_patches_backup)

    if model is not None:
        new_modelpatcher = model.clone()
        k = new_modelpatcher.add_patches(loaded, strength_model)
    else:
        k = ()
        new_modelpatcher = None

    if clip is not None:
        new_clip = clip.clone()
        k1 = new_clip.add_patches(loaded, strength_clip)
    else:
        k1 = ()
        new_clip = None
    k = set(k)
    k1 = set(k1)
    for x in loaded:
        if (x not in k) and (x not in k1):
            print("NOT LOADED {}".format(x))

    if patch_keys:
        if hasattr(model.model, "compile_settings"):
            compile_settings = getattr(model.model, "compile_settings")
            print("compile_settings: ", compile_settings)
            for k in patch_keys:
                if "diffusion_model." in k:
                    # Remove the prefix to get the attribute path
                    key = k.replace('diffusion_model.', '')
                    attributes = key.split('.')
                    # Start with the diffusion_model object
                    block = model.get_model_object("diffusion_model")
                    # Navigate through the attributes to get to the block
                    for attr in attributes:
                        if attr.isdigit():
                            block = block[int(attr)]
                        else:
                            block = getattr(block, attr)
                    # Compile the block
                    compiled_block = torch.compile(block, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"])
                    # Add the compiled block back as an object patch
                    model.add_object_patch(k, compiled_block)
    return (new_modelpatcher, new_clip)
class PatchModelPatcherOrder:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "patch_order": (["object_patch_first", "weight_patch_first"], {"default": "weight_patch_first", "tooltip": "Patch the comfy patch_model function to load weight patches (LoRAs) before compiling the model"}),
            "full_load": (["enabled", "disabled", "auto"], {"default": "auto", "tooltip": "Disabling may help with memory issues when loading large models; when changing this you should probably force a model reload to avoid issues!"}),
        }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = "Patches the comfy patch_model function's patching order, useful for torch.compile (used as an object_patch) as it should come last if you want to use LoRAs with compile."
    EXPERIMENTAL = True

    def patch(self, model, patch_order, full_load):
        comfy.model_patcher.ModelPatcher.temp_object_patches_backup = {}
        setattr(model.model, "full_load_override", full_load)
        if patch_order == "weight_patch_first":
            comfy.model_patcher.ModelPatcher.patch_model = patched_patch_model
            comfy.sd.load_lora_for_models = patched_load_lora_for_models
        else:
            comfy.model_patcher.ModelPatcher.patch_model = original_patch_model
            comfy.sd.load_lora_for_models = original_load_lora_for_models

        return model,
+
class TorchCompileModelFluxAdvanced:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "backend": (["inductor", "cudagraphs"],),
            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
            "double_blocks": ("STRING", {"default": "0-18", "multiline": True}),
            "single_blocks": ("STRING", {"default": "0-37", "multiline": True}),
            "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
        },
        "optional": {
            "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
        }
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def parse_blocks(self, blocks_str):
        blocks = []
        for part in blocks_str.split(','):
            part = part.strip()
            if '-' in part:
                start, end = map(int, part.split('-'))
                blocks.extend(range(start, end + 1))
            else:
                blocks.append(int(part))
        return blocks

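    # Example (illustrative): parse_blocks("0-2, 5") returns [0, 1, 2, 5], so the
    # defaults "0-18" and "0-37" select every double/single block of Flux.
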
    def patch(self, model, backend, mode, fullgraph, single_blocks, double_blocks, dynamic, dynamo_cache_size_limit):
        single_block_list = self.parse_blocks(single_blocks)
        double_block_list = self.parse_blocks(double_blocks)
        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")
        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit

        if not self._compiled:
            try:
                for i, block in enumerate(diffusion_model.double_blocks):
                    if i in double_block_list:
                        #print("Compiling double_block", i)
                        m.add_object_patch(f"diffusion_model.double_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend))
                for i, block in enumerate(diffusion_model.single_blocks):
                    if i in single_block_list:
                        #print("Compiling single block", i)
                        m.add_object_patch(f"diffusion_model.single_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend))
                self._compiled = True
                compile_settings = {
                    "backend": backend,
                    "mode": mode,
                    "fullgraph": fullgraph,
                    "dynamic": dynamic,
                }
                setattr(m.model, "compile_settings", compile_settings)
            except Exception as e:
                raise RuntimeError("Failed to compile model") from e

        return (m, )
        # rest of the layers that are not patched
        # diffusion_model.final_layer = torch.compile(diffusion_model.final_layer, mode=mode, fullgraph=fullgraph, backend=backend)
        # diffusion_model.guidance_in = torch.compile(diffusion_model.guidance_in, mode=mode, fullgraph=fullgraph, backend=backend)
        # diffusion_model.img_in = torch.compile(diffusion_model.img_in, mode=mode, fullgraph=fullgraph, backend=backend)
        # diffusion_model.time_in = torch.compile(diffusion_model.time_in, mode=mode, fullgraph=fullgraph, backend=backend)
        # diffusion_model.txt_in = torch.compile(diffusion_model.txt_in, mode=mode, fullgraph=fullgraph, backend=backend)
        # diffusion_model.vector_in = torch.compile(diffusion_model.vector_in, mode=mode, fullgraph=fullgraph, backend=backend)

class TorchCompileModelHyVideo:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "backend": (["inductor", "cudagraphs"], {"default": "inductor"}),
                "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
                "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
                "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
                "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
                "compile_single_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile single blocks"}),
                "compile_double_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile double blocks"}),
                "compile_txt_in": ("BOOLEAN", {"default": False, "tooltip": "Compile txt_in layers"}),
                "compile_vector_in": ("BOOLEAN", {"default": False, "tooltip": "Compile vector_in layers"}),
                "compile_final_layer": ("BOOLEAN", {"default": False, "tooltip": "Compile final layer"}),
            },
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_single_blocks, compile_double_blocks, compile_txt_in, compile_vector_in, compile_final_layer):
        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")
        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit
        if not self._compiled:
            try:
                if compile_single_blocks:
                    for i, block in enumerate(diffusion_model.single_blocks):
                        compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                        m.add_object_patch(f"diffusion_model.single_blocks.{i}", compiled_block)
                if compile_double_blocks:
                    for i, block in enumerate(diffusion_model.double_blocks):
                        compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                        m.add_object_patch(f"diffusion_model.double_blocks.{i}", compiled_block)
                if compile_txt_in:
                    compiled_block = torch.compile(diffusion_model.txt_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                    m.add_object_patch("diffusion_model.txt_in", compiled_block)
                if compile_vector_in:
                    compiled_block = torch.compile(diffusion_model.vector_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                    m.add_object_patch("diffusion_model.vector_in", compiled_block)
                if compile_final_layer:
                    compiled_block = torch.compile(diffusion_model.final_layer, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                    m.add_object_patch("diffusion_model.final_layer", compiled_block)
                self._compiled = True
                compile_settings = {
                    "backend": backend,
                    "mode": mode,
                    "fullgraph": fullgraph,
                    "dynamic": dynamic,
                }
                setattr(m.model, "compile_settings", compile_settings)
            except Exception as e:
                raise RuntimeError("Failed to compile model") from e
        return (m, )

class TorchCompileModelWanVideo:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "backend": (["inductor", "cudagraphs"], {"default": "inductor"}),
                "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
                "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
                "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
                "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
                "compile_transformer_blocks_only": ("BOOLEAN", {"default": False, "tooltip": "Compile only transformer blocks"}),
            },
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only):
        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")
        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit
        is_compiled = hasattr(model.model.diffusion_model.blocks[0], "_orig_mod")
        if is_compiled:
            logging.info("Already compiled, not reapplying")
        else:
            logging.info("Not compiled, applying")
            try:
                if compile_transformer_blocks_only:
                    for i, block in enumerate(diffusion_model.blocks):
                        if is_compiled:
                            # recompile from the original module instead of wrapping a compiled one
                            compiled_block = torch.compile(block._orig_mod, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                        else:
                            compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                        m.add_object_patch(f"diffusion_model.blocks.{i}", compiled_block)
                else:
                    compiled_model = torch.compile(diffusion_model, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode)
                    m.add_object_patch("diffusion_model", compiled_model)

                compile_settings = {
                    "backend": backend,
                    "mode": mode,
                    "fullgraph": fullgraph,
                    "dynamic": dynamic,
                }
                setattr(m.model, "compile_settings", compile_settings)
            except Exception as e:
                raise RuntimeError("Failed to compile model") from e
        return (m, )

class TorchCompileVAE:
    def __init__(self):
        self._compiled_encoder = False
        self._compiled_decoder = False

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "vae": ("VAE",),
            "backend": (["inductor", "cudagraphs"],),
            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
            "compile_encoder": ("BOOLEAN", {"default": True, "tooltip": "Compile encoder"}),
            "compile_decoder": ("BOOLEAN", {"default": True, "tooltip": "Compile decoder"}),
        }}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "compile"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def compile(self, vae, backend, mode, fullgraph, compile_encoder, compile_decoder):
        if compile_encoder:
            if not self._compiled_encoder:
                encoder_name = "encoder"
                if hasattr(vae.first_stage_model, "taesd_encoder"):
                    encoder_name = "taesd_encoder"

                try:
                    setattr(
                        vae.first_stage_model,
                        encoder_name,
                        torch.compile(
                            getattr(vae.first_stage_model, encoder_name),
                            mode=mode,
                            fullgraph=fullgraph,
                            backend=backend,
                        ),
                    )
                    self._compiled_encoder = True
                except Exception as e:
                    raise RuntimeError("Failed to compile VAE encoder") from e
        if compile_decoder:
            if not self._compiled_decoder:
                decoder_name = "decoder"
                if hasattr(vae.first_stage_model, "taesd_decoder"):
                    decoder_name = "taesd_decoder"

                try:
                    setattr(
                        vae.first_stage_model,
                        decoder_name,
                        torch.compile(
                            getattr(vae.first_stage_model, decoder_name),
                            mode=mode,
                            fullgraph=fullgraph,
                            backend=backend,
                        ),
                    )
                    self._compiled_decoder = True
                except Exception as e:
                    raise RuntimeError("Failed to compile VAE decoder") from e
        return (vae, )

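# Note: the VAE node above detects TAESD-style VAEs by the presence of
# "taesd_encoder"/"taesd_decoder" attributes on first_stage_model and compiles
# those submodules instead of the full encoder/decoder.
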
class TorchCompileControlNet:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "controlnet": ("CONTROL_NET",),
            "backend": (["inductor", "cudagraphs"],),
            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
        }}
    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "compile"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def compile(self, controlnet, backend, mode, fullgraph):
        if not self._compiled:
            try:
                # for i, block in enumerate(controlnet.control_model.double_blocks):
                #     print("Compiling controlnet double_block", i)
                #     controlnet.control_model.double_blocks[i] = torch.compile(block, mode=mode, fullgraph=fullgraph, backend=backend)
                controlnet.control_model = torch.compile(controlnet.control_model, mode=mode, fullgraph=fullgraph, backend=backend)
                self._compiled = True
            except Exception as e:
                self._compiled = False
                raise RuntimeError("Failed to compile model") from e

        return (controlnet, )

class TorchCompileLTXModel:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "backend": (["inductor", "cudagraphs"],),
            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
            "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
        }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def patch(self, model, backend, mode, fullgraph, dynamic):
        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")

        if not self._compiled:
            try:
                for i, block in enumerate(diffusion_model.transformer_blocks):
                    compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)
                    m.add_object_patch(f"diffusion_model.transformer_blocks.{i}", compiled_block)
                self._compiled = True
                compile_settings = {
                    "backend": backend,
                    "mode": mode,
                    "fullgraph": fullgraph,
                    "dynamic": dynamic,
                }
                setattr(m.model, "compile_settings", compile_settings)

            except Exception as e:
                raise RuntimeError("Failed to compile model") from e

        return (m, )

class TorchCompileCosmosModel:
    def __init__(self):
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL",),
            "backend": (["inductor", "cudagraphs"],),
            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
            "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
            "dynamo_cache_size_limit": ("INT", {"default": 64, "tooltip": "Set the dynamo cache size limit"}),
        }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def patch(self, model, backend, mode, fullgraph, dynamic, dynamo_cache_size_limit):

        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")
        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit

        if not self._compiled:
            try:
                for name, block in diffusion_model.blocks.items():
                    #print(f"Compiling block {name}")
                    compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)
                    m.add_object_patch(f"diffusion_model.blocks.{name}", compiled_block)

                self._compiled = True
                compile_settings = {
                    "backend": backend,
                    "mode": mode,
                    "fullgraph": fullgraph,
                    "dynamic": dynamic,
                }
                setattr(m.model, "compile_settings", compile_settings)

            except Exception as e:
                raise RuntimeError("Failed to compile model") from e

        return (m, )


# teacache

try:
    from comfy.ldm.wan.model import sinusoidal_embedding_1d
except ImportError:
    sinusoidal_embedding_1d = None  # older ComfyUI without Wan support
from einops import repeat
from unittest.mock import patch
from contextlib import nullcontext
import numpy as np

def relative_l1_distance(last_tensor, current_tensor):
    l1_distance = torch.abs(last_tensor - current_tensor).mean()
    norm = torch.abs(last_tensor).mean()
    relative_l1_distance = l1_distance / norm
    return relative_l1_distance.to(torch.float32)

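# Worked example (illustrative): relative_l1_distance(torch.ones(4),
# torch.full((4,), 1.05)) is ~0.05. TeaCache accumulates these per-step
# distances and skips the transformer pass while the running total stays
# below rel_l1_thresh.
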
def teacache_wanvideo_forward_orig(self, x, t, context, clip_fea=None, freqs=None, transformer_options={}, **kwargs):
    # embeddings
    x = self.patch_embedding(x.float()).to(x.dtype)
    grid_sizes = x.shape[2:]
    x = x.flatten(2).transpose(1, 2)

    # time embeddings
    e = self.time_embedding(
        sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype))
    e0 = self.time_projection(e).unflatten(1, (6, self.dim))

    # context
    context = self.text_embedding(context)
    if clip_fea is not None and self.img_emb is not None:
        context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
        context = torch.concat([context_clip, context], dim=1)

    @torch.compiler.disable()
    def tea_cache(x, e0, e, kwargs):
        # TeaCache state is tracked for cond and uncond separately
        rel_l1_thresh = transformer_options["rel_l1_thresh"]

        is_cond = True if transformer_options["cond_or_uncond"] == [0] else False

        should_calc = True
        suffix = "cond" if is_cond else "uncond"

        # Init the cache dict if it does not exist yet
        if not hasattr(self, 'teacache_state'):
            self.teacache_state = {
                'cond': {'accumulated_rel_l1_distance': 0, 'prev_input': None,
                         'teacache_skipped_steps': 0, 'previous_residual': None},
                'uncond': {'accumulated_rel_l1_distance': 0, 'prev_input': None,
                           'teacache_skipped_steps': 0, 'previous_residual': None}
            }
            logging.info("\nTeaCache: Initialized")

        cache = self.teacache_state[suffix]

        if cache['prev_input'] is not None:
            if transformer_options["coefficients"] == []:
                temb_relative_l1 = relative_l1_distance(cache['prev_input'], e0)
                curr_acc_dist = cache['accumulated_rel_l1_distance'] + temb_relative_l1
            else:
                rescale_func = np.poly1d(transformer_options["coefficients"])
                curr_acc_dist = cache['accumulated_rel_l1_distance'] + rescale_func(((e - cache['prev_input']).abs().mean() / cache['prev_input'].abs().mean()).cpu().item())
            try:
                if curr_acc_dist < rel_l1_thresh:
                    should_calc = False
                    cache['accumulated_rel_l1_distance'] = curr_acc_dist
                else:
                    should_calc = True
                    cache['accumulated_rel_l1_distance'] = 0
            except Exception:
                should_calc = True
                cache['accumulated_rel_l1_distance'] = 0

        if transformer_options["coefficients"] == []:
            cache['prev_input'] = e0.clone().detach()
        else:
            cache['prev_input'] = e.clone().detach()

        if not should_calc:
            x += cache['previous_residual'].to(x.device)
            cache['teacache_skipped_steps'] += 1
            #print(f"TeaCache: Skipping {suffix} step")
        return should_calc, cache

    if not transformer_options:
        raise RuntimeError("Can't access transformer_options, this requires a ComfyUI nightly version from Mar 14, 2025 or later")

    teacache_enabled = transformer_options.get("teacache_enabled", False)
    if not teacache_enabled:
        should_calc = True
    else:
        should_calc, cache = tea_cache(x, e0, e, kwargs)

    if should_calc:
        original_x = x.clone().detach()
        patches_replace = transformer_options.get("patches_replace", {})
        blocks_replace = patches_replace.get("dit", {})
        for i, block in enumerate(self.blocks):
            if ("double_block", i) in blocks_replace:
                def block_wrap(args):
                    out = {}
                    out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"])
                    return out
                out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap, "transformer_options": transformer_options})
                x = out["img"]
            else:
                x = block(x, e=e0, freqs=freqs, context=context)

        if teacache_enabled:
            cache['previous_residual'] = (x - original_x).to(transformer_options["teacache_device"])

    # head
    x = self.head(x, e)

    # unpatchify
    x = self.unpatchify(x, grid_sizes)
    return x

class WanVideoTeaCacheKJ:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "rel_l1_thresh": ("FLOAT", {"default": 0.275, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Threshold to determine when to apply the cache, a compromise between speed and accuracy. When using coefficients a good value range is roughly 0.2-0.4 for all but the 1.3B model, which should be about 10 times smaller, same as when not using coefficients."}),
                "start_percent": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of the steps to use with TeaCache."}),
                "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of the steps to use with TeaCache."}),
                "cache_device": (["main_device", "offload_device"], {"default": "offload_device", "tooltip": "Device to cache to"}),
                "coefficients": (["disabled", "1.3B", "14B", "i2v_480", "i2v_720"], {"default": "i2v_480", "tooltip": "Coefficients for rescaling the relative l1 distance; if disabled the threshold value should be about 10 times smaller than the value used with coefficients."}),
            }
        }

    RETURN_TYPES = ("MODEL",)
    RETURN_NAMES = ("model",)
    FUNCTION = "patch_teacache"
    CATEGORY = "KJNodes/teacache"
    DESCRIPTION = """
Patch the WanVideo model to use TeaCache. Speeds up inference by caching the output and
applying it instead of doing the step. Best results are achieved by choosing the
appropriate coefficients for the model. Early steps should never be skipped; with too
aggressive values this can happen and the motion suffers. Starting later can help with that too.
When NOT using coefficients, the threshold value should be
about 10 times smaller than the value used with coefficients.

Official recommended values https://github.com/ali-vilab/TeaCache/tree/main/TeaCache4Wan2.1:

<pre style='font-family:monospace'>
+-------------------+--------+---------+--------+
| Model             | Low    | Medium  | High   |
+-------------------+--------+---------+--------+
| Wan2.1 t2v 1.3B   | 0.05   | 0.07    | 0.08   |
| Wan2.1 t2v 14B    | 0.14   | 0.15    | 0.20   |
| Wan2.1 i2v 480P   | 0.13   | 0.19    | 0.26   |
| Wan2.1 i2v 720P   | 0.18   | 0.20    | 0.30   |
+-------------------+--------+---------+--------+
</pre>
"""
    EXPERIMENTAL = True

    def patch_teacache(self, model, rel_l1_thresh, start_percent, end_percent, cache_device, coefficients):
        if rel_l1_thresh == 0:
            return (model,)

        if coefficients == "disabled" and rel_l1_thresh > 0.1:
            logging.warning("Threshold value is too high for TeaCache without coefficients, consider using coefficients for better results.")
        if coefficients != "disabled" and rel_l1_thresh < 0.1 and "1.3B" not in coefficients:
            logging.warning("Threshold value is too low for TeaCache with coefficients, consider using a higher threshold value for better results.")

        # type_str = str(type(model.model.model_config).__name__)
        # if model.model.diffusion_model.dim == 1536:
        #     model_type = "1.3B"
        # else:
        #     if "WAN21_T2V" in type_str:
        #         model_type = "14B"
        #     elif "WAN21_I2V" in type_str:
        #         model_type = "i2v_480"
        #     else:
        #         model_type = "i2v_720"  # how to detect this?

        teacache_coefficients_map = {
            "disabled": [],
            "1.3B": [2.39676752e+03, -1.31110545e+03, 2.01331979e+02, -8.29855975e+00, 1.37887774e-01],
            "14B": [-5784.54975374, 5449.50911966, -1811.16591783, 256.27178429, -13.02252404],
            "i2v_480": [-3.02331670e+02, 2.23948934e+02, -5.25463970e+01, 5.87348440e+00, -2.01973289e-01],
            "i2v_720": [-114.36346466, 65.26524496, -18.82220707, 4.91518089, -0.23412683],
        }
        coefficients = teacache_coefficients_map[coefficients]

        teacache_device = mm.get_torch_device() if cache_device == "main_device" else mm.unet_offload_device()

        model_clone = model.clone()
        if 'transformer_options' not in model_clone.model_options:
            model_clone.model_options['transformer_options'] = {}
        model_clone.model_options["transformer_options"]["rel_l1_thresh"] = rel_l1_thresh
        model_clone.model_options["transformer_options"]["teacache_device"] = teacache_device
        model_clone.model_options["transformer_options"]["coefficients"] = coefficients
        diffusion_model = model_clone.get_model_object("diffusion_model")

        def outer_wrapper(start_percent, end_percent):
            def unet_wrapper_function(model_function, kwargs):
                input = kwargs["input"]
                timestep = kwargs["timestep"]
                c = kwargs["c"]
                sigmas = c["transformer_options"]["sample_sigmas"]
                cond_or_uncond = kwargs["cond_or_uncond"]
                last_step = (len(sigmas) - 1)

                matched_step_index = (sigmas == timestep[0]).nonzero()
                if len(matched_step_index) > 0:
                    current_step_index = matched_step_index.item()
                else:
                    for i in range(len(sigmas) - 1):
                        # walk from the beginning of the steps until crossing the timestep
                        if (sigmas[i] - timestep[0]) * (sigmas[i + 1] - timestep[0]) <= 0:
                            current_step_index = i
                            break
                    else:
                        current_step_index = 0

                if current_step_index == 0:
                    if hasattr(diffusion_model, "teacache_state"):
                        delattr(diffusion_model, "teacache_state")
                        logging.info("\nResetting TeaCache state")

                current_percent = current_step_index / (len(sigmas) - 1)
                c["transformer_options"]["current_percent"] = current_percent
                if start_percent <= current_percent <= end_percent:
                    c["transformer_options"]["teacache_enabled"] = True

                context = patch.multiple(
                    diffusion_model,
                    forward_orig=teacache_wanvideo_forward_orig.__get__(diffusion_model, diffusion_model.__class__)
                )

                with context:
                    out = model_function(input, timestep, **c)
                    if current_step_index + 1 == last_step and hasattr(diffusion_model, "teacache_state"):
                        if len(cond_or_uncond) == 1 and cond_or_uncond[0] == 0:
                            skipped_steps_cond = diffusion_model.teacache_state["cond"]["teacache_skipped_steps"]
                            skipped_steps_uncond = diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"]
                            logging.info("-----------------------------------")
                            logging.info("TeaCache skipped:")
                            logging.info(f"{skipped_steps_cond} cond steps")
                            logging.info(f"{skipped_steps_uncond} uncond steps")
                            logging.info(f"out of {last_step} steps")
                            logging.info("-----------------------------------")
                        elif len(cond_or_uncond) == 2:
                            # batched cond+uncond runs are tracked under the "uncond" key
                            skipped_steps = diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"]
                            logging.info("-----------------------------------")
                            logging.info("TeaCache skipped:")
                            logging.info(f"{skipped_steps} steps")
                            logging.info(f"out of {last_step} steps")
                            logging.info("-----------------------------------")

                return out
            return unet_wrapper_function

        model_clone.set_model_unet_function_wrapper(outer_wrapper(start_percent=start_percent, end_percent=end_percent))

        return (model_clone,)

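# Illustrative sketch of the step lookup used by the wrapper above: the current
# step is found by an exact sigma match, falling back to the bracketing interval.
#
#   sigmas = torch.tensor([1.0, 0.6, 0.3, 0.0])
#   t = torch.tensor([0.45])
#   idx = (sigmas == t[0]).nonzero()   # empty -> scan adjacent pairs
#   # (0.6 - 0.45) * (0.3 - 0.45) <= 0, so current_step_index = 1
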
from comfy.ldm.modules.attention import optimized_attention
from comfy.ldm.flux.math import apply_rope

def modified_wan_self_attention_forward(self, x, freqs):
    r"""
    Args:
        x(Tensor): Shape [B, L, num_heads, C / num_heads]
        freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
    """
    b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim

    # query, key, value function
    def qkv_fn(x):
        q = self.norm_q(self.q(x)).view(b, s, n, d)
        k = self.norm_k(self.k(x)).view(b, s, n, d)
        v = self.v(x).view(b, s, n * d)
        return q, k, v

    q, k, v = qkv_fn(x)

    q, k = apply_rope(q, k, freqs)

    feta_scores = get_feta_scores(q, k, self.num_frames, self.enhance_weight)

    x = optimized_attention(
        q.view(b, s, n * d),
        k.view(b, s, n * d),
        v,
        heads=self.num_heads,
    )

    x = self.o(x)

    x *= feta_scores

    return x

from einops import rearrange
def get_feta_scores(query, key, num_frames, enhance_weight):
    img_q, img_k = query, key  # e.g. torch.Size([2, 9216, 12, 128])

    _, ST, num_heads, head_dim = img_q.shape
    spatial_dim = ST // num_frames

    query_image = rearrange(
        img_q, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim
    )
    key_image = rearrange(
        img_k, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim
    )

    return feta_score(query_image, key_image, head_dim, num_frames, enhance_weight)

def feta_score(query_image, key_image, head_dim, num_frames, enhance_weight):
    scale = head_dim**-0.5
    query_image = query_image * scale
    attn_temp = query_image @ key_image.transpose(-2, -1)  # frame-to-frame attention logits
    attn_temp = attn_temp.to(torch.float32)
    attn_temp = attn_temp.softmax(dim=-1)

    # Reshape to [batch_size * num_tokens, num_frames, num_frames]
    attn_temp = attn_temp.reshape(-1, num_frames, num_frames)

    # Create a mask for diagonal elements
    diag_mask = torch.eye(num_frames, device=attn_temp.device).bool()
    diag_mask = diag_mask.unsqueeze(0).expand(attn_temp.shape[0], -1, -1)

    # Zero out diagonal elements
    attn_wo_diag = attn_temp.masked_fill(diag_mask, 0)

    # Calculate the mean of each token's attention matrix.
    # The number of off-diagonal elements per matrix is n*n - n.
    num_off_diag = num_frames * num_frames - num_frames
    mean_scores = attn_wo_diag.sum(dim=(1, 2)) / num_off_diag

    enhance_scores = mean_scores.mean() * (num_frames + enhance_weight)
    enhance_scores = enhance_scores.clamp(min=1)
    return enhance_scores

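# Note (editor's summary): feta_score measures the average cross-frame attention
# (the off-diagonal entries of each spatial location's frame-to-frame attention
# matrix), scales the mean by (num_frames + enhance_weight), and clamps to >= 1,
# so the patched attention output is only ever amplified, never damped.
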
import types
class WanAttentionPatch:
    def __init__(self, num_frames, weight):
        self.num_frames = num_frames
        self.enhance_weight = weight

    def __get__(self, obj, objtype=None):
        # Create a bound method with the stored parameters
        def wrapped_attention(self_module, *args, **kwargs):
            self_module.num_frames = self.num_frames
            self_module.enhance_weight = self.enhance_weight
            return modified_wan_self_attention_forward(self_module, *args, **kwargs)
        return types.MethodType(wrapped_attention, obj)

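# WanAttentionPatch is used as a descriptor: calling __get__ against a block's
# self_attn module yields a bound method that injects num_frames/enhance_weight
# before delegating to modified_wan_self_attention_forward, so the patch can be
# applied per block via add_object_patch (as done below).
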
class WanVideoEnhanceAVideoKJ:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}),
                "weight": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Strength of the enhance effect"}),
            }
        }

    RETURN_TYPES = ("MODEL",)
    RETURN_NAMES = ("model",)
    FUNCTION = "enhance"
    CATEGORY = "KJNodes/experimental"
    DESCRIPTION = "https://github.com/NUS-HPC-AI-Lab/Enhance-A-Video"
    EXPERIMENTAL = True

    def enhance(self, model, weight, latent):
        if weight == 0:
            return (model,)

        num_frames = latent["samples"].shape[2]

        model_clone = model.clone()
        if 'transformer_options' not in model_clone.model_options:
            model_clone.model_options['transformer_options'] = {}
        model_clone.model_options["transformer_options"]["enhance_weight"] = weight
        diffusion_model = model_clone.get_model_object("diffusion_model")

        compile_settings = getattr(model.model, "compile_settings", None)
        for idx, block in enumerate(diffusion_model.blocks):
            patched_attn = WanAttentionPatch(num_frames, weight).__get__(block.self_attn, block.__class__)
            if compile_settings is not None:
                patched_attn = torch.compile(patched_attn, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"])

            model_clone.add_object_patch(f"diffusion_model.blocks.{idx}.self_attn.forward", patched_attn)

        return (model_clone,)

class SkipLayerGuidanceWanVideo:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "blocks": ("STRING", {"default": "10", "multiline": False}),
                             "start_percent": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "slg"
    EXPERIMENTAL = True
    DESCRIPTION = "Simplified skip layer guidance that only skips the uncond on selected blocks"

    CATEGORY = "advanced/guidance"

    def slg(self, model, start_percent, end_percent, blocks):
        def skip(args, extra_args):
            transformer_options = extra_args.get("transformer_options", {})
            original_block = extra_args["original_block"]

            if not transformer_options:
                raise ValueError("transformer_options not found in extra_args, currently SkipLayerGuidanceWanVideo only works with TeaCacheKJ")
            if start_percent <= transformer_options["current_percent"] <= end_percent:
                if args["img"].shape[0] == 2:
                    prev_img_uncond = args["img"][0].unsqueeze(0)

                    new_args = {
                        "img": args["img"][1],
                        "txt": args["txt"][1],
                        "vec": args["vec"][1],
                        "pe": args["pe"][1]
                    }

                    block_out = original_block(new_args)

                    out = {
                        "img": torch.cat([prev_img_uncond, block_out["img"]], dim=0),
                        "txt": args["txt"],
                        "vec": args["vec"],
                        "pe": args["pe"]
                    }
                else:
                    if transformer_options.get("cond_or_uncond") == [0]:
                        out = original_block(args)
                    else:
                        out = args
            else:
                out = original_block(args)
            return out

        blocks = [int(x.strip()) for x in blocks.split(",")]
        logging.info(f"Selected blocks to skip uncond on: {blocks}")

        m = model.clone()

        for b in blocks:
            #m.set_model_patch_replace(skip, "dit", "double_block", b)
            model_options = m.model_options["transformer_options"].copy()
            if "patches_replace" not in model_options:
                model_options["patches_replace"] = {}
            else:
                model_options["patches_replace"] = model_options["patches_replace"].copy()

            if "dit" not in model_options["patches_replace"]:
                model_options["patches_replace"]["dit"] = {}
            else:
                model_options["patches_replace"]["dit"] = model_options["patches_replace"]["dit"].copy()

            block = ("double_block", b)

            model_options["patches_replace"]["dit"][block] = skip
            m.model_options["transformer_options"] = model_options

        return (m, )
custom_nodes/ComfyUI-KJNodes-main/nodes/nodes.py
ADDED
The diff for this file is too large to render.
See raw diff
custom_nodes/ComfyUI-KJNodes-main/pyproject.toml
ADDED
@@ -0,0 +1,15 @@
[project]
name = "comfyui-kjnodes"
description = "Various quality of life -nodes for ComfyUI, mostly just visual stuff to improve usability."
version = "1.0.8"
license = {file = "LICENSE"}
dependencies = ["librosa", "numpy", "pillow>=10.3.0", "scipy", "color-matcher", "matplotlib", "huggingface_hub"]

[project.urls]
Repository = "https://github.com/kijai/ComfyUI-KJNodes"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "kijai"
DisplayName = "ComfyUI-KJNodes"
Icon = ""
custom_nodes/ComfyUI-KJNodes-main/requirements.txt
ADDED
@@ -0,0 +1,7 @@
pillow>=10.3.0
scipy
color-matcher
matplotlib
huggingface_hub
mss
opencv-python
custom_nodes/ComfyUI-KJNodes-main/utility/fluid.py
ADDED
@@ -0,0 +1,67 @@
import numpy as np
from scipy.ndimage import map_coordinates, spline_filter
from scipy.sparse.linalg import factorized

from .numerical import difference, operator


class Fluid:
    def __init__(self, shape, *quantities, pressure_order=1, advect_order=3):
        self.shape = shape
        self.dimensions = len(shape)

        # Prototyping is simplified by dynamically
        # creating advected quantities as needed.
        self.quantities = quantities
        for q in quantities:
            setattr(self, q, np.zeros(shape))

        self.indices = np.indices(shape)
        self.velocity = np.zeros((self.dimensions, *shape))

        laplacian = operator(shape, difference(2, pressure_order))
        self.pressure_solver = factorized(laplacian)

        self.advect_order = advect_order

    def step(self):
        # Advection is computed backwards in time as described in Stable Fluids.
        advection_map = self.indices - self.velocity

        # SciPy's spline filter introduces checkerboard divergence.
        # A linear blend of the filtered and unfiltered fields based
        # on some value epsilon eliminates this error.
        def advect(field, filter_epsilon=10e-2, mode='constant'):
            filtered = spline_filter(field, order=self.advect_order, mode=mode)
            field = filtered * (1 - filter_epsilon) + field * filter_epsilon
            return map_coordinates(field, advection_map, prefilter=False, order=self.advect_order, mode=mode)

        # Apply advection to each axis of the
        # velocity field and each user-defined quantity.
        for d in range(self.dimensions):
            self.velocity[d] = advect(self.velocity[d])

        for q in self.quantities:
            setattr(self, q, advect(getattr(self, q)))

        # Compute the jacobian at each point in the
        # velocity field to extract curl and divergence.
        jacobian_shape = (self.dimensions,) * 2
        partials = tuple(np.gradient(d) for d in self.velocity)
        jacobian = np.stack(partials).reshape(*jacobian_shape, *self.shape)

        divergence = jacobian.trace()

        # If this curl calculation is extended to 3D, the y-axis value must be negated.
        # This corresponds to the coefficients of the levi-civita symbol in that dimension.
        # Higher dimensions do not have a vector -> scalar, or vector -> vector,
        # correspondence between velocity and curl due to differing isomorphisms
        # between exterior powers in dimensions != 2 or 3 respectively.
        curl_mask = np.triu(np.ones(jacobian_shape, dtype=bool), k=1)
        curl = (jacobian[curl_mask] - jacobian[curl_mask.T]).squeeze()

        # Apply the pressure correction to the fluid's velocity field.
        pressure = self.pressure_solver(divergence.flatten()).reshape(self.shape)
        self.velocity -= np.gradient(pressure)

        return divergence, curl, pressure
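# Usage sketch (illustrative, editor's note): advect a dye quantity through a
# 2D velocity field.
#
#   fluid = Fluid((128, 128), 'dye')      # creates fluid.dye as a 128x128 array
#   fluid.dye[60:68, 60:68] = 1.0
#   for _ in range(100):
#       divergence, curl, pressure = fluid.step()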
custom_nodes/ComfyUI-KJNodes-main/utility/magictex.py
ADDED
@@ -0,0 +1,95 @@
"""Generates psychedelic color textures in the spirit of Blender's magic texture shader using Python/Numpy

https://github.com/cheind/magic-texture
"""
from typing import Tuple, Optional
import numpy as np


def coordinate_grid(shape: Tuple[int, int], dtype=np.float32):
    """Returns a three-dimensional coordinate grid of given shape for use in `magic`."""
    x = np.linspace(-1, 1, shape[1], endpoint=True, dtype=dtype)
    y = np.linspace(-1, 1, shape[0], endpoint=True, dtype=dtype)
    X, Y = np.meshgrid(x, y)
    XYZ = np.stack((X, Y, np.ones_like(X)), -1)
    return XYZ


def random_transform(coords: np.ndarray, rng: np.random.Generator = None):
    """Returns randomly transformed coordinates"""
    H, W = coords.shape[:2]
    rng = rng or np.random.default_rng()
    m = rng.uniform(-1.0, 1.0, size=(3, 3)).astype(coords.dtype)
    return (coords.reshape(-1, 3) @ m.T).reshape(H, W, 3)


def magic(
    coords: np.ndarray,
    depth: Optional[int] = None,
    distortion: Optional[int] = None,
    rng: np.random.Generator = None,
):
    """Returns a magic color texture.

    The implementation is based on Blender's (https://www.blender.org/) magic
    texture shader. The following adaptions have been made:
     - we exchange the nested if-cascade for a probabilistic iterative approach

    Kwargs
    ------
    coords: HxWx3 array
        Coordinates transformed into colors by this method. See
        `magictex.coordinate_grid` to generate the default.
    depth: int (optional)
        Number of transformations applied. Higher numbers lead to more
        nested patterns. If not specified, randomly sampled.
    distortion: float (optional)
        Distortion of patterns. Larger values indicate more distortion,
        lower values tend to generate smoother patterns. If not specified,
        randomly sampled.
    rng: np.random.Generator
        Optional random generator to draw samples from.

    Returns
    -------
    colors: HxWx3 array
        Three channel color image in range [0,1]
    """
    rng = rng or np.random.default_rng()
    if distortion is None:
        distortion = rng.uniform(1, 4)
    if depth is None:
        depth = rng.integers(1, 5)

    H, W = coords.shape[:2]
    XYZ = coords
    x = np.sin((XYZ[..., 0] + XYZ[..., 1] + XYZ[..., 2]) * distortion)
    y = np.cos((-XYZ[..., 0] + XYZ[..., 1] - XYZ[..., 2]) * distortion)
    z = -np.cos((-XYZ[..., 0] - XYZ[..., 1] + XYZ[..., 2]) * distortion)

    if depth > 0:
        x *= distortion
        y *= distortion
        z *= distortion
        y = -np.cos(x - y + z)
        y *= distortion

    xyz = [x, y, z]
    fns = [np.cos, np.sin]
    for _ in range(1, depth):
        axis = rng.choice(3)
        fn = fns[rng.choice(2)]
        signs = rng.binomial(n=1, p=0.5, size=4) * 2 - 1

        xyz[axis] = signs[-1] * fn(
            signs[0] * xyz[0] + signs[1] * xyz[1] + signs[2] * xyz[2]
        )
        xyz[axis] *= distortion

    x, y, z = xyz
    x /= 2 * distortion
    y /= 2 * distortion
    z /= 2 * distortion
    c = 0.5 - np.stack((x, y, z), -1)
    c = np.clip(c, 0, 1.0)  # np.clip is not in-place; the result must be assigned
    return c
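# Usage sketch (illustrative, editor's note): render a 256x256 texture with a
# fixed seed.
#
#   rng = np.random.default_rng(0)
#   tex = magic(coordinate_grid((256, 256)), depth=3, distortion=2.5, rng=rng)
#   # tex is a (256, 256, 3) float array in [0, 1]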
custom_nodes/ComfyUI-KJNodes-main/utility/numerical.py
ADDED
@@ -0,0 +1,25 @@
from functools import reduce
from itertools import cycle
from math import factorial

import numpy as np
import scipy.sparse as sp


def difference(derivative, accuracy=1):
    # Central differences implemented based on the article here:
    # http://web.media.mit.edu/~crtaylor/calculator.html
    derivative += 1
    radius = accuracy + derivative // 2 - 1
    points = range(-radius, radius + 1)
    coefficients = np.linalg.inv(np.vander(points))
    return coefficients[-derivative] * factorial(derivative - 1), points


def operator(shape, *differences):
    # Credit to Philip Zucker for figuring out
    # that kronsum's argument order is reversed.
    # Without that bit of wisdom I'd have lost it.
    differences = zip(shape, cycle(differences))
    factors = (sp.diags(*diff, shape=(dim,) * 2) for dim, diff in differences)
    return reduce(lambda a, f: sp.kronsum(f, a, format='csc'), factors)
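# Example (illustrative, editor's note): a second-derivative central-difference
# stencil and the sparse 2D Laplacian used by Fluid's pressure solve.
#
#   coeffs, points = difference(2)    # stencil [1., -2., 1.] over range(-1, 2)
#   laplacian = operator((64, 64), (coeffs, points))   # 4096x4096 CSC matrix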
custom_nodes/ComfyUI-KJNodes-main/utility/utility.py
ADDED
@@ -0,0 +1,39 @@
import torch
import numpy as np
from PIL import Image
from typing import Union, List

# Utility functions from mtb nodes: https://github.com/melMass/comfy_mtb
def pil2tensor(image: Union[Image.Image, List[Image.Image]]) -> torch.Tensor:
    if isinstance(image, list):
        return torch.cat([pil2tensor(img) for img in image], dim=0)

    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)


def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor:
    if isinstance(img_np, list):
        return torch.cat([np2tensor(img) for img in img_np], dim=0)

    return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0)


def tensor2np(tensor: torch.Tensor):
    if len(tensor.shape) == 3:  # Single image
        return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8)
    else:  # Batch of images
        return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor]

def tensor2pil(image: torch.Tensor) -> List[Image.Image]:
    batch_count = image.size(0) if len(image.shape) > 3 else 1
    if batch_count > 1:
        out = []
        for i in range(batch_count):
            out.extend(tensor2pil(image[i]))
        return out

    return [
        Image.fromarray(
            np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
        )
    ]
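# Round-trip example (illustrative, editor's note): ComfyUI IMAGE tensors are
# [B, H, W, C] floats in [0, 1].
#
#   img = Image.new("RGB", (64, 64), "red")
#   t = pil2tensor(img)          # torch.Size([1, 64, 64, 3])
#   back = tensor2pil(t)[0]      # PIL.Image of the same size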
custom_nodes/ComfyUI-KJNodes-main/web/green.png
ADDED
custom_nodes/ComfyUI-KJNodes-main/web/js/appearance.js
ADDED
@@ -0,0 +1,23 @@
import { app } from "../../../scripts/app.js";

app.registerExtension({
    name: "KJNodes.appearance",
    nodeCreated(node) {
        switch (node.comfyClass) {
            case "INTConstant":
                node.setSize([200, 58]);
                node.color = "#1b4669";
                node.bgcolor = "#29699c";
                break;
            case "FloatConstant":
                node.setSize([200, 58]);
                node.color = LGraphCanvas.node_colors.green.color;
                node.bgcolor = LGraphCanvas.node_colors.green.bgcolor;
                break;
            case "ConditioningMultiCombine":
                node.color = LGraphCanvas.node_colors.brown.color;
                node.bgcolor = LGraphCanvas.node_colors.brown.bgcolor;
                break;
        }
    }
});
custom_nodes/ComfyUI-KJNodes-main/web/js/browserstatus.js
ADDED
@@ -0,0 +1,55 @@
import { api } from "../../../scripts/api.js";
import { app } from "../../../scripts/app.js";

app.registerExtension({
    name: "KJNodes.browserstatus",
    setup() {
        if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) {
            return;
        }
        api.addEventListener("status", ({ detail }) => {
            let title = "ComfyUI";
            let favicon = "green";
            let queueRemaining = detail && detail.exec_info.queue_remaining;

            if (queueRemaining) {
                favicon = "red";
                title = `00% - ${queueRemaining} | ${title}`;
            }
            let link = document.querySelector("link[rel~='icon']");
            if (!link) {
                link = document.createElement("link");
                link.rel = "icon";
                document.head.appendChild(link);
            }
            link.href = new URL(`../${favicon}.png`, import.meta.url);
            document.title = title;
        });
        // add progress to the title
        api.addEventListener("progress", ({ detail }) => {
            const { value, max } = detail;
            const progress = Math.floor((value / max) * 100);
            let title = document.title;

            if (!isNaN(progress) && progress >= 0 && progress <= 100) {
                const paddedProgress = String(progress).padStart(2, '0');
                title = `${paddedProgress}% ${title.replace(/^\d+%\s/, '')}`;
            }
            document.title = title;
        });
    },
    init() {
        if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) {
            return;
        }
        const pythongossFeed = app.extensions.find(
            (e) => e.name === 'pysssss.FaviconStatus',
        )
        if (pythongossFeed) {
            console.warn("KJNodes - Overriding pysssss.FaviconStatus")
            pythongossFeed.setup = function() {
                console.warn("Disabled by KJNodes")
            };
        }
    },
});
custom_nodes/ComfyUI-KJNodes-main/web/js/contextmenu.js
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { app } from "../../../scripts/app.js";

// Adds context menu entries, code partly from pysssss custom-scripts

function addMenuHandler(nodeType, cb) {
  const getOpts = nodeType.prototype.getExtraMenuOptions;
  nodeType.prototype.getExtraMenuOptions = function () {
    const r = getOpts.apply(this, arguments);
    cb.apply(this, arguments);
    return r;
  };
}

function addNode(name, nextTo, options) {
  console.log("name:", name);
  console.log("nextTo:", nextTo);
  // offset defaults to 0 so node.pos below never computes NaN when no offset is passed
  options = { side: "left", select: true, shiftY: 0, shiftX: 0, offset: 0, ...(options || {}) };
  const node = LiteGraph.createNode(name);
  app.graph.add(node);

  node.pos = [
    options.side === "left" ? nextTo.pos[0] - (node.size[0] + options.offset) : nextTo.pos[0] + nextTo.size[0] + options.offset,
    nextTo.pos[1] + options.shiftY,
  ];
  if (options.select) {
    app.canvas.selectNode(node, false);
  }
  return node;
}

app.registerExtension({
  name: "KJNodesContextmenu",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    if (nodeData.input && nodeData.input.required) {
      addMenuHandler(nodeType, function (_, options) {
        options.unshift(
          {
            content: "Add GetNode",
            callback: () => { addNode("GetNode", this, { side: "left", offset: 30 }); }
          },
          {
            content: "Add SetNode",
            callback: () => { addNode("SetNode", this, { side: "right", offset: 30 }); },
          });
      });
    }
  },
  async setup(app) {
    const updateSlots = (value) => {
      const valuesToAddToIn = ["GetNode"];
      const valuesToAddToOut = ["SetNode"];
      // Remove entries if they exist
      for (const arr of Object.values(LiteGraph.slot_types_default_in)) {
        for (const valueToAdd of valuesToAddToIn) {
          const idx = arr.indexOf(valueToAdd);
          if (idx !== -1) {
            arr.splice(idx, 1);
          }
        }
      }

      for (const arr of Object.values(LiteGraph.slot_types_default_out)) {
        for (const valueToAdd of valuesToAddToOut) {
          const idx = arr.indexOf(valueToAdd);
          if (idx !== -1) {
            arr.splice(idx, 1);
          }
        }
      }
      if (value !== "disabled") {
        for (const arr of Object.values(LiteGraph.slot_types_default_in)) {
          for (const valueToAdd of valuesToAddToIn) {
            const idx = arr.indexOf(valueToAdd);
            if (idx !== -1) {
              arr.splice(idx, 1);
            }
            if (value === "top") {
              arr.unshift(valueToAdd);
            } else {
              arr.push(valueToAdd);
            }
          }
        }

        for (const arr of Object.values(LiteGraph.slot_types_default_out)) {
          for (const valueToAdd of valuesToAddToOut) {
            const idx = arr.indexOf(valueToAdd);
            if (idx !== -1) {
              arr.splice(idx, 1);
            }
            if (value === "top") {
              arr.unshift(valueToAdd);
            } else {
              arr.push(valueToAdd);
            }
          }
        }
      }
    };

    app.ui.settings.addSetting({
      id: "KJNodes.SetGetMenu",
      name: "KJNodes: Make Set/Get -nodes defaults",
      tooltip: 'Adds Set/Get nodes to the top or bottom of the list of available node suggestions.',
      options: ['disabled', 'top', 'bottom'],
      defaultValue: 'disabled',
      type: "combo",
      onChange: updateSlots,
    });
    app.ui.settings.addSetting({
      id: "KJNodes.MiddleClickDefault",
      name: "KJNodes: Middle click default node adding",
      defaultValue: false,
      type: "boolean",
      onChange: (value) => {
        LiteGraph.middle_click_slot_add_default_node = value;
      },
    });
    app.ui.settings.addSetting({
      id: "KJNodes.nodeAutoColor",
      name: "KJNodes: Automatically set node colors",
      type: "boolean",
      defaultValue: true,
    });
    app.ui.settings.addSetting({
      id: "KJNodes.helpPopup",
      name: "KJNodes: Help popups",
      defaultValue: true,
      type: "boolean",
    });
    app.ui.settings.addSetting({
      id: "KJNodes.disablePrefix",
      name: "KJNodes: Disable automatic Set_ and Get_ prefix",
      defaultValue: true,
      type: "boolean",
    });
    app.ui.settings.addSetting({
      id: "KJNodes.browserStatus",
      name: "KJNodes: 🟢 Stoplight browser status icon 🔴",
      defaultValue: false,
      type: "boolean",
    });
  },
});
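For context, a minimal sketch of the structure updateSlots edits: LiteGraph.slot_types_default_in maps a slot type to an array of node names suggested when dragging from an input (slot_types_default_out is the mirror for outputs). The array contents here are hypothetical; the real ones are populated by ComfyUI at runtime.

// Hypothetical contents, for illustration only.
const slot_types_default_in = { IMAGE: ["PreviewImage"], CONDITIONING: [] };
const valueToAdd = "GetNode";
for (const arr of Object.values(slot_types_default_in)) {
  const idx = arr.indexOf(valueToAdd);
  if (idx !== -1) arr.splice(idx, 1); // dedupe first, as the extension does
  arr.unshift(valueToAdd);            // "top" placement; "bottom" would push instead
}
console.log(slot_types_default_in.IMAGE); // ["GetNode", "PreviewImage"]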
custom_nodes/ComfyUI-KJNodes-main/web/js/fast_preview.js
ADDED
@@ -0,0 +1,95 @@
import { app } from '../../../scripts/app.js'

//from melmass
export function makeUUID() {
  let dt = new Date().getTime()
  const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
    const r = ((dt + Math.random() * 16) % 16) | 0
    dt = Math.floor(dt / 16)
    return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
  })
  return uuid
}

function chainCallback(object, property, callback) {
  if (object == undefined) {
    //This should not happen.
    console.error("Tried to add callback to non-existent object")
    return;
  }
  if (property in object) {
    const callback_orig = object[property]
    object[property] = function () {
      const r = callback_orig.apply(this, arguments);
      callback.apply(this, arguments);
      return r
    };
  } else {
    object[property] = callback;
  }
}
app.registerExtension({
  name: 'KJNodes.FastPreview',

  async beforeRegisterNodeDef(nodeType, nodeData) {
    if (nodeData?.name === 'FastPreview') {
      chainCallback(nodeType.prototype, "onNodeCreated", function () {

        var element = document.createElement("div");
        this.uuid = makeUUID()
        element.id = `fast-preview-${this.uuid}`

        this.previewWidget = this.addDOMWidget(nodeData.name, "FastPreviewWidget", element, {
          serialize: false,
          hideOnZoom: false,
        });

        this.previewer = new Previewer(this);

        this.setSize([550, 550]);
        this.resizable = false;
        this.previewWidget.parentEl = document.createElement("div");
        this.previewWidget.parentEl.className = "fast-preview";
        this.previewWidget.parentEl.id = `fast-preview-${this.uuid}`
        element.appendChild(this.previewWidget.parentEl);

        chainCallback(this, "onExecuted", function (message) {
          let bg_image = message["bg_image"];
          this.properties.imgData = {
            name: "bg_image",
            base64: bg_image
          };
          this.previewer.refreshBackgroundImage(this);
        });

      }); // onNodeCreated
    } // node matched
  } // beforeRegisterNodeDef
}) // registerExtension

class Previewer {
  constructor(context) {
    this.node = context;
    this.previousWidth = null;
    this.previousHeight = null;
  }
  refreshBackgroundImage = () => {
    const imgData = this.node?.properties?.imgData;
    if (imgData?.base64) {
      const base64String = imgData.base64;
      const imageUrl = `data:${imgData.type};base64,${base64String}`;
      const img = new Image();
      img.src = imageUrl;
      img.onload = () => {
        const { width, height } = img;
        if (width !== this.previousWidth || height !== this.previousHeight) {
          this.node.setSize([width, height]);
          this.previousWidth = width;
          this.previousHeight = height;
        }
        this.node.previewWidget.element.style.backgroundImage = `url(${imageUrl})`;
      };
    }
  };
}
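Usage sketch for the chainCallback helper above (the object and handler names are made up for illustration): it composes a new handler onto an existing property while preserving the original handler's return value.

// Made-up node object; chainCallback runs the original handler first,
// then the added one, and returns the original's result.
const node = {
  onExecuted(msg) { console.log("original:", msg); return "kept"; }
};
chainCallback(node, "onExecuted", function (msg) { console.log("chained:", msg); });
node.onExecuted("hello"); // logs "original: hello" then "chained: hello"; returns "kept"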
custom_nodes/ComfyUI-KJNodes-main/web/js/help_popup.js
ADDED
@@ -0,0 +1,326 @@
import { app } from "../../../scripts/app.js";

// code based on mtb nodes by Mel Massadian https://github.com/melMass/comfy_mtb/
export const loadScript = (
  FILE_URL,
  async = true,
  type = 'text/javascript',
) => {
  return new Promise((resolve, reject) => {
    try {
      // Check if the script already exists
      const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
      if (existingScript) {
        resolve({ status: true, message: 'Script already loaded' })
        return
      }

      const scriptEle = document.createElement('script')
      scriptEle.type = type
      scriptEle.async = async
      scriptEle.src = FILE_URL

      scriptEle.addEventListener('load', (ev) => {
        resolve({ status: true })
      })

      scriptEle.addEventListener('error', (ev) => {
        reject({
          status: false,
          message: `Failed to load the script ${FILE_URL}`,
        })
      })

      document.body.appendChild(scriptEle)
    } catch (error) {
      reject(error)
    }
  })
}

loadScript('/kjweb_async/marked.min.js').catch((e) => {
  console.log(e)
})
loadScript('/kjweb_async/purify.min.js').catch((e) => {
  console.log(e)
})

const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold", "IC-Light", "WanVideoWrapper"];
app.registerExtension({
  name: "KJNodes.HelpPopup",
  async beforeRegisterNodeDef(nodeType, nodeData) {

    if (app.ui.settings.getSettingValue("KJNodes.helpPopup") === false) {
      return;
    }
    try {
      categories.forEach(category => {
        if (nodeData?.category?.startsWith(category)) {
          addDocumentation(nodeData, nodeType);
        }
      });
    } catch (error) {
      console.error("Error in registering KJNodes.HelpPopup", error);
    }
  },
});

const create_documentation_stylesheet = () => {
  const tag = 'kj-documentation-stylesheet'

  let styleTag = document.head.querySelector(`#${tag}`) // look the style tag up by its id

  if (!styleTag) {
    styleTag = document.createElement('style')
    styleTag.type = 'text/css'
    styleTag.id = tag
    styleTag.innerHTML = `
    .kj-documentation-popup {
      background: var(--comfy-menu-bg);
      position: absolute;
      color: var(--fg-color);
      font: 12px monospace;
      line-height: 1.5em;
      padding: 10px;
      border-radius: 10px;
      border-style: solid;
      border-width: medium;
      border-color: var(--border-color);
      z-index: 5;
      overflow: hidden;
    }
    .content-wrapper {
      overflow: auto;
      max-height: 100%;
      /* Scrollbar styling for Chrome */
      &::-webkit-scrollbar {
        width: 6px;
      }
      &::-webkit-scrollbar-track {
        background: var(--bg-color);
      }
      &::-webkit-scrollbar-thumb {
        background-color: var(--fg-color);
        border-radius: 6px;
        border: 3px solid var(--bg-color);
      }

      /* Scrollbar styling for Firefox */
      scrollbar-width: thin;
      scrollbar-color: var(--fg-color) var(--bg-color);
      a {
        color: yellow;
      }
      a:visited {
        color: orange;
      }
      a:hover {
        color: red;
      }
    }
    `
    document.head.appendChild(styleTag)
  }
}

/** Add documentation widget to the selected node */
export const addDocumentation = (
  nodeData,
  nodeType,
  opts = { icon_size: 14, icon_margin: 4 },
) => {

  opts = opts || {}
  const iconSize = opts.icon_size ? opts.icon_size : 14
  const iconMargin = opts.icon_margin ? opts.icon_margin : 4
  let docElement = null
  let contentWrapper = null
  //if no description in the node python code, don't do anything
  if (!nodeData.description) {
    return
  }

  const drawFg = nodeType.prototype.onDrawForeground
  nodeType.prototype.onDrawForeground = function (ctx) {
    const r = drawFg ? drawFg.apply(this, arguments) : undefined
    if (this.flags.collapsed) return r

    // icon position
    const x = this.size[0] - iconSize - iconMargin

    // create the popup
    if (this.show_doc && docElement === null) {
      docElement = document.createElement('div')
      contentWrapper = document.createElement('div');
      docElement.appendChild(contentWrapper);

      create_documentation_stylesheet()
      contentWrapper.classList.add('content-wrapper');
      docElement.classList.add('kj-documentation-popup')

      //parse the string from the python node code to html with marked, and sanitize the html with DOMPurify
      contentWrapper.innerHTML = DOMPurify.sanitize(marked.parse(nodeData.description))

      // resize handle
      const resizeHandle = document.createElement('div');
      resizeHandle.style.width = '0';
      resizeHandle.style.height = '0';
      resizeHandle.style.position = 'absolute';
      resizeHandle.style.bottom = '0';
      resizeHandle.style.right = '0';
      resizeHandle.style.cursor = 'se-resize';

      // Add pseudo-elements to create a triangle shape
      const borderColor = getComputedStyle(document.documentElement).getPropertyValue('--border-color').trim();
      resizeHandle.style.borderTop = '10px solid transparent';
      resizeHandle.style.borderLeft = '10px solid transparent';
      resizeHandle.style.borderBottom = `10px solid ${borderColor}`;
      resizeHandle.style.borderRight = `10px solid ${borderColor}`;

      docElement.appendChild(resizeHandle)
      let isResizing = false
      let startX, startY, startWidth, startHeight

      resizeHandle.addEventListener('mousedown', function (e) {
        e.preventDefault();
        e.stopPropagation();
        isResizing = true;
        startX = e.clientX;
        startY = e.clientY;
        startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10);
        startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10);
      },
        { signal: this.docCtrl.signal },
      );

      // close button
      const closeButton = document.createElement('div');
      closeButton.textContent = '❌';
      closeButton.style.position = 'absolute';
      closeButton.style.top = '0';
      closeButton.style.right = '0';
      closeButton.style.cursor = 'pointer';
      closeButton.style.padding = '5px';
      closeButton.style.color = 'red';
      closeButton.style.fontSize = '12px';

      docElement.appendChild(closeButton)

      closeButton.addEventListener('mousedown', (e) => {
        e.stopPropagation();
        this.show_doc = !this.show_doc
        docElement.parentNode.removeChild(docElement)
        docElement = null
        if (contentWrapper) {
          contentWrapper.remove()
          contentWrapper = null
        }
      },
        { signal: this.docCtrl.signal },
      );

      document.addEventListener('mousemove', function (e) {
        if (!isResizing) return;
        const scale = app.canvas.ds.scale;
        const newWidth = startWidth + (e.clientX - startX) / scale;
        const newHeight = startHeight + (e.clientY - startY) / scale;
        docElement.style.width = `${newWidth}px`;
        docElement.style.height = `${newHeight}px`;
      },
        { signal: this.docCtrl.signal },
      );

      document.addEventListener('mouseup', function () {
        isResizing = false
      },
        { signal: this.docCtrl.signal },
      )

      document.body.appendChild(docElement)
    }
    // close the popup
    else if (!this.show_doc && docElement !== null) {
      docElement.parentNode.removeChild(docElement)
      docElement = null
    }
    // update position of the popup
    if (this.show_doc && docElement !== null) {
      const rect = ctx.canvas.getBoundingClientRect()
      const scaleX = rect.width / ctx.canvas.width
      const scaleY = rect.height / ctx.canvas.height

      const transform = new DOMMatrix()
        .scaleSelf(scaleX, scaleY)
        .multiplySelf(ctx.getTransform())
        .translateSelf(this.size[0] * scaleX * Math.max(1.0, window.devicePixelRatio), 0)
        .translateSelf(10, -32)

      const scale = new DOMMatrix()
        .scaleSelf(transform.a, transform.d);
      const bcr = app.canvas.canvas.getBoundingClientRect()

      const styleObject = {
        transformOrigin: '0 0',
        transform: scale,
        left: `${transform.a + bcr.x + transform.e}px`,
        top: `${transform.d + bcr.y + transform.f}px`,
      };
      Object.assign(docElement.style, styleObject);
    }

    ctx.save()
    ctx.translate(x - 2, iconSize - 34)
    ctx.scale(iconSize / 32, iconSize / 32)
    ctx.strokeStyle = 'rgba(255,255,255,0.3)'
    ctx.lineCap = 'round'
    ctx.lineJoin = 'round'
    ctx.lineWidth = 2.4
    ctx.font = 'bold 36px monospace'
    ctx.fillStyle = 'orange';
    ctx.fillText('?', 0, 24)
    ctx.restore()
    return r
  }
  // handle clicking of the icon
  const mouseDown = nodeType.prototype.onMouseDown
  nodeType.prototype.onMouseDown = function (e, localPos, canvas) {
    const r = mouseDown ? mouseDown.apply(this, arguments) : undefined
    const iconX = this.size[0] - iconSize - iconMargin
    const iconY = iconSize - 34
    if (
      localPos[0] > iconX &&
      localPos[0] < iconX + iconSize &&
      localPos[1] > iconY &&
      localPos[1] < iconY + iconSize
    ) {
      if (this.show_doc === undefined) {
        this.show_doc = true
      } else {
        this.show_doc = !this.show_doc
      }
      if (this.show_doc) {
        this.docCtrl = new AbortController()
      } else {
        this.docCtrl.abort()
      }
      return true;
    }
    return r;
  }
  const onRem = nodeType.prototype.onRemoved

  nodeType.prototype.onRemoved = function () {
    const r = onRem ? onRem.apply(this, []) : undefined

    if (docElement) {
      docElement.remove()
      docElement = null
    }

    if (contentWrapper) {
      contentWrapper.remove()
      contentWrapper = null
    }
    return r
  }
}
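One pattern in the popup code above is worth isolating: every document-level listener is registered with the node's AbortController signal, so closing the popup detaches them all with a single abort(). A minimal standalone sketch:

// Standalone sketch of signal-based listener cleanup.
const docCtrl = new AbortController();
const onMove = () => { /* resize logic would go here */ };
const onUp = () => { /* stop resizing */ };
document.addEventListener('mousemove', onMove, { signal: docCtrl.signal });
document.addEventListener('mouseup', onUp, { signal: docCtrl.signal });
docCtrl.abort(); // both listeners are removed in one call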
custom_nodes/ComfyUI-KJNodes-main/web/js/jsnodes.js
ADDED
@@ -0,0 +1,374 @@
import { app } from "../../../scripts/app.js";
import { applyTextReplacements } from "../../../scripts/utils.js";

app.registerExtension({
  name: "KJNodes.jsnodes",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    if (!nodeData?.category?.startsWith("KJNodes")) {
      return;
    }
    switch (nodeData.name) {
      case "ConditioningMultiCombine":
        nodeType.prototype.onNodeCreated = function () {
          this.cond_type = "CONDITIONING"
          this.inputs_offset = nodeData.name.includes("selective") ? 1 : 0
          this.addWidget("button", "Update inputs", null, () => {
            if (!this.inputs) {
              this.inputs = [];
            }
            const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
            if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing

            if (target_number_of_inputs < this.inputs.length) {
              for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--)
                this.removeInput(i)
            }
            else {
              for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i)
                this.addInput(`conditioning_${i}`, this.cond_type)
            }
          });
        }
        break;
      case "ImageBatchMulti":
      case "ImageAddMulti":
      case "ImageConcatMulti":
      case "CrossFadeImagesMulti":
      case "TransitionImagesMulti":
        nodeType.prototype.onNodeCreated = function () {
          this._type = "IMAGE"
          this.inputs_offset = nodeData.name.includes("selective") ? 1 : 0
          this.addWidget("button", "Update inputs", null, () => {
            if (!this.inputs) {
              this.inputs = [];
            }
            const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
            if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing

            if (target_number_of_inputs < this.inputs.length) {
              for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--)
                this.removeInput(i)
            }
            else {
              for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i)
                this.addInput(`image_${i}`, this._type)
            }
          });
        }
        break;
      case "MaskBatchMulti":
        nodeType.prototype.onNodeCreated = function () {
          this._type = "MASK"
          this.inputs_offset = nodeData.name.includes("selective") ? 1 : 0
          this.addWidget("button", "Update inputs", null, () => {
            if (!this.inputs) {
              this.inputs = [];
            }
            const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
            if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing

            if (target_number_of_inputs < this.inputs.length) {
              for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--)
                this.removeInput(i)
            }
            else {
              for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i)
                this.addInput(`mask_${i}`, this._type)
            }
          });
        }
        break;

      case "FluxBlockLoraSelect":
      case "HunyuanVideoBlockLoraSelect":
        nodeType.prototype.onNodeCreated = function () {
          this.addWidget("button", "Set all", null, () => {
            const userInput = prompt("Enter the values to set for widgets (e.g., s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0):", "");
            if (userInput) {
              const regex = /([sd])?(\d+(?:,\d+|-?\d+)*?)?=(\d+(\.\d+)?)/;
              const match = userInput.match(regex);
              if (match) {
                const type = match[1];
                const indicesPart = match[2];
                const value = parseFloat(match[3]);

                let targetWidgets = [];
                if (type === 's') {
                  targetWidgets = this.widgets.filter(widget => widget.name.includes("single"));
                } else if (type === 'd') {
                  targetWidgets = this.widgets.filter(widget => widget.name.includes("double"));
                } else {
                  targetWidgets = this.widgets; // No type specified, all widgets
                }

                if (indicesPart) {
                  const indices = indicesPart.split(',').flatMap(part => {
                    if (part.includes('-')) {
                      const [start, end] = part.split('-').map(Number);
                      return Array.from({ length: end - start + 1 }, (_, i) => start + i);
                    }
                    return Number(part);
                  });

                  for (const index of indices) {
                    if (index < targetWidgets.length) {
                      targetWidgets[index].value = value;
                    }
                  }
                } else {
                  // No indices provided, set value for all target widgets
                  for (const widget of targetWidgets) {
                    widget.value = value;
                  }
                }
              } else if (!isNaN(parseFloat(userInput))) {
                // Single value provided, set it for all widgets
                const value = parseFloat(userInput);
                for (const widget of this.widgets) {
                  widget.value = value;
                }
              } else {
                alert("Invalid input format. Please use the format s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0");
              }
            } else {
              alert("Invalid input. Please enter a value.");
            }
          });
        };
        break;

      case "GetMaskSizeAndCount":
        const onGetMaskSizeConnectInput = nodeType.prototype.onConnectInput;
        nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
          const v = onGetMaskSizeConnectInput ? onGetMaskSizeConnectInput.apply(this, arguments) : undefined
          this.outputs[1]["label"] = "width"
          this.outputs[2]["label"] = "height"
          this.outputs[3]["label"] = "count"
          return v;
        }
        const onGetMaskSizeExecuted = nodeType.prototype.onAfterExecuteNode;
        nodeType.prototype.onExecuted = function (message) {
          const r = onGetMaskSizeExecuted ? onGetMaskSizeExecuted.apply(this, arguments) : undefined
          let values = message["text"].toString().split('x').map(Number);
          this.outputs[1]["label"] = values[1] + " width"
          this.outputs[2]["label"] = values[2] + " height"
          this.outputs[3]["label"] = values[0] + " count"
          return r
        }
        break;

      case "GetImageSizeAndCount":
        const onGetImageSizeConnectInput = nodeType.prototype.onConnectInput;
        nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
          console.log(this)
          const v = onGetImageSizeConnectInput ? onGetImageSizeConnectInput.apply(this, arguments) : undefined
          //console.log(this)
          this.outputs[1]["label"] = "width"
          this.outputs[2]["label"] = "height"
          this.outputs[3]["label"] = "count"
          return v;
        }
        //const onGetImageSizeExecuted = nodeType.prototype.onExecuted;
        const onGetImageSizeExecuted = nodeType.prototype.onAfterExecuteNode;
        nodeType.prototype.onExecuted = function (message) {
          console.log(this)
          const r = onGetImageSizeExecuted ? onGetImageSizeExecuted.apply(this, arguments) : undefined
          let values = message["text"].toString().split('x').map(Number);
          console.log(values)
          this.outputs[1]["label"] = values[1] + " width"
          this.outputs[2]["label"] = values[2] + " height"
          this.outputs[3]["label"] = values[0] + " count"
          return r
        }
        break;

      case "PreviewAnimation":
        const onPreviewAnimationConnectInput = nodeType.prototype.onConnectInput;
        nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
          const v = onPreviewAnimationConnectInput ? onPreviewAnimationConnectInput.apply(this, arguments) : undefined
          this.title = "Preview Animation"
          return v;
        }
        const onPreviewAnimationExecuted = nodeType.prototype.onAfterExecuteNode;
        nodeType.prototype.onExecuted = function (message) {
          const r = onPreviewAnimationExecuted ? onPreviewAnimationExecuted.apply(this, arguments) : undefined
          let values = message["text"].toString();
          this.title = "Preview Animation " + values
          return r
        }
        break;

      case "VRAM_Debug":
        const onVRAM_DebugConnectInput = nodeType.prototype.onConnectInput;
        nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
          const v = onVRAM_DebugConnectInput ? onVRAM_DebugConnectInput.apply(this, arguments) : undefined
          this.outputs[3]["label"] = "freemem_before"
          this.outputs[4]["label"] = "freemem_after"
          return v;
        }
        const onVRAM_DebugExecuted = nodeType.prototype.onAfterExecuteNode;
        nodeType.prototype.onExecuted = function (message) {
          const r = onVRAM_DebugExecuted ? onVRAM_DebugExecuted.apply(this, arguments) : undefined
          let values = message["text"].toString().split('x');
          this.outputs[3]["label"] = values[0] + " freemem_before"
          this.outputs[4]["label"] = values[1] + " freemem_after"
          return r
        }
        break;

      case "JoinStringMulti":
        const originalOnNodeCreated = nodeType.prototype.onNodeCreated || function () {};
        nodeType.prototype.onNodeCreated = function () {
          originalOnNodeCreated.apply(this, arguments);

          this._type = "STRING";
          this.inputs_offset = nodeData.name.includes("selective") ? 1 : 0;
          this.addWidget("button", "Update inputs", null, () => {
            if (!this.inputs) {
              this.inputs = [];
            }
            const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
            if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing

            if (target_number_of_inputs < this.inputs.length) {
              for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--)
                this.removeInput(i);
            } else {
              for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i)
                this.addInput(`string_${i}`, this._type);
            }
          });
        }
        break;
      case "SoundReactive":
        nodeType.prototype.onNodeCreated = function () {
          let audioContext;
          let microphoneStream;
          let animationFrameId;
          let analyser;
          let dataArray;
          let startRangeHz;
          let endRangeHz;
          let smoothingFactor = 0.5;
          let smoothedSoundLevel = 0;

          // Function to update the widget value in real-time
          const updateWidgetValueInRealTime = () => {
            // Ensure analyser and dataArray are defined before using them
            if (analyser && dataArray) {
              analyser.getByteFrequencyData(dataArray);

              const startRangeHzWidget = this.widgets.find(w => w.name === "start_range_hz");
              if (startRangeHzWidget) startRangeHz = startRangeHzWidget.value;
              const endRangeHzWidget = this.widgets.find(w => w.name === "end_range_hz");
              if (endRangeHzWidget) endRangeHz = endRangeHzWidget.value;
              const smoothingFactorWidget = this.widgets.find(w => w.name === "smoothing_factor");
              if (smoothingFactorWidget) smoothingFactor = smoothingFactorWidget.value;

              // Calculate frequency bin width (frequency resolution)
              const frequencyBinWidth = audioContext.sampleRate / analyser.fftSize;
              // Convert the widget values from Hz to indices
              const startRangeIndex = Math.floor(startRangeHz / frequencyBinWidth);
              const endRangeIndex = Math.floor(endRangeHz / frequencyBinWidth);

              // Function to calculate the average value for a frequency range
              const calculateAverage = (start, end) => {
                const sum = dataArray.slice(start, end).reduce((acc, val) => acc + val, 0);
                const average = sum / (end - start);

                // Apply exponential moving average smoothing
                smoothedSoundLevel = (average * (1 - smoothingFactor)) + (smoothedSoundLevel * smoothingFactor);
                return smoothedSoundLevel;
              };
              // Calculate the average levels for each frequency range
              const soundLevel = calculateAverage(startRangeIndex, endRangeIndex);

              // Update the widget values
              const lowLevelWidget = this.widgets.find(w => w.name === "sound_level");
              if (lowLevelWidget) lowLevelWidget.value = soundLevel;

              animationFrameId = requestAnimationFrame(updateWidgetValueInRealTime);
            }
          };

          // Function to start capturing audio from the microphone
          const startMicrophoneCapture = () => {
            // Only create the audio context and analyser once
            if (!audioContext) {
              audioContext = new (window.AudioContext || window.webkitAudioContext)();
              // Access the sample rate of the audio context
              console.log(`Sample rate: ${audioContext.sampleRate}Hz`);
              analyser = audioContext.createAnalyser();
              analyser.fftSize = 2048;
              dataArray = new Uint8Array(analyser.frequencyBinCount);
              // Get the range values from widgets (assumed to be in Hz)
              const lowRangeWidget = this.widgets.find(w => w.name === "low_range_hz");
              if (lowRangeWidget) startRangeHz = lowRangeWidget.value;

              const midRangeWidget = this.widgets.find(w => w.name === "mid_range_hz");
              if (midRangeWidget) endRangeHz = midRangeWidget.value;
            }

            navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
              microphoneStream = stream;
              const microphone = audioContext.createMediaStreamSource(stream);
              microphone.connect(analyser);
              updateWidgetValueInRealTime();
            }).catch(error => {
              console.error('Access to microphone was denied or an error occurred:', error);
            });
          };

          // Function to stop capturing audio from the microphone
          const stopMicrophoneCapture = () => {
            if (animationFrameId) {
              cancelAnimationFrame(animationFrameId);
            }
            if (microphoneStream) {
              microphoneStream.getTracks().forEach(track => track.stop());
            }
            if (audioContext) {
              audioContext.close();
              // Reset audioContext to ensure it can be created again when starting
              audioContext = null;
            }
          };

          // Add start button
          this.addWidget("button", "Start mic capture", null, startMicrophoneCapture);

          // Add stop button
          this.addWidget("button", "Stop mic capture", null, stopMicrophoneCapture);
        };
        break;
      case "SaveImageKJ":
        const onNodeCreated = nodeType.prototype.onNodeCreated;
        nodeType.prototype.onNodeCreated = function () {
          const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : void 0;
          const widget = this.widgets.find((w) => w.name === "filename_prefix");
          widget.serializeValue = () => {
            return applyTextReplacements(app, widget.value);
          };
          return r;
        };
        break;

    }

  },
  async setup() {
    // to keep Set/Get node virtual connections visible when offscreen
    const originalComputeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes;
    LGraphCanvas.prototype.computeVisibleNodes = function () {
      const visibleNodesSet = new Set(originalComputeVisibleNodes.apply(this, arguments));
      for (const node of this.graph._nodes) {
        if ((node.type === "SetNode" || node.type === "GetNode") && node.drawConnection) {
          visibleNodesSet.add(node);
        }
      }
      return Array.from(visibleNodesSet);
    };

  }
});
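The "Set all" prompt in FluxBlockLoraSelect/HunyuanVideoBlockLoraSelect accepts a compact syntax; the range expansion it performs can be seen in isolation below (standalone sketch mirroring the flatMap in the code above).

// Accepted forms: "s0,2-4=1.5" (widgets whose name contains "single", at
// indices 0,2,3,4), "d=0.8" (all "double" widgets), or "1.0" (every widget).
const indices = "0,2-4".split(',').flatMap(part => {
  if (part.includes('-')) {
    const [start, end] = part.split('-').map(Number);
    return Array.from({ length: end - start + 1 }, (_, i) => start + i);
  }
  return Number(part);
});
console.log(indices); // [0, 2, 3, 4]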
custom_nodes/ComfyUI-KJNodes-main/web/js/point_editor.js
ADDED
@@ -0,0 +1,736 @@
import { app } from '../../../scripts/app.js'

//from melmass
export function makeUUID() {
  let dt = new Date().getTime()
  const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
    const r = ((dt + Math.random() * 16) % 16) | 0
    dt = Math.floor(dt / 16)
    return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
  })
  return uuid
}

export const loadScript = (
  FILE_URL,
  async = true,
  type = 'text/javascript',
) => {
  return new Promise((resolve, reject) => {
    try {
      // Check if the script already exists
      const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
      if (existingScript) {
        resolve({ status: true, message: 'Script already loaded' })
        return
      }

      const scriptEle = document.createElement('script')
      scriptEle.type = type
      scriptEle.async = async
      scriptEle.src = FILE_URL

      scriptEle.addEventListener('load', (ev) => {
        resolve({ status: true })
      })

      scriptEle.addEventListener('error', (ev) => {
        reject({
          status: false,
          message: `Failed to load the script ${FILE_URL}`,
        })
      })

      document.body.appendChild(scriptEle)
    } catch (error) {
      reject(error)
    }
  })
}
const create_documentation_stylesheet = () => {
  const tag = 'kj-pointseditor-stylesheet'

  let styleTag = document.head.querySelector(`#${tag}`) // look the style tag up by its id

  if (!styleTag) {
    styleTag = document.createElement('style')
    styleTag.type = 'text/css'
    styleTag.id = tag
    styleTag.innerHTML = `
    .points-editor {
      position: absolute;
      font: 12px monospace;
      line-height: 1.5em;
      padding: 10px;
      z-index: 0;
      overflow: hidden;
    }
    `
    document.head.appendChild(styleTag)
  }
}

loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => {
  console.log(e)
})
loadScript('/kjweb_async/protovis.min.js').catch((e) => {
  console.log(e)
})
create_documentation_stylesheet()

function chainCallback(object, property, callback) {
  if (object == undefined) {
    //This should not happen.
    console.error("Tried to add callback to non-existent object")
    return;
  }
  if (property in object) {
    const callback_orig = object[property]
    object[property] = function () {
      const r = callback_orig.apply(this, arguments);
      callback.apply(this, arguments);
      return r
    };
  } else {
    object[property] = callback;
  }
}
app.registerExtension({
  name: 'KJNodes.PointEditor',

  async beforeRegisterNodeDef(nodeType, nodeData) {
    if (nodeData?.name === 'PointsEditor') {
      chainCallback(nodeType.prototype, "onNodeCreated", function () {

        hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates"))
        hideWidgetForGood(this, this.widgets.find(w => w.name === "neg_coordinates"))
        hideWidgetForGood(this, this.widgets.find(w => w.name === "bboxes"))

        var element = document.createElement("div");
        this.uuid = makeUUID()
        element.id = `points-editor-${this.uuid}`

        // fake image widget to allow copy/paste
        const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {});
        hideWidgetForGood(this, fakeimagewidget)

        this.pointsEditor = this.addDOMWidget(nodeData.name, "PointsEditorWidget", element, {
          serialize: false,
          hideOnZoom: false,
        });

        // context menu
        this.contextMenu = document.createElement("div");
        this.contextMenu.id = "context-menu";
        this.contextMenu.style.display = "none";
        this.contextMenu.style.position = "absolute";
        this.contextMenu.style.backgroundColor = "#202020";
        this.contextMenu.style.minWidth = "100px";
        this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)";
        this.contextMenu.style.zIndex = "100";
        this.contextMenu.style.padding = "5px";

        function styleMenuItem(menuItem) {
          menuItem.style.display = "block";
          menuItem.style.padding = "5px";
          menuItem.style.color = "#FFF";
          menuItem.style.fontFamily = "Arial, sans-serif";
          menuItem.style.fontSize = "16px";
          menuItem.style.textDecoration = "none";
          menuItem.style.marginBottom = "5px";
        }
        function createMenuItem(id, textContent) {
          let menuItem = document.createElement("a");
          menuItem.href = "#";
          menuItem.id = `menu-item-${id}`;
          menuItem.textContent = textContent;
          styleMenuItem(menuItem);
          return menuItem;
        }

        // Create an array of menu items using the createMenuItem function
        this.menuItems = [
          createMenuItem(0, "Load Image"),
          createMenuItem(1, "Clear Image"),
        ];

        // Add mouseover and mouseout event listeners to each menu item for styling
        this.menuItems.forEach(menuItem => {
          menuItem.addEventListener('mouseover', function () {
            this.style.backgroundColor = "gray";
          });

          menuItem.addEventListener('mouseout', function () {
            this.style.backgroundColor = "#202020";
          });
        });

        // Append each menu item to the context menu
        this.menuItems.forEach(menuItem => {
          this.contextMenu.appendChild(menuItem);
        });

        document.body.appendChild(this.contextMenu);

        this.addWidget("button", "New canvas", null, () => {
          if (!this.properties || !("points" in this.properties)) {
            this.editor = new PointsEditor(this);
            this.addProperty("points", this.constructor.type, "string");
            this.addProperty("neg_points", this.constructor.type, "string");
          }
          else {
            this.editor = new PointsEditor(this, true);
          }
        });

        this.setSize([550, 550]);
        this.resizable = false;
        this.pointsEditor.parentEl = document.createElement("div");
        this.pointsEditor.parentEl.className = "points-editor";
        this.pointsEditor.parentEl.id = `points-editor-${this.uuid}`
        element.appendChild(this.pointsEditor.parentEl);

        chainCallback(this, "onConfigure", function () {
          try {
            this.editor = new PointsEditor(this);
          } catch (error) {
            console.error("An error occurred while configuring the editor:", error);
          }
        });
        chainCallback(this, "onExecuted", function (message) {
          let bg_image = message["bg_image"];
          this.properties.imgData = {
            name: "bg_image",
            base64: bg_image
          };
          this.editor.refreshBackgroundImage(this);
        });

      }); // onNodeCreated
    } // node matched
  } // beforeRegisterNodeDef
}) // registerExtension

class PointsEditor {
  constructor(context, reset = false) {
    this.node = context;
    this.reset = reset;
    const self = this; // Keep a reference to the main class context

    console.log("creating PointsEditor")

    this.node.pasteFile = (file) => {
      if (file.type.startsWith("image/")) {
        this.handleImageFile(file);
        return true;
      }
      return false;
    };

    this.node.onDragOver = function (e) {
      if (e.dataTransfer && e.dataTransfer.items) {
        return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/"));
      }
      return false;
    };

    // On drop upload files
    this.node.onDragDrop = (e) => {
      console.log("onDragDrop called");
      let handled = false;
      for (const file of e.dataTransfer.files) {
        if (file.type.startsWith("image/")) {
          this.handleImageFile(file);
          handled = true;
        }
      }
      return handled;
    };

    // context menu
    this.createContextMenu();

    if (reset && context.pointsEditor.element) {
      context.pointsEditor.element.innerHTML = ''; // Clear the container
    }
    this.pos_coordWidget = context.widgets.find(w => w.name === "coordinates");
    this.neg_coordWidget = context.widgets.find(w => w.name === "neg_coordinates");
    this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store");
    this.widthWidget = context.widgets.find(w => w.name === "width");
    this.heightWidget = context.widgets.find(w => w.name === "height");
    this.bboxStoreWidget = context.widgets.find(w => w.name === "bbox_store");
    this.bboxWidget = context.widgets.find(w => w.name === "bboxes");

    //widget callbacks
    this.widthWidget.callback = () => {
      this.width = this.widthWidget.value;
      if (this.width > 256) {
        context.setSize([this.width + 45, context.size[1]]);
      }
      this.vis.width(this.width);
      this.updateData();
    }
    this.heightWidget.callback = () => {
      this.height = this.heightWidget.value
      this.vis.height(this.height)
      context.setSize([context.size[0], this.height + 300]);
      this.updateData();
    }
    this.pointsStoreWidget.callback = () => {
      this.points = JSON.parse(this.pointsStoreWidget.value).positive;
      this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative;
      this.updateData();
    }
    this.bboxStoreWidget.callback = () => {
      this.bbox = JSON.parse(this.bboxStoreWidget.value)
      this.updateData();
    }

    this.width = this.widthWidget.value;
    this.height = this.heightWidget.value;
    var i = 3;
    this.points = [];
    this.neg_points = [];
    this.bbox = [{}];
    var drawing = false;

    // Initialize or reset points array
    if (!reset && this.pointsStoreWidget.value != "") {
      this.points = JSON.parse(this.pointsStoreWidget.value).positive;
      this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative;
      this.bbox = JSON.parse(this.bboxStoreWidget.value);
      console.log(this.bbox)
    } else {
      this.points = [
        {
          x: this.width / 2, // Default positive point horizontally centered
          y: this.height / 2 // Default positive point vertically centered
        }
      ];
      this.neg_points = [
        {
          x: 0, // Default negative point at the top-left corner
          y: 0
        }
      ];
      const combinedPoints = {
        positive: this.points,
        negative: this.neg_points,
      };
      this.pointsStoreWidget.value = JSON.stringify(combinedPoints);
      this.bboxStoreWidget.value = JSON.stringify(this.bbox);
    }

    //create main canvas panel
    this.vis = new pv.Panel()
      .width(this.width)
      .height(this.height)
      .fillStyle("#222")
      .strokeStyle("gray")
      .lineWidth(2)
      .antialias(false)
      .margin(10)
      .event("mousedown", function () {
        if (pv.event.shiftKey && pv.event.button === 2) { // Use pv.event to access the event object
          let scaledMouse = {
            x: this.mouse().x / app.canvas.ds.scale,
            y: this.mouse().y / app.canvas.ds.scale
          };
          i = self.neg_points.push(scaledMouse) - 1;
          self.updateData();
          return this;
        }
        else if (pv.event.shiftKey) {
          let scaledMouse = {
            x: this.mouse().x / app.canvas.ds.scale,
            y: this.mouse().y / app.canvas.ds.scale
          };
          i = self.points.push(scaledMouse) - 1;
          self.updateData();
          return this;
        }
        else if (pv.event.ctrlKey) {
          console.log("start drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale);
          drawing = true;
          self.bbox[0].startX = this.mouse().x / app.canvas.ds.scale;
          self.bbox[0].startY = this.mouse().y / app.canvas.ds.scale;
        }
        else if (pv.event.button === 2) {
          self.node.contextMenu.style.display = 'block';
          self.node.contextMenu.style.left = `${pv.event.clientX}px`;
          self.node.contextMenu.style.top = `${pv.event.clientY}px`;
        }
      })
      .event("mousemove", function () {
        if (drawing) {
          self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale;
          self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale;
          self.vis.render();
        }
      })
      .event("mouseup", function () {
        console.log("end drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale);
        drawing = false;
        self.updateData();
      });

    this.backgroundImage = this.vis.add(pv.Image).visible(false)

    //create bounding box
    this.bounding_box = this.vis.add(pv.Area)
      .data(function () {
        if (drawing || (self.bbox && self.bbox[0] && Object.keys(self.bbox[0]).length > 0)) {
          return [self.bbox[0].startX, self.bbox[0].endX];
        } else {
          return [];
        }
      })
      .bottom(function () { return self.height - Math.max(self.bbox[0].startY, self.bbox[0].endY); })
      .left(function (d) { return d; })
      .height(function () { return Math.abs(self.bbox[0].startY - self.bbox[0].endY); })
      .fillStyle("rgba(70, 130, 180, 0.5)")
      .strokeStyle("steelblue")
      .visible(function () { return drawing || Object.keys(self.bbox[0]).length > 0; })
      .add(pv.Dot)
      .visible(function () { return drawing || Object.keys(self.bbox[0]).length > 0; })
      .data(() => {
        if (self.bbox && Object.keys(self.bbox[0]).length > 0) {
          return [{
            x: self.bbox[0].endX,
            y: self.bbox[0].endY
          }];
        } else {
          return [];
        }
      })
      .left(d => d.x)
      .top(d => d.y)
      .radius(Math.log(Math.min(self.width, self.height)) * 1)
      .shape("square")
      .cursor("move")
      .strokeStyle("steelblue")
      .lineWidth(2)
      .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
      .event("mousedown", pv.Behavior.drag())
      .event("drag", function () {
        let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new position by the inverse of the scale factor
        let adjustedY = this.mouse().y / app.canvas.ds.scale;

        // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
        adjustedX = Math.max(0, Math.min(self.vis.width(), adjustedX));
        adjustedY = Math.max(0, Math.min(self.vis.height(), adjustedY));
        self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale;
        self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale;
        self.vis.render();
      })
      .event("dragend", function () {
        self.updateData();
      });

    //create positive points
    this.vis.add(pv.Dot)
      .data(() => this.points)
      .left(d => d.x)
      .top(d => d.y)
      .radius(Math.log(Math.min(self.width, self.height)) * 4)
      .shape("circle")
      .cursor("move")
      .strokeStyle(function () { return i == this.index ? "#07f907" : "#139613"; })
      .lineWidth(4)
      .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
      .event("mousedown", pv.Behavior.drag())
      .event("dragstart", function () {
        i = this.index;
      })
      .event("dragend", function () {
        if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) {
          this.index = i;
          self.points.splice(i--, 1);
        }
        self.updateData();

      })
      .event("drag", function () {
        let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
        let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
        // Determine the bounds of the vis.Panel
        const panelWidth = self.vis.width();
        const panelHeight = self.vis.height();

        // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
        adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
        adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
        self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
        self.vis.render(); // Re-render the visualization to reflect the new position
      })

      .anchor("center")
      .add(pv.Label)
      .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left
      .top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up
      .font(25 + "px sans-serif")
      .text(d => { return this.points.indexOf(d); })
      .textStyle("#139613")
      .textShadow("2px 2px 2px black")
      .add(pv.Dot) // Add smaller point in the center
      .data(() => this.points)
      .left(d => d.x)
      .top(d => d.y)
      .radius(2) // Smaller radius for the center point
      .shape("circle")
      .fillStyle("red") // Color for the center point
      .lineWidth(1); // Stroke thickness for the center point

    //create negative points
    this.vis.add(pv.Dot)
      .data(() => this.neg_points)
      .left(d => d.x)
      .top(d => d.y)
      .radius(Math.log(Math.min(self.width, self.height)) * 4)
      .shape("circle")
      .cursor("move")
      .strokeStyle(function () { return i == this.index ? "#f91111" : "#891616"; })
      .lineWidth(4)
      .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
      .event("mousedown", pv.Behavior.drag())
      .event("dragstart", function () {
        i = this.index;
      })
      .event("dragend", function () {
        if (pv.event.button === 2 && i !== 0 && i !== self.neg_points.length - 1) {
          this.index = i;
          self.neg_points.splice(i--, 1);
        }
        self.updateData();

      })
      .event("drag", function () {
        let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
        let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
|
513 |
+
// Determine the bounds of the vis.Panel
|
514 |
+
const panelWidth = self.vis.width();
|
515 |
+
const panelHeight = self.vis.height();
|
516 |
+
|
517 |
+
// Adjust the new position if it would place the dot outside the bounds of the vis.Panel
|
518 |
+
adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
|
519 |
+
adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
|
520 |
+
self.neg_points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
|
521 |
+
self.vis.render(); // Re-render the visualization to reflect the new position
|
522 |
+
})
|
523 |
+
.anchor("center")
|
524 |
+
.add(pv.Label)
|
525 |
+
.left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left
|
526 |
+
.top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up
|
527 |
+
.font(25 + "px sans-serif")
|
528 |
+
.text(d => {return this.neg_points.indexOf(d); })
|
529 |
+
.textStyle("red")
|
530 |
+
.textShadow("2px 2px 2px black")
|
531 |
+
.add(pv.Dot) // Add smaller point in the center
|
532 |
+
.data(() => this.neg_points)
|
533 |
+
.left(d => d.x)
|
534 |
+
.top(d => d.y)
|
535 |
+
.radius(2) // Smaller radius for the center point
|
536 |
+
.shape("circle")
|
537 |
+
.fillStyle("red") // Color for the center point
|
538 |
+
.lineWidth(1); // Stroke thickness for the center point
|
539 |
+
|
540 |
+
if (this.points.length != 0) {
|
541 |
+
this.vis.render();
|
542 |
+
}
|
543 |
+
|
544 |
+
var svgElement = this.vis.canvas();
|
545 |
+
svgElement.style['zIndex'] = "2"
|
546 |
+
svgElement.style['position'] = "relative"
|
547 |
+
this.node.pointsEditor.element.appendChild(svgElement);
|
548 |
+
|
549 |
+
if (this.width > 256) {
|
550 |
+
this.node.setSize([this.width + 45, this.node.size[1]]);
|
551 |
+
}
|
552 |
+
this.node.setSize([this.node.size[0], this.height + 300]);
|
553 |
+
this.updateData();
|
554 |
+
this.refreshBackgroundImage();
|
555 |
+
|
556 |
+
}//end constructor
|
557 |
+
|
558 |
+
updateData = () => {
|
559 |
+
if (!this.points || this.points.length === 0) {
|
560 |
+
console.log("no points");
|
561 |
+
return;
|
562 |
+
}
|
563 |
+
const combinedPoints = {
|
564 |
+
positive: this.points,
|
565 |
+
negative: this.neg_points,
|
566 |
+
};
|
567 |
+
this.pointsStoreWidget.value = JSON.stringify(combinedPoints);
|
568 |
+
this.pos_coordWidget.value = JSON.stringify(this.points);
|
569 |
+
this.neg_coordWidget.value = JSON.stringify(this.neg_points);
|
570 |
+
|
571 |
+
if (this.bbox.length != 0) {
|
572 |
+
let bboxString = JSON.stringify(this.bbox);
|
573 |
+
this.bboxStoreWidget.value = bboxString;
|
574 |
+
this.bboxWidget.value = bboxString;
|
575 |
+
}
|
576 |
+
|
577 |
+
this.vis.render();
|
578 |
+
};
|
579 |
+
|
580 |
+
handleImageLoad = (img, file, base64String) => {
|
581 |
+
console.log(img.width, img.height); // Access width and height here
|
582 |
+
this.widthWidget.value = img.width;
|
583 |
+
this.heightWidget.value = img.height;
|
584 |
+
|
585 |
+
if (img.width != this.vis.width() || img.height != this.vis.height()) {
|
586 |
+
if (img.width > 256) {
|
587 |
+
this.node.setSize([img.width + 45, this.node.size[1]]);
|
588 |
+
}
|
589 |
+
this.node.setSize([this.node.size[0], img.height + 300]);
|
590 |
+
this.vis.width(img.width);
|
591 |
+
this.vis.height(img.height);
|
592 |
+
this.height = img.height;
|
593 |
+
this.width = img.width;
|
594 |
+
this.updateData();
|
595 |
+
}
|
596 |
+
this.backgroundImage.url(file ? URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render();
|
597 |
+
};
|
598 |
+
|
599 |
+
processImage = (img, file) => {
|
600 |
+
const canvas = document.createElement('canvas');
|
601 |
+
const ctx = canvas.getContext('2d');
|
602 |
+
|
603 |
+
const maxWidth = 800; // maximum width
|
604 |
+
const maxHeight = 600; // maximum height
|
605 |
+
let width = img.width;
|
606 |
+
let height = img.height;
|
607 |
+
|
608 |
+
// Calculate the new dimensions while preserving the aspect ratio
|
609 |
+
if (width > height) {
|
610 |
+
if (width > maxWidth) {
|
611 |
+
height *= maxWidth / width;
|
612 |
+
width = maxWidth;
|
613 |
+
}
|
614 |
+
} else {
|
615 |
+
if (height > maxHeight) {
|
616 |
+
width *= maxHeight / height;
|
617 |
+
height = maxHeight;
|
618 |
+
}
|
619 |
+
}
|
620 |
+
|
621 |
+
canvas.width = width;
|
622 |
+
canvas.height = height;
|
623 |
+
ctx.drawImage(img, 0, 0, width, height);
|
624 |
+
|
625 |
+
// Get the compressed image data as a Base64 string
|
626 |
+
const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1
|
627 |
+
|
628 |
+
this.node.properties.imgData = {
|
629 |
+
name: file.name,
|
630 |
+
lastModified: file.lastModified,
|
631 |
+
size: file.size,
|
632 |
+
type: file.type,
|
633 |
+
base64: base64String
|
634 |
+
};
|
635 |
+
this.handleImageLoad(img, file, base64String);
|
636 |
+
};
|
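// Worked example of the resize math above (illustrative comment, not part of
// the node): a 1600x900 source has width > height and width > maxWidth, so
// height becomes 900 * (800 / 1600) = 450 and width becomes 800; the 800x450
// canvas is then re-encoded with toDataURL('image/jpeg', 0.5), i.e. at 50%
// JPEG quality, before the "data:...," prefix is stripped for storage.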
637 |
+
|
638 |
+
handleImageFile = (file) => {
|
639 |
+
const reader = new FileReader();
|
640 |
+
reader.onloadend = () => {
|
641 |
+
const img = new Image();
|
642 |
+
img.src = reader.result;
|
643 |
+
img.onload = () => this.processImage(img, file);
|
644 |
+
};
|
645 |
+
reader.readAsDataURL(file);
|
646 |
+
|
647 |
+
const imageUrl = URL.createObjectURL(file);
|
648 |
+
const img = new Image();
|
649 |
+
img.src = imageUrl;
|
650 |
+
img.onload = () => this.handleImageLoad(img, file, null);
|
651 |
+
};
|
652 |
+
|
653 |
+
refreshBackgroundImage = () => {
|
654 |
+
if (this.node.properties.imgData && this.node.properties.imgData.base64) {
|
655 |
+
const base64String = this.node.properties.imgData.base64;
|
656 |
+
const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`;
|
657 |
+
const img = new Image();
|
658 |
+
img.src = imageUrl;
|
659 |
+
img.onload = () => this.handleImageLoad(img, null, base64String);
|
660 |
+
}
|
661 |
+
};
|
662 |
+
|
663 |
+
createContextMenu = () => {
|
664 |
+
const self = this;
|
665 |
+
document.addEventListener('contextmenu', function (e) {
|
666 |
+
e.preventDefault();
|
667 |
+
});
|
668 |
+
|
669 |
+
document.addEventListener('click', function (e) {
|
670 |
+
if (!self.node.contextMenu.contains(e.target)) {
|
671 |
+
self.node.contextMenu.style.display = 'none';
|
672 |
+
}
|
673 |
+
});
|
674 |
+
|
675 |
+
this.node.menuItems.forEach((menuItem, index) => {
|
676 |
+
// 'self' is already captured above; no per-item rebinding is needed
|
677 |
+
menuItem.addEventListener('click', function (e) {
|
678 |
+
e.preventDefault();
|
679 |
+
switch (index) {
|
680 |
+
case 0:
|
681 |
+
// Create file input element
|
682 |
+
const fileInput = document.createElement('input');
|
683 |
+
fileInput.type = 'file';
|
684 |
+
fileInput.accept = 'image/*'; // Accept only image files
|
685 |
+
|
686 |
+
// Listen for file selection
|
687 |
+
fileInput.addEventListener('change', function (event) {
|
688 |
+
const file = event.target.files[0]; // Get the selected file
|
689 |
+
|
690 |
+
if (file) {
|
691 |
+
const imageUrl = URL.createObjectURL(file);
|
692 |
+
let img = new Image();
|
693 |
+
img.src = imageUrl;
|
694 |
+
img.onload = () => self.handleImageLoad(img, file, null);
|
695 |
+
}
|
696 |
+
});
|
697 |
+
|
698 |
+
fileInput.click();
|
699 |
+
|
700 |
+
self.node.contextMenu.style.display = 'none';
|
701 |
+
break;
|
702 |
+
case 1:
|
703 |
+
self.backgroundImage.visible(false).root.render();
|
704 |
+
self.node.properties.imgData = null;
|
705 |
+
self.node.contextMenu.style.display = 'none';
|
706 |
+
break;
|
707 |
+
}
|
708 |
+
});
|
709 |
+
});
|
710 |
+
}//end createContextMenu
|
711 |
+
}//end class
|
712 |
+
|
713 |
+
|
714 |
+
//from melmass
|
715 |
+
export function hideWidgetForGood(node, widget, suffix = '') {
|
716 |
+
widget.origType = widget.type
|
717 |
+
widget.origComputeSize = widget.computeSize
|
718 |
+
widget.origSerializeValue = widget.serializeValue
|
719 |
+
widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
|
720 |
+
widget.type = "converted-widget" + suffix
|
721 |
+
// widget.serializeValue = () => {
|
722 |
+
// // Prevent serializing the widget if we have no input linked
|
723 |
+
// const w = node.inputs?.find((i) => i.widget?.name === widget.name);
|
724 |
+
// if (w?.link == null) {
|
725 |
+
// return undefined;
|
726 |
+
// }
|
727 |
+
// return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
|
728 |
+
// };
|
729 |
+
|
730 |
+
// Hide any linked widgets, e.g. seed+seedControl
|
731 |
+
if (widget.linkedWidgets) {
|
732 |
+
for (const w of widget.linkedWidgets) {
|
733 |
+
hideWidgetForGood(node, w, ':' + widget.name)
|
734 |
+
}
|
735 |
+
}
|
736 |
+
}
|
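hideWidgetForGood is reused by the other editors in this package (spline_editor.js below calls it on its "coordinates" widget). A minimal sketch of calling it from another extension's onNodeCreated hook; the extension name "Example.HideWidget" and node name "MyNode" are placeholders, not part of this repository:

import { app } from "../../../scripts/app.js";
import { hideWidgetForGood } from "./point_editor.js";

app.registerExtension({
    name: "Example.HideWidget",
    async beforeRegisterNodeDef(nodeType, nodeData) {
        if (nodeData?.name !== "MyNode") return;
        const onNodeCreated = nodeType.prototype.onNodeCreated;
        nodeType.prototype.onNodeCreated = function () {
            onNodeCreated?.apply(this, arguments);
            // Collapse the widget to [0, -4] so it neither renders nor takes
            // space, while its value still serializes with the workflow.
            const widget = this.widgets.find(w => w.name === "coordinates");
            if (widget) hideWidgetForGood(this, widget);
        };
    },
});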
custom_nodes/ComfyUI-KJNodes-main/web/js/setgetnodes.js
ADDED
@@ -0,0 +1,564 @@
1 |
+
import { app } from "../../../scripts/app.js";
|
2 |
+
|
3 |
+
//based on diffus3's SetGet: https://github.com/diffus3/ComfyUI-extensions
|
4 |
+
|
5 |
+
// Nodes that allow you to tunnel connections for cleaner graphs
|
6 |
+
function setColorAndBgColor(type) {
|
7 |
+
const colorMap = {
|
8 |
+
"MODEL": LGraphCanvas.node_colors.blue,
|
9 |
+
"LATENT": LGraphCanvas.node_colors.purple,
|
10 |
+
"VAE": LGraphCanvas.node_colors.red,
|
11 |
+
"CONDITIONING": LGraphCanvas.node_colors.brown,
|
12 |
+
"IMAGE": LGraphCanvas.node_colors.pale_blue,
|
13 |
+
"CLIP": LGraphCanvas.node_colors.yellow,
|
14 |
+
"FLOAT": LGraphCanvas.node_colors.green,
|
15 |
+
"MASK": { color: "#1c5715", bgcolor: "#1f401b"},
|
16 |
+
"INT": { color: "#1b4669", bgcolor: "#29699c"},
|
17 |
+
"CONTROL_NET": { color: "#156653", bgcolor: "#1c453b"},
|
18 |
+
"NOISE": { color: "#2e2e2e", bgcolor: "#242121"},
|
19 |
+
"GUIDER": { color: "#3c7878", bgcolor: "#1c453b"},
|
20 |
+
"SAMPLER": { color: "#614a4a", bgcolor: "#3b2c2c"},
|
21 |
+
"SIGMAS": { color: "#485248", bgcolor: "#272e27"},
|
22 |
+
|
23 |
+
};
|
24 |
+
|
25 |
+
const colors = colorMap[type];
|
26 |
+
if (colors) {
|
27 |
+
this.color = colors.color;
|
28 |
+
this.bgcolor = colors.bgcolor;
|
29 |
+
}
|
30 |
+
}
|
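// Illustrative note: the function reads and writes this.color / this.bgcolor,
// so it is always invoked with an explicit receiver further below, e.g.
//   setColorAndBgColor.call(node, "MODEL");
// which tints `node` with LGraphCanvas.node_colors.blue.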
31 |
+
let disablePrefix = app.ui.settings.getSettingValue("KJNodes.disablePrefix")
|
32 |
+
const LGraphNode = LiteGraph.LGraphNode
|
33 |
+
|
34 |
+
function showAlert(message) {
|
35 |
+
app.extensionManager.toast.add({
|
36 |
+
severity: 'warn',
|
37 |
+
summary: "KJ Get/Set",
|
38 |
+
detail: `${message}. Most likely you're missing custom nodes`,
|
39 |
+
life: 5000,
|
40 |
+
})
|
41 |
+
}
|
42 |
+
app.registerExtension({
|
43 |
+
name: "SetNode",
|
44 |
+
registerCustomNodes() {
|
45 |
+
class SetNode extends LGraphNode {
|
46 |
+
defaultVisibility = true;
|
47 |
+
serialize_widgets = true;
|
48 |
+
drawConnection = false;
|
49 |
+
currentGetters = null;
|
50 |
+
slotColor = "#FFF";
|
51 |
+
canvas = app.canvas;
|
52 |
+
menuEntry = "Show connections";
|
53 |
+
|
54 |
+
constructor(title) {
|
55 |
+
super(title)
|
56 |
+
if (!this.properties) {
|
57 |
+
this.properties = {
|
58 |
+
"previousName": ""
|
59 |
+
};
|
60 |
+
}
|
61 |
+
this.properties.showOutputText = SetNode.defaultVisibility;
|
62 |
+
|
63 |
+
const node = this;
|
64 |
+
|
65 |
+
this.addWidget(
|
66 |
+
"text",
|
67 |
+
"Constant",
|
68 |
+
'',
|
69 |
+
(s, t, u, v, x) => {
|
70 |
+
node.validateName(node.graph);
|
71 |
+
if(this.widgets[0].value !== ''){
|
72 |
+
this.title = (!disablePrefix ? "Set_" : "") + this.widgets[0].value;
|
73 |
+
}
|
74 |
+
this.update();
|
75 |
+
this.properties.previousName = this.widgets[0].value;
|
76 |
+
},
|
77 |
+
{}
|
78 |
+
)
|
79 |
+
|
80 |
+
this.addInput("*", "*");
|
81 |
+
this.addOutput("*", '*');
|
82 |
+
|
83 |
+
this.onConnectionsChange = function(
|
84 |
+
slotType, //1 = input, 2 = output
|
85 |
+
slot,
|
86 |
+
isChangeConnect,
|
87 |
+
link_info,
|
88 |
+
output
|
89 |
+
) {
|
90 |
+
//On Disconnect
|
91 |
+
if (slotType == 1 && !isChangeConnect) {
|
92 |
+
if(this.inputs[slot].name === ''){
|
93 |
+
this.inputs[slot].type = '*';
|
94 |
+
this.inputs[slot].name = '*';
|
95 |
+
this.title = "Set"
|
96 |
+
}
|
97 |
+
}
|
98 |
+
if (slotType == 2 && !isChangeConnect) {
|
99 |
+
this.outputs[slot].type = '*';
|
100 |
+
this.outputs[slot].name = '*';
|
101 |
+
|
102 |
+
}
|
103 |
+
//On Connect
|
104 |
+
if (link_info && node.graph && slotType == 1 && isChangeConnect) {
|
105 |
+
const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
|
106 |
+
|
107 |
+
if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) {
|
108 |
+
const type = fromNode.outputs[link_info.origin_slot].type;
|
109 |
+
|
110 |
+
if (this.title === "Set"){
|
111 |
+
this.title = (!disablePrefix ? "Set_" : "") + type;
|
112 |
+
}
|
113 |
+
if (this.widgets[0].value === '*'){
|
114 |
+
this.widgets[0].value = type
|
115 |
+
}
|
116 |
+
|
117 |
+
this.validateName(node.graph);
|
118 |
+
this.inputs[0].type = type;
|
119 |
+
this.inputs[0].name = type;
|
120 |
+
|
121 |
+
if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){
|
122 |
+
setColorAndBgColor.call(this, type);
|
123 |
+
}
|
124 |
+
} else {
|
125 |
+
showAlert("node input undefined.")
|
126 |
+
}
|
127 |
+
}
|
128 |
+
if (link_info && node.graph && slotType == 2 && isChangeConnect) {
|
129 |
+
const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
|
130 |
+
|
131 |
+
if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) {
|
132 |
+
const type = fromNode.inputs[link_info.origin_slot].type;
|
133 |
+
|
134 |
+
this.outputs[0].type = type;
|
135 |
+
this.outputs[0].name = type;
|
136 |
+
} else {
|
137 |
+
showAlert('node output undefined');
|
138 |
+
}
|
139 |
+
}
|
140 |
+
|
141 |
+
|
142 |
+
//Update either way
|
143 |
+
this.update();
|
144 |
+
}
|
145 |
+
|
146 |
+
this.validateName = function(graph) {
|
147 |
+
let widgetValue = node.widgets[0].value;
|
148 |
+
|
149 |
+
if (widgetValue !== '') {
|
150 |
+
let tries = 0;
|
151 |
+
const existingValues = new Set();
|
152 |
+
|
153 |
+
graph._nodes.forEach(otherNode => {
|
154 |
+
if (otherNode !== this && otherNode.type === 'SetNode') {
|
155 |
+
existingValues.add(otherNode.widgets[0].value);
|
156 |
+
}
|
157 |
+
});
|
158 |
+
|
159 |
+
while (existingValues.has(widgetValue)) {
|
160 |
+
widgetValue = node.widgets[0].value + "_" + tries;
|
161 |
+
tries++;
|
162 |
+
}
|
163 |
+
|
164 |
+
node.widgets[0].value = widgetValue;
|
165 |
+
this.update();
|
166 |
+
}
|
167 |
+
}
|
168 |
+
|
169 |
+
this.clone = function () {
|
170 |
+
const cloned = SetNode.prototype.clone.apply(this);
|
171 |
+
cloned.inputs[0].name = '*';
|
172 |
+
cloned.inputs[0].type = '*';
|
173 |
+
cloned.value = '';
|
174 |
+
cloned.properties.previousName = '';
|
175 |
+
cloned.size = cloned.computeSize();
|
176 |
+
return cloned;
|
177 |
+
};
|
178 |
+
|
179 |
+
this.onAdded = function(graph) {
|
180 |
+
this.validateName(graph);
|
181 |
+
}
|
182 |
+
|
183 |
+
|
184 |
+
this.update = function() {
|
185 |
+
if (!node.graph) {
|
186 |
+
return;
|
187 |
+
}
|
188 |
+
|
189 |
+
const getters = this.findGetters(node.graph);
|
190 |
+
getters.forEach(getter => {
|
191 |
+
getter.setType(this.inputs[0].type);
|
192 |
+
});
|
193 |
+
|
194 |
+
if (this.widgets[0].value) {
|
195 |
+
const gettersWithPreviousName = this.findGetters(node.graph, true);
|
196 |
+
gettersWithPreviousName.forEach(getter => {
|
197 |
+
getter.setName(this.widgets[0].value);
|
198 |
+
});
|
199 |
+
}
|
200 |
+
|
201 |
+
const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode");
|
202 |
+
allGetters.forEach(otherNode => {
|
203 |
+
if (otherNode.setComboValues) {
|
204 |
+
otherNode.setComboValues();
|
205 |
+
}
|
206 |
+
});
|
207 |
+
}
|
208 |
+
|
209 |
+
|
210 |
+
this.findGetters = function(graph, checkForPreviousName) {
|
211 |
+
const name = checkForPreviousName ? this.properties.previousName : this.widgets[0].value;
|
212 |
+
return graph._nodes.filter(otherNode => otherNode.type === 'GetNode' && otherNode.widgets[0].value === name && name !== '');
|
213 |
+
}
|
214 |
+
|
215 |
+
|
216 |
+
// This node is purely frontend and does not impact the resulting prompt so should not be serialized
|
217 |
+
this.isVirtualNode = true;
|
218 |
+
}
|
219 |
+
|
220 |
+
|
221 |
+
onRemoved() {
|
222 |
+
const allGetters = this.graph._nodes.filter((otherNode) => otherNode.type == "GetNode");
|
223 |
+
allGetters.forEach((otherNode) => {
|
224 |
+
if (otherNode.setComboValues) {
|
225 |
+
otherNode.setComboValues([this]);
|
226 |
+
}
|
227 |
+
})
|
228 |
+
}
|
229 |
+
getExtraMenuOptions(_, options) {
|
230 |
+
this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
|
231 |
+
options.unshift(
|
232 |
+
{
|
233 |
+
content: this.menuEntry,
|
234 |
+
callback: () => {
|
235 |
+
this.currentGetters = this.findGetters(this.graph);
|
236 |
+
if (this.currentGetters.length == 0) return;
|
237 |
+
let linkType = (this.currentGetters[0].outputs[0].type);
|
238 |
+
this.slotColor = this.canvas.default_connection_color_byType[linkType]
|
239 |
+
this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
|
240 |
+
this.drawConnection = !this.drawConnection;
|
241 |
+
this.canvas.setDirty(true, true);
|
242 |
+
|
243 |
+
},
|
244 |
+
has_submenu: true,
|
245 |
+
submenu: {
|
246 |
+
title: "Color",
|
247 |
+
options: [
|
248 |
+
{
|
249 |
+
content: "Highlight",
|
250 |
+
callback: () => {
|
251 |
+
this.slotColor = "orange"
|
252 |
+
this.canvas.setDirty(true, true);
|
253 |
+
}
|
254 |
+
}
|
255 |
+
],
|
256 |
+
},
|
257 |
+
},
|
258 |
+
{
|
259 |
+
content: "Hide all connections",
|
260 |
+
callback: () => {
|
261 |
+
const allGetters = this.graph._nodes.filter(otherNode => otherNode.type === "GetNode" || otherNode.type === "SetNode");
|
262 |
+
allGetters.forEach(otherNode => {
|
263 |
+
otherNode.drawConnection = false;
|
264 |
+
console.log(otherNode);
|
265 |
+
});
|
266 |
+
|
267 |
+
this.menuEntry = "Show connections";
|
268 |
+
this.drawConnection = false
|
269 |
+
this.canvas.setDirty(true, true);
|
270 |
+
|
271 |
+
},
|
272 |
+
|
273 |
+
},
|
274 |
+
);
|
275 |
+
// Dynamically add a submenu for all getters
|
276 |
+
this.currentGetters = this.findGetters(this.graph);
|
277 |
+
if (this.currentGetters) {
|
278 |
+
|
279 |
+
let gettersSubmenu = this.currentGetters.map(getter => ({
|
280 |
+
|
281 |
+
content: `${getter.title} id: ${getter.id}`,
|
282 |
+
callback: () => {
|
283 |
+
this.canvas.centerOnNode(getter);
|
284 |
+
this.canvas.selectNode(getter, false);
|
285 |
+
this.canvas.setDirty(true, true);
|
286 |
+
|
287 |
+
},
|
288 |
+
}));
|
289 |
+
|
290 |
+
options.unshift({
|
291 |
+
content: "Getters",
|
292 |
+
has_submenu: true,
|
293 |
+
submenu: {
|
294 |
+
title: "GetNodes",
|
295 |
+
options: gettersSubmenu,
|
296 |
+
}
|
297 |
+
});
|
298 |
+
}
|
299 |
+
}
|
300 |
+
|
301 |
+
|
302 |
+
onDrawForeground(ctx, lGraphCanvas) {
|
303 |
+
if (this.drawConnection) {
|
304 |
+
this._drawVirtualLinks(lGraphCanvas, ctx);
|
305 |
+
}
|
306 |
+
}
|
307 |
+
// onDrawCollapsed(ctx, lGraphCanvas) {
|
308 |
+
// if (this.drawConnection) {
|
309 |
+
// this._drawVirtualLinks(lGraphCanvas, ctx);
|
310 |
+
// }
|
311 |
+
// }
|
312 |
+
_drawVirtualLinks(lGraphCanvas, ctx) {
|
313 |
+
if (!this.currentGetters?.length) return;
|
314 |
+
var title = this.getTitle ? this.getTitle() : this.title;
|
315 |
+
var title_width = ctx.measureText(title).width;
|
316 |
+
if (!this.flags.collapsed) {
|
317 |
+
var start_node_slotpos = [
|
318 |
+
this.size[0],
|
319 |
+
LiteGraph.NODE_TITLE_HEIGHT * 0.5,
|
320 |
+
];
|
321 |
+
}
|
322 |
+
else {
|
323 |
+
|
324 |
+
var start_node_slotpos = [
|
325 |
+
title_width + 55,
|
326 |
+
-15,
|
327 |
+
|
328 |
+
];
|
329 |
+
}
|
330 |
+
// Provide a default link object with necessary properties, to avoid errors as link can't be null anymore
|
331 |
+
const defaultLink = { type: 'default', color: this.slotColor };
|
332 |
+
|
333 |
+
for (const getter of this.currentGetters) {
|
334 |
+
if (!this.flags.collapsed) {
|
335 |
+
var end_node_slotpos = this.getConnectionPos(false, 0);
|
336 |
+
end_node_slotpos = [
|
337 |
+
getter.pos[0] - end_node_slotpos[0] + this.size[0],
|
338 |
+
getter.pos[1] - end_node_slotpos[1]
|
339 |
+
];
|
340 |
+
}
|
341 |
+
else {
|
342 |
+
var end_node_slotpos = this.getConnectionPos(false, 0);
|
343 |
+
end_node_slotpos = [
|
344 |
+
getter.pos[0] - end_node_slotpos[0] + title_width + 50,
|
345 |
+
getter.pos[1] - end_node_slotpos[1] - 30
|
346 |
+
];
|
347 |
+
}
|
348 |
+
lGraphCanvas.renderLink(
|
349 |
+
ctx,
|
350 |
+
start_node_slotpos,
|
351 |
+
end_node_slotpos,
|
352 |
+
defaultLink,
|
353 |
+
false,
|
354 |
+
null,
|
355 |
+
this.slotColor,
|
356 |
+
LiteGraph.RIGHT,
|
357 |
+
LiteGraph.LEFT
|
358 |
+
);
|
359 |
+
}
|
360 |
+
}
|
361 |
+
}
|
362 |
+
|
363 |
+
LiteGraph.registerNodeType(
|
364 |
+
"SetNode",
|
365 |
+
Object.assign(SetNode, {
|
366 |
+
title: "Set",
|
367 |
+
})
|
368 |
+
);
|
369 |
+
|
370 |
+
SetNode.category = "KJNodes";
|
371 |
+
},
|
372 |
+
});
|
373 |
+
|
374 |
+
app.registerExtension({
|
375 |
+
name: "GetNode",
|
376 |
+
registerCustomNodes() {
|
377 |
+
class GetNode extends LGraphNode {
|
378 |
+
|
379 |
+
defaultVisibility = true;
|
380 |
+
serialize_widgets = true;
|
381 |
+
drawConnection = false;
|
382 |
+
slotColor = "#FFF";
|
383 |
+
currentSetter = null;
|
384 |
+
canvas = app.canvas;
|
385 |
+
|
386 |
+
constructor(title) {
|
387 |
+
super(title)
|
388 |
+
if (!this.properties) {
|
389 |
+
this.properties = {};
|
390 |
+
}
|
391 |
+
this.properties.showOutputText = GetNode.defaultVisibility;
|
392 |
+
const node = this;
|
393 |
+
this.addWidget(
|
394 |
+
"combo",
|
395 |
+
"Constant",
|
396 |
+
"",
|
397 |
+
(e) => {
|
398 |
+
this.onRename();
|
399 |
+
},
|
400 |
+
{
|
401 |
+
values: () => {
|
402 |
+
const setterNodes = node.graph._nodes.filter((otherNode) => otherNode.type == 'SetNode');
|
403 |
+
return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort();
|
404 |
+
}
|
405 |
+
}
|
406 |
+
)
|
407 |
+
|
408 |
+
this.addOutput("*", '*');
|
409 |
+
this.onConnectionsChange = function(
|
410 |
+
slotType, //1 = input, 2 = output
|
411 |
+
slot, //self-explanatory
|
412 |
+
isChangeConnect,
|
413 |
+
link_info,
|
414 |
+
output
|
415 |
+
) {
|
416 |
+
this.validateLinks();
|
417 |
+
}
|
418 |
+
|
419 |
+
this.setName = function(name) {
|
420 |
+
node.widgets[0].value = name;
|
421 |
+
node.onRename();
|
422 |
+
node.serialize();
|
423 |
+
}
|
424 |
+
|
425 |
+
this.onRename = function() {
|
426 |
+
const setter = this.findSetter(node.graph);
|
427 |
+
if (setter) {
|
428 |
+
let linkType = (setter.inputs[0].type);
|
429 |
+
|
430 |
+
this.setType(linkType);
|
431 |
+
this.title = (!disablePrefix ? "Get_" : "") + setter.widgets[0].value;
|
432 |
+
|
433 |
+
if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){
|
434 |
+
setColorAndBgColor.call(this, linkType);
|
435 |
+
}
|
436 |
+
|
437 |
+
} else {
|
438 |
+
this.setType('*');
|
439 |
+
}
|
440 |
+
}
|
441 |
+
|
442 |
+
this.clone = function () {
|
443 |
+
const cloned = GetNode.prototype.clone.apply(this);
|
444 |
+
cloned.size = cloned.computeSize();
|
445 |
+
return cloned;
|
446 |
+
};
|
447 |
+
|
448 |
+
this.validateLinks = function() {
|
449 |
+
if (this.outputs[0].type !== '*' && this.outputs[0].links) {
|
450 |
+
this.outputs[0].links.filter(linkId => {
|
451 |
+
const link = node.graph.links[linkId];
|
452 |
+
return link && (!link.type.split(",").includes(this.outputs[0].type) && link.type !== '*');
|
453 |
+
}).forEach(linkId => {
|
454 |
+
node.graph.removeLink(linkId);
|
455 |
+
});
|
456 |
+
}
|
457 |
+
};
|
458 |
+
|
459 |
+
this.setType = function(type) {
|
460 |
+
this.outputs[0].name = type;
|
461 |
+
this.outputs[0].type = type;
|
462 |
+
this.validateLinks();
|
463 |
+
}
|
464 |
+
|
465 |
+
this.findSetter = function(graph) {
|
466 |
+
const name = this.widgets[0].value;
|
467 |
+
const foundNode = graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== '');
|
468 |
+
return foundNode;
|
469 |
+
};
|
470 |
+
|
471 |
+
this.goToSetter = function() {
|
472 |
+
const setter = this.findSetter(this.graph);
|
473 |
+
this.canvas.centerOnNode(setter);
|
474 |
+
this.canvas.selectNode(setter, false);
|
475 |
+
};
|
476 |
+
|
477 |
+
// This node is purely frontend and does not impact the resulting prompt so should not be serialized
|
478 |
+
this.isVirtualNode = true;
|
479 |
+
}
|
480 |
+
|
481 |
+
getInputLink(slot) {
|
482 |
+
const setter = this.findSetter(this.graph);
|
483 |
+
|
484 |
+
if (setter) {
|
485 |
+
const slotInfo = setter.inputs[slot];
|
486 |
+
const link = this.graph.links[slotInfo.link];
|
487 |
+
return link;
|
488 |
+
} else {
|
489 |
+
const errorMessage = "No SetNode found for " + this.widgets[0].value + " (" + this.type + ")";
|
490 |
+
showAlert(errorMessage);
|
491 |
+
//throw new Error(errorMessage);
|
492 |
+
}
|
493 |
+
}
|
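// Illustrative note: because isVirtualNode is true, the graph executor never
// runs GetNode itself; when the prompt is built, the frontend follows links
// through virtual nodes via getInputLink(), so the link returned here splices
// the setter's upstream connection directly into whatever consumes this
// node's output.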
494 |
+
onAdded(graph) {
|
495 |
+
}
|
496 |
+
getExtraMenuOptions(_, options) {
|
497 |
+
let menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
|
498 |
+
|
499 |
+
options.unshift(
|
500 |
+
{
|
501 |
+
content: "Go to setter",
|
502 |
+
callback: () => {
|
503 |
+
this.goToSetter();
|
504 |
+
},
|
505 |
+
},
|
506 |
+
{
|
507 |
+
content: menuEntry,
|
508 |
+
callback: () => {
|
509 |
+
this.currentSetter = this.findSetter(this.graph);
|
510 |
+
if (!this.currentSetter) return;
|
511 |
+
let linkType = (this.currentSetter.inputs[0].type);
|
512 |
+
this.drawConnection = !this.drawConnection;
|
513 |
+
this.slotColor = this.canvas.default_connection_color_byType[linkType]
|
514 |
+
menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
|
515 |
+
this.canvas.setDirty(true, true);
|
516 |
+
},
|
517 |
+
},
|
518 |
+
);
|
519 |
+
}
|
520 |
+
|
521 |
+
onDrawForeground(ctx, lGraphCanvas) {
|
522 |
+
if (this.drawConnection) {
|
523 |
+
this._drawVirtualLink(lGraphCanvas, ctx);
|
524 |
+
}
|
525 |
+
}
|
526 |
+
// onDrawCollapsed(ctx, lGraphCanvas) {
|
527 |
+
// if (this.drawConnection) {
|
528 |
+
// this._drawVirtualLink(lGraphCanvas, ctx);
|
529 |
+
// }
|
530 |
+
// }
|
531 |
+
_drawVirtualLink(lGraphCanvas, ctx) {
|
532 |
+
if (!this.currentSetter) return;
|
533 |
+
|
534 |
+
// Provide a default link object with necessary properties, to avoid errors as link can't be null anymore
|
535 |
+
const defaultLink = { type: 'default', color: this.slotColor };
|
536 |
+
|
537 |
+
let start_node_slotpos = this.currentSetter.getConnectionPos(false, 0);
|
538 |
+
start_node_slotpos = [
|
539 |
+
start_node_slotpos[0] - this.pos[0],
|
540 |
+
start_node_slotpos[1] - this.pos[1],
|
541 |
+
];
|
542 |
+
let end_node_slotpos = [0, -LiteGraph.NODE_TITLE_HEIGHT * 0.5];
|
543 |
+
lGraphCanvas.renderLink(
|
544 |
+
ctx,
|
545 |
+
start_node_slotpos,
|
546 |
+
end_node_slotpos,
|
547 |
+
defaultLink,
|
548 |
+
false,
|
549 |
+
null,
|
550 |
+
this.slotColor
|
551 |
+
);
|
552 |
+
}
|
553 |
+
}
|
554 |
+
|
555 |
+
LiteGraph.registerNodeType(
|
556 |
+
"GetNode",
|
557 |
+
Object.assign(GetNode, {
|
558 |
+
title: "Get",
|
559 |
+
})
|
560 |
+
);
|
561 |
+
|
562 |
+
GetNode.category = "KJNodes";
|
563 |
+
},
|
564 |
+
});
|
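Taken together, a SetNode/GetNode pair tunnels a single connection by constant name. A minimal sketch of wiring a pair programmatically, assuming the standard LiteGraph.createNode / graph.add API; the constant name "VAE_1" is a placeholder:

const setter = LiteGraph.createNode("SetNode");
const getter = LiteGraph.createNode("GetNode");
app.graph.add(setter); // onAdded() runs validateName(), de-duplicating the constant
app.graph.add(getter);
setter.widgets[0].value = "VAE_1";
getter.setName("VAE_1"); // adopts the setter's type, title prefix, and auto-color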
custom_nodes/ComfyUI-KJNodes-main/web/js/spline_editor.js
ADDED
@@ -0,0 +1,866 @@
1 |
+
import { app } from '../../../scripts/app.js'
|
2 |
+
|
3 |
+
//from melmass
|
4 |
+
export function makeUUID() {
|
5 |
+
let dt = new Date().getTime()
|
6 |
+
const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
|
7 |
+
const r = ((dt + Math.random() * 16) % 16) | 0
|
8 |
+
dt = Math.floor(dt / 16)
|
9 |
+
return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
|
10 |
+
})
|
11 |
+
return uuid
|
12 |
+
}
|
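// Illustrative sample output: makeUUID() -> "7f3c91a2-5d1e-4b0c-9a6f-02d8e4b1c7aa".
// The template forces the version nibble to 4 and the variant nibble to 8-b,
// matching the RFC 4122 v4 layout, though the entropy here mixes Math.random()
// with the current timestamp rather than using a crypto RNG.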
13 |
+
|
14 |
+
export const loadScript = (
|
15 |
+
FILE_URL,
|
16 |
+
async = true,
|
17 |
+
type = 'text/javascript',
|
18 |
+
) => {
|
19 |
+
return new Promise((resolve, reject) => {
|
20 |
+
try {
|
21 |
+
// Check if the script already exists
|
22 |
+
const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
|
23 |
+
if (existingScript) {
|
24 |
+
resolve({ status: true, message: 'Script already loaded' })
|
25 |
+
return
|
26 |
+
}
|
27 |
+
|
28 |
+
const scriptEle = document.createElement('script')
|
29 |
+
scriptEle.type = type
|
30 |
+
scriptEle.async = async
|
31 |
+
scriptEle.src = FILE_URL
|
32 |
+
|
33 |
+
scriptEle.addEventListener('load', (ev) => {
|
34 |
+
resolve({ status: true })
|
35 |
+
})
|
36 |
+
|
37 |
+
scriptEle.addEventListener('error', (ev) => {
|
38 |
+
reject({
|
39 |
+
status: false,
|
40 |
+
message: `Failed to load the script ${FILE_URL}`,
|
41 |
+
})
|
42 |
+
})
|
43 |
+
|
44 |
+
document.body.appendChild(scriptEle)
|
45 |
+
} catch (error) {
|
46 |
+
reject(error)
|
47 |
+
}
|
48 |
+
})
|
49 |
+
}
|
50 |
+
const create_documentation_stylesheet = () => {
|
51 |
+
const tag = 'kj-splineditor-stylesheet'
|
52 |
+
|
53 |
+
let styleTag = document.head.querySelector('#' + tag)
|
54 |
+
|
55 |
+
if (!styleTag) {
|
56 |
+
styleTag = document.createElement('style')
|
57 |
+
styleTag.type = 'text/css'
|
58 |
+
styleTag.id = tag
|
59 |
+
styleTag.innerHTML = `
|
60 |
+
.spline-editor {
|
61 |
+
|
62 |
+
position: absolute;
|
63 |
+
|
64 |
+
font: 12px monospace;
|
65 |
+
line-height: 1.5em;
|
66 |
+
padding: 10px;
|
67 |
+
z-index: 0;
|
68 |
+
overflow: hidden;
|
69 |
+
}
|
70 |
+
`
|
71 |
+
document.head.appendChild(styleTag)
|
72 |
+
}
|
73 |
+
}
|
74 |
+
|
75 |
+
loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => {
|
76 |
+
console.log(e)
|
77 |
+
})
|
78 |
+
loadScript('/kjweb_async/protovis.min.js').catch((e) => {
|
79 |
+
console.log(e)
|
80 |
+
})
|
81 |
+
create_documentation_stylesheet()
|
82 |
+
|
83 |
+
function chainCallback(object, property, callback) {
|
84 |
+
if (object == undefined) {
|
85 |
+
//This should not happen.
|
86 |
+
console.error("Tried to add callback to non-existant object")
|
87 |
+
return;
|
88 |
+
}
|
89 |
+
if (property in object) {
|
90 |
+
const callback_orig = object[property]
|
91 |
+
object[property] = function () {
|
92 |
+
const r = callback_orig.apply(this, arguments);
|
93 |
+
callback.apply(this, arguments);
|
94 |
+
return r
|
95 |
+
};
|
96 |
+
} else {
|
97 |
+
object[property] = callback;
|
98 |
+
}
|
99 |
+
}
|
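// Illustrative usage (comment only): chainCallback appends rather than
// replaces, so
//   chainCallback(nodeType.prototype, "onNodeCreated", function () { /* ... */ });
// preserves any handler installed earlier, runs it first, and returns its
// result while still invoking the new callback afterwards.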
100 |
+
app.registerExtension({
|
101 |
+
name: 'KJNodes.SplineEditor',
|
102 |
+
|
103 |
+
async beforeRegisterNodeDef(nodeType, nodeData) {
|
104 |
+
if (nodeData?.name === 'SplineEditor') {
|
105 |
+
chainCallback(nodeType.prototype, "onNodeCreated", function () {
|
106 |
+
|
107 |
+
hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates"))
|
108 |
+
|
109 |
+
var element = document.createElement("div");
|
110 |
+
this.uuid = makeUUID()
|
111 |
+
element.id = `spline-editor-${this.uuid}`
|
112 |
+
|
113 |
+
// fake image widget to allow copy/paste
|
114 |
+
const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {});
|
115 |
+
hideWidgetForGood(this, fakeimagewidget)
|
116 |
+
|
117 |
+
this.splineEditor = this.addDOMWidget(nodeData.name, "SplineEditorWidget", element, {
|
118 |
+
serialize: false,
|
119 |
+
hideOnZoom: false,
|
120 |
+
});
|
121 |
+
|
122 |
+
// context menu
|
123 |
+
this.contextMenu = document.createElement("div");
|
124 |
+
this.contextMenu.className = 'spline-editor-context-menu';
|
125 |
+
this.contextMenu.id = "context-menu";
|
126 |
+
this.contextMenu.style.display = "none";
|
127 |
+
this.contextMenu.style.position = "absolute";
|
128 |
+
this.contextMenu.style.backgroundColor = "#202020";
|
129 |
+
this.contextMenu.style.minWidth = "100px";
|
130 |
+
this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)";
|
131 |
+
this.contextMenu.style.zIndex = "100";
|
132 |
+
this.contextMenu.style.padding = "5px";
|
133 |
+
|
134 |
+
function styleMenuItem(menuItem) {
|
135 |
+
menuItem.style.display = "block";
|
136 |
+
menuItem.style.padding = "5px";
|
137 |
+
menuItem.style.color = "#FFF";
|
138 |
+
menuItem.style.fontFamily = "Arial, sans-serif";
|
139 |
+
menuItem.style.fontSize = "16px";
|
140 |
+
menuItem.style.textDecoration = "none";
|
141 |
+
menuItem.style.marginBottom = "5px";
|
142 |
+
}
|
143 |
+
function createMenuItem(id, textContent) {
|
144 |
+
let menuItem = document.createElement("a");
|
145 |
+
menuItem.href = "#";
|
146 |
+
menuItem.id = `menu-item-${id}`;
|
147 |
+
menuItem.textContent = textContent;
|
148 |
+
styleMenuItem(menuItem);
|
149 |
+
return menuItem;
|
150 |
+
}
|
151 |
+
|
152 |
+
// Create an array of menu items using the createMenuItem function
|
153 |
+
this.menuItems = [
|
154 |
+
createMenuItem(0, "Toggle handles"),
|
155 |
+
createMenuItem(1, "Display sample points"),
|
156 |
+
createMenuItem(2, "Switch point shape"),
|
157 |
+
createMenuItem(3, "Background image"),
|
158 |
+
createMenuItem(4, "Invert point order"),
|
159 |
+
createMenuItem(5, "Clear Image"),
|
160 |
+
];
|
161 |
+
|
162 |
+
// Add mouseover and mouseout event listeners to each menu item for styling
|
163 |
+
this.menuItems.forEach(menuItem => {
|
164 |
+
menuItem.addEventListener('mouseover', function() {
|
165 |
+
this.style.backgroundColor = "gray";
|
166 |
+
});
|
167 |
+
|
168 |
+
menuItem.addEventListener('mouseout', function() {
|
169 |
+
this.style.backgroundColor = "#202020";
|
170 |
+
});
|
171 |
+
});
|
172 |
+
|
173 |
+
// Append each menu item to the context menu
|
174 |
+
this.menuItems.forEach(menuItem => {
|
175 |
+
this.contextMenu.appendChild(menuItem);
|
176 |
+
});
|
177 |
+
|
178 |
+
document.body.appendChild(this.contextMenu);
|
179 |
+
|
180 |
+
this.addWidget("button", "New spline", null, () => {
|
181 |
+
if (!this.properties || !("points" in this.properties)) {
|
182 |
+
this.editor = new SplineEditor(this);
|
183 |
+
this.addProperty("points", this.constructor.type, "string");
|
184 |
+
}
|
185 |
+
else {
|
186 |
+
this.editor = new SplineEditor(this, true);
|
187 |
+
}
|
188 |
+
});
|
189 |
+
|
190 |
+
this.setSize([550, 950]);
|
191 |
+
this.resizable = false;
|
192 |
+
this.splineEditor.parentEl = document.createElement("div");
|
193 |
+
this.splineEditor.parentEl.className = "spline-editor";
|
194 |
+
this.splineEditor.parentEl.id = `spline-editor-${this.uuid}`
|
195 |
+
element.appendChild(this.splineEditor.parentEl);
|
196 |
+
|
197 |
+
chainCallback(this, "onConfigure", function () {
|
198 |
+
try {
|
199 |
+
this.editor = new SplineEditor(this);
|
200 |
+
} catch (error) {
|
201 |
+
console.error("An error occurred while configuring the editor:", error);
|
202 |
+
}
|
203 |
+
});
|
204 |
+
chainCallback(this, "onExecuted", function (message) {
|
205 |
+
let bg_image = message["bg_image"];
|
206 |
+
this.properties.imgData = {
|
207 |
+
name: "bg_image",
|
208 |
+
base64: bg_image
|
209 |
+
};
|
210 |
+
this.editor.refreshBackgroundImage(this);
|
211 |
+
});
|
212 |
+
|
213 |
+
}); // end onNodeCreated callback
|
214 |
+
}//node created
|
215 |
+
} //before register
|
216 |
+
})//register
|
217 |
+
|
218 |
+
|
219 |
+
class SplineEditor{
|
220 |
+
constructor(context, reset = false) {
|
221 |
+
this.node = context;
|
222 |
+
this.reset=reset;
|
223 |
+
const self = this;
|
224 |
+
console.log("creatingSplineEditor")
|
225 |
+
|
226 |
+
this.node.pasteFile = (file) => {
|
227 |
+
if (file.type.startsWith("image/")) {
|
228 |
+
this.handleImageFile(file);
|
229 |
+
return true;
|
230 |
+
}
|
231 |
+
return false;
|
232 |
+
};
|
233 |
+
|
234 |
+
this.node.onDragOver = function (e) {
|
235 |
+
if (e.dataTransfer && e.dataTransfer.items) {
|
236 |
+
return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/"));
|
237 |
+
}
|
238 |
+
return false;
|
239 |
+
};
|
240 |
+
|
241 |
+
// On drop upload files
|
242 |
+
this.node.onDragDrop = (e) => {
|
243 |
+
console.log("onDragDrop called");
|
244 |
+
let handled = false;
|
245 |
+
for (const file of e.dataTransfer.files) {
|
246 |
+
if (file.type.startsWith("image/")) {
|
247 |
+
this.handleImageFile(file);
|
248 |
+
handled = true;
|
249 |
+
}
|
250 |
+
}
|
251 |
+
return handled;
|
252 |
+
};
|
253 |
+
|
254 |
+
// context menu
|
255 |
+
this.createContextMenu();
|
256 |
+
|
257 |
+
|
258 |
+
this.dotShape = "circle";
|
259 |
+
this.drawSamplePoints = false;
|
260 |
+
|
261 |
+
if (reset && context.splineEditor.element) {
|
262 |
+
context.splineEditor.element.innerHTML = ''; // Clear the container
|
263 |
+
}
|
264 |
+
this.coordWidget = context.widgets.find(w => w.name === "coordinates");
|
265 |
+
this.interpolationWidget = context.widgets.find(w => w.name === "interpolation");
|
266 |
+
this.pointsWidget = context.widgets.find(w => w.name === "points_to_sample");
|
267 |
+
this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store");
|
268 |
+
this.tensionWidget = context.widgets.find(w => w.name === "tension");
|
269 |
+
this.minValueWidget = context.widgets.find(w => w.name === "min_value");
|
270 |
+
this.maxValueWidget = context.widgets.find(w => w.name === "max_value");
|
271 |
+
this.samplingMethodWidget = context.widgets.find(w => w.name === "sampling_method");
|
272 |
+
this.widthWidget = context.widgets.find(w => w.name === "mask_width");
|
273 |
+
this.heightWidget = context.widgets.find(w => w.name === "mask_height");
|
274 |
+
|
275 |
+
this.interpolation = this.interpolationWidget.value
|
276 |
+
this.tension = this.tensionWidget.value
|
277 |
+
this.points_to_sample = this.pointsWidget.value
|
278 |
+
this.rangeMin = this.minValueWidget.value
|
279 |
+
this.rangeMax = this.maxValueWidget.value
|
280 |
+
this.pointsLayer = null;
|
281 |
+
this.samplingMethod = this.samplingMethodWidget.value
|
282 |
+
|
283 |
+
if (this.samplingMethod == "path") {
|
284 |
+
this.dotShape = "triangle"
|
285 |
+
}
|
286 |
+
|
287 |
+
|
288 |
+
this.interpolationWidget.callback = () => {
|
289 |
+
this.interpolation = this.interpolationWidget.value
|
290 |
+
this.updatePath();
|
291 |
+
}
|
292 |
+
this.samplingMethodWidget.callback = () => {
|
293 |
+
this.samplingMethod = this.samplingMethodWidget.value
|
294 |
+
if (this.samplingMethod == "path") {
|
295 |
+
this.dotShape = "triangle"
|
296 |
+
}
|
297 |
+
else if (this.samplingMethod == "controlpoints") {
|
298 |
+
this.dotShape = "circle"
|
299 |
+
this.drawSamplePoints = true;
|
300 |
+
}
|
301 |
+
this.updatePath();
|
302 |
+
}
|
303 |
+
this.tensionWidget.callback = () => {
|
304 |
+
this.tension = this.tensionWidget.value
|
305 |
+
this.updatePath();
|
306 |
+
}
|
307 |
+
this.pointsWidget.callback = () => {
|
308 |
+
this.points_to_sample = this.pointsWidget.value
|
309 |
+
this.updatePath();
|
310 |
+
}
|
311 |
+
this.minValueWidget.callback = () => {
|
312 |
+
this.rangeMin = this.minValueWidget.value
|
313 |
+
this.updatePath();
|
314 |
+
}
|
315 |
+
this.maxValueWidget.callback = () => {
|
316 |
+
this.rangeMax = this.maxValueWidget.value
|
317 |
+
this.updatePath();
|
318 |
+
}
|
319 |
+
this.widthWidget.callback = () => {
|
320 |
+
this.width = this.widthWidget.value;
|
321 |
+
if (this.width > 256) {
|
322 |
+
context.setSize([this.width + 45, context.size[1]]);
|
323 |
+
}
|
324 |
+
this.vis.width(this.width);
|
325 |
+
this.updatePath();
|
326 |
+
}
|
327 |
+
this.heightWidget.callback = () => {
|
328 |
+
this.height = this.heightWidget.value
|
329 |
+
this.vis.height(this.height)
|
330 |
+
context.setSize([context.size[0], this.height + 430]);
|
331 |
+
this.updatePath();
|
332 |
+
}
|
333 |
+
this.pointsStoreWidget.callback = () => {
|
334 |
+
this.points = JSON.parse(this.pointsStoreWidget.value);
|
335 |
+
this.updatePath();
|
336 |
+
}
|
337 |
+
|
338 |
+
// Initialize or reset points array
|
339 |
+
this.drawHandles = false;
|
340 |
+
this.drawRuler = true;
|
341 |
+
var hoverIndex = -1;
|
342 |
+
var isDragging = false;
|
343 |
+
this.width = this.widthWidget.value;
|
344 |
+
this.height = this.heightWidget.value;
|
345 |
+
var i = 3;
|
346 |
+
this.points = [];
|
347 |
+
|
348 |
+
if (!reset && this.pointsStoreWidget.value != "") {
|
349 |
+
this.points = JSON.parse(this.pointsStoreWidget.value);
|
350 |
+
} else {
|
351 |
+
this.points = pv.range(1, 4).map((i, index) => {
|
352 |
+
if (index === 0) {
|
353 |
+
// First point at the bottom-left corner
|
354 |
+
return { x: 0, y: this.height };
|
355 |
+
} else if (index === 2) {
|
356 |
+
// Last point at the top-right corner
|
357 |
+
return { x: this.width, y: 0 };
|
358 |
+
} else {
|
359 |
+
// Other points remain as they were
|
360 |
+
return {
|
361 |
+
x: i * this.width / 5,
|
362 |
+
y: 50 + Math.random() * (this.height - 100)
|
363 |
+
};
|
364 |
+
}
|
365 |
+
});
|
366 |
+
this.pointsStoreWidget.value = JSON.stringify(this.points);
|
367 |
+
}
|
368 |
+
|
369 |
+
this.vis = new pv.Panel()
|
370 |
+
.width(this.width)
|
371 |
+
.height(this.height)
|
372 |
+
.fillStyle("#222")
|
373 |
+
.strokeStyle("gray")
|
374 |
+
.lineWidth(2)
|
375 |
+
.antialias(false)
|
376 |
+
.margin(10)
|
377 |
+
.event("mousedown", function () {
|
378 |
+
if (pv.event.shiftKey) { // Use pv.event to access the event object
|
379 |
+
let scaledMouse = {
|
380 |
+
x: this.mouse().x / app.canvas.ds.scale,
|
381 |
+
y: this.mouse().y / app.canvas.ds.scale
|
382 |
+
};
|
383 |
+
i = self.points.push(scaledMouse) - 1;
|
384 |
+
self.updatePath();
|
385 |
+
return this;
|
386 |
+
}
|
387 |
+
else if (pv.event.ctrlKey) {
|
388 |
+
// Capture the clicked location
|
389 |
+
let clickedPoint = {
|
390 |
+
x: this.mouse().x / app.canvas.ds.scale,
|
391 |
+
y: this.mouse().y / app.canvas.ds.scale
|
392 |
+
};
|
393 |
+
|
394 |
+
// Find the two closest points to the clicked location
|
395 |
+
let { point1Index, point2Index } = self.findClosestPoints(self.points, clickedPoint);
|
396 |
+
|
397 |
+
// Calculate the midpoint between the two closest points
|
398 |
+
let midpoint = {
|
399 |
+
x: (self.points[point1Index].x + self.points[point2Index].x) / 2,
|
400 |
+
y: (self.points[point1Index].y + self.points[point2Index].y) / 2
|
401 |
+
};
|
402 |
+
|
403 |
+
// Insert the midpoint into the array
|
404 |
+
self.points.splice(point2Index, 0, midpoint);
|
405 |
+
i = point2Index;
|
406 |
+
self.updatePath();
|
407 |
+
}
|
408 |
+
else if (pv.event.button === 2) {
|
409 |
+
self.node.contextMenu.style.display = 'block';
|
410 |
+
self.node.contextMenu.style.left = `${pv.event.clientX}px`;
|
411 |
+
self.node.contextMenu.style.top = `${pv.event.clientY}px`;
|
412 |
+
}
|
413 |
+
})
|
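// findClosestPoints is defined later in this file, past the range shown in
// this diff excerpt. A sketch of the contract the ctrl-click branch above
// relies on (assumed implementation, for illustration only):
//   findClosestPoints(points, p) {
//     const byDist = points
//       .map((pt, idx) => ({ idx, d: (pt.x - p.x) ** 2 + (pt.y - p.y) ** 2 }))
//       .sort((a, b) => a.d - b.d);
//     const [point1Index, point2Index] =
//       [byDist[0].idx, byDist[1].idx].sort((a, b) => a - b);
//     return { point1Index, point2Index };
//   }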
414 |
+
this.backgroundImage = this.vis.add(pv.Image).visible(false)
|
415 |
+
|
416 |
+
this.vis.add(pv.Rule)
|
417 |
+
.data(pv.range(0, this.height, 64))
|
418 |
+
.bottom(d => d)
|
419 |
+
.strokeStyle("gray")
|
420 |
+
.lineWidth(3)
|
421 |
+
.visible(() => self.drawRuler)
|
422 |
+
|
423 |
+
// vis.add(pv.Rule)
|
424 |
+
// .data(pv.range(0, points_to_sample, 1))
|
425 |
+
// .left(d => d * 512 / (points_to_sample - 1))
|
426 |
+
// .strokeStyle("gray")
|
427 |
+
// .lineWidth(2)
|
428 |
+
|
429 |
+
this.vis.add(pv.Line)
|
430 |
+
.data(() => this.points)
|
431 |
+
.left(d => d.x)
|
432 |
+
.top(d => d.y)
|
433 |
+
.interpolate(() => this.interpolation)
|
434 |
+
.tension(() => this.tension)
|
435 |
+
.segmented(() => false)
|
436 |
+
.strokeStyle(pv.Colors.category10().by(pv.index))
|
437 |
+
.lineWidth(3)
|
438 |
+
|
439 |
+
this.vis.add(pv.Dot)
|
440 |
+
.data(() => this.points)
|
441 |
+
.left(d => d.x)
|
442 |
+
.top(d => d.y)
|
443 |
+
.radius(10)
|
444 |
+
.shape(function() {
|
445 |
+
return self.dotShape;
|
446 |
+
})
|
447 |
+
.angle(function() {
|
448 |
+
const index = this.index;
|
449 |
+
let angle = 0;
|
450 |
+
|
451 |
+
if (self.dotShape === "triangle") {
|
452 |
+
let dxNext = 0, dyNext = 0;
|
453 |
+
if (index < self.points.length - 1) {
|
454 |
+
dxNext = self.points[index + 1].x - self.points[index].x;
|
455 |
+
dyNext = self.points[index + 1].y - self.points[index].y;
|
456 |
+
}
|
457 |
+
|
458 |
+
let dxPrev = 0, dyPrev = 0;
|
459 |
+
if (index > 0) {
|
460 |
+
+            dxPrev = self.points[index].x - self.points[index - 1].x;
+            dyPrev = self.points[index].y - self.points[index - 1].y;
+          }
+
+          const dx = (dxNext + dxPrev) / 2;
+          const dy = (dyNext + dyPrev) / 2;
+
+          angle = Math.atan2(dy, dx);
+          angle -= Math.PI / 2;
+          angle = (angle + 2 * Math.PI) % (2 * Math.PI);
+        }
+
+        return angle;
+      })
+      .cursor("move")
+      .strokeStyle(function () { return i == this.index ? "#ff7f0e" : "#1f77b4"; })
+      .fillStyle(function () { return "rgba(100, 100, 100, 0.3)"; })
+      .event("mousedown", pv.Behavior.drag())
+      .event("dragstart", function () {
+        i = this.index;
+        hoverIndex = this.index;
+        isDragging = true;
+        // Right-click removes the point, except the first and last ones
+        if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) {
+          self.points.splice(i--, 1);
+          self.vis.render();
+        }
+        return this;
+      })
+      .event("dragend", function () {
+        // "this" here is the pv.Dot mark; the path elements live on the editor
+        if (self.pathElements !== null) {
+          self.updatePath();
+        }
+        isDragging = false;
+      })
+      .event("drag", function () {
+        let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
+        let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
+        // Determine the bounds of the vis.Panel
+        const panelWidth = self.vis.width();
+        const panelHeight = self.vis.height();
+
+        // Clamp the new position so the dot cannot leave the bounds of the vis.Panel
+        adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
+        adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
+        self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
+        self.vis.render(); // Re-render the visualization to reflect the new position
+      })
+      .event("mouseover", function () {
+        hoverIndex = this.index; // Set the hover index to the index of the hovered dot
+        self.vis.render(); // Re-render the visualization
+      })
+      .event("mouseout", function () {
+        if (!isDragging) hoverIndex = -1; // Reset the hover index when the mouse leaves the dot
+        self.vis.render(); // Re-render the visualization
+      })
+      .anchor("center")
+      .add(pv.Label)
+      .visible(function () {
+        return hoverIndex === this.index; // Only show the label for the hovered dot
+      })
+      .left(d => d.x < this.width / 2 ? d.x + 80 : d.x - 70) // Shift label right if on the left half, otherwise left
+      .top(d => d.y < this.height / 2 ? d.y + 20 : d.y - 20) // Shift label down if on the top half, otherwise up
+      .font("12px sans-serif")
+      .text(d => {
+        if (this.samplingMethod == "path") {
+          return `X: ${Math.round(d.x)}, Y: ${Math.round(d.y)}`;
+        } else {
+          let frame = Math.round((d.x / self.width) * self.points_to_sample);
+          let normalizedY = (1.0 - d.y / self.height) * (self.rangeMax - self.rangeMin) + self.rangeMin;
+          let normalizedX = d.x / self.width;
+          return `F: ${frame}, X: ${normalizedX.toFixed(2)}, Y: ${normalizedY.toFixed(2)}`;
+        }
+      })
+      .textStyle("orange")
+
+    if (this.points.length != 0) {
+      this.vis.render();
+    }
+    var svgElement = this.vis.canvas();
+    svgElement.style['zIndex'] = "2"
+    svgElement.style['position'] = "relative"
+    this.node.splineEditor.element.appendChild(svgElement);
+    this.pathElements = svgElement.getElementsByTagName('path'); // Get all path elements
+
+    if (this.width > 256) {
+      this.node.setSize([this.width + 45, this.node.size[1]]);
+    }
+    this.node.setSize([this.node.size[0], this.height + 430]);
+    this.updatePath();
+    this.refreshBackgroundImage();
+  }
+
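For reference, the hover label's frame/value math above can be pulled out into a small pure helper. This is an illustrative sketch only, not part of the file; the parameters mirror the editor fields (`width`, `height`, `points_to_sample`, `rangeMin`, `rangeMax`) that the label reads:

    // Sketch: map a dot's pixel position to a frame index and a normalized value,
    // mirroring the label text above.
    function describePoint(d, width, height, pointsToSample, rangeMin, rangeMax) {
      const frame = Math.round((d.x / width) * pointsToSample); // x maps linearly to frames
      const normX = d.x / width;                                // 0..1 across the panel
      const normY = (1 - d.y / height) * (rangeMax - rangeMin) + rangeMin; // SVG y grows downward, so flip
      return `F: ${frame}, X: ${normX.toFixed(2)}, Y: ${normY.toFixed(2)}`;
    }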
+  updatePath = () => {
+    if (!this.points || this.points.length === 0) {
+      console.log("no points");
+      return;
+    }
+    let coords;
+    if (this.samplingMethod != "controlpoints") {
+      coords = this.samplePoints(this.pathElements[0], this.points_to_sample, this.samplingMethod, this.width);
+    }
+    else {
+      coords = this.points;
+    }
+
+    if (this.drawSamplePoints) {
+      if (this.pointsLayer) {
+        // Update the data of the existing points layer
+        this.pointsLayer.data(coords);
+      } else {
+        // Create the points layer if it doesn't exist
+        this.pointsLayer = this.vis.add(pv.Dot)
+          .data(coords)
+          .left(function (d) { return d.x; })
+          .top(function (d) { return d.y; })
+          .radius(5) // Adjust the radius as needed
+          .fillStyle("red") // Change the color as needed
+          .strokeStyle("black") // Change the stroke color as needed
+          .lineWidth(1); // Adjust the line width as needed
+      }
+    } else {
+      if (this.pointsLayer) {
+        // Remove the points layer
+        this.pointsLayer.data([]);
+        this.vis.render();
+      }
+    }
+    let coordsString = JSON.stringify(coords);
+    this.pointsStoreWidget.value = JSON.stringify(this.points);
+    if (this.coordWidget) {
+      this.coordWidget.value = coordsString;
+    }
+    this.vis.render();
+  };
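updatePath persists two JSON strings: the raw control points go to pointsStoreWidget and the sampled coordinates to coordWidget, both as arrays of `{x, y}` objects. A hedged sketch of reading them back on the consumer side (assuming access to the editor instance; the widget names are those used above):

    // Sketch: parse the values updatePath serialized.
    const sampled = JSON.parse(editor.coordWidget.value);        // [{x, y}, ...] sampled along the spline
    const controls = JSON.parse(editor.pointsStoreWidget.value); // [{x, y}, ...] raw control points
    console.log(`sampled ${sampled.length} points from ${controls.length} controls`);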
+  handleImageLoad = (img, file, base64String) => {
+    console.log(img.width, img.height); // Access width and height here
+    this.widthWidget.value = img.width;
+    this.heightWidget.value = img.height;
+    this.drawRuler = false;
+
+    if (img.width != this.vis.width() || img.height != this.vis.height()) {
+      if (img.width > 256) {
+        this.node.setSize([img.width + 45, this.node.size[1]]);
+      }
+      this.node.setSize([this.node.size[0], img.height + 500]);
+      this.vis.width(img.width);
+      this.vis.height(img.height);
+      this.height = img.height;
+      this.width = img.width;
+
+      this.updatePath();
+    }
+    this.backgroundImage.url(file ? URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render();
+  };
+
+  processImage = (img, file) => {
+    const canvas = document.createElement('canvas');
+    const ctx = canvas.getContext('2d');
+
+    const maxWidth = 800; // maximum width
+    const maxHeight = 600; // maximum height
+    let width = img.width;
+    let height = img.height;
+
+    // Calculate the new dimensions while preserving the aspect ratio
+    if (width > height) {
+      if (width > maxWidth) {
+        height *= maxWidth / width;
+        width = maxWidth;
+      }
+    } else {
+      if (height > maxHeight) {
+        width *= maxHeight / height;
+        height = maxHeight;
+      }
+    }
+
+    canvas.width = width;
+    canvas.height = height;
+    ctx.drawImage(img, 0, 0, width, height);
+
+    // Get the compressed image data as a Base64 string (strip the data-URL prefix)
+    const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality, from 0 to 1
+
+    this.node.properties.imgData = {
+      name: file.name,
+      lastModified: file.lastModified,
+      size: file.size,
+      type: file.type,
+      base64: base64String
+    };
+    this.handleImageLoad(img, file, base64String); // class-field arrow function, so call it through "this"
+  };
+
+  handleImageFile = (file) => {
+    // Read the file once to produce the compressed copy stored on the node...
+    const reader = new FileReader();
+    reader.onloadend = () => {
+      const img = new Image();
+      img.src = reader.result;
+      img.onload = () => this.processImage(img, file);
+    };
+    reader.readAsDataURL(file);
+
+    // ...and load it again via an object URL for immediate display.
+    const imageUrl = URL.createObjectURL(file);
+    const img = new Image();
+    img.src = imageUrl;
+    img.onload = () => this.handleImageLoad(img, file, null);
+  };
+
+  refreshBackgroundImage = () => {
+    if (this.node.properties.imgData && this.node.properties.imgData.base64) {
+      const base64String = this.node.properties.imgData.base64;
+      const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`;
+      const img = new Image();
+      img.src = imageUrl;
+      img.onload = () => this.handleImageLoad(img, null, base64String);
+    }
+  };
+
+  createContextMenu = () => {
+    const self = this;
+    document.addEventListener('contextmenu', function (e) {
+      e.preventDefault();
+    });
+
+    document.addEventListener('click', function (e) {
+      document.querySelectorAll('.spline-editor-context-menu').forEach(menu => {
+        menu.style.display = 'none';
+      });
+    });
+
+    this.node.menuItems.forEach((menuItem, index) => {
+      menuItem.addEventListener('click', function (e) {
+        e.preventDefault();
+        switch (index) {
+          case 0: // toggle control-point handles
+            if (!self.drawHandles) {
+              self.drawHandles = true
+              self.vis.add(pv.Line)
+                .data(() => self.points.map((point, index) => ({
+                  start: point,
+                  end: [index]
+                })))
+                .left(d => d.start.x)
+                .top(d => d.start.y)
+                .interpolate("linear")
+                .tension(0) // Straight lines
+                .strokeStyle("#ff7f0e") // Same color as control points
+                .lineWidth(1)
+                .visible(() => self.drawHandles);
+              self.vis.render();
+            } else {
+              self.drawHandles = false
+              self.vis.render();
+            }
+            self.node.contextMenu.style.display = 'none';
+            break;
+          case 1: // toggle the sample-point overlay
+            self.drawSamplePoints = !self.drawSamplePoints;
+            self.updatePath();
+            break;
+          case 2: // switch dot shape
+            if (self.dotShape == "circle") {
+              self.dotShape = "triangle"
+            }
+            else {
+              self.dotShape = "circle"
+            }
+            console.log(self.dotShape)
+            self.updatePath();
+            break;
+          case 3: // load a background image
+            // Create file input element
+            const fileInput = document.createElement('input');
+            fileInput.type = 'file';
+            fileInput.accept = 'image/*'; // Accept only image files
+
+            // Listen for file selection
+            fileInput.addEventListener('change', function (event) {
+              const file = event.target.files[0]; // Get the selected file
+
+              if (file) {
+                const imageUrl = URL.createObjectURL(file);
+                let img = new Image();
+                img.src = imageUrl;
+                img.onload = () => self.handleImageLoad(img, file, null);
+              }
+            });
+
+            fileInput.click();
+
+            self.node.contextMenu.style.display = 'none';
+            break;
+          case 4: // reverse point order
+            self.points.reverse();
+            self.updatePath();
+            break;
+          case 5: // clear the background image
+            self.backgroundImage.visible(false).root.render();
+            self.node.properties.imgData = null;
+            self.node.contextMenu.style.display = 'none';
+            break;
+        }
+      });
+    });
+  }
+
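Note that the dispatch in createContextMenu is positional: each case number is bound to the DOM order of `node.menuItems`, so inserting or reordering menu entries silently remaps every action. One illustrative alternative (a sketch only, with hypothetical labels, not the repository's API) keys handlers by the item's text instead:

    // Sketch: dispatch context-menu actions by label rather than by index.
    const actions = {
      "Toggle handles": () => { self.drawHandles = !self.drawHandles; self.vis.render(); },
      "Reverse points": () => { self.points.reverse(); self.updatePath(); },
    };
    node.menuItems.forEach(item => {
      item.addEventListener('click', e => {
        e.preventDefault();
        actions[item.textContent.trim()]?.(); // no-op for unknown labels
      });
    });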
+  samplePoints(svgPathElement, numSamples, samplingMethod, width) {
+    var svgWidth = width; // Fixed width of the SVG element
+    var pathLength = svgPathElement.getTotalLength();
+    var points = [];
+
+    for (var i = 0; i < numSamples; i++) {
+      let point;
+      if (samplingMethod === "time") {
+        // Calculate the x-coordinate for the current sample based on the SVG's width
+        var x = (svgWidth / (numSamples - 1)) * i;
+        // Find the point on the path that intersects the vertical line at the calculated x-coordinate
+        point = this.findPointAtX(svgPathElement, x, pathLength);
+      }
+      else if (samplingMethod === "path") {
+        // Calculate the distance along the path for the current sample
+        var distance = (pathLength / (numSamples - 1)) * i;
+        // Get the point at the current distance
+        point = svgPathElement.getPointAtLength(distance);
+      }
+
+      // Add the point to the array of points
+      points.push({ x: point.x, y: point.y });
+    }
+    return points;
+  }
+
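The two sampling modes walk the same curve differently: "time" spaces samples evenly along the x axis (one per output frame), while "path" spaces them evenly by arc length, which keeps travel speed along the curve constant. A hedged usage sketch (assuming an editor instance with a rendered path):

    // Sketch: the same spline, sampled both ways.
    const path = editor.pathElements[0]; // first rendered <path> element
    const byTime = editor.samplePoints(path, 16, "time", editor.width); // even x spacing
    const byPath = editor.samplePoints(path, 16, "path", editor.width); // even arc-length spacing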
+  findClosestPoints(points, clickedPoint) {
+    // Calculate distances from clickedPoint to each point in the array
+    // (use the map index directly; indexOf would return the wrong index for duplicate points)
+    let distances = points.map((point, index) => {
+      let dx = clickedPoint.x - point.x;
+      let dy = clickedPoint.y - point.y;
+      return { index: index, distance: Math.sqrt(dx * dx + dy * dy) };
+    });
+    // Sort distances and get the indices of the two closest points
+    let sortedDistances = distances.sort((a, b) => a.distance - b.distance);
+    let closestPoint1Index = sortedDistances[0].index;
+    let closestPoint2Index = sortedDistances[1].index;
+    // Ensure point1Index is always the smaller index
+    if (closestPoint1Index > closestPoint2Index) {
+      [closestPoint1Index, closestPoint2Index] = [closestPoint2Index, closestPoint1Index];
+    }
+    return { point1Index: closestPoint1Index, point2Index: closestPoint2Index };
+  }
+
+  findPointAtX(svgPathElement, targetX, pathLength) {
+    // Binary search over arc length; assumes x increases monotonically along the path
+    let low = 0;
+    let high = pathLength;
+    let bestPoint = svgPathElement.getPointAtLength(0);
+
+    while (low <= high) {
+      let mid = low + (high - low) / 2;
+      let point = svgPathElement.getPointAtLength(mid);
+
+      if (Math.abs(point.x - targetX) < 1) {
+        return point; // The point is close enough to the target
+      }
+
+      if (point.x < targetX) {
+        low = mid + 1;
+      } else {
+        high = mid - 1;
+      }
+
+      // Keep track of the closest point found so far
+      if (Math.abs(point.x - targetX) < Math.abs(bestPoint.x - targetX)) {
+        bestPoint = point;
+      }
+    }
+
+    // Return the closest point found
+    return bestPoint;
+  }
+}
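findPointAtX probes getPointAtLength at the midpoint of the remaining arc-length range and narrows toward the target x; this converges only because x grows monotonically with arc length for the editor's left-to-right splines (a looping path would break the assumption). Illustrative call (a sketch; `path` is any rendered SVG path element):

    // Sketch: find where the curve crosses x = 120.
    const pt = editor.findPointAtX(path, 120, path.getTotalLength());
    console.log(pt.x.toFixed(1), pt.y.toFixed(1));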
+//from melmass
+export function hideWidgetForGood(node, widget, suffix = '') {
+  widget.origType = widget.type
+  widget.origComputeSize = widget.computeSize
+  widget.origSerializeValue = widget.serializeValue
+  widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
+  widget.type = "converted-widget" + suffix
+  // widget.serializeValue = () => {
+  //   // Prevent serializing the widget if we have no input linked
+  //   const w = node.inputs?.find((i) => i.widget?.name === widget.name);
+  //   if (w?.link == null) {
+  //     return undefined;
+  //   }
+  //   return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
+  // };
+
+  // Hide any linked widgets, e.g. seed+seedControl
+  if (widget.linkedWidgets) {
+    for (const w of widget.linkedWidgets) {
+      hideWidgetForGood(node, w, ':' + widget.name)
+    }
+  }
+}
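hideWidgetForGood collapses a widget to zero height and retags its type so litegraph stops drawing it, stashing the original type, computeSize and serializeValue in case a caller wants to restore them; linked widgets (e.g. seed + seedControl) are hidden recursively. Typical use looks roughly like this (a sketch; the widget name is hypothetical):

    // Sketch: hide a backing widget the editor drives programmatically.
    const coordsWidget = node.widgets.find(w => w.name === "coordinates");
    if (coordsWidget) hideWidgetForGood(node, coordsWidget);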
custom_nodes/ComfyUI-KJNodes-main/web/red.png
ADDED
custom_nodes/ComfyUI-essentials-main/.gitignore
ADDED
@@ -0,0 +1,6 @@
+/__pycache__/
+/luts/*.cube
+/luts/*.CUBE
+/fonts/*.ttf
+/fonts/*.otf
+!/fonts/ShareTechMono-Regular.ttf
custom_nodes/ComfyUI-essentials-main/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Matteo Spinelli
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
custom_nodes/ComfyUI-essentials-main/README.md
ADDED
@@ -0,0 +1,49 @@
+# :wrench: ComfyUI Essentials
+
+Essential nodes that are weirdly missing from ComfyUI core. With few exceptions they are new features and not commodities. I hope this will be just a temporary repository until the nodes get included into ComfyUI.
+
+# Sponsorship
+
+<div align="center">
+
+**[:heart: Github Sponsor](https://github.com/sponsors/cubiq) | [:coin: Paypal](https://paypal.me/matt3o)**
+
+</div>
+
+If you like my work and wish to see updates and new features, please consider sponsoring my projects.
+
+- [ComfyUI IPAdapter Plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus)
+- [ComfyUI InstantID (Native)](https://github.com/cubiq/ComfyUI_InstantID)
+- [ComfyUI Essentials](https://github.com/cubiq/ComfyUI_essentials)
+- [ComfyUI FaceAnalysis](https://github.com/cubiq/ComfyUI_FaceAnalysis)
+
+Not to mention the documentation and video tutorials. Check my **ComfyUI Advanced Understanding** videos on YouTube, for example [part 1](https://www.youtube.com/watch?v=_C7kR2TFIX0) and [part 2](https://www.youtube.com/watch?v=ijqXnW_9gzc).
+
+The only way to keep the code open and free is by sponsoring its development. The more sponsorships, the more time I can dedicate to my open source projects.
+
+Please consider a [Github Sponsorship](https://github.com/sponsors/cubiq) or [PayPal donation](https://paypal.me/matt3o) (Matteo "matt3o" Spinelli). For sponsorships of $50+, let me know if you'd like to be mentioned in this readme file; you can find me on [Discord](https://latent.vision/discord) or _matt3o :snail: gmail.com_.
+
+## Current sponsors
+
+It's only thanks to generous sponsors that **the whole community** can enjoy open and free software. Please join me in thanking the following companies and individuals!
+
+### :trophy: Gold sponsors
+
+[](https://kaiber.ai/) [](https://www.instasd.com/)
+
+### :tada: Silver sponsors
+
+[](https://openart.ai/workflows) [](https://www.finetuners.ai/) [](https://comfy.icu/)
+
+### Other companies supporting my projects
+
+- [RunComfy](https://www.runcomfy.com/) (ComfyUI Cloud)
+
+### Esteemed individuals
+
+- [Øystein Ø. Olsen](https://github.com/FireNeslo)
+- [Jack Gane](https://github.com/ganeJackS)
+- [Nathan Shipley](https://www.nathanshipley.com/)
+- [Dkdnzia](https://github.com/Dkdnzia)
+
+[And all my public and private sponsors!](https://github.com/sponsors/cubiq)
custom_nodes/ComfyUI-essentials-main/__init__.py
ADDED
@@ -0,0 +1,36 @@
+#from .essentials import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+from .image import IMAGE_CLASS_MAPPINGS, IMAGE_NAME_MAPPINGS
+from .mask import MASK_CLASS_MAPPINGS, MASK_NAME_MAPPINGS
+from .sampling import SAMPLING_CLASS_MAPPINGS, SAMPLING_NAME_MAPPINGS
+from .segmentation import SEG_CLASS_MAPPINGS, SEG_NAME_MAPPINGS
+from .misc import MISC_CLASS_MAPPINGS, MISC_NAME_MAPPINGS
+from .conditioning import COND_CLASS_MAPPINGS, COND_NAME_MAPPINGS
+from .text import TEXT_CLASS_MAPPINGS, TEXT_NAME_MAPPINGS
+
+WEB_DIRECTORY = "./js"
+
+NODE_CLASS_MAPPINGS = {}
+NODE_DISPLAY_NAME_MAPPINGS = {}
+
+NODE_CLASS_MAPPINGS.update(COND_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(COND_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(IMAGE_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(IMAGE_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(MASK_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(MASK_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(SAMPLING_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(SAMPLING_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(SEG_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(SEG_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(TEXT_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(TEXT_NAME_MAPPINGS)
+
+NODE_CLASS_MAPPINGS.update(MISC_CLASS_MAPPINGS)
+NODE_DISPLAY_NAME_MAPPINGS.update(MISC_NAME_MAPPINGS)
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', "WEB_DIRECTORY"]