{ "id": "91f6bbe2-ed41-4fd6-bac7-71d5b5864ecb", "revision": 0, "last_node_id": 73, "last_link_id": 143, "nodes": [ { "id": 39, "type": "VAELoader", "pos": [ 20, 340 ], "size": [ 330, 60 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "VAE", "type": "VAE", "slot_index": 0, "links": [ 76 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "VAELoader", "models": [ { "name": "qwen_image_vae.safetensors", "url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors", "directory": "vae" } ], "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "qwen_image_vae.safetensors" ] }, { "id": 38, "type": "CLIPLoader", "pos": [ 20, 190 ], "size": [ 330, 110 ], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [ { "name": "CLIP", "type": "CLIP", "slot_index": 0, "links": [ 74, 75 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "CLIPLoader", "models": [ { "name": "qwen_2.5_vl_7b_fp8_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors", "directory": "text_encoders" } ], "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_image", "default" ] }, { "id": 58, "type": "EmptySD3LatentImage", "pos": [ 50, 510 ], "size": [ 270, 106 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 107 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "EmptySD3LatentImage", "enableTabs": false, 
"tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ 1328, 1328, 1 ] }, { "id": 67, "type": "MarkdownNote", "pos": [ -540, 10 ], "size": [ 520, 410 ], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [], "title": "Model links", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "[Tutorial](https://docs.comfy.org/tutorials/image/qwen/qwen-image) | [教程](https://docs.comfy.org/zh-CN/tutorials/image/qwen/qwen-image)\n\n\n## Model links\n\nYou can find all the models on [Huggingface](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/tree/main) or [Modelscope](https://modelscope.cn/models/Comfy-Org/Qwen-Image_ComfyUI/files)\n\n**diffusion model**\n\n- [qwen_image_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors)\n\n**text encoder**\n\n- [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors)\n\n**vae**\n\n- [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors)\n\nModel Storage Location\n\n```\nšŸ“‚ ComfyUI/\nā”œā”€ā”€ šŸ“‚ models/\n│ ā”œā”€ā”€ šŸ“‚ diffusion_models/\n│ │ └── qwen_image_fp8_e4m3fn.safetensors\n│ ā”œā”€ā”€ šŸ“‚ vae/\n│ │ └── qwen_image_vae.safetensors\n│ └── šŸ“‚ text_encoders/\n│ └── qwen_2.5_vl_7b_fp8_scaled.safetensors\n```\n" ], "color": "#432", "bgcolor": "#653" }, { "id": 69, "type": "MarkdownNote", "pos": [ -320, 470 ], "size": [ 290, 90 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [], "title": "VRAM Usage", "properties": { "widget_ue_connectable": {} }, "widgets_values": [ "- GPU:RTX4090 24GB\n- VRAM:86%\n- 1st generation 94s\n- 2nd generation 71s" ], "color": "#432", 
"bgcolor": "#653" }, { "id": 3, "type": "KSampler", "pos": [ 850, 120 ], "size": [ 300, 262 ], "flags": {}, "order": 12, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 136 }, { "name": "positive", "type": "CONDITIONING", "link": 46 }, { "name": "negative", "type": "CONDITIONING", "link": 52 }, { "name": "latent_image", "type": "LATENT", "link": 107 } ], "outputs": [ { "name": "LATENT", "type": "LATENT", "slot_index": 0, "links": [ 128 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "KSampler", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ 381468518463320, "randomize", 20, 2.5, "euler", "simple", 1 ] }, { "id": 71, "type": "Note", "pos": [ 850, -130 ], "size": [ 290, 90 ], "flags": {}, "order": 5, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "Increase the shift if you get too many blurry/dark/bad images. Decrease if you want to try increasing detail."
], "color": "#432", "bgcolor": "#653" }, { "id": 8, "type": "VAEDecode", "pos": [ 1170, -90 ], "size": [ 210, 46 ], "flags": { "collapsed": false }, "order": 13, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 128 }, { "name": "vae", "type": "VAE", "link": 76 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "slot_index": 0, "links": [ 110 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "VAEDecode", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [] }, { "id": 70, "type": "Note", "pos": [ 850, 430 ], "size": [ 300, 120 ], "flags": {}, "order": 6, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "Set cfg to 1.0 for a speed boost at the cost of consistency. Samplers like res_multistep work pretty well at cfg 1.0\n\nThe official number of steps is 50 but I think that's too much. Even just 10 steps seems to work." 
], "color": "#432", "bgcolor": "#653" }, { "id": 37, "type": "UNETLoader", "pos": [ 20, 50 ], "size": [ 330, 90 ], "flags": {}, "order": 7, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "slot_index": 0, "links": [ 132 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "UNETLoader", "models": [ { "name": "qwen_image_fp8_e4m3fn.safetensors", "url": "https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors", "directory": "diffusion_models" } ], "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "qwen_image_fp8_e4m3fn.safetensors", "default" ] }, { "id": 66, "type": "ModelSamplingAuraFlow", "pos": [ 841.4788208007812, 0.16747500002384186 ], "size": [ 300, 58 ], "flags": {}, "order": 11, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 143 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 136 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "ModelSamplingAuraFlow", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ 3.1000000000000005 ] }, { "id": 60, "type": "SaveImage", "pos": [ 1170, 10 ], "size": [ 490, 600 ], "flags": {}, "order": 14, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 110 } ], "outputs": [], "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "SaveImage", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "ComfyUI" ] }, { "id": 7, "type": "CLIPTextEncode", 
"pos": [ 390, 260 ], "size": [ 425.27801513671875, 180.6060791015625 ], "flags": {}, "order": 9, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 75 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 52 ] } ], "title": "CLIP Text Encode (Negative Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "CLIPTextEncode", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "" ], "color": "#322", "bgcolor": "#533" }, { "id": 6, "type": "CLIPTextEncode", "pos": [ 390, 60 ], "size": [ 422.84503173828125, 164.31304931640625 ], "flags": {}, "order": 8, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 74 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 46 ] } ], "title": "CLIP Text Encode (Positive Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.48", "Node name for S&R": "CLIPTextEncode", "enableTabs": false, "tabWidth": 65, "tabXOffset": 10, "hasSecondTab": false, "secondTabText": "Send Back", "secondTabOffset": 80, "secondTabWidth": 65, "widget_ue_connectable": {} }, "widgets_values": [ "Japanese modern anime style, a close-up shot of a smiling woman surrounded by flowers."
], "color": "#232", "bgcolor": "#353" }, { "id": 73, "type": "LoraLoaderModelOnly", "pos": [ 456.83819580078125, -118.97000122070312 ], "size": [ 270, 82 ], "flags": {}, "order": 10, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 132 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 143 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.49", "Node name for S&R": "LoraLoaderModelOnly" }, "widgets_values": [ "modern-anime.safetensors", 1 ] } ], "links": [ [ 46, 6, 0, 3, 1, "CONDITIONING" ], [ 52, 7, 0, 3, 2, "CONDITIONING" ], [ 74, 38, 0, 6, 0, "CLIP" ], [ 75, 38, 0, 7, 0, "CLIP" ], [ 76, 39, 0, 8, 1, "VAE" ], [ 107, 58, 0, 3, 3, "LATENT" ], [ 110, 8, 0, 60, 0, "IMAGE" ], [ 128, 3, 0, 8, 0, "LATENT" ], [ 132, 37, 0, 73, 0, "MODEL" ], [ 136, 66, 0, 3, 0, "MODEL" ], [ 143, 73, 0, 66, 0, "MODEL" ] ], "groups": [ { "id": 1, "title": "Step1 - Load models", "bounding": [ 10, -20, 350, 433.6000061035156 ], "color": "#3f789e", "font_size": 24, "flags": {} }, { "id": 2, "title": "Step2 - Image size", "bounding": [ 10, 430, 350, 210 ], "color": "#3f789e", "font_size": 24, "flags": {} }, { "id": 3, "title": "Step3 - Prompt", "bounding": [ 380, -20, 450, 470 ], "color": "#3f789e", "font_size": 24, "flags": {} } ], "config": {}, "extra": { "ds": { "scale": 0.7627768444385553, "offset": [ 600.4831374676982, 299.8879766477119 ] }, "frontendVersion": "1.23.4", "ue_links": [], "links_added_by_ue": [], "VHS_latentpreview": false, "VHS_latentpreviewrate": 0, "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, "version": 0.4 }