job: custom_job
config:
  name: flux_train_replicate
  process:
    - type: custom_sd_trainer
      training_folder: output
      device: cuda:0
      trigger_word: HST
      network:
        type: lora
        linear: 32
        linear_alpha: 32
        network_kwargs:
          only_if_contains:
            - transformer.transformer_blocks.7.norm1.linear
            - transformer.transformer_blocks.7.norm1_context.linear
            - transformer.transformer_blocks.7.attn.to_q
            - transformer.transformer_blocks.7.attn.to_k
            - transformer.transformer_blocks.7.attn.to_v
            - transformer.transformer_blocks.7.attn.add_k_proj
            - transformer.transformer_blocks.7.attn.add_v_proj
            - transformer.transformer_blocks.7.attn.add_q_proj
            - transformer.transformer_blocks.7.attn.to_out.0
            - transformer.transformer_blocks.7.attn.to_add_out
            - transformer.transformer_blocks.7.ff.net.0.proj
            - transformer.transformer_blocks.7.ff.net.2
            - transformer.transformer_blocks.7.ff_context.net.0.proj
            - transformer.transformer_blocks.7.ff_context.net.2
            - transformer.transformer_blocks.13.norm1.linear
            - transformer.transformer_blocks.13.norm1_context.linear
            - transformer.transformer_blocks.13.attn.to_q
            - transformer.transformer_blocks.13.attn.to_k
            - transformer.transformer_blocks.13.attn.to_v
            - transformer.transformer_blocks.13.attn.add_k_proj
            - transformer.transformer_blocks.13.attn.add_v_proj
            - transformer.transformer_blocks.13.attn.add_q_proj
            - transformer.transformer_blocks.13.attn.to_out.0
            - transformer.transformer_blocks.13.attn.to_add_out
            - transformer.transformer_blocks.13.ff.net.0.proj
            - transformer.transformer_blocks.13.ff.net.2
            - transformer.transformer_blocks.13.ff_context.net.0.proj
            - transformer.transformer_blocks.13.ff_context.net.2
            - transformer.single_transformer_blocks.7.norm.linear
            - transformer.single_transformer_blocks.7.proj_mlp
            - transformer.single_transformer_blocks.7.proj_out
            - transformer.single_transformer_blocks.7.attn.to_q
            - transformer.single_transformer_blocks.7.attn.to_k
            - transformer.single_transformer_blocks.7.attn.to_v
            - transformer.single_transformer_blocks.13.norm.linear
            - transformer.single_transformer_blocks.13.proj_mlp
            - transformer.single_transformer_blocks.13.proj_out
            - transformer.single_transformer_blocks.13.attn.to_q
            - transformer.single_transformer_blocks.13.attn.to_k
            - transformer.single_transformer_blocks.13.attn.to_v
      save:
        dtype: float16
        save_every: 501
        max_step_saves_to_keep: 1
      datasets:
        - folder_path: input_images
          caption_ext: txt
          caption_dropout_rate: 0.05
          shuffle_tokens: false
          cache_latents_to_disk: false
          cache_latents: true
          resolution:
            - 512
            - 768
            - 1024
      train:
        batch_size: 4
        steps: 500
        gradient_accumulation_steps: 1
        train_unet: true
        train_text_encoder: false
        content_or_style: balanced
        gradient_checkpointing: true
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        lr: 0.001
        ema_config:
          use_ema: true
          ema_decay: 0.99
        dtype: bf16
      model:
        name_or_path: FLUX.1-dev
        is_flux: true
        quantize: true
      sample:
        sampler: flowmatch
        sample_every: 501
        width: 1024
        height: 1024
        prompts: []
        neg: ''
        seed: 42
        walk_seed: true
        guidance_scale: 3.5
        sample_steps: 28
meta:
  name: flux_train_replicate
  version: '1.0'
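
# --- Usage sketch (not part of the original config) ---
# Assumption: this schema matches ostris/ai-toolkit (which the Replicate FLUX
# trainer builds on), where a job is launched by passing the config path to
# run.py. The script name and config location below are assumptions:
#
#   python run.py config/flux_train_replicate.yaml
#
# Because save_every (501) and sample_every (501) both exceed steps (500), no
# intermediate checkpoints or preview images are produced mid-run; only the
# output written when the job finishes remains. To preview progress, lower
# sample_every and populate sample.prompts, e.g. (hypothetical prompt built
# around the HST trigger word):
#
#   sample_every: 250
#   prompts:
#     - photo of HST, a woman standing in a garden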