kratosboy507 committed
Commit dff1327 · verified · 1 Parent(s): d2cd66a

Upload folder using huggingface_hub
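The commit message says the folder was pushed with the huggingface_hub client. A minimal sketch of how such an upload is typically done with that library (the local path and repo id below are placeholders, not taken from this commit):

# Sketch of a folder upload via huggingface_hub; folder_path and repo_id are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # assumes a token is already configured (huggingface-cli login or HF_TOKEN)
api.upload_folder(
    folder_path="/diffusion-pipe/outputs/my_run",   # local training output directory (placeholder)
    repo_id="kratosboy507/some-repo",               # target model repo (placeholder)
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)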

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. epoch10/adapter_config.json +43 -0
  2. epoch10/adapter_model.safetensors +3 -0
  3. epoch10/hunyuan_video.toml +109 -0
  4. epoch12/adapter_config.json +43 -0
  5. epoch12/adapter_model.safetensors +3 -0
  6. epoch12/hunyuan_video.toml +109 -0
  7. epoch14/adapter_config.json +43 -0
  8. epoch14/adapter_model.safetensors +3 -0
  9. epoch14/hunyuan_video.toml +109 -0
  10. epoch16/adapter_config.json +43 -0
  11. epoch16/adapter_model.safetensors +3 -0
  12. epoch16/hunyuan_video.toml +109 -0
  13. epoch18/adapter_config.json +43 -0
  14. epoch18/adapter_model.safetensors +3 -0
  15. epoch18/hunyuan_video.toml +109 -0
  16. epoch2/adapter_config.json +43 -0
  17. epoch2/adapter_model.safetensors +3 -0
  18. epoch2/hunyuan_video.toml +109 -0
  19. epoch20/adapter_config.json +43 -0
  20. epoch20/adapter_model.safetensors +3 -0
  21. epoch20/hunyuan_video.toml +109 -0
  22. epoch22/adapter_config.json +43 -0
  23. epoch22/adapter_model.safetensors +3 -0
  24. epoch22/hunyuan_video.toml +109 -0
  25. epoch4/adapter_config.json +43 -0
  26. epoch4/adapter_model.safetensors +3 -0
  27. epoch4/hunyuan_video.toml +109 -0
  28. epoch6/adapter_config.json +43 -0
  29. epoch6/adapter_model.safetensors +3 -0
  30. epoch6/hunyuan_video.toml +109 -0
  31. epoch8/adapter_config.json +43 -0
  32. epoch8/adapter_model.safetensors +3 -0
  33. epoch8/hunyuan_video.toml +109 -0
  34. events.out.tfevents.1737020282.modal.3348.0 +3 -0
  35. global_step1027/layer_00-model_states.pt +3 -0
  36. global_step1027/layer_01-model_states.pt +3 -0
  37. global_step1027/layer_02-model_states.pt +3 -0
  38. global_step1027/layer_03-model_states.pt +3 -0
  39. global_step1027/layer_04-model_states.pt +3 -0
  40. global_step1027/layer_05-model_states.pt +3 -0
  41. global_step1027/layer_06-model_states.pt +3 -0
  42. global_step1027/layer_07-model_states.pt +3 -0
  43. global_step1027/layer_08-model_states.pt +3 -0
  44. global_step1027/layer_09-model_states.pt +3 -0
  45. global_step1027/layer_10-model_states.pt +3 -0
  46. global_step1027/layer_11-model_states.pt +3 -0
  47. global_step1027/layer_12-model_states.pt +3 -0
  48. global_step1027/layer_13-model_states.pt +3 -0
  49. global_step1027/layer_14-model_states.pt +3 -0
  50. global_step1027/layer_15-model_states.pt +3 -0
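Because each epochN folder carries its own adapter and config, a single epoch can be fetched without pulling the whole repository. A hedged sketch using huggingface_hub's pattern filtering (the repo id is a placeholder, not stated in this view):

# Sketch: download only the epoch10 adapter files; repo_id is hypothetical.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="kratosboy507/some-repo",   # placeholder repo id
    allow_patterns=["epoch10/*"],       # restrict the download to one epoch folder
)
print(local_dir)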
epoch10/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
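The adapter_config.json above is a standard PEFT LoRA config: rank 32, alpha 32, no dropout, targeting the attention, MLP, and modulation projections of the img_/txt_ double-stream blocks plus the linear1/linear2 single-stream layers. A rough programmatic equivalent, sketched with the peft library (this is illustrative, not necessarily how the file was produced):

# Sketch: the same adapter settings expressed as a peft LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=32,
    lora_dropout=0.0,
    bias="none",
    init_lora_weights=True,
    target_modules=[
        "img_attn_qkv", "img_attn_proj", "img_mod.linear",
        "img_mlp.fc1", "img_mlp.fc2",
        "txt_attn_qkv", "txt_attn_proj", "txt_mod.linear",
        "txt_mlp.fc1", "txt_mlp.fc2",
        "modulation.linear", "linear1", "linear2",
    ],
)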
epoch10/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b26d913f569d3c63995bfac613be1a6ba1e4702522b574a6a0948a84a5cb6db
+ size 322519480
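adapter_model.safetensors is stored via Git LFS, so the diff only shows the pointer file (spec version, sha256 oid, byte size) rather than the ~322 MB weights. After downloading the real file, it can be checked against the pointer's oid; a sketch, with a hypothetical local path:

# Sketch: verify a downloaded LFS file against the sha256 oid in its pointer.
import hashlib

expected = "8b26d913f569d3c63995bfac613be1a6ba1e4702522b574a6a0948a84a5cb6db"
h = hashlib.sha256()
with open("epoch10/adapter_model.safetensors", "rb") as f:   # hypothetical path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected, "file does not match the LFS pointer"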
epoch10/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
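The hunyuan_video.toml saved alongside each epoch is the diffusion-pipe training config used for the run: a rank-32 LoRA on the fp8 Hunyuan Video transformer, AdamW (optimi) at lr 5e-5, 22 epochs, adapters saved every 2 epochs. A quick way to inspect it, sketched with Python's standard tomllib (the local path is hypothetical):

# Sketch: read the saved training config and print a few key settings.
import tomllib  # Python 3.11+

with open("epoch10/hunyuan_video.toml", "rb") as f:   # hypothetical path
    cfg = tomllib.load(f)

print(cfg["epochs"], cfg["micro_batch_size_per_gpu"])            # 22, 2
print(cfg["model"]["type"], cfg["model"]["transformer_dtype"])   # hunyuan-video, float8
print(cfg["adapter"]["rank"], cfg["optimizer"]["lr"])            # 32, 5e-05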
epoch12/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch12/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef99d203f220b971927de7f35a92faef61f2bb2fcc39348439fdc50173310ace
+ size 322519480
epoch12/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch14/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch14/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e50816eb1aeedf56409603fae068041b9de60dc646a8df257a1500ceb6644d9
+ size 322519480
epoch14/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch16/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch16/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98485752533efdf979c98f7ab5ac0bcfcc6602d99371f1bc44ec4b2373a88844
+ size 322519480
epoch16/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch18/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch18/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4414c1f6b4c2624ad69b74ce150f0185db84324d1ac5e7f60a5a7b0a4ed25dc8
+ size 322519480
epoch18/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch2/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch2/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f48bddaa694882e0c7021e79b2bec040890a53ffd35ddb4a1479767b949f14b
+ size 322519480
epoch2/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch20/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch20/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b787305efddd090a4f9877768fd04f60e08dd1f31a23eef4f2749fcc4b1b8b4
+ size 322519480
epoch20/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTV-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch22/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch22/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a9d7962c914d71343aa491fcaf0a9e970bd47d97da5498c127945d8f8624b9f
+ size 322519480
epoch22/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Output path for training runs. Each training run makes a new directory in here.
2
+ output_dir = '/diffusion-pipe/outputs'
3
+
4
+ # Dataset config file.
5
+ dataset = 'examples/dataset.toml'
6
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
7
+ # eval_datasets = [
8
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
9
+ # ]
10
+
11
+ # training settings
12
+
13
+ # I usually set this to a really high value because I don't know how long I want to train.
14
+ epochs = 22
15
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (periods where GPUs sit idle waiting on other pipeline stages).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. Usually doesn't matter for training, but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how DeepSpeed divides layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Dtype for saving the LoRA or model, if different from the training dtype.
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often DeepSpeed logs to the console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # Flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTX-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the LoRA weights from a previously trained LoRA.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
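Each epochN folder in this upload carries the same hunyuan_video.toml snapshot of the run settings shown above. A minimal sketch for reading it with Python's standard-library tomllib (3.11+); the relative path assumes you have downloaded the epoch4 folder from this repo:

# Sketch: read one of the uploaded configs with the stdlib tomllib module.
import tomllib

with open("epoch4/hunyuan_video.toml", "rb") as f:  # tomllib requires binary mode
    cfg = tomllib.load(f)

print(cfg["epochs"])                    # 22
print(cfg["micro_batch_size_per_gpu"])  # 2
print(cfg["model"]["type"])             # 'hunyuan-video'
print(cfg["adapter"]["rank"])           # 32
print(cfg["optimizer"]["lr"])           # 5e-05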
epoch4/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
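The adapter_config.json above is a PEFT LoRA config: rank 32, alpha 32, no dropout, targeting HunyuanVideo's image/text attention, MLP, and modulation linears. A rough sketch of the equivalent peft.LoraConfig, assuming a recent peft release (newer keys such as lora_bias and exclude_modules do not exist in older versions and are omitted here):

# Sketch: approximate peft equivalent of the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,
    lora_alpha=32,
    lora_dropout=0.0,
    bias="none",
    init_lora_weights=True,
    use_rslora=False,
    use_dora=False,
    target_modules=[
        "img_attn_qkv", "img_attn_proj", "img_mod.linear",
        "img_mlp.fc1", "img_mlp.fc2",
        "txt_attn_qkv", "txt_attn_proj", "txt_mod.linear",
        "txt_mlp.fc1", "txt_mlp.fc2",
        "linear1", "linear2", "modulation.linear",
    ],
)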
epoch4/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f0d5df7fc57c3bd8b4b8b73a1f738d6d9e86ff82918152e88aa5fde5995049e
+ size 322519480
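The adapter_model.safetensors entries in this diff are only Git LFS pointers; the real ~322 MB LoRA weights come down via git lfs pull or the huggingface_hub download helpers. Once the actual file is present locally, a small sketch for inspecting it with the safetensors library:

# Sketch: inspect the LoRA weights after the real file has been fetched.
# On a bare clone this path is still a tiny LFS pointer and load_file() will fail.
from safetensors.torch import load_file

state_dict = load_file("epoch4/adapter_model.safetensors")
total = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {total / 1e6:.1f}M LoRA parameters")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape), tensor.dtype)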
epoch4/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for TensorBoard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (periods where GPUs sit idle waiting on other pipeline stages).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. Usually doesn't matter for training, but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how DeepSpeed divides layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Dtype for saving the LoRA or model, if different from the training dtype.
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often DeepSpeed logs to the console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # Flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTX-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the LoRA weights from a previously trained LoRA.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch6/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch6/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cc5e1ed33955907134f267adb243a4bba935378bd15c764350f20f11790c76b
+ size 322519480
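As above, this is a Git LFS pointer, and the pointer format itself (version, oid, size) is plain text. A sketch that reads the pointers in a local clone to report the expected checksum and size of each epoch's LoRA without downloading anything; it assumes the clone was made with GIT_LFS_SKIP_SMUDGE=1 so the pointers have not yet been replaced by the real binaries:

# Sketch: summarize the Git LFS pointers for every epoch's adapter weights.
from pathlib import Path

for pointer in sorted(Path(".").glob("epoch*/adapter_model.safetensors")):
    fields = dict(
        line.split(" ", 1) for line in pointer.read_text().splitlines() if line
    )
    oid = fields["oid"].removeprefix("sha256:")
    print(f"{pointer}: {int(fields['size']):,} bytes, sha256={oid[:12]}...")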
epoch6/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for TensorBoard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (periods where GPUs sit idle waiting on other pipeline stages).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. Usually doesn't matter for training, but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how DeepSpeed divides layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Dtype for saving the LoRA or model, if different from the training dtype.
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often DeepSpeed logs to the console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # Flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTX-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the LoRA weights from a previously trained LoRA.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
epoch8/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "txt_mod.linear",
+ "img_mlp.fc2",
+ "img_mlp.fc1",
+ "txt_attn_proj",
+ "img_mod.linear",
+ "img_attn_qkv",
+ "img_attn_proj",
+ "txt_attn_qkv",
+ "modulation.linear",
+ "linear2",
+ "txt_mlp.fc1",
+ "txt_mlp.fc2",
+ "linear1"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
epoch8/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7601a1323a60c43c627d3b7b2eab33e938bfb62a1c892313a1f89a004633764
+ size 322519480
epoch8/hunyuan_video.toml ADDED
@@ -0,0 +1,109 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/diffusion-pipe/outputs'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for TensorBoard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 22
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 2
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (periods where GPUs sit idle waiting on other pipeline stages).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. Usually doesn't matter for training, but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how DeepSpeed divides layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Dtype for saving the LoRA or model, if different from the training dtype.
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 16
+ # How often DeepSpeed logs to the console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video. They might overlap some.
+ # default is single_middle
+ video_clip_mode = 'single_middle'
+
+ [model]
+ # flux, ltx-video, or hunyuan-video
+ type = 'hunyuan-video'
+ transformer_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
+ vae_path = '/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
+ llm_path = '/diffusion-pipe/models/llm'
+ clip_path = '/diffusion-pipe/models/clip'
+ # Base dtype used for all models.
+ dtype = 'bfloat16'
+ # Hunyuan Video supports fp8 for the transformer when training LoRA.
+ transformer_dtype = 'float8'
+ # How to sample timesteps to train on. Can be logit_normal or uniform.
+ timestep_sample_method = 'logit_normal'
+
+ # Flux example
+ # [model]
+ # type = 'flux'
+ # # Path to Huggingface Diffusers directory for Flux
+ # diffusers_path = '/data2/imagegen_models/FLUX.1-dev'
+ # # You can override the transformer from a BFL format checkpoint.
+ # transformer_path = '/data2/imagegen_models/flux-dev-single-files/consolidated_s6700-schnell.safetensors'
+ # dtype = 'bfloat16'
+ # flux_shift = true
+
+ # LTX-Video example
+ # [model]
+ # type = 'ltx-video'
+ # diffusers_path = '/data2/imagegen_models/LTX-Video'
+ # dtype = 'bfloat16'
+ # timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the LoRA weights from a previously trained LoRA.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
events.out.tfevents.1737020282.modal.3348.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7f1f4d0844c2e911fca3e4662a9f074af00289e8efb57687938282c7e314b13
+ size 76609
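events.out.tfevents.1737020282.modal.3348.0 is the TensorBoard log for this run. A sketch for reading it with TensorBoard's EventAccumulator once the real file has been pulled from LFS; the scalar tag names the trainer logged are not visible in this diff, so the code discovers them first rather than assuming them:

# Sketch: peek at the training curves in the TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("events.out.tfevents.1737020282.modal.3348.0")
ea.Reload()
tags = ea.Tags()["scalars"]
print(tags)                      # discover what was actually logged
for event in ea.Scalars(tags[0])[:5]:
    print(event.step, event.value)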
global_step1027/layer_00-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c682c9cc8e731c37498845f3a980635b04094468e55e77553c7a12ccff998f5
+ size 920
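The global_step1027 directory is a DeepSpeed training-state checkpoint saved partway through the run, with one layer_XX-model_states.pt file per pipeline layer (layer_00 is only 920 bytes, while the layers that follow are about 9 MB each). A sketch for peeking inside one of these files after pulling it from LFS; the exact structure of the saved object is an assumption, so the code just prints whatever it finds:

# Sketch: inspect one DeepSpeed pipeline layer checkpoint.
import torch

# weights_only=False because this is your own trusted checkpoint.
state = torch.load("global_step1027/layer_01-model_states.pt",
                   map_location="cpu", weights_only=False)
if isinstance(state, dict):
    for key, value in state.items():
        shape = tuple(value.shape) if torch.is_tensor(value) else type(value).__name__
        print(key, shape)
else:
    print(type(state))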
global_step1027/layer_01-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80399ca739b97c0eca95fbb87e44c5fcc8c49505076fbbfcf6498ab0436de61b
+ size 9051114
global_step1027/layer_02-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f347631ea6f6ce794e6f22286f2af971bd27ebe60ce2da38050cd8171b2e32a0
+ size 9051114
global_step1027/layer_03-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58b895a100fa34c755fbf8596fd19f47483620ea4df21e959e994bc694f33071
+ size 9051114
global_step1027/layer_04-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:967e02c29c288a25bb9984a5aee9098791fa788750bba40e073adbd899966951
+ size 9051114
global_step1027/layer_05-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b847886f074e4acfe50cbb328579b48d8e443982eaa277074461a35e32485ba
+ size 9051114
global_step1027/layer_06-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6afb834c929f7ef67d945dfd77cdd931e13b793d9802320ebe33fa57f87d10c7
+ size 9051114
global_step1027/layer_07-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:032712b5389ace4ca7dc99fc8f966775bd5bb873faf31521eae1f1c0d5652d55
+ size 9051114
global_step1027/layer_08-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3411ef035dc92a30c3a8d1ea9ec3cfe8a87a0cea3d8866b24f97f9952c487661
+ size 9051114
global_step1027/layer_09-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a5f76554415ac46ec06ab2211a32c459b842fe7d5360e8be1e54e63cf508e15
+ size 9051114
global_step1027/layer_10-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a841549789ec903285d6290842d02ff3818faa37f281742a5dd56661179c3eb5
+ size 9051114
global_step1027/layer_11-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9fae9c030f68f2515b9eb0f362dc11abb135365c758d59a19629272ee4f83c86
+ size 9051114
global_step1027/layer_12-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6490b678de916e4bf5880da964701e139d5930b43074901e6a340d6924752667
+ size 9051114
global_step1027/layer_13-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09bfeba167db18ce306108b15ea8676cb8b50a847a1c286d83c9b77a85c882fc
+ size 9051114
global_step1027/layer_14-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:297a60e0d65f0d02051821a208c0663aa48445d5f32d50fbf10ed4c408c375bb
+ size 9051114
global_step1027/layer_15-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1829c786458eb632d700b3f4d41ec532473ab74114efcdacac776b9402e9d6e8
+ size 9051114