skeskinen committed
Commit e42c759 · verified · 1 Parent(s): 0fbfe56

Upload my file

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. .gitattributes +1 -0
  2. dataset_wan.toml +9 -0
  3. epoch10/adapter_config.json +40 -0
  4. epoch10/adapter_model.safetensors +3 -0
  5. epoch10/wan.toml +48 -0
  6. epoch20/adapter_config.json +40 -0
  7. epoch20/adapter_model.safetensors +3 -0
  8. epoch20/wan.toml +48 -0
  9. epoch30/adapter_config.json +40 -0
  10. epoch30/adapter_model.safetensors +3 -0
  11. epoch30/wan.toml +48 -0
  12. epoch40/adapter_config.json +40 -0
  13. epoch40/adapter_model.safetensors +3 -0
  14. epoch40/wan.toml +48 -0
  15. epoch50/adapter_config.json +40 -0
  16. epoch50/adapter_model.safetensors +3 -0
  17. epoch50/wan.toml +48 -0
  18. epoch60/adapter_config.json +40 -0
  19. epoch60/adapter_model.safetensors +3 -0
  20. epoch60/wan.toml +48 -0
  21. epoch70/adapter_config.json +40 -0
  22. epoch70/adapter_model.safetensors +3 -0
  23. epoch70/wan.toml +48 -0
  24. epoch80/adapter_config.json +40 -0
  25. epoch80/adapter_model.safetensors +3 -0
  26. epoch80/wan.toml +48 -0
  27. epoch90/adapter_config.json +40 -0
  28. epoch90/adapter_model.safetensors +3 -0
  29. epoch90/wan.toml +48 -0
  30. events.out.tfevents.1746472272.420c94ca0326.20093.0 +3 -0
  31. global_step1347/layer_00-model_states.pt +3 -0
  32. global_step1347/layer_01-model_states.pt +3 -0
  33. global_step1347/layer_02-model_states.pt +3 -0
  34. global_step1347/layer_03-model_states.pt +3 -0
  35. global_step1347/layer_04-model_states.pt +3 -0
  36. global_step1347/layer_05-model_states.pt +3 -0
  37. global_step1347/layer_06-model_states.pt +3 -0
  38. global_step1347/layer_07-model_states.pt +3 -0
  39. global_step1347/layer_08-model_states.pt +3 -0
  40. global_step1347/layer_09-model_states.pt +3 -0
  41. global_step1347/layer_10-model_states.pt +3 -0
  42. global_step1347/layer_11-model_states.pt +3 -0
  43. global_step1347/layer_12-model_states.pt +3 -0
  44. global_step1347/layer_13-model_states.pt +3 -0
  45. global_step1347/layer_14-model_states.pt +3 -0
  46. global_step1347/layer_15-model_states.pt +3 -0
  47. global_step1347/layer_16-model_states.pt +3 -0
  48. global_step1347/layer_17-model_states.pt +3 -0
  49. global_step1347/layer_18-model_states.pt +3 -0
  50. global_step1347/layer_19-model_states.pt +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250505_191035-lg5j0rns/run-lg5j0rns.wandb filter=lfs diff=lfs merge=lfs -text
dataset_wan.toml ADDED
@@ -0,0 +1,9 @@
+resolutions = [ 480,]
+enable_ar_bucket = false
+min_ar = 1.68
+max_ar = 1.88
+num_ar_buckets = 1
+frame_buckets = [21, 29,]
+[[directory]]
+path = "/workspace/push-in"
+num_repeats = 1
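
Editor's note: this dataset config pins a single 480-px resolution, disables aspect-ratio bucketing, and defines two frame-length buckets (21 and 29 frames); `[[directory]]` is TOML array-of-tables syntax, so further source folders could be appended. A minimal reading sketch (my illustration, not part of the commit; assumes Python 3.11+ for the stdlib `tomllib`):

```python
# Illustrative only: parse dataset_wan.toml and print the bucketing fields.
# tomllib (stdlib since Python 3.11) requires the file opened in binary mode.
import tomllib

with open("dataset_wan.toml", "rb") as f:
    cfg = tomllib.load(f)

print(cfg["resolutions"])     # [480]
print(cfg["frame_buckets"])   # [21, 29] -- clip lengths, in frames
for d in cfg["directory"]:    # [[directory]] parses to a list of tables
    print(d["path"], "repeats:", d["num_repeats"])
```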
epoch10/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
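
Editor's note: this is a standard PEFT LoRA config: rank 32, alpha 32 (a LoRA scale of alpha/r = 1.0), no dropout, targeting the attention projections (`q`, `k`, `v`, `o`, plus the image-conditioned `k_img`/`v_img`) and both feed-forward layers (`ffn.0`, `ffn.2`). A hedged sketch (mine, not from the commit) of reading it back with the `peft` library:

```python
# Illustrative sketch: load epoch10/adapter_config.json via PEFT.
# PeftConfig.from_pretrained returns a LoraConfig here because peft_type is "LORA".
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("epoch10")   # path to this repo's folder
print(cfg.r, cfg.lora_alpha)                  # 32 32 -> scale alpha/r = 1.0
print(sorted(cfg.target_modules))
# ['ffn.0', 'ffn.2', 'k', 'k_img', 'o', 'q', 'v', 'v_img']
```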
epoch10/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:289864341f994a6fcf8f5c3414a6dcd23de631a5fe5afad7f264df00541d8a87
+size 359257680
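
Editor's note: what the repo stores is only this 3-line Git LFS pointer; the actual ~359 MB adapter lives in LFS storage and is fetched by `git lfs pull`. Once materialized, a sketch like the following (my example, not part of the commit) lists tensors lazily without loading the whole file:

```python
# Illustrative sketch: inspect the LoRA safetensors file after `git lfs pull`
# has replaced the pointer with the real 359,257,680-byte file.
from safetensors import safe_open

with safe_open("epoch10/adapter_model.safetensors",
               framework="pt", device="cpu") as f:
    for name in list(f.keys())[:4]:           # peek at the first few tensors
        sl = f.get_slice(name)                # lazy slice: no full load
        print(name, sl.get_shape(), sl.get_dtype())
```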
epoch10/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
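
Editor's note: the same wan.toml is snapshotted into every epochN/ folder, so each checkpoint records the exact run settings: LoRA rank 32 on the Wan2.1 i2v 480p 14B transformer, bfloat16 throughout, AdamW (optimi) at lr 1e-5, checkpoints every 10 epochs. Note the file commits a plaintext `wandb_api_key`; a key published like this should be treated as compromised and rotated. A short parsing sketch (illustrative, assuming the config is consumed by a diffusion-pipe-style Python trainer):

```python
# Illustrative sketch: read the run config and derive the effective batch
# size per data-parallel rank from the gradient-accumulation settings.
import tomllib

with open("epoch10/wan.toml", "rb") as f:
    cfg = tomllib.load(f)

eff_batch = cfg["micro_batch_size_per_gpu"] * cfg["gradient_accumulation_steps"]
print("effective batch per rank:", eff_batch)                      # 1
print(cfg["model"]["type"], "LoRA rank", cfg["adapter"]["rank"])   # wan 32
print(cfg["optimizer"]["type"], cfg["optimizer"]["lr"])            # adamw_optimi 1e-05
```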
epoch20/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch20/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac074113b4b467c929fe5875f6d432eec7de0b0981fbf111b3b4539ceeb81fca
+size 359257680
epoch20/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch30/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch30/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2aa1c81823f31172c95da55350391df8991fbf7149a561445ca1ded87bca7c92
+size 359257680
epoch30/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch40/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch40/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0f364b8c7a21f9f80bc3a73556cac784dc29a37904d7eb9d139409d93697520
+size 359257680
epoch40/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch50/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch50/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b26b9b57c892aaf4f39f460930fbdabe4fe065b9b75529d3f8dff22bfec3c62a
+size 359257680
epoch50/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch60/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch60/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:018b6bef9b29073808580488549a5a04ab05d9cd7878ec446544d850bcdcade8
+size 359257680
epoch60/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch70/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch70/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c9fb051fc7e6fad7591faa818e93db55e6d31657425ffa6b1223456295f9cc1
+size 359257680
epoch70/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch80/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch80/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55088f6a96ac17cf17b9b1a290f3fd31f1195e378deb49b8a4ebab5af40833ce
+size 359257680
epoch80/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
epoch90/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o",
+    "k",
+    "v",
+    "q",
+    "ffn.0",
+    "v_img",
+    "ffn.2",
+    "k_img"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch90/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29c25e965a959758884efd1e90a46c8b4cede98ea1a1ebc31c59fdedc2cb9ee3
+size 359257680
epoch90/wan.toml ADDED
@@ -0,0 +1,48 @@
+output_dir = "/workspace/ComfyUI/models/loras/out"
+dataset = "/workspace/configs/dataset_wan.toml"
+epochs = 1000
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+gradient_accumulation_steps = 1
+gradient_clipping = 1.0
+warmup_steps = 40
+activation_checkpointing = true
+partition_method = "parameters"
+save_dtype = "bfloat16"
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = "single_beginning"
+save_every_n_epochs = 10
+checkpoint_every_n_minutes = 120
+blocks_to_swap = 20
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+[model]
+type = "wan"
+ckpt_path = "/workspace/Wan2.1"
+transformer_path = '/workspace/ComfyUI/models/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors'
+llm_path = '/workspace/ComfyUI/models/text_encoders/umt5-xxl-enc-bf16.safetensors'
+dtype = "bfloat16"
+timestep_sample_method = "logit_normal"
+
+[adapter]
+type = "lora"
+rank = 32
+dtype = "bfloat16"
+
+[optimizer]
+type = "adamw_optimi"
+lr = 1e-5
+betas = [ 0.9, 0.99,]
+weight_decay = 0.01
+
+[monitoring]
+# Set to true and fill in these fields to enable wandb
+enable_wandb = true
+wandb_api_key = 'f46df1bb828b735bd22f94fff1be190ba5e046f9'
+wandb_tracker_name = 'wan-lora'
+wandb_run_name = 'wan-lora'
events.out.tfevents.1746472272.420c94ca0326.20093.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc78cf675810341f01a41a7c7ccd6a754c25400995bb4d0046bc0150d9ce6910
+size 196583
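
Editor's note: this is a TensorBoard event log, also stored as an LFS pointer; the epoch timestamp 1746472272 in its name corresponds to 2025-05-05, matching the wandb run folder tracked in .gitattributes above. After `git lfs pull`, the curves can be read back with TensorBoard's event reader; a sketch (mine, with tag names guessed rather than taken from the file):

```python
# Illustrative sketch: load the logged scalars from the tfevents file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("events.out.tfevents.1746472272.420c94ca0326.20093.0")
ea.Reload()                          # parse the event records
tags = ea.Tags()["scalars"]          # actual tag names may differ
print(tags)
for ev in ea.Scalars(tags[0])[:5]:   # first few points of one series
    print(ev.step, ev.value)
```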
global_step1347/layer_00-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c682c9cc8e731c37498845f3a980635b04094468e55e77553c7a12ccff998f5
+size 920
global_step1347/layer_01-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7100d3e26fa520af5212aadefeb24aaad671b0eaf4ac35796621e87b495b4ee4
+size 8986838
global_step1347/layer_02-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53604eef3ce9c3701d7c49e833a6471bb8bcc51867208d2ec394fff92482b0d1
+size 8986838
global_step1347/layer_03-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03c84057c3ff7784ea7b61dc1e9537396a690fd73974e58774c007e714260cb4
+size 8986838
global_step1347/layer_04-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8357f02b722621308523d92acd7506dccd3f17d3ebd8e7f3d43c1e34e9c8c848
+size 8986838
global_step1347/layer_05-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64c7d27d5a18b0316dbbddb16d1d9bbeac45b2c66ea553b8d152a7df9d5fad1c
+size 8986838
global_step1347/layer_06-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4624cab288a75205f419b56e51adc265de9dccfa14c67b8571ff3092a9ce1d21
+size 8986838
global_step1347/layer_07-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b23272ad418c071cf4b938a8c32fe87a8b299b902dcc42a3ffd5abd7b34144a
+size 8986838
global_step1347/layer_08-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6386616c4a89bb31ecee50c953b444e1b6ef593b8ef31c01b04d1f8a63156875
+size 8986838
global_step1347/layer_09-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:589e276b89d62900e4d88c5fc3fa96c34da720712431617f86b03c87ffc0635b
+size 8986838
global_step1347/layer_10-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80395f67c0189df41d694fe127ac2024e4605b3eb5156c98863600e8d0af0530
+size 8986838
global_step1347/layer_11-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7735fb65d7188cc33e6628493c940ec8b71901f45942892f197a05ac284d93d9
+size 8986838
global_step1347/layer_12-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c141f4c5ed2a2c8005f581457364760c1f1916a8bde29b52324f2b529a76f0
+size 8986838
global_step1347/layer_13-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54a712e41d40d643b548b7df9040dbfaed0f0f0d5d07bdad0f7670bfca159264
+size 8986838
global_step1347/layer_14-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c06997565bb8ba811ab0f82a6b185df638e6204fd04f5f5908436789dbd72ba7
+size 8986838
global_step1347/layer_15-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b14e50409e4577a754706ef51dddb84291fc28916e332a4097b8dad56ec2a7de
+size 8986838
global_step1347/layer_16-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9efc6563cc1bc1277d6322bfa2986291678391a61d75c9e208caac873b5a602e
+size 8986838
global_step1347/layer_17-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75ea56cd7102eedb76d670cfe01055c65b6ee2e10486f48a73911d4a81bca5a0
+size 8986838
global_step1347/layer_18-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41fd7023adf82f25c5c91a9cbe32ffe763814550cca1269bf2b31f6f8141ae14
+size 8986838
global_step1347/layer_19-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a2baedfd12f75feb166421fa9e248835e86c2f4751589513316cd5afb1b37dd
+size 8986838
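
Editor's note: these layer_XX-model_states.pt files look like DeepSpeed pipeline-parallel checkpoint shards taken at global step 1347: one file per pipeline layer, with layer_00 only 920 bytes while each block's shard is ~9 MB, far smaller than a full 14B transformer block, which is consistent with only adapter weights being trained. (The view is truncated at 50 files, so shards past layer_19 are not shown.) Assuming each shard is an ordinary torch pickle of that layer's state dict, it can be inspected like this (illustrative, not part of the commit):

```python
# Illustrative sketch: peek inside one presumed DeepSpeed layer shard after
# `git lfs pull`. torch.load should yield that layer's state dict.
import torch

state = torch.load("global_step1347/layer_01-model_states.pt",
                   map_location="cpu")
for key, value in list(state.items())[:5]:
    if hasattr(value, "shape"):
        print(key, tuple(value.shape), value.dtype)
    else:
        print(key, type(value).__name__)   # non-tensor metadata, if any
```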