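# LoRA training config for LoRA_Easy_Training_Scripts (derrian_distro),
# training a LoCon network on Illustrious-XL v2.0 (SDXL, v-prediction).

# The [[subsets]] blocks define the image folders that make up the dataset.
# Captions are read from .txt sidecar files; caption_tag_dropout_rate = 0.1
# drops each caption tag with 10% probability and shuffle_caption randomizes
# tag order. random_crop_padding_percent appears to be a tool-specific
# extension of kohya's random_crop option.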
[[subsets]]
caption_extension = ".txt"
caption_tag_dropout_rate = 0.1
image_dir = "D:/datasets/reg2"
name = "reg2"
num_repeats = 1
random_crop = true
random_crop_padding_percent = 0.06
shuffle_caption = true

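# keep_tokens = 2 keeps the first two caption tokens in place when the rest of
# the caption is shuffled (useful for pinning trigger tags at the front).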
[[subsets]]
caption_extension = ".txt"
caption_tag_dropout_rate = 0.1
image_dir = "Y:/stable-diffusion/datasets/Artoria tiny"
keep_tokens = 2
name = "ishiri small"
num_repeats = 1
random_crop = true
random_crop_padding_percent = 0.06
shuffle_caption = true

[[subsets]]
caption_extension = ".txt"
caption_tag_dropout_rate = 0.1
image_dir = "D:/datasets/reg"
name = "reg"
num_repeats = 1
random_crop_padding_percent = 0.05
shuffle_caption = true

[[subsets]]
caption_extension = ".txt"
caption_tag_dropout_rate = 0.1
image_dir = "D:/datasets/mix"
name = "mix"
num_repeats = 1
random_crop = true
random_crop_padding_percent = 0.06
shuffle_caption = true

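# Train a LoRA-type network; the specific algorithm (LoCon) is selected under
# [network_args.args.network_args] below.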
[train_mode]
train_mode = "lora"

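# Core training arguments. With dataset batch_size = 4 and
# gradient_accumulation_steps = 64 the effective batch size is 4 * 64 = 256
# per optimizer step. sdxl + v_parameterization match the v-prediction
# Illustrious-XL base model; max_token_length = 225 extends the caption token
# limit, and prior_loss_weight only applies to subsets flagged as
# regularization data.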
[general_args.args]
persistent_data_loader_workers = true
pretrained_model_name_or_path = "Y:/stable-diffusion/models/Stable-diffusion/Illustrious-XL-v20.safetensors"
debiased_estimation_loss = true
mixed_precision = "bf16"
gradient_checkpointing = true
gradient_accumulation_steps = 64
seed = 8602366
max_data_loader_n_workers = 2
max_token_length = 225
prior_loss_weight = 0.01
xformers = true
max_train_epochs = 30
sdxl = true
v_parameterization = true

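# Per-step batch size and base training resolution (1024 for SDXL).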
[general_args.dataset_args]
batch_size = 4
resolution = 1024

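# Network rank (dim) 64 with alpha 128, i.e. an alpha/dim scale factor of 2.
# Only the U-Net is trained; both text encoders stay frozen. The timestep
# range 0-1000 covers the full noise schedule.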
[network_args.args]
network_dim = 64
network_alpha = 128.0
min_timestep = 0
max_timestep = 1000
network_train_unet_only = true

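# AdamW8bitAO (8-bit AdamW, presumably the torchao-backed variant) at a
# constant 3e-5 learning rate after a 1% warmup, plain L2 loss, and gradient
# clipping at 0.1. zero_terminal_snr rescales the noise schedule to zero
# terminal SNR, the usual pairing with v-prediction.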
[optimizer_args.args]
optimizer_type = "AdamW8bitAO"
lr_scheduler = "constant_with_warmup"
loss_type = "l2"
learning_rate = 3e-5
warmup_ratio = 0.01
max_grad_norm = 0.1
zero_terminal_snr = true

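# One .safetensors checkpoint per epoch in bf16, plus a copy of this TOML
# written to the settings directory.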
[saving_args.args]
output_dir = "Y:/stable-diffusion/lora/derrian_distro/models"
output_name = "illustriousXL20-vpred-conv-attempt"
save_precision = "bf16"
save_model_as = "safetensors"
save_every_n_epochs = 1
save_toml = true
save_toml_location = "Y:/stable-diffusion/lora/derrian_distro/settings"

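# TensorBoard logging with default run naming.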
[logging_args.args]
logging_dir = "Y:/stable-diffusion/lora/derrian_distro/logs"
log_prefix_mode = "disabled"
run_name_mode = "default"
log_with = "tensorboard"

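# EDM2-style learned loss weighting: per-noise-level weights are trained
# alongside the network (in the spirit of Karras et al., EDM2) with their own
# optimizer (FMARSCropV2ExMachina from LoraEasyCustomOptimizer) at lr 5e-3.
# A graph of the learned weighting is rendered every 10 steps with the y-axis
# capped at 20; num_channels = 448 is presumably the width of the weighting
# module.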
[edm_loss_args.args]
edm2_loss_weighting = true
edm2_loss_weighting_optimizer = "LoraEasyCustomOptimizer.fmarscrop.FMARSCropV2ExMachina"
edm2_loss_weighting_optimizer_lr = "5e-3"
edm2_loss_weighting_optimizer_args = "{'update_strategy':'cautious', 'gamma':0.0, 'betas':(0.99,0.9999,0.999), 'adaptive_clip':0}"
edm2_loss_weighting_max_grad_norm = "0"
edm2_loss_weighting_generate_graph = true
edm2_loss_weighting_generate_graph_output_dir = "Y:/stable-diffusion/lora/derrian_distro/LoRA_Easy_Training_Scripts/edm2 graphs"
edm2_loss_weighting_generate_graph_every_x_steps = 10
edm2_loss_weighting_generate_graph_y_limit = 20
edm2_loss_weighting_initial_weights = ""
edm2_loss_weighting_num_channels = 448

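# Aspect-ratio bucketing: bucket resolutions run from 768 to 2048 px in
# 64 px steps, so non-square images train without heavy cropping.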
[bucket_args.dataset_args]
enable_bucket = true
bucket_reso_steps = 64
max_bucket_reso = 2048
min_bucket_reso = 768

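# LyCORIS LoCon: LoRA applied to convolution layers as well (conv rank 64,
# alpha 128), with dora_wd enabling DoRA-style weight decomposition.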
[network_args.args.network_args]
conv_dim = 64
conv_alpha = 128.0
algo = "locon"
dora_wd = true

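# Extra keyword arguments passed straight through to the optimizer
# constructor: weight decay, Adam betas, and bf16 stochastic rounding.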
[optimizer_args.args.optimizer_args]
weight_decay = "0.042"
betas = "0.9,0.99"
bf16_stochastic_round = "True"