sukaka committed
Commit 02513a4
1 Parent(s): ce68a03

Upload watercolor_lora/watercolor.toml with huggingface_hub

Files changed (1)
  1. watercolor_lora/watercolor.toml +39 -0
watercolor_lora/watercolor.toml ADDED
@@ -0,0 +1,39 @@
+ pretrained_model_name_or_path = "sd-models/animefull-final-pruned.safetensors"
+ train_data_dir = "/kaggle/input/lora-train-picture/watercolor"
+ shuffle_caption = true
+ caption_extension = ".txt"
+ keep_tokens = 1
+ resolution = "512,768"
+ enable_bucket = true
+ output_dir = "/kaggle/working/output"
+ output_name = "watercolor"
+ save_precision = "fp16"
+ save_every_n_epochs = 1
+ train_batch_size = 2
+ max_token_length = 225
+ xformers = true
+ max_train_epochs = 12
+ seed = 1337
+ gradient_accumulation_steps = 64
+ mixed_precision = "fp16"
+ clip_skip = 2
+ logging_dir = "/kaggle/working/output/logs"
+ log_with = "wandb"
+ log_prefix = "watercolor"
+ log_tracker_name = "watercolor"
+ noise_offset = 0.1
+ lowram = true
+ sample_every_n_epochs = 1
+ sample_prompts = "/kaggle/input/lora-train-picture/watercolor/tag.txt"
+ sample_sampler = "euler_a"
+ optimizer_type = "Lion"
+ learning_rate = 7e-5
+ optimizer_args = [ "weight_decay=0.01", "betas=.95,.98",]
+ lr_scheduler = "cosine_with_restarts"
+ min_snr_gamma = 5.0
+ unet_lr = 7e-5
+ text_encoder_lr = 8e-6
+ network_module = "networks.lora"
+ network_dim = 128
+ network_alpha = 64.0
+ training_comment = "watercolor-sukaka"
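
For reference, below is a minimal Python sketch that loads this TOML and prints the key hyperparameters, including the effective batch size implied by train_batch_size and gradient_accumulation_steps (2 x 64 = 128). It assumes Python 3.11+ (standard-library tomllib) and the repository path of this file; the accelerate launch command in the final comment is an assumption about how the config is consumed by kohya-ss sd-scripts and may differ between versions.

# Sanity-check sketch for watercolor_lora/watercolor.toml (assumes Python 3.11+).
import tomllib
from pathlib import Path

config_path = Path("watercolor_lora/watercolor.toml")

with config_path.open("rb") as f:          # tomllib requires a binary file handle
    cfg = tomllib.load(f)

# Effective batch size = per-step batch size x gradient accumulation steps.
effective_batch = cfg["train_batch_size"] * cfg["gradient_accumulation_steps"]

print(f"base model        : {cfg['pretrained_model_name_or_path']}")
print(f"network dim/alpha : {cfg['network_dim']} / {cfg['network_alpha']}")
print(f"unet / text lr    : {cfg['unet_lr']} / {cfg['text_encoder_lr']}")
print(f"effective batch   : {effective_batch} "
      f"({cfg['train_batch_size']} x {cfg['gradient_accumulation_steps']} accumulation)")

# The config is then typically passed to kohya-ss sd-scripts, e.g. (assumption,
# verify against your sd-scripts version):
#   accelerate launch train_network.py --config_file watercolor_lora/watercolor.toml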