recoilme committed
Commit ea6c506 · 1 Parent(s): a8a0e65
README.md CHANGED
@@ -5,32 +5,47 @@ license: apache-2.0
 Quick installation guide
 Update the system and install git-lfs:
 
-```apt update
-apt install git-lfs```
+```
+apt update
+apt install git-lfs
+```
 Upgrade pip and install the required packages:
 
-```python -m pip install --upgrade pip
-pip install -r requirements.txt```
+```
+python -m pip install --upgrade pip
+pip install -r requirements.txt
+```
 Configure git-credentials:
 
-```git config --global credential.helper store```
+```
+git config --global credential.helper store
+```
 Clone the repository:
 
-```git clone https://huggingface.co/AiArtLab/sdxs
-cd sdxs/```
+```
+git clone https://huggingface.co/AiArtLab/sdxs
+cd sdxs/
+```
 Prepare the dataset:
 
-```mkdir datasets
+```
+mkdir datasets
 cd datasets
-git clone https://huggingface.co/datasets/AiArtLab/imagenet-1kk```
+git clone https://huggingface.co/datasets/AiArtLab/imagenet-1kk
+```
 Log in to the services:
 
-```huggingface-cli login
-wandb login```
+```
+huggingface-cli login
+wandb login
+```
 Configure accelerate for multi-GPU training:
 
-```accelerate config```
-(select multigpu and bf16)
-Start training:
+```
+accelerate config
+```
+Start training!
 
-```nohup accelerate launch train.py &```
+```
+nohup accelerate launch train.py &
+```
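The run itself is handed to `accelerate launch train.py`. For reference, a minimal, self-contained sketch of the Accelerator pattern a script like train.py follows under that launcher; the toy model, data, and loss below are placeholders, not the actual training code:

```python
# Illustrative sketch of the Accelerator pattern that `accelerate launch` drives.
# Not the actual train.py: the toy model, data, and loss are placeholders.
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="bf16")  # bf16, as chosen in `accelerate config`

model = torch.nn.Linear(16, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
dataset = TensorDataset(torch.randn(64, 16), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=8)

# prepare() wraps everything for the device layout picked in `accelerate config`
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    with accelerator.accumulate(model):   # gradient-accumulation context, as in train.py
        loss = F.mse_loss(model(x), y)
        accelerator.backward(loss)        # instead of loss.backward()
        optimizer.step()
        optimizer.zero_grad()
```

Under `accelerate launch`, one copy of the script starts per GPU, and `prepare()` takes care of device placement and gradient synchronization.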
dataset_folder.py → dataset_fromfolder.py RENAMED
File without changes
samples/sdxs_192x320_0.jpg CHANGED
samples/sdxs_192x384_0.jpg CHANGED
samples/sdxs_256x320_0.jpg CHANGED
samples/sdxs_256x384_0.jpg CHANGED
samples/sdxs_320x192_0.jpg CHANGED
samples/sdxs_320x256_0.jpg CHANGED
samples/sdxs_320x320_0.jpg CHANGED
samples/sdxs_320x384_0.jpg CHANGED
samples/sdxs_384x192_0.jpg CHANGED
samples/sdxs_384x256_0.jpg CHANGED
samples/sdxs_384x320_0.jpg CHANGED
samples/sdxs_384x384_0.jpg CHANGED
train.py CHANGED
@@ -22,12 +22,12 @@ from diffusers.models.attention_processor import AttnProcessor2_0
 
 # --------------------------- Parameters ---------------------------
 save_path = "datasets/dataset384_temp" #"datasets/dataset384" #"datasets/imagenet-1kk" #"datasets/siski576" #"datasets/siski384" #"datasets/siski64" #"datasets/mnist"
-batch_size = 30 #555 #35 #7
+batch_size = 33 #555 #35 #7
 base_learning_rate = 0 #5e-5 #8e-5
 min_learning_rate = 2.5e-5 #2e-5
-num_epochs = 1 #36 #18
+num_epochs = 5 #36 #18
 project = "sdxs"
-use_wandb = False
+use_wandb = True
 limit = 0
 save_model = True
 checkpoints_folder = ""
@@ -266,6 +266,7 @@ if lowram:
 from transformers.optimization import Adafactor
 
 # [1] Build the optimizer dictionary (one optimizer per parameter)
+base_learning_rate = 0
 optimizer_dict = {
     p: Adafactor(
         [p],
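The hunk above pins `base_learning_rate = 0` right before the per-parameter optimizer dictionary built under `if lowram:`. As context for that pattern, here is a minimal sketch of per-parameter Adafactor optimizers stepped from gradient hooks, a common way to keep optimizer memory low; the hook wiring and the learning rate are assumptions for illustration, not necessarily how train.py does it:

```python
# Sketch of a per-parameter Adafactor setup (assumed wiring, not the actual train.py).
# Each parameter gets its own optimizer and is stepped from a gradient hook, so
# gradients and optimizer work never pile up behind a single global step() call.
import torch
from transformers.optimization import Adafactor

model = torch.nn.Linear(16, 1)  # placeholder model standing in for the UNet

optimizer_dict = {
    p: Adafactor([p], lr=2.5e-5, scale_parameter=False, relative_step=False)
    for p in model.parameters() if p.requires_grad
}

def make_hook(param):
    def hook(*_):
        optimizer_dict[param].step()
        optimizer_dict[param].zero_grad()
    return hook

# Requires PyTorch >= 2.1 for register_post_accumulate_grad_hook
for p in model.parameters():
    if p.requires_grad:
        p.register_post_accumulate_grad_hook(make_hook(p))

# A plain backward pass now updates every parameter as soon as its gradient lands.
loss = model(torch.randn(4, 16)).pow(2).mean()
loss.backward()
```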
 
@@ -476,7 +477,7 @@ for epoch in range(start_epoch, start_epoch + num_epochs):
 
     for step, (latents, embeddings) in enumerate(dataloader):
         with accelerator.accumulate(unet):
-            if step == 2:
+            if save_model == False and step == 3 :
                 used_gb = torch.cuda.max_memory_allocated() / 1024**3
                 print(f"Step {step}: {used_gb:.2f} GB")
             # Forward pass
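The final hunk only changes when the VRAM report fires (now at step 3, and only while save_model is off). The probe itself is torch's peak-allocation counter; a tiny standalone illustration (not part of the commit), including the reset call that starts a fresh measurement window:

```python
# Standalone illustration of the peak-VRAM probe used in the hunk above.
import torch

if torch.cuda.is_available():
    torch.cuda.reset_peak_memory_stats()    # start a fresh measurement window
    x = torch.randn(2048, 2048, device="cuda")
    y = x @ x                                # some work that allocates memory
    used_gb = torch.cuda.max_memory_allocated() / 1024**3
    print(f"Peak: {used_gb:.2f} GB")         # same arithmetic as in train.py
```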