xotopo committed (verified)
Commit def4d6a · Parent: 0167fea

Upload 4 files
-aVa8YRPK6fPh3VxZPigE_config.json ADDED
@@ -0,0 +1 @@
+ {"images_data_url": "https://v3.fal.media/files/tiger/NtRsKOWxQtwx7MmoSh5vO_1737739784160.zip", "trigger_word": "ATHARV", "disable_captions": false, "disable_segmentation_and_captioning": false, "learning_rate": 0.0005, "b_up_factor": 3.0, "create_masks": true, "iter_multiplier": 1.0, "steps": 1500, "is_style": false, "is_input_format_already_preprocessed": false, "data_archive_format": null, "resume_with_lora": null, "rank": 16, "debug_preprocessed_images": false, "instance_prompt": "ATHARV"}
README.md ADDED
@@ -0,0 +1,45 @@
+ ---
+ license: other
+ license_name: flux-1-dev-non-commercial-license
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+ language:
+ - en
+ tags:
+ - flux
+ - diffusers
+ - lora
+ - replicate
+ base_model: "black-forest-labs/FLUX.1-dev"
+ pipeline_tag: text-to-image
+ # widget:
+ # - text: >-
+ #     prompt
+ #   output:
+ #     url: https://...
+ instance_prompt: ATHARV
+ ---
+
+ # ATHARV Abtest
+
+ <Gallery />
+
+ Trained on Replicate using:
+
+ https://replicate.com/ostris/flux-dev-lora-trainer/train
+
+ ## Trigger words
+
+ You should use `ATHARV` to trigger the image generation.
+
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
+
+ ```py
+ from diffusers import AutoPipelineForText2Image
+ import torch
+
+ pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
+ pipeline.load_lora_weights('xotopo/atharv1500', weight_name='aqV2XR2nv3X_pwJzvMKLo_pytorch_lora_weights.safetensors')
+ image = pipeline('your prompt').images[0]
+ ```
+
+ For more details, including weighting, merging, and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters).
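As a supplement to the README's pointer on weighting and fusing, here is a minimal sketch using the diffusers adapter APIs (`load_lora_weights` with `adapter_name`, `set_adapters`, and `fuse_lora`); the adapter name `atharv` and the 0.8 scale are illustrative choices, not values taken from this repo:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    'black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16
).to('cuda')

# Load the LoRA under an explicit adapter name so its strength can be adjusted
pipeline.load_lora_weights(
    'xotopo/atharv1500',
    weight_name='aqV2XR2nv3X_pwJzvMKLo_pytorch_lora_weights.safetensors',
    adapter_name='atharv',  # illustrative name
)

# Scale the adapter's contribution (1.0 = full strength)
pipeline.set_adapters(['atharv'], adapter_weights=[0.8])

# Alternatively, bake the LoRA into the base weights for slightly faster inference:
# pipeline.fuse_lora(lora_scale=0.8)

image = pipeline('a photo of ATHARV').images[0]
```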
aqV2XR2nv3X_pwJzvMKLo_pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a49a5f45978260d0349b43e138d49760b4a68fa06c50d571c793e43ebcce4b88
+ size 89745224
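Note that the block above is a Git LFS pointer rather than the weights themselves; the roughly 90 MB `.safetensors` file is what actually gets downloaded. A minimal sketch, assuming the weights file has been fetched locally, that checks it against the `size` and `oid sha256` recorded in the pointer:

```py
import hashlib
import os

path = 'aqV2XR2nv3X_pwJzvMKLo_pytorch_lora_weights.safetensors'

# Size must match the pointer's `size` field
assert os.path.getsize(path) == 89745224

# SHA-256 digest must match the pointer's `oid sha256:` field
sha = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        sha.update(chunk)
assert sha.hexdigest() == 'a49a5f45978260d0349b43e138d49760b4a68fa06c50d571c793e43ebcce4b88'
```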
config.yaml ADDED
@@ -0,0 +1,60 @@
+ job: custom_job
+ config:
+   name: flux_train_replicate
+   process:
+   - type: custom_sd_trainer
+     training_folder: output
+     device: cuda:0
+     trigger_word: ATHARV
+     network:
+       type: lora
+       linear: 16
+       linear_alpha: 16
+     save:
+       dtype: float16
+       save_every: 1801
+       max_step_saves_to_keep: 1
+     datasets:
+     - folder_path: input_images
+       caption_ext: txt
+       caption_dropout_rate: 0.05
+       shuffle_tokens: false
+       cache_latents_to_disk: false
+       cache_latents: true
+       resolution:
+       - 512
+       - 768
+       - 1024
+     train:
+       batch_size: 1
+       steps: 1800
+       gradient_accumulation_steps: 1
+       train_unet: true
+       train_text_encoder: false
+       content_or_style: balanced
+       gradient_checkpointing: true
+       noise_scheduler: flowmatch
+       optimizer: adamw8bit
+       lr: 0.0004
+       ema_config:
+         use_ema: true
+         ema_decay: 0.99
+       dtype: bf16
+     model:
+       name_or_path: FLUX.1-dev
+       is_flux: true
+       quantize: true
+     sample:
+       sampler: flowmatch
+       sample_every: 1801
+       width: 1024
+       height: 1024
+       prompts: []
+       neg: ''
+       seed: 42
+       walk_seed: true
+       guidance_scale: 3.5
+       sample_steps: 28
+ meta:
+   name: flux_train_replicate
+   version: '1.0'
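The `network` block in the config above requests a LoRA with `linear: 16` and `linear_alpha: 16`, i.e. rank-16 factors whose update is scaled by alpha / rank = 1.0. A minimal PyTorch sketch of that parametrization (an illustration of the idea, not the trainer's actual implementation):

```py
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """A frozen base linear layer plus a trainable rank-r update scaled by alpha / r."""

    def __init__(self, base: nn.Linear, rank: int = 16, alpha: float = 16.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)      # only the LoRA factors are trained
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)   # the update starts as a no-op
        self.scale = alpha / rank        # 16 / 16 = 1.0 for this config

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.up(self.down(x))

# Example: wrap one 3072-wide projection (dimension chosen for illustration)
layer = LoRALinear(nn.Linear(3072, 3072))
print(layer(torch.randn(1, 3072)).shape)  # torch.Size([1, 3072])
```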