Create autorun_lora_gradio.py
autorun_lora_gradio.py  (new file, +91 lines)
import os
import uuid
import gradio as gr
from pathlib import Path
from huggingface_hub import snapshot_download
from your_existing_training_file import create_dataset, start_training  # <-- update this import as needed

# Constants
REPO_ID = "rahul7star/ohamlab"
FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"
CONCEPT_SENTENCE = "ohamlab style"
LORA_NAME = "ohami_filter_autorun"

def auto_run_lora_from_repo():
    # Work in a unique temp directory so repeated runs do not collide
    local_dir = Path(f"/tmp/{LORA_NAME}-{uuid.uuid4()}")
    os.makedirs(local_dir, exist_ok=True)

    # Download only the images in the target folder of the dataset repo.
    # snapshot_download supports allow_patterns and pulls whole folders
    # (hf_hub_download fetches a single named file and cannot do this).
    snapshot_download(
        repo_id=REPO_ID,
        repo_type="dataset",
        local_dir=local_dir,
        local_dir_use_symlinks=False,
        force_download=False,
        etag_timeout=10,
        allow_patterns=[
            f"{FOLDER_IN_REPO}/*.jpg",
            f"{FOLDER_IN_REPO}/*.jpeg",
            f"{FOLDER_IN_REPO}/*.png",
        ],
    )

    image_dir = local_dir / FOLDER_IN_REPO
    image_paths = (
        list(image_dir.rglob("*.jpg"))
        + list(image_dir.rglob("*.jpeg"))
        + list(image_dir.rglob("*.png"))
    )

    if not image_paths:
        raise gr.Error("No images found in the Hugging Face repo folder.")

    # Captions: one per image, keyed off the file stem
    captions = [
        f"Generated image caption for {img.stem} in the {CONCEPT_SENTENCE} [trigger]"
        for img in image_paths
    ]

    # Create dataset (images first, then one caption argument per image)
    dataset_path = create_dataset(image_paths, *captions)

    # Static sample prompts rendered during training
    sample_1 = f"A stylized portrait using {CONCEPT_SENTENCE}"
    sample_2 = f"A cat in the {CONCEPT_SENTENCE}"
    sample_3 = f"A selfie processed in {CONCEPT_SENTENCE}"

    # Training config
    steps = 1000
    lr = 4e-4
    rank = 16
    model_to_train = "dev"
    low_vram = True
    use_more_advanced_options = True
    more_advanced_options = """\
training:
  seed: 42
  precision: bf16
  batch_size: 2
augmentation:
  flip: true
  color_jitter: true
"""

    # Train
    return start_training(
        lora_name=LORA_NAME,
        concept_sentence=CONCEPT_SENTENCE,
        steps=steps,
        lr=lr,
        rank=rank,
        model_to_train=model_to_train,
        low_vram=low_vram,
        dataset_folder=dataset_path,
        sample_1=sample_1,
        sample_2=sample_2,
        sample_3=sample_3,
        use_more_advanced_options=use_more_advanced_options,
        more_advanced_options=more_advanced_options,
    )

# Gradio UI
with gr.Blocks(title="LoRA Autorun from HF Repo") as demo:
    gr.Markdown("# 🚀 Auto Run LoRA from Hugging Face Repo")
    output = gr.Textbox(label="Training Status", lines=3)
    run_button = gr.Button("Run Training from HF Repo")
    run_button.click(fn=auto_run_lora_from_repo, outputs=output)

if __name__ == "__main__":
    demo.launch(share=True)
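
The file assumes that your_existing_training_file already provides create_dataset and start_training. The sketch below is a hypothetical interface inferred only from how those functions are called above (argument order, keyword names, and the fact that the Gradio Textbox displays the return value); it is not the actual trainer implementation and should be replaced with the real module.

# Hypothetical interface expected by autorun_lora_gradio.py;
# adapt names and bodies to your actual trainer module.
from pathlib import Path
from typing import List

def create_dataset(image_paths: List[Path], *captions: str) -> str:
    """Write the images and one caption per image into a dataset folder
    and return that folder's path (used as dataset_folder)."""
    ...

def start_training(
    lora_name: str,
    concept_sentence: str,
    steps: int,
    lr: float,
    rank: int,
    model_to_train: str,
    low_vram: bool,
    dataset_folder: str,
    sample_1: str,
    sample_2: str,
    sample_3: str,
    use_more_advanced_options: bool,
    more_advanced_options: str,
) -> str:
    """Launch LoRA training and return a status string for the UI."""
    ...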