Update app.py
app.py CHANGED
@@ -35,7 +35,7 @@ import json
 import yaml
 from slugify import slugify
 
-
+from flux_train import start_training
 
 
 
@@ -525,8 +525,119 @@ def start_training(
     return f"Training started successfully with config: {config_path}"
 
 # ========== MAIN ENDPOINT ==========
+
+
+
+
+
+
+
+
+import os
+import uuid
+import json
+import shutil
+import yaml
+from pathlib import Path
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import JSONResponse
+from huggingface_hub import hf_hub_download
+from flux_train import start_training
+
+app = FastAPI()
+
 @app.post("/train-from-hf")
 def auto_run_lora_from_repo():
+    try:
+        # ✅ Static or dynamic config
+        REPO_ID = "rahul7star/ohamlab"
+        FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"
+        CONCEPT_SENTENCE = "ohamlab style"
+        LORA_NAME = "ohami_filter_autorun"
+
+        # ✅ Setup HF cache
+        os.environ["HF_HOME"] = "/tmp/hf_cache"
+        os.makedirs("/tmp/hf_cache", exist_ok=True)
+
+        # ✅ Download dataset from HF
+        local_dir = Path(f"/tmp/{LORA_NAME}-{uuid.uuid4()}")
+        os.makedirs(local_dir, exist_ok=True)
+
+        hf_hub_download(
+            repo_id=REPO_ID,
+            repo_type="dataset",
+            filename=".placeholder",  # needs a dummy filename to trigger folder download
+            subfolder=FOLDER_IN_REPO,
+            local_dir=local_dir,
+            local_dir_use_symlinks=False,
+            force_download=False
+        )
+
+        image_dir = local_dir / FOLDER_IN_REPO
+        image_paths = list(image_dir.rglob("*.jpg")) + list(image_dir.rglob("*.jpeg")) + list(image_dir.rglob("*.png"))
+
+        if not image_paths:
+            raise HTTPException(status_code=400, detail="No images found in the Hugging Face folder.")
+
+        # ✅ Auto-generate captions
+        captions = [
+            f"Autogenerated caption for {img.stem} in the {CONCEPT_SENTENCE} [trigger]" for img in image_paths
+        ]
+
+        # ✅ Create dataset folder with metadata.jsonl
+        dataset_folder = f"datasets_{uuid.uuid4()}"
+        os.makedirs(dataset_folder, exist_ok=True)
+
+        jsonl_file_path = os.path.join(dataset_folder, "metadata.jsonl")
+        with open(jsonl_file_path, "a") as jsonl_file:
+            for index, image in enumerate(image_paths):
+                new_image_path = shutil.copy(str(image), dataset_folder)
+                file_name = os.path.basename(new_image_path)
+                data = {"file_name": file_name, "prompt": captions[index]}
+                jsonl_file.write(json.dumps(data) + "\n")
+
+        # ✅ Optional advanced config
+        slugged_lora_name = LORA_NAME.replace(" ", "_")
+        os.makedirs("tmp_configs", exist_ok=True)
+        config_path = f"tmp_configs/{uuid.uuid4()}_{slugged_lora_name}.yaml"
+
+        config = {
+            "sample_1": "a stylish anime character with ohamlab style",
+            "sample_2": "a cartoon car in ohamlab style",
+            "sample_3": "portrait in ohamlab lighting"
+        }
+
+        with open(config_path, "w") as f:
+            yaml.dump(config, f)
+
+        # ✅ Final call to train
+        result = start_training(
+            lora_name=LORA_NAME,
+            concept_sentence=CONCEPT_SENTENCE,
+            steps=45,
+            lr=1e-4,
+            rank=32,
+            model_to_train="flux",
+            low_vram=True,
+            dataset_folder=dataset_folder,
+            sample_1=config["sample_1"],
+            sample_2=config["sample_2"],
+            sample_3=config["sample_3"],
+            use_more_advanced_options=True,
+            more_advanced_options=config_path
+        )
+
+        return JSONResponse(content={"status": "success", "message": result})
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+
+
+
+
+def _run_lora_from_repo():
     try:
         # Set HF cache path if not already set
         os.environ["HF_HOME"] = "/tmp/hf_cache"
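
A note on the download step in this hunk: `hf_hub_download` fetches exactly one file, so the call above only writes `.placeholder` locally and the later `rglob` for `*.jpg`/`*.jpeg`/`*.png` can come back empty. If the intent is to mirror the whole folder, `snapshot_download` with `allow_patterns` is one alternative; the sketch below is illustrative only, reusing the repo and folder names from the commit and a hypothetical local directory.

# Hypothetical alternative to the hf_hub_download call (not part of the commit):
# mirror the whole dataset folder instead of a single placeholder file.
from pathlib import Path
from huggingface_hub import snapshot_download

REPO_ID = "rahul7star/ohamlab"
FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"

local_dir = snapshot_download(
    repo_id=REPO_ID,
    repo_type="dataset",
    allow_patterns=[f"{FOLDER_IN_REPO}/*"],  # only files under this folder
    local_dir="/tmp/ohami_filter_autorun",   # assumed scratch path
)

image_dir = Path(local_dir) / FOLDER_IN_REPO
image_paths = [p for p in image_dir.rglob("*") if p.suffix.lower() in {".jpg", ".jpeg", ".png"}]
print(f"found {len(image_paths)} images in {image_dir}")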
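
For a quick smoke test once the Space is running: the new route takes no request body, so a bare POST is enough. The host below is a placeholder rather than anything taken from this commit, and `requests` is assumed to be available on the client.

# Minimal client-side call to the new endpoint; replace the placeholder host
# with the actual Space URL before running.
import requests

resp = requests.post("https://YOUR-SPACE-HOST/train-from-hf", timeout=600)  # generous timeout in case training starts synchronously
print(resp.status_code, resp.json())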