rahul7star committed
Commit 6d2d762 · verified · 1 Parent(s): 2aea684

Update app.py

Files changed (1)
  1. app.py +424 -424
app.py CHANGED
@@ -48,432 +48,432 @@ def health_check():
  def healthz():
      return {"ok": True}

- @app.get("/docs", include_in_schema=False)
- def custom_docs():
-     return JSONResponse(get_openapi(title="LoRA Autorun API", version="1.0.0", routes=app.routes))
-
-
-
- REPO_ID = "rahul7star/ohamlab"
- FOLDER = "demo"
- BASE_URL = f"https://huggingface.co/{REPO_ID}/resolve/main/"
-
- #show all images in a DIR at UI FE
- @app.get("/images")
- def list_images():
-     try:
-         all_files = list_repo_files(REPO_ID)
-
-         folder_prefix = FOLDER.rstrip("/") + "/"
-
-         files_in_folder = [
-             f for f in all_files
-             if f.startswith(folder_prefix)
-             and "/" not in f[len(folder_prefix):] # no subfolder files
-             and f.lower().endswith((".png", ".jpg", ".jpeg", ".webp"))
-         ]
-
-         urls = [BASE_URL + f for f in files_in_folder]
-
-         return {"images": urls}
-
-     except Exception as e:
-         return {"error": str(e)}
-
- from datetime import datetime
- import tempfile
- import uuid
-
- # upload zip from UI
- @app.post("/upload-zip")
- async def upload_zip(file: UploadFile = File(...)):
-     if not file.filename.endswith(".zip"):
-         return {"error": "Please upload a .zip file"}
-
-     # Save the ZIP to /tmp
-     temp_zip_path = f"/tmp/{file.filename}"
-     with open(temp_zip_path, "wb") as f:
-         f.write(await file.read())
-
-     # Create a unique subfolder name inside 'demo/'
-     timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
-     unique_id = uuid.uuid4().hex[:6]
-     folder_name = f"upload_{timestamp}_{unique_id}"
-     hf_folder_prefix = f"demo/{folder_name}"
-
-     try:
-         with tempfile.TemporaryDirectory() as extract_dir:
-             # Extract zip
-             with zipfile.ZipFile(temp_zip_path, 'r') as zip_ref:
-                 zip_ref.extractall(extract_dir)
-
-             uploaded_files = []
-
-             # Upload all extracted files
-             for root_dir, _, files in os.walk(extract_dir):
-                 for name in files:
-                     file_path = os.path.join(root_dir, name)
-                     relative_path = os.path.relpath(file_path, extract_dir)
-                     repo_path = f"{hf_folder_prefix}/{relative_path}".replace("\\", "/")
-
-                     upload_file(
-                         path_or_fileobj=file_path,
-                         path_in_repo=repo_path,
-                         repo_id="rahul7star/ohamlab",
-                         repo_type="model",
-                         commit_message=f"Upload {relative_path} to {folder_name}",
-                         token=True,
-                     )
-                     uploaded_files.append(repo_path)
-
-             return {
-                 "message": f"✅ Uploaded {len(uploaded_files)} files",
-                 "folder": folder_name,
-                 "files": uploaded_files,
-             }
-
-     except Exception as e:
-         return {"error": f"❌ Failed to process zip: {str(e)}"}


- # upload a single file from UI
- from typing import List
- from fastapi import UploadFile, File, APIRouter
- import os
- from fastapi import UploadFile, File, APIRouter
- from typing import List
- from datetime import datetime
- import uuid, os
-
-
- @app.post("/upload")
- async def upload_images(
-     background_tasks: BackgroundTasks,
-     files: List[UploadFile] = File(...)
- ):
-     # Step 1: Generate dynamic folder name
-     timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
-     unique_id = uuid.uuid4().hex[:6]
-     folder_name = f"upload_{timestamp}_{unique_id}"
-     hf_folder_prefix = f"demo/{folder_name}"
-
-     responses = []
-
-     # Step 2: Save and upload each image
-     for file in files:
-         filename = file.filename
-         contents = await file.read()
-         temp_path = f"/tmp/{filename}"
-         with open(temp_path, "wb") as f:
-             f.write(contents)
-
-         try:
-             upload_file(
-                 path_or_fileobj=temp_path,
-                 path_in_repo=f"{hf_folder_prefix}/{filename}",
-                 repo_id=T_REPO_ID,
-                 repo_type="model",
-                 commit_message=f"Upload {filename} to {hf_folder_prefix}",
-                 token=True,
-             )
-             responses.append({
-                 "filename": filename,
-                 "status": "✅ uploaded",
-                 "path": f"{hf_folder_prefix}/{filename}"
-             })
-         except Exception as e:
-             responses.append({
-                 "filename": filename,
-                 "status": f"❌ failed: {str(e)}"
-             })
-
-         os.remove(temp_path)
-
-     # Step 3: Add filter job to background
-     def run_filter():
-         try:
-             result = filter_and_rename_images(folder=hf_folder_prefix)
-             print(f"🧼 Filter result: {result}")
-         except Exception as e:
-             print(f"❌ Filter failed: {str(e)}")
-
-     background_tasks.add_task(run_filter)
-
-     return {
-         "message": f"{len(files)} file(s) uploaded",
-         "upload_folder": hf_folder_prefix,
-         "results": responses,
-         "note": "Filtering started in background"
-     }
-
-
-
-
-
-
- #Tranining Data set start fitering data for traninig
-
-
- T_REPO_ID = "rahul7star/ohamlab"
- DESCRIPTION_TEXT = (
-     "Ra3hul is wearing a black jacket over a striped white t-shirt with blue jeans. "
-     "He is standing near a lake with his arms spread wide open, with mountains and cloudy skies in the background."
- )
-
- def is_image_file(filename: str) -> bool:
-     return filename.lower().endswith((".png", ".jpg", ".jpeg", ".webp"))
-
- @app.post("/filter-images")
- def filter_and_rename_images(folder: str = Query("demo", description="Folder path in repo to scan")):
-     try:
-         all_files = list_repo_files(T_REPO_ID)
-         folder_prefix = folder.rstrip("/") + "/"
-         filter_folder = f"filter-{folder.rstrip('/')}"
-         filter_prefix = filter_folder + "/"
-
-         # Filter images only directly in the folder (no subfolders)
-         image_files = [
-             f for f in all_files
-             if f.startswith(folder_prefix)
-             and "/" not in f[len(folder_prefix):] # no deeper path
-             and is_image_file(f)
-         ]
-
-         if not image_files:
-             return {"error": f"No images found in folder '{folder}'"}
-
-         uploaded_files = []
-
-         for idx, orig_path in enumerate(image_files, start=1):
-             # Download image content bytes (uses local cache)
-             local_path = hf_hub_download(repo_id=T_REPO_ID, filename=orig_path)
-             with open(local_path, "rb") as f:
-                 file_bytes = f.read()
-
-             # Rename images as image1.jpeg, image2.jpeg, ...
-             new_image_name = f"image{idx}.jpeg"
-
-             # Upload renamed image from memory
-             upload_file(
-                 path_or_fileobj=io.BytesIO(file_bytes),
-                 path_in_repo=filter_prefix + new_image_name,
-                 repo_id=T_REPO_ID,
-                 repo_type="model",
-                 commit_message=f"Upload renamed image {new_image_name} to {filter_folder}",
-                 token=True,
-             )
-             uploaded_files.append(filter_prefix + new_image_name)
-
-             # Create and upload text file for each image
-             txt_filename = f"image{idx}.txt"
-             upload_file(
-                 path_or_fileobj=io.BytesIO(DESCRIPTION_TEXT.encode("utf-8")),
-                 path_in_repo=filter_prefix + txt_filename,
-                 repo_id=T_REPO_ID,
-                 repo_type="model",
-                 commit_message=f"Upload text file {txt_filename} to {filter_folder}",
-                 token=True,
-             )
-             uploaded_files.append(filter_prefix + txt_filename)
-
-         return {
-             "message": f"Processed and uploaded {len(image_files)} images and text files.",
-             "files": uploaded_files,
-         }
-
-     except Exception as e:
-         return {"error": str(e)}



- # ========== CONFIGURATION ==========
- REPO_ID = "rahul7star/ohamlab"
- FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"
- CONCEPT_SENTENCE = "ohamlab style"
- LORA_NAME = "ohami_filter_autorun"
-
- # ========== FASTAPI APP ==========
-
- # ========== HELPERS ==========
- def create_dataset(images, *captions):
-     destination_folder = f"datasets_{uuid.uuid4()}"
-     os.makedirs(destination_folder, exist_ok=True)
-
-     jsonl_file_path = os.path.join(destination_folder, "metadata.jsonl")
-     with open(jsonl_file_path, "a") as jsonl_file:
-         for index, image in enumerate(images):
-             new_image_path = shutil.copy(str(image), destination_folder)
-             caption = captions[index]
-             file_name = os.path.basename(new_image_path)
-             data = {"file_name": file_name, "prompt": caption}
-             jsonl_file.write(json.dumps(data) + "\n")
-
-     return destination_folder
-
- def recursive_update(d, u):
-     for k, v in u.items():
-         if isinstance(v, dict) and v:
-             d[k] = recursive_update(d.get(k, {}), v)
-         else:
-             d[k] = v
-     return d
-
- def start_training(
-     lora_name,
-     concept_sentence,
-     steps,
-     lr,
-     rank,
-     model_to_train,
-     low_vram,
-     dataset_folder,
-     sample_1,
-     sample_2,
-     sample_3,
-     use_more_advanced_options,
-     more_advanced_options,
- ):
-     try:
-         user = whoami()
-         username = user.get("name", "anonymous")
-         push_to_hub = True
-     except:
-         username = "anonymous"
-         push_to_hub = False
-
-     slugged_lora_name = lora_name.replace(" ", "_").lower()
-
-     # Load base config
-     config = {
-         "config": {
-             "name": slugged_lora_name,
-             "process": [
-                 {
-                     "model": {
-                         "low_vram": low_vram,
-                         "is_flux": True,
-                         "quantize": True,
-                         "name_or_path": "black-forest-labs/FLUX.1-dev"
-                     },
-                     "network": {
-                         "linear": rank,
-                         "linear_alpha": rank,
-                         "type": "lora"
-                     },
-                     "train": {
-                         "steps": steps,
-                         "lr": lr,
-                         "skip_first_sample": True,
-                         "batch_size": 1,
-                         "dtype": "bf16",
-                         "gradient_accumulation_steps": 1,
-                         "gradient_checkpointing": True,
-                         "noise_scheduler": "flowmatch",
-                         "optimizer": "adamw8bit",
-                         "ema_config": {
-                             "use_ema": True,
-                             "ema_decay": 0.99
-                         }
-                     },
-                     "datasets": [
-                         {"folder_path": dataset_folder}
-                     ],
-                     "save": {
-                         "dtype": "float16",
-                         "save_every": 10000,
-                         "push_to_hub": push_to_hub,
-                         "hf_repo_id": f"{username}/{slugged_lora_name}",
-                         "hf_private": True,
-                         "max_step_saves_to_keep": 4
-                     },
-                     "sample": {
-                         "guidance_scale": 3.5,
-                         "sample_every": steps,
-                         "sample_steps": 28,
-                         "width": 1024,
-                         "height": 1024,
-                         "walk_seed": True,
-                         "seed": 42,
-                         "sampler": "flowmatch",
-                         "prompts": [p for p in [sample_1, sample_2, sample_3] if p]
-                     },
-                     "trigger_word": concept_sentence
-                 }
-             ]
-         }
-     }
-
-     # Apply advanced YAML overrides if any
-     if use_more_advanced_options and more_advanced_options:
-         advanced_config = yaml.safe_load(more_advanced_options)
-         config["config"]["process"][0] = recursive_update(config["config"]["process"][0], advanced_config)
-
-     # Save YAML config
-     os.makedirs("tmp_configs", exist_ok=True)
-     config_path = f"tmp_configs/{uuid.uuid4()}_{slugged_lora_name}.yaml"
-     with open(config_path, "w") as f:
-         yaml.dump(config, f)
-
-     # Simulate training
-     print(f"[INFO] Starting training with config: {config_path}")
-     print(json.dumps(config, indent=2))
-     return f"Training started successfully with config: {config_path}"
-
- # ========== MAIN ENDPOINT ==========
- @app.post("/train-from-hf")
- def auto_run_lora_from_repo():
-     try:
-         local_dir = Path(f"/tmp/{LORA_NAME}-{uuid.uuid4()}")
-         os.makedirs(local_dir, exist_ok=True)
-
-         hf_hub_download(
-             repo_id=REPO_ID,
-             repo_type="dataset",
-             subfolder=FOLDER_IN_REPO,
-             local_dir=local_dir,
-             local_dir_use_symlinks=False,
-             force_download=False,
-             etag_timeout=10,
-             allow_patterns=["*.jpg", "*.png", "*.jpeg"],
-         )
-
-         image_dir = local_dir / FOLDER_IN_REPO
-         image_paths = list(image_dir.rglob("*.jpg")) + list(image_dir.rglob("*.jpeg")) + list(image_dir.rglob("*.png"))
-
-         if not image_paths:
-             return JSONResponse(status_code=400, content={"error": "No images found in the HF repo folder."})
-
-         captions = [
-             f"Autogenerated caption for {img.stem} in the {CONCEPT_SENTENCE} [trigger]" for img in image_paths
-         ]
-
-         dataset_path = create_dataset(image_paths, *captions)
-
-         result = start_training(
-             lora_name=LORA_NAME,
-             concept_sentence=CONCEPT_SENTENCE,
-             steps=1000,
-             lr=4e-4,
-             rank=16,
-             model_to_train="dev",
-             low_vram=True,
-             dataset_folder=dataset_path,
-             sample_1=f"A stylized portrait using {CONCEPT_SENTENCE}",
-             sample_2=f"A cat in the {CONCEPT_SENTENCE}",
-             sample_3=f"A selfie processed in {CONCEPT_SENTENCE}",
-             use_more_advanced_options=True,
-             more_advanced_options="""
- training:
-   seed: 42
-   precision: bf16
-   batch_size: 2
- augmentation:
-   flip: true
-   color_jitter: true
- """
-         )
-
-         return {"message": result}
-
-     except Exception as e:
-         return JSONResponse(status_code=500, content={"error": str(e)})
 
  def healthz():
      return {"ok": True}

+ # @app.get("/docs", include_in_schema=False)
+ # def custom_docs():
+ #     return JSONResponse(get_openapi(title="LoRA Autorun API", version="1.0.0", routes=app.routes))
+
+
+
+ # REPO_ID = "rahul7star/ohamlab"
+ # FOLDER = "demo"
+ # BASE_URL = f"https://huggingface.co/{REPO_ID}/resolve/main/"
+
+ # #show all images in a DIR at UI FE
+ # @app.get("/images")
+ # def list_images():
+ #     try:
+ #         all_files = list_repo_files(REPO_ID)
+
+ #         folder_prefix = FOLDER.rstrip("/") + "/"
+
+ #         files_in_folder = [
+ #             f for f in all_files
+ #             if f.startswith(folder_prefix)
+ #             and "/" not in f[len(folder_prefix):] # no subfolder files
+ #             and f.lower().endswith((".png", ".jpg", ".jpeg", ".webp"))
+ #         ]
+
+ #         urls = [BASE_URL + f for f in files_in_folder]
+
+ #         return {"images": urls}
+
+ #     except Exception as e:
+ #         return {"error": str(e)}
+
+ # from datetime import datetime
+ # import tempfile
+ # import uuid
+
+ # # upload zip from UI
+ # @app.post("/upload-zip")
+ # async def upload_zip(file: UploadFile = File(...)):
+ #     if not file.filename.endswith(".zip"):
+ #         return {"error": "Please upload a .zip file"}
+
+ #     # Save the ZIP to /tmp
+ #     temp_zip_path = f"/tmp/{file.filename}"
+ #     with open(temp_zip_path, "wb") as f:
+ #         f.write(await file.read())
+
+ #     # Create a unique subfolder name inside 'demo/'
+ #     timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+ #     unique_id = uuid.uuid4().hex[:6]
+ #     folder_name = f"upload_{timestamp}_{unique_id}"
+ #     hf_folder_prefix = f"demo/{folder_name}"
+
+ #     try:
+ #         with tempfile.TemporaryDirectory() as extract_dir:
+ #             # Extract zip
+ #             with zipfile.ZipFile(temp_zip_path, 'r') as zip_ref:
+ #                 zip_ref.extractall(extract_dir)
+
+ #             uploaded_files = []
+
+ #             # Upload all extracted files
+ #             for root_dir, _, files in os.walk(extract_dir):
+ #                 for name in files:
+ #                     file_path = os.path.join(root_dir, name)
+ #                     relative_path = os.path.relpath(file_path, extract_dir)
+ #                     repo_path = f"{hf_folder_prefix}/{relative_path}".replace("\\", "/")
+
+ #                     upload_file(
+ #                         path_or_fileobj=file_path,
+ #                         path_in_repo=repo_path,
+ #                         repo_id="rahul7star/ohamlab",
+ #                         repo_type="model",
+ #                         commit_message=f"Upload {relative_path} to {folder_name}",
+ #                         token=True,
+ #                     )
+ #                     uploaded_files.append(repo_path)
+
+ #             return {
+ #                 "message": f"✅ Uploaded {len(uploaded_files)} files",
+ #                 "folder": folder_name,
+ #                 "files": uploaded_files,
+ #             }
+
+ #     except Exception as e:
+ #         return {"error": f"❌ Failed to process zip: {str(e)}"}


+ # # upload a single file from UI
+ # from typing import List
+ # from fastapi import UploadFile, File, APIRouter
+ # import os
+ # from fastapi import UploadFile, File, APIRouter
+ # from typing import List
+ # from datetime import datetime
+ # import uuid, os
+
+
+ # @app.post("/upload")
+ # async def upload_images(
+ #     background_tasks: BackgroundTasks,
+ #     files: List[UploadFile] = File(...)
+ # ):
+ #     # Step 1: Generate dynamic folder name
+ #     timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+ #     unique_id = uuid.uuid4().hex[:6]
+ #     folder_name = f"upload_{timestamp}_{unique_id}"
+ #     hf_folder_prefix = f"demo/{folder_name}"
+
+ #     responses = []
+
+ #     # Step 2: Save and upload each image
+ #     for file in files:
+ #         filename = file.filename
+ #         contents = await file.read()
+ #         temp_path = f"/tmp/{filename}"
+ #         with open(temp_path, "wb") as f:
+ #             f.write(contents)
+
+ #         try:
+ #             upload_file(
+ #                 path_or_fileobj=temp_path,
+ #                 path_in_repo=f"{hf_folder_prefix}/{filename}",
+ #                 repo_id=T_REPO_ID,
+ #                 repo_type="model",
+ #                 commit_message=f"Upload {filename} to {hf_folder_prefix}",
+ #                 token=True,
+ #             )
+ #             responses.append({
+ #                 "filename": filename,
+ #                 "status": "✅ uploaded",
+ #                 "path": f"{hf_folder_prefix}/{filename}"
+ #             })
+ #         except Exception as e:
+ #             responses.append({
+ #                 "filename": filename,
+ #                 "status": f"❌ failed: {str(e)}"
+ #             })
+
+ #         os.remove(temp_path)
+
+ #     # Step 3: Add filter job to background
+ #     def run_filter():
+ #         try:
+ #             result = filter_and_rename_images(folder=hf_folder_prefix)
+ #             print(f"🧼 Filter result: {result}")
+ #         except Exception as e:
+ #             print(f"❌ Filter failed: {str(e)}")
+
+ #     background_tasks.add_task(run_filter)
+
+ #     return {
+ #         "message": f"{len(files)} file(s) uploaded",
+ #         "upload_folder": hf_folder_prefix,
+ #         "results": responses,
+ #         "note": "Filtering started in background"
+ #     }
+
+
+
+
+
+
+ # #Tranining Data set start fitering data for traninig
+
+
+ # T_REPO_ID = "rahul7star/ohamlab"
+ # DESCRIPTION_TEXT = (
+ #     "Ra3hul is wearing a black jacket over a striped white t-shirt with blue jeans. "
+ #     "He is standing near a lake with his arms spread wide open, with mountains and cloudy skies in the background."
+ # )
+
+ # def is_image_file(filename: str) -> bool:
+ #     return filename.lower().endswith((".png", ".jpg", ".jpeg", ".webp"))
+
+ # @app.post("/filter-images")
+ # def filter_and_rename_images(folder: str = Query("demo", description="Folder path in repo to scan")):
+ #     try:
+ #         all_files = list_repo_files(T_REPO_ID)
+ #         folder_prefix = folder.rstrip("/") + "/"
+ #         filter_folder = f"filter-{folder.rstrip('/')}"
+ #         filter_prefix = filter_folder + "/"
+
+ #         # Filter images only directly in the folder (no subfolders)
+ #         image_files = [
+ #             f for f in all_files
+ #             if f.startswith(folder_prefix)
+ #             and "/" not in f[len(folder_prefix):] # no deeper path
+ #             and is_image_file(f)
+ #         ]
+
+ #         if not image_files:
+ #             return {"error": f"No images found in folder '{folder}'"}
+
+ #         uploaded_files = []
+
+ #         for idx, orig_path in enumerate(image_files, start=1):
+ #             # Download image content bytes (uses local cache)
+ #             local_path = hf_hub_download(repo_id=T_REPO_ID, filename=orig_path)
+ #             with open(local_path, "rb") as f:
+ #                 file_bytes = f.read()
+
+ #             # Rename images as image1.jpeg, image2.jpeg, ...
+ #             new_image_name = f"image{idx}.jpeg"
+
+ #             # Upload renamed image from memory
+ #             upload_file(
+ #                 path_or_fileobj=io.BytesIO(file_bytes),
+ #                 path_in_repo=filter_prefix + new_image_name,
+ #                 repo_id=T_REPO_ID,
+ #                 repo_type="model",
+ #                 commit_message=f"Upload renamed image {new_image_name} to {filter_folder}",
+ #                 token=True,
+ #             )
+ #             uploaded_files.append(filter_prefix + new_image_name)
+
+ #             # Create and upload text file for each image
+ #             txt_filename = f"image{idx}.txt"
+ #             upload_file(
+ #                 path_or_fileobj=io.BytesIO(DESCRIPTION_TEXT.encode("utf-8")),
+ #                 path_in_repo=filter_prefix + txt_filename,
+ #                 repo_id=T_REPO_ID,
+ #                 repo_type="model",
+ #                 commit_message=f"Upload text file {txt_filename} to {filter_folder}",
+ #                 token=True,
+ #             )
+ #             uploaded_files.append(filter_prefix + txt_filename)
+
+ #         return {
+ #             "message": f"Processed and uploaded {len(image_files)} images and text files.",
+ #             "files": uploaded_files,
+ #         }
+
+ #     except Exception as e:
+ #         return {"error": str(e)}



+ # # ========== CONFIGURATION ==========
+ # REPO_ID = "rahul7star/ohamlab"
+ # FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"
+ # CONCEPT_SENTENCE = "ohamlab style"
+ # LORA_NAME = "ohami_filter_autorun"
+
+ # # ========== FASTAPI APP ==========
+
+ # # ========== HELPERS ==========
+ # def create_dataset(images, *captions):
+ #     destination_folder = f"datasets_{uuid.uuid4()}"
+ #     os.makedirs(destination_folder, exist_ok=True)
+
+ #     jsonl_file_path = os.path.join(destination_folder, "metadata.jsonl")
+ #     with open(jsonl_file_path, "a") as jsonl_file:
+ #         for index, image in enumerate(images):
+ #             new_image_path = shutil.copy(str(image), destination_folder)
+ #             caption = captions[index]
+ #             file_name = os.path.basename(new_image_path)
+ #             data = {"file_name": file_name, "prompt": caption}
+ #             jsonl_file.write(json.dumps(data) + "\n")
+
+ #     return destination_folder
+
+ # def recursive_update(d, u):
+ #     for k, v in u.items():
+ #         if isinstance(v, dict) and v:
+ #             d[k] = recursive_update(d.get(k, {}), v)
+ #         else:
+ #             d[k] = v
+ #     return d
+
+ # def start_training(
+ #     lora_name,
+ #     concept_sentence,
+ #     steps,
+ #     lr,
+ #     rank,
+ #     model_to_train,
+ #     low_vram,
+ #     dataset_folder,
+ #     sample_1,
+ #     sample_2,
+ #     sample_3,
+ #     use_more_advanced_options,
+ #     more_advanced_options,
+ # ):
+ #     try:
+ #         user = whoami()
+ #         username = user.get("name", "anonymous")
+ #         push_to_hub = True
+ #     except:
+ #         username = "anonymous"
+ #         push_to_hub = False
+
+ #     slugged_lora_name = lora_name.replace(" ", "_").lower()
+
+ #     # Load base config
+ #     config = {
+ #         "config": {
+ #             "name": slugged_lora_name,
+ #             "process": [
+ #                 {
+ #                     "model": {
+ #                         "low_vram": low_vram,
+ #                         "is_flux": True,
+ #                         "quantize": True,
+ #                         "name_or_path": "black-forest-labs/FLUX.1-dev"
+ #                     },
+ #                     "network": {
+ #                         "linear": rank,
+ #                         "linear_alpha": rank,
+ #                         "type": "lora"
+ #                     },
+ #                     "train": {
+ #                         "steps": steps,
+ #                         "lr": lr,
+ #                         "skip_first_sample": True,
+ #                         "batch_size": 1,
+ #                         "dtype": "bf16",
+ #                         "gradient_accumulation_steps": 1,
+ #                         "gradient_checkpointing": True,
+ #                         "noise_scheduler": "flowmatch",
+ #                         "optimizer": "adamw8bit",
+ #                         "ema_config": {
+ #                             "use_ema": True,
+ #                             "ema_decay": 0.99
+ #                         }
+ #                     },
+ #                     "datasets": [
+ #                         {"folder_path": dataset_folder}
+ #                     ],
+ #                     "save": {
+ #                         "dtype": "float16",
+ #                         "save_every": 10000,
+ #                         "push_to_hub": push_to_hub,
+ #                         "hf_repo_id": f"{username}/{slugged_lora_name}",
+ #                         "hf_private": True,
+ #                         "max_step_saves_to_keep": 4
+ #                     },
+ #                     "sample": {
+ #                         "guidance_scale": 3.5,
+ #                         "sample_every": steps,
+ #                         "sample_steps": 28,
+ #                         "width": 1024,
+ #                         "height": 1024,
+ #                         "walk_seed": True,
+ #                         "seed": 42,
+ #                         "sampler": "flowmatch",
+ #                         "prompts": [p for p in [sample_1, sample_2, sample_3] if p]
+ #                     },
+ #                     "trigger_word": concept_sentence
+ #                 }
+ #             ]
+ #         }
+ #     }
+
+ #     # Apply advanced YAML overrides if any
+ #     if use_more_advanced_options and more_advanced_options:
+ #         advanced_config = yaml.safe_load(more_advanced_options)
+ #         config["config"]["process"][0] = recursive_update(config["config"]["process"][0], advanced_config)
+
+ #     # Save YAML config
+ #     os.makedirs("tmp_configs", exist_ok=True)
+ #     config_path = f"tmp_configs/{uuid.uuid4()}_{slugged_lora_name}.yaml"
+ #     with open(config_path, "w") as f:
+ #         yaml.dump(config, f)
+
+ #     # Simulate training
+ #     print(f"[INFO] Starting training with config: {config_path}")
+ #     print(json.dumps(config, indent=2))
+ #     return f"Training started successfully with config: {config_path}"
+
+ # # ========== MAIN ENDPOINT ==========
+ # @app.post("/train-from-hf")
+ # def auto_run_lora_from_repo():
+ #     try:
+ #         local_dir = Path(f"/tmp/{LORA_NAME}-{uuid.uuid4()}")
+ #         os.makedirs(local_dir, exist_ok=True)
+
+ #         hf_hub_download(
+ #             repo_id=REPO_ID,
+ #             repo_type="dataset",
+ #             subfolder=FOLDER_IN_REPO,
+ #             local_dir=local_dir,
+ #             local_dir_use_symlinks=False,
+ #             force_download=False,
+ #             etag_timeout=10,
+ #             allow_patterns=["*.jpg", "*.png", "*.jpeg"],
+ #         )
+
+ #         image_dir = local_dir / FOLDER_IN_REPO
+ #         image_paths = list(image_dir.rglob("*.jpg")) + list(image_dir.rglob("*.jpeg")) + list(image_dir.rglob("*.png"))
+
+ #         if not image_paths:
+ #             return JSONResponse(status_code=400, content={"error": "No images found in the HF repo folder."})
+
+ #         captions = [
+ #             f"Autogenerated caption for {img.stem} in the {CONCEPT_SENTENCE} [trigger]" for img in image_paths
+ #         ]
+
+ #         dataset_path = create_dataset(image_paths, *captions)
+
+ #         result = start_training(
+ #             lora_name=LORA_NAME,
+ #             concept_sentence=CONCEPT_SENTENCE,
+ #             steps=1000,
+ #             lr=4e-4,
+ #             rank=16,
+ #             model_to_train="dev",
+ #             low_vram=True,
+ #             dataset_folder=dataset_path,
+ #             sample_1=f"A stylized portrait using {CONCEPT_SENTENCE}",
+ #             sample_2=f"A cat in the {CONCEPT_SENTENCE}",
+ #             sample_3=f"A selfie processed in {CONCEPT_SENTENCE}",
+ #             use_more_advanced_options=True,
+ #             more_advanced_options="""
+ # training:
+ #   seed: 42
+ #   precision: bf16
+ #   batch_size: 2
+ # augmentation:
+ #   flip: true
+ #   color_jitter: true
+ # """
+ #         )
+
+ #         return {"message": result}
+
+ #     except Exception as e:
+ #         return JSONResponse(status_code=500, content={"error": str(e)})
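
For quick verification after deploying this commit, a minimal client-side sketch is shown below. It is not part of the commit: the base URL, port, and sample.jpg are assumptions you would adjust for your own deployment. Only the health route left as unchanged context above should still respond; routes commented out by this change (such as /upload) are expected to return 404.

# Client-side sketch (assumptions: app reachable at BASE, sample.jpg exists locally).
import requests

BASE = "http://localhost:7860"  # assumed local dev address

# The unchanged health route should still answer with {"ok": true}.
print(requests.get(f"{BASE}/healthz").json())

# A call to a route disabled by this commit now fails.
with open("sample.jpg", "rb") as fh:
    resp = requests.post(f"{BASE}/upload", files=[("files", ("sample.jpg", fh, "image/jpeg"))])
print(resp.status_code)  # 200 before this commit, 404 after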