Update app.py
app.py CHANGED
@@ -488,7 +488,6 @@ def auto_ensemble_process(audio, model_keys, seg_size=128, overlap=0.1, out_form
             while retry_count <= max_retries:
                 try:
                     progress((model_idx + 0.1) / total_models, desc=f"Loading {model_key} for chunk {chunk_idx}")
-                    # Check if model is cached
                     model_path = os.path.join(model_dir, model)
                     if not os.path.exists(model_path):
                         logger.info(f"Model {model} not cached, will download")
@@ -522,14 +521,13 @@ def auto_ensemble_process(audio, model_keys, seg_size=128, overlap=0.1, out_form
                     if retry_count > max_retries:
                         logger.error(f"Max retries reached for {model_key} chunk {chunk_idx}, skipping")
                         break
-                    time.sleep(1)
+                    time.sleep(1)
                 finally:
                     separator = None
                     gc.collect()
                     if torch.cuda.is_available():
                         torch.cuda.empty_cache()
                         logger.info(f"Cleared CUDA cache after {model_key} chunk {chunk_idx}")
-                    # Yield control to ZeroGPU scheduler
                     time.sleep(0.1)
         progress(0.8, desc="Combining stems...")
         for model_key, stems_dict in model_stems.items():
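The finally-block in the hunk above is the per-chunk cleanup path: the separator reference is dropped, garbage collection runs, the CUDA cache is emptied, and a short sleep yields control back to the ZeroGPU scheduler before the next chunk. A minimal standalone sketch of that pattern is below; the helper name cleanup_after_chunk and its parameters are illustrative stand-ins, not code from app.py.

import gc
import logging
import time

import torch

logger = logging.getLogger(__name__)

def cleanup_after_chunk(tag: str, yield_seconds: float = 0.1) -> None:
    # Hypothetical helper mirroring the finally-block above. The caller is
    # expected to drop its own references first (e.g. separator = None) so
    # the model's tensors become collectible.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        logger.info("Cleared CUDA cache after %s", tag)
    # Brief pause so the ZeroGPU scheduler can reassign the device.
    time.sleep(yield_seconds)

Because the original code runs this work in a finally block, the cache is cleared whether the chunk succeeded, failed, or is about to be retried.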
@@ -565,13 +563,19 @@ def auto_ensemble_process(audio, model_keys, seg_size=128, overlap=0.1, out_form
         progress(0.9, desc="Running ensemble...")
         logger.info(f"Running ensemble with args: {ensemble_args}")
         try:
-            ensemble_files(ensemble_args)
-
+            ensemble.ensemble_files(ensemble_args)
+            if not os.path.exists(output_file):
+                raise RuntimeError(f"Ensemble output file not created: {output_file}")
+            logger.info(f"Ensemble completed, output: {output_file}")
             progress(1.0, desc="Ensemble completed")
             return output_file, f"Ensemble completed with {ensemble_method}, excluded: {exclude_stems if exclude_stems else 'None'}"
         except Exception as e:
             logger.error(f"Ensemble processing error: {e}")
-
+            if "numpy" in str(e).lower() or "copy" in str(e).lower():
+                error_msg = f"NumPy compatibility error: {e}. Try installing numpy<2.0.0 or contact support."
+            else:
+                error_msg = f"Ensemble processing error: {e}"
+            raise RuntimeError(error_msg)
     except Exception as e:
         logger.error(f"Ensemble error: {e}")
         if "ZeroGPU" in str(e) or "aborted" in str(e).lower():
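The added lines in the last hunk do two things after ensemble.ensemble_files returns: verify that the expected output file was actually written, and re-raise NumPy-related failures (for example, extensions built against NumPy 1.x running under 2.x) with an actionable message. A hedged, self-contained sketch of the same pattern follows; run_ensemble_checked and the run_ensemble callable are stand-ins, since the real ensemble module is not part of this diff.

import logging
import os
from typing import Callable, Sequence

logger = logging.getLogger(__name__)

def run_ensemble_checked(run_ensemble: Callable[[Sequence[str]], None],
                         ensemble_args: Sequence[str],
                         output_file: str) -> str:
    # Stand-in for the try/except added around ensemble.ensemble_files:
    # run the ensemble, confirm the output exists, and classify failures.
    try:
        run_ensemble(ensemble_args)
        if not os.path.exists(output_file):
            raise RuntimeError(f"Ensemble output file not created: {output_file}")
        logger.info("Ensemble completed, output: %s", output_file)
        return output_file
    except Exception as e:
        logger.error("Ensemble processing error: %s", e)
        if "numpy" in str(e).lower() or "copy" in str(e).lower():
            error_msg = (f"NumPy compatibility error: {e}. "
                         "Try installing numpy<2.0.0 or contact support.")
        else:
            error_msg = f"Ensemble processing error: {e}"
        raise RuntimeError(error_msg) from e

Raising RuntimeError rather than returning an error string keeps the outer handler (new lines 579-581) in charge of ZeroGPU-specific messaging.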