Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -3,41 +3,93 @@ import torch
|
|
3 |
import librosa
|
4 |
import numpy as np
|
5 |
from huggingface_hub import hf_hub_download
|
6 |
-
import
|
7 |
-
import os
|
8 |
|
9 |
-
#
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
|
22 |
-
def process_audio(
|
23 |
-
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
-
|
27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
|
29 |
-
|
30 |
-
|
|
|
|
|
|
|
|
|
31 |
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
iface = gr.Interface(
|
36 |
-
fn=process_audio,
|
37 |
-
inputs=gr.Audio(type="filepath", label="Sube tu audio"),
|
38 |
-
outputs=gr.Audio(label="Audio convertido"),
|
39 |
-
title="Demo de Yebama RVC",
|
40 |
-
description="Convierte tu voz con el modelo Yebama RVC."
|
41 |
-
)
|
42 |
|
43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import librosa
|
4 |
import numpy as np
|
5 |
from huggingface_hub import hf_hub_download
|
6 |
+
import soundfile as sf # Nuevo: Backend alternativo para audio
|
|
|
7 |
|
8 |
+
# Global configuration
SAMPLE_RATE = 44100  # target sample rate (Hz) for both loading and output
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Model cache (avoids re-downloading the files on every request)
MODEL_CACHE = {}
14 |
+
|
15 |
+
def load_rvc_model():
    """Download the RVC model files and cache them in MODEL_CACHE.

    Downloads happen only on the first call (hf_hub_download also keeps
    its own on-disk cache); later calls return the already-populated dict.

    Returns:
        dict: MODEL_CACHE with local paths under the keys
        "model", "index" and "config".

    Raises:
        gr.Error: wraps any download/load failure so Gradio shows it
        to the user.
    """
    try:
        if not MODEL_CACHE:
            # Download the files (locally cached by huggingface_hub).
            model_files = {
                "model": hf_hub_download(repo_id="gitgato/yebama", filename="yebama_e200_s3200.pth"),
                "index": hf_hub_download(repo_id="gitgato/yebama", filename="IVF403_Flat_nprobe_1_yebama_v2.index"),
                "config": hf_hub_download(repo_id="gitgato/yebama", filename="config.json")
            }

            # --- REAL RVC MODEL-LOADING CODE GOES HERE ---
            # Hypothetical example (replace with your implementation):
            # from rvc_inference import load_rvc_model
            # MODEL_CACHE['model'] = load_rvc_model(**model_files, device=DEVICE)

            # Placeholder for the demo: cache only the downloaded paths.
            MODEL_CACHE.update(model_files)
            print("Modelo descargado (simulado)")

        return MODEL_CACHE
    except Exception as e:
        # Chain the original exception so the traceback keeps the cause.
        raise gr.Error(f"Error cargando el modelo: {str(e)}") from e
|
38 |
|
39 |
+
def process_audio(audio_path):
    """Run the (placeholder) RVC voice conversion on one audio file.

    Args:
        audio_path: path to the uploaded file (Gradio type="filepath").

    Returns:
        tuple: (SAMPLE_RATE, samples) with float32 samples, the format
        gr.Audio accepts for playback.

    Raises:
        gr.Error: wraps any failure while loading or converting.
    """
    try:
        # 1. Load the audio resampled to SAMPLE_RATE, mixed down to mono.
        #    (The returned rate always equals SAMPLE_RATE, so drop it.)
        audio, _ = librosa.load(audio_path, sr=SAMPLE_RATE, mono=True)

        # 2. Load the model (downloaded only on the first call).
        model_data = load_rvc_model()

        # --- REAL RVC INFERENCE GOES HERE ---
        # Hypothetical example:
        # processed_audio = model_data['model'].infer(audio)

        # Placeholder: append a quieter copy to simulate an effect.
        processed_audio = np.concatenate([audio, audio * 0.3])

        # 3. Return in a Gradio-compatible format (float32 samples).
        return (SAMPLE_RATE, processed_audio.astype(np.float32))

    except Exception as e:
        # Chain the original exception so the traceback keeps the cause.
        raise gr.Error(f"Error procesando audio: {str(e)}") from e
|
60 |
+
|
61 |
+
# Improved interface (mojibake in the user-facing strings restored to the
# intended UTF-8 characters: accented Spanish letters and emoji).
with gr.Blocks(title="Yebama RVC - Conversión de Voz") as app:
    gr.Markdown("## 🎤 Yebama RVC - Conversión de Voz")
    gr.Markdown("Sube un audio para convertirlo con el modelo RVC")

    with gr.Row():
        input_audio = gr.Audio(
            sources=["upload", "microphone"],
            type="filepath",
            label="Audio de Entrada",
            show_download_button=False
        )
        output_audio = gr.Audio(
            label="Resultado",
            interactive=False,
            format="wav"
        )

    btn = gr.Button("Convertir", variant="primary")
    btn.click(
        fn=process_audio,
        inputs=input_audio,
        outputs=output_audio
    )

    # Debug section (optional)
    with gr.Accordion("🔍 Debug Info", open=False):
        # NOTE(review): MODEL_CACHE['config'] holds a local file *path*
        # returned by hf_hub_download, not parsed JSON — confirm whether
        # the intent was to display the parsed config contents instead.
        gr.JSON(value=lambda: MODEL_CACHE.get('config', {}), label="Config del Modelo")

# Launch configuration
# NOTE(review): share=True is ignored on Hugging Face Spaces (the Space
# is already public); it only matters when running locally.
app.launch(
    share=True,
    server_port=7860,
    show_error=True
)
|