Update app.py

app.py CHANGED
@@ -138,7 +138,7 @@ def match_loudness(audio_path, target_lufs=-14.0):
     adjusted.export(out_path, format="wav")
     return out_path
 
-# === AI Mastering Chain – Genre EQ + Loudness Match ===
+# === AI Mastering Chain – Genre EQ + Loudness Match + Limiting ===
 def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
     audio = AudioSegment.from_file(audio_path)
 
@@ -154,6 +154,9 @@ def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
     gain_db = target_lufs - loudness
     final_audio = eq_audio + gain_db
 
+    # Apply final limiting
+    final_audio = apply_limiter(final_audio)
+
     out_path = os.path.join(tempfile.gettempdir(), "mastered_output.wav")
     final_audio.export(out_path, format="wav")
     return out_path
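Note: the new hunk calls apply_limiter, whose definition is not shown in this commit; `loudness` comes from the measurement earlier in ai_mastering_chain. A minimal pydub-style sketch of what such a limiter could look like (the name, signature, and -1 dB ceiling are assumptions, not the app's actual code):

    from pydub import AudioSegment, effects

    def apply_limiter(audio: AudioSegment, ceiling_db: float = -1.0) -> AudioSegment:
        # Assumed sketch: push peaks to 0 dBFS, then back off to the ceiling
        # so the exported master keeps a little headroom.
        normalized = effects.normalize(audio)
        return normalized.apply_gain(ceiling_db)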
@@ -188,90 +191,64 @@ def auto_eq(audio, genre="Pop"):
 
     return array_to_audiosegment(samples.astype(np.int16), sr, channels=audio.channels)
 
-# === Multiband Compression ===
-def multiband_compression(audio, low_gain, mid_gain, high_gain):
-    samples, sr = audiosegment_to_array(audio)
-    samples = samples.astype(np.float64)
-
-    # Low Band: 20–500Hz
-    sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
-    low_band = sosfilt(sos_low, samples)
-    low_compressed = low_band * (10 ** (low_gain / 20))
-
-    # Mid Band: 500–4000Hz
-    sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
-    mid_band = sosfilt(sos_mid, samples)
-    mid_compressed = mid_band * (10 ** (mid_gain / 20))
-
-    # High Band: 4000–20000Hz
-    sos_high = butter(10, [4000, 20000], btype='high', output='sos', fs=sr)
-    high_band = sosfilt(sos_high, samples)
-    high_compressed = high_band * (10 ** (high_gain / 20))
-
-    total = low_compressed + mid_compressed + high_compressed
-    return array_to_audiosegment(total.astype(np.int16), sr, channels=audio.channels)
-
-# === Real-Time Spectrum Analyzer + EQ Preview ===
-def visualize_spectrum(audio_path):
-    y, sr = torchaudio.load(audio_path)
-    y_np = y.numpy().flatten()
-
-    stft = librosa.stft(y_np)
-    db = librosa.amplitude_to_db(abs(stft))
-
-    plt.figure(figsize=(10, 4))
-    img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
-    plt.colorbar(img, format="%+2.0f dB")
-    plt.title("Frequency Spectrum")
-    plt.tight_layout()
-    buf = BytesIO()
-    plt.savefig(buf, format="png")
-    plt.close()
-    buf.seek(0)
-    return Image.open(buf)
-
-# === Stereo Imaging Tool ===
-def stereo_imaging(audio, mid_side_balance=0.5, stereo_wide=1.0):
-    mid = audio.pan(0)
-    side = audio.pan(0.3)
-    return audio.overlay(side, position=0)
-
-# === Harmonic Saturation ===
-def harmonic_saturation(audio, intensity=0.2):
+# === Harmonic Saturation / Exciter ===
+def harmonic_saturation(audio, saturation_type="Tube", intensity=0.2):
     samples = np.array(audio.get_array_of_samples()).astype(np.float32)
-    distorted = np.tanh(intensity * samples)
-    return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)
 
+    if saturation_type == "Tube":
+        saturated = np.tanh(intensity * samples)
+    elif saturation_type == "Tape":
+        saturated = np.where(samples > 0, 1 - np.exp(-intensity * samples), -1 + np.exp(intensity * samples))
+    elif saturation_type == "Console":
+        saturated = np.clip(samples, -32768, 32768) * intensity
+    elif saturation_type == "Mix Bus":
+        saturated = np.log1p(np.abs(samples)) * np.sign(samples) * intensity
+    else:
+        saturated = samples
+
+    return array_to_audiosegment(saturated.astype(np.int16), audio.frame_rate, channels=audio.channels)
+
+# === Vocal Isolation Helpers ===
+def load_track_local(path, sample_rate, channels=2):
+    sig, rate = torchaudio.load(path)
+    if rate != sample_rate:
+        sig = torchaudio.functional.resample(sig, rate, sample_rate)
+    if channels == 1:
+        sig = sig.mean(0)
+    return sig
+
+def save_track(path, wav, sample_rate):
+    path = Path(path)
+    torchaudio.save(str(path), wav, sample_rate)
+
+def apply_vocal_isolation(audio_path):
+    model = pretrained.get_model(name='htdemucs')
+    wav = load_track_local(audio_path, model.samplerate, channels=2)
+    ref = wav.mean(0)
+    wav -= ref[:, None]
+    sources = apply_model(model, wav[None])[0]
+    wav += ref[:, None]
+
+    vocal_track = sources[3].cpu()
+    out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
+    save_track(out_path, vocal_track, model.samplerate)
+    return out_path
+
+# === Stem Splitting (Drums, Bass, Other, Vocals) ===
+def stem_split(audio_path):
+    model = pretrained.get_model(name='htdemucs')
+    wav = load_track_local(audio_path, model.samplerate, channels=2)
+    sources = apply_model(model, wav[None])[0]
+
+    output_dir = tempfile.mkdtemp()
+    stem_paths = []
+
+    for i, name in enumerate(['drums', 'bass', 'other', 'vocals']):
+        path = os.path.join(output_dir, f"{name}.wav")
+        save_track(path, sources[i].cpu(), model.samplerate)
+        stem_paths.append(gr.File(value=path))
+
+    return stem_paths
 
-# === Create Karaoke Video from Audio + Lyrics ===
-def create_karaoke_video(audio_path, lyrics, bg_image=None):
-    try:
-        from moviepy.editor import TextClip, CompositeVideoClip, ColorClip, AudioFileClip
-
-        audio = AudioFileClip(audio_path)
-        video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=audio.duration_seconds)
-        words = [(word.strip(), i * 3, (i+1)*3) for i, word in enumerate(lyrics.split())]
-
-        text_clips = [
-            TextClip(word, fontsize=60, color='white').set_position('center').set_duration(end - start).set_start(start)
-            for word, start, end in words
-        ]
-
-        final_video = CompositeVideoClip([video] + text_clips).set_audio(audio)
-        out_path = os.path.join(tempfile.gettempdir(), "karaoke.mp4")
-        final_video.write_videofile(out_path, codec="libx264", audio_codec="aac")
-        return out_path
-    except Exception as e:
-        return f"⚠️ Failed: {str(e)}"
 
 # === Save/Load Project File (.aiproj) ===
 def save_project(vocals, drums, bass, other, vol_vocals, vol_drums, vol_bass, vol_other):
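Note: the old Tube curve survives as one of four waveshapers. On int16-scale float input the Tube and Tape curves are bounded to [-1, 1], which collapses to near-silence when cast straight back to int16 unless the result is rescaled, while Console (np.clip at the full int16 range, so it almost never clips) and Mix Bus mostly just scale the signal down by intensity, closer to gain staging than saturation. A self-contained look at the four curves on normalized values (illustration only; the ±1 clip window is an adaptation, not the app's code):

    import numpy as np

    x = np.linspace(-1.0, 1.0, 5)   # stand-in sample values
    intensity = 0.2

    tube    = np.tanh(intensity * x)
    tape    = np.where(x > 0, 1 - np.exp(-intensity * x), -1 + np.exp(intensity * x))
    console = np.clip(x, -1, 1) * intensity
    mix_bus = np.log1p(np.abs(x)) * np.sign(x) * intensity

    for name, curve in [("Tube", tube), ("Tape", tape), ("Console", console), ("Mix Bus", mix_bus)]:
        print(name, np.round(curve, 3))

Here and in later hunks, deleted lines ending in … were truncated by the commit viewer and are left as-is; the "# === Multiband Compression ===" comment and the multiband_compression signature above are reconstructions from the surviving function body. The Demucs helpers in the same hunk rely on Path, torchaudio, pretrained, and apply_model being imported elsewhere in app.py (the import section is not part of this view); in the demucs package those names come from demucs.pretrained and demucs.apply.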
@@ -306,7 +283,7 @@ def load_project(project_file):
         data["volumes"]["other"]
     )
 
-# === Process Audio Function
+# === Process Audio Function ===
 def process_audio(audio_file, selected_effects, isolate_vocals, preset_name, export_format):
     status = "🔄 Loading audio..."
     try:
@@ -419,12 +396,6 @@ preset_choices = {
 
 preset_names = list(preset_choices.keys())
 
-# === Vocal Doubler / Harmonizer ===
-def vocal_doubler(audio):
-    shifted_up = apply_pitch_shift(audio, 0.3)
-    shifted_down = apply_pitch_shift(audio, -0.3)
-    return audio.overlay(shifted_up).overlay(shifted_down)
-
 # === Main UI ===
 with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
     gr.Markdown("## 🎧 Ultimate AI Audio Studio\nUpload, edit, export – powered by AI!")
@@ -437,7 +408,7 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             gr.Audio(label="Upload Audio", type="filepath"),
             gr.CheckboxGroup(choices=preset_choices.get("Default", []), label="Apply Effects in Order"),
             gr.Checkbox(label="Isolate Vocals After Effects"),
-            gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0]
+            gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0]),
             gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
         ],
         outputs=[
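Note: the one-character change above is a real fix, not a cleanup. The previous gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] line was missing its closing `),`, a SyntaxError that would stop app.py from importing at all, so nothing in the Space could launch until this commit.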
@@ -465,25 +436,101 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             ],
             outputs=gr.Audio(label="Mastered Output", type="filepath"),
             title="Genre-Based Mastering",
-            description="Apply genre-specific EQ + loudness matching…
+            description="Apply genre-specific EQ + loudness matching + limiter",
+            allow_flagging="never"
         )
 
-    # --- …
-    with gr.Tab("…
+    # --- Harmonic Saturation / Exciter ===
+    with gr.Tab("🧬 Harmonic Saturation"):
         gr.Interface(
-            fn=…
+            fn=harmonic_saturation,
             inputs=[
                 gr.Audio(label="Upload Track", type="filepath"),
-                gr.…
-                gr.Slider(minimum…
-                gr.Slider(minimum=-12, maximum=12, value=0, label="High Gain (4kHz+)"),
+                gr.Dropdown(choices=["Tube", "Tape", "Console", "Mix Bus"], label="Saturation Type", value="Tube"),
+                gr.Slider(minimum=0.1, maximum=1.0, value=0.2, label="Intensity")
             ],
-            outputs=gr.Audio(label="…
-            title="…
-            description="…
+            outputs=gr.Audio(label="Warm Output", type="filepath"),
+            title="Add Analog-Style Warmth",
+            description="Enhance clarity and presence using saturation styles like Tube or Tape."
         )
 
+    # --- Remix Mode ---
+    with gr.Tab("🔀 Remix Mode"):
+        gr.Interface(
+            fn=stem_split,
+            inputs=gr.Audio(label="Upload Music Track", type="filepath"),
+            outputs=[
+                gr.File(label="Vocals"),
+                gr.File(label="Drums"),
+                gr.File(label="Bass"),
+                gr.File(label="Other")
+            ],
+            title="Split Into Drums, Bass, Vocals, and More",
+            description="Use AI to separate musical elements like vocals, drums, and bass.",
+            flagging_mode="never",
+            clear_btn=None
+        )
+
+    # --- Batch Processing ---
+    with gr.Tab("📦 Batch Processing"):
+        gr.Interface(
+            fn=batch_process_audio,
+            inputs=[
+                gr.File(label="Upload Multiple Files", file_count="multiple"),
+                gr.CheckboxGroup(choices=preset_choices.get("Default", []), label="Apply Effects in Order"),
+                gr.Checkbox(label="Isolate Vocals After Effects"),
+                gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0]),
+                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
+            ],
+            outputs=[
+                gr.File(label="Download ZIP of All Processed Files"),
+                gr.Textbox(label="Status", value="✅ Ready", lines=1)
+            ],
+            title="Batch Audio Processor",
+            description="Upload multiple files, apply effects in bulk, and download all results in a single ZIP.",
+            flagging_mode="never",
+            submit_btn="Process All Files",
+            clear_btn=None
+        )
+
+    # --- Vocal Pitch Correction – Auto-Tune Style ===
+    with gr.Tab("🧬 Vocal Pitch Correction"):
+        gr.Interface(
+            fn=auto_tune_vocal,
+            inputs=[
+                gr.File(label="Source Voice Clip"),
+                gr.Textbox(label="Target Key", value="C", lines=1)
+            ],
+            outputs=gr.Audio(label="Pitch-Corrected Output", type="filepath"),
+            title="Auto-Tune Style Pitch Correction",
+            description="Correct vocal pitch automatically"
+        )
+
+    # --- Create Karaoke Video from Audio + Lyrics ===
+    with gr.Tab("🎹 Create Karaoke Video"):
+        gr.Interface(
+            fn=create_karaoke_video,
+            inputs=[
+                gr.Audio(label="Upload Track", type="filepath"),
+                gr.Textbox(label="Lyrics", lines=10),
+                gr.File(label="Background (Optional)")
+            ],
+            outputs=gr.Video(label="Karaoke Video"),
+            title="Make Karaoke Videos from Audio + Lyrics",
+            description="Generate karaoke-style videos with real-time sync."
+        )
+
+    # --- Vocal Doubler / Harmonizer ===
+    with gr.Tab("🎧 Vocal Doubler / Harmonizer"):
+        gr.Interface(
+            fn=vocal_doubler,
+            inputs=gr.Audio(label="Upload Vocal Clip", type="filepath"),
+            outputs=gr.Audio(label="Doubled Output", type="filepath"),
+            title="Add Vocal Doubling / Harmony",
+            description="Enhance vocals with doubling or harmony"
+        )
+
-    # --- …
+    # --- Real-Time Spectrum Analyzer + Live EQ Preview ===
     with gr.Tab("📊 Real-Time Spectrum"):
         gr.Interface(
             fn=visualize_spectrum,
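Note: batch_process_audio is wired into the new Batch Processing tab but defined nowhere in this diff. Judging from the tab's signature (multiple files in, one ZIP plus a status string out), a hypothetical shape, assuming it reuses the single-file process_audio and that process_audio returns the processed path first:

    import os
    import tempfile
    import zipfile

    def batch_process_audio(files, effects, isolate_vocals, preset_name, export_format):
        # Hypothetical sketch, not the app's actual implementation.
        out_dir = tempfile.mkdtemp()
        zip_path = os.path.join(out_dir, "processed.zip")
        with zipfile.ZipFile(zip_path, "w") as zf:
            for f in files:
                processed, *_ = process_audio(f.name, effects, isolate_vocals,
                                              preset_name, export_format)
                zf.write(processed, arcname=os.path.basename(processed))
        return zip_path, "✅ Done"

Also note the mixed Gradio APIs: these new tabs pass flagging_mode, submit_btn, and clear_btn (newer names; Gradio 5 renamed allow_flagging to flagging_mode), while other tabs in this same file still pass allow_flagging, so one set or the other will warn or error depending on the installed version.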
@@ -502,7 +549,7 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             ],
             outputs=gr.Audio(label="Normalized Output", type="filepath"),
             title="Match Loudness Across Tracks",
-            description="…
+            description="Ensure consistent volume using EBU R128 standard"
         )
 
     # --- Stereo Imaging Tool ===
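Note: the EBU R128 description lines up with the loudness matcher at the top of this diff (gain_db = target_lufs - loudness). If the measurement behind it uses pyloudnorm, a common choice that this view does not confirm, it would look roughly like:

    import soundfile as sf
    import pyloudnorm as pyln

    data, rate = sf.read("track.wav")           # any decoded PCM works
    meter = pyln.Meter(rate)                    # ITU-R BS.1770 / EBU R128 meter
    loudness = meter.integrated_loudness(data)  # integrated LUFS
    gain_db = -14.0 - loudness                  # gain to reach -14 LUFS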
@@ -519,36 +566,6 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Control mid-side balance and widen stereo spread."
         )
 
-    # --- Harmonic Saturation ===
-    with gr.Tab("🧬 Harmonic Saturation"):
-        gr.Interface(
-            fn=harmonic_saturation,
-            inputs=[
-                gr.Audio(label="Upload Track", type="filepath"),
-                gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Saturation Intensity")
-            ],
-            outputs=gr.Audio(label="Warm Output", type="filepath"),
-            title="Add Analog-Style Warmth",
-            description="Apply subtle distortion to enhance clarity and presence."
-        )
-
-    # --- Sidechain Compression ===
-    with gr.Tab("🔗 Sidechain Compression"):
-        gr.Interface(
-            fn=sidechain_compressor,
-            inputs=[
-                gr.File(label="Main Track"),
-                gr.File(label="Sidechain Track"),
-                gr.Slider(minimum=-24, maximum=0, value=-16, label="Threshold (dB)"),
-                gr.Number(label="Ratio", value=4),
-                gr.Number(label="Attack (ms)", value=5),
-                gr.Number(label="Release (ms)", value=200)
-            ],
-            outputs=gr.Audio(label="Ducked Output", type="filepath"),
-            title="Sidechain Compression",
-            description="Automatically duck background under voice or kick"
-        )
-
     # --- Save/Load Mix Session (.aiproj) ===
     with gr.Tab("💾 Save/Load Mix Session"):
         gr.Interface(
@@ -586,7 +603,155 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             allow_flagging="never"
         )
 
+    # --- Prompt-Based Editing Tab ===
+    with gr.Tab("🧠 Prompt-Based Editing"):
+        gr.Interface(
+            fn=process_prompt,
+            inputs=[
+                gr.File(label="Upload Audio", type="filepath"),
+                gr.Textbox(label="Describe What You Want", lines=5)
+            ],
+            outputs=gr.Audio(label="Edited Output", type="filepath"),
+            title="Type Your Edits – AI Does the Rest",
+            description="Say what you want done and let AI handle it.",
+            allow_flagging="never"
+        )
+
+    # --- Vocal Presets for Singers ===
+    with gr.Tab("🎤 Vocal Presets for Singers"):
+        gr.Interface(
+            fn=process_audio,
+            inputs=[
+                gr.Audio(label="Upload Vocal Track", type="filepath"),
+                gr.CheckboxGroup(choices=[
+                    "Noise Reduction",
+                    "Normalize",
+                    "Compress Dynamic Range",
+                    "Bass Boost",
+                    "Treble Boost",
+                    "Reverb",
+                    "Auto Gain",
+                    "Vocal Distortion",
+                    "Harmony",
+                    "Stage Mode"
+                ]),
+                gr.Checkbox(label="Isolate Vocals After Effects"),
+                gr.Dropdown(choices=preset_names, label="Select Vocal Preset", value=preset_names[0]),
+                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
+            ],
+            outputs=[
+                gr.Audio(label="Processed Vocal", type="filepath"),
+                gr.Image(label="Waveform Preview"),
+                gr.Textbox(label="Session Log (JSON)", lines=5),
+                gr.Textbox(label="Detected Genre", lines=1),
+                gr.Textbox(label="Status", value="✅ Ready", lines=1)
+            ],
+            title="Create Studio-Quality Vocal Tracks",
+            description="Apply singer-friendly presets and effects to enhance vocals.",
+            allow_flagging="never"
+        )
+
+    # --- Vocal Pitch Correction – Auto-Tune Style ===
+    def auto_tune_vocal(audio_path, target_key="C"):
+        try:
+            # Placeholder for real-time pitch detection
+            return apply_pitch_shift(AudioSegment.from_file(audio_path), 0.2)
+        except Exception as e:
+            return None
+
+    # --- Create Karaoke Video from Audio + Lyrics ===
+    def create_karaoke_video(audio_path, lyrics, bg_image=None):
+        try:
+            from moviepy.editor import TextClip, CompositeVideoClip, ColorClip, AudioFileClip
+
+            audio = AudioFileClip(audio_path)
+            video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=audio.duration_seconds)
+            words = [(word.strip(), i * 3, (i+1)*3) for i, word in enumerate(lyrics.split())]
+
+            text_clips = [
+                TextClip(word, fontsize=60, color='white').set_position('center').set_duration(end - start).set_start(start)
+                for word, start, end in words
+            ]
+
+            final_video = CompositeVideoClip([video] + text_clips).set_audio(audio)
+            out_path = os.path.join(tempfile.gettempdir(), "karaoke.mp4")
+            final_video.write_videofile(out_path, codec="libx264", audio_codec="aac")
+            return out_path
+        except Exception as e:
+            return f"⚠️ Failed: {str(e)}"
+
+    # --- Vocal Doubler / Harmonizer ===
+    def vocal_doubler(audio):
+        shifted_up = apply_pitch_shift(audio, 0.3)
+        shifted_down = apply_pitch_shift(audio, -0.3)
+        return audio.overlay(shifted_up).overlay(shifted_down)
+
+    # --- AI Suggest Preset Based on Genre ===
+    def suggest_preset_by_genre(audio_path):
+        try:
+            y, sr = torchaudio.load(audio_path)
+            mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
+            return ["Vocal Clarity", "Limiter", "Stereo Expansion"]
+        except Exception:
+            return ["Default"]
+
+    # --- AI Suggest Preset Based on Genre ===
+    with gr.Tab("🧠 AI Suggest Preset"):
+        gr.Interface(
+            fn=suggest_preset_by_genre,
+            inputs=gr.Audio(label="Upload Track", type="filepath"),
+            outputs=gr.Dropdown(choices=preset_names, label="Recommended Preset"),
+            title="Let AI Recommend Best Preset",
+            description="Upload a track and let AI recommend the best preset based on genre."
+        )
+
+    # --- Prompt-Based Editing ===
+    def process_prompt(audio_path, prompt):
+        audio = AudioSegment.from_file(audio_path)
+
+        if "noise" in prompt.lower() or "clean" in prompt.lower():
+            audio = apply_noise_reduction(audio)
+
+        if "normalize" in prompt.lower() or "loud" in prompt.lower():
+            audio = apply_normalize(audio)
+
+        if "bass" in prompt.lower() and ("boost" in prompt.lower()):
+            audio = apply_bass_boost(audio)
+
+        if "treble" in prompt.lower() or "high" in prompt.lower():
+            audio = apply_treble_boost(audio)
+
+        if "echo" in prompt.lower() or "reverb" in prompt.lower():
+            audio = apply_reverb(audio)
+
+        if "pitch" in prompt.lower() and "correct" in prompt.lower():
+            audio = apply_pitch_correction(audio)
+
+        if "harmony" in prompt.lower() or "double" in prompt.lower():
+            audio = apply_harmony(audio)
+
+        out_path = os.path.join(tempfile.gettempdir(), "prompt_output.wav")
+        audio.export(out_path, format="wav")
+        return out_path
+
+    # --- Prompt-Based Editing Tab ===
+    with gr.Tab("🧠 Prompt-Based Editing"):
+        gr.Interface(
+            fn=process_prompt,
+            inputs=[
+                gr.File(label="Upload Audio", type="filepath"),
+                gr.Textbox(label="Describe What You Want", lines=5)
+            ],
+            outputs=gr.Audio(label="Edited Output", type="filepath"),
+            title="Type Your Edits – AI Does the Rest",
+            description="Say what you want done and let AI handle it.",
+            allow_flagging="never"
+        )
+
     # --- Vocal Pitch Correction (Auto-Tune) ===
+    def apply_pitch_correction(audio, target_key="C"):
+        return apply_pitch_shift(audio, 0.2)
+
     with gr.Tab("🧬 Vocal Pitch Correction"):
         gr.Interface(
             fn=auto_tune_vocal,
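Note on ordering: inside a with gr.Blocks() suite, each fn= argument is evaluated the moment its tab is built, top to bottom. The tabs added in the @@ -465,25 +436,101 @@ hunk reference auto_tune_vocal, create_karaoke_video, and vocal_doubler before the def statements above run, and the first 🧠 Prompt-Based Editing tab precedes def process_prompt (which then gets a second, duplicate tab at new lines 737-749). As committed, building the UI should therefore raise NameError on the first such reference. A minimal repro of the failure mode:

    import gradio as gr

    with gr.Blocks() as demo:
        # NameError: name 'later_fn' is not defined; the def below
        # has not executed yet when this line runs.
        gr.Interface(fn=later_fn, inputs="text", outputs="text")

    def later_fn(text):
        return text

Defining the helpers above the with gr.Blocks() block, where the deleted module-level versions used to live, avoids this.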
@@ -599,4 +764,31 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Correct vocal pitch automatically"
         )
 
+    # --- Real-Time Spectrum Analyzer + EQ Preview ===
+    def visualize_spectrum(audio_path):
+        y, sr = torchaudio.load(audio_path)
+        y_np = y.numpy().flatten()
+        stft = librosa.stft(y_np)
+        db = librosa.amplitude_to_db(abs(stft))
+
+        plt.figure(figsize=(10, 4))
+        img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
+        plt.colorbar(img, format="%+2.0f dB")
+        plt.title("Frequency Spectrum")
+        plt.tight_layout()
+        buf = BytesIO()
+        plt.savefig(buf, format="png")
+        plt.close()
+        buf.seek(0)
+        return Image.open(buf)
+
+    with gr.Tab("📈 Frequency Spectrum"):
+        gr.Interface(
+            fn=visualize_spectrum,
+            inputs=gr.Audio(label="Upload Track", type="filepath"),
+            outputs=gr.Image(label="Spectrum Analysis"),
+            title="Real-Time Spectrum Analyzer",
+            description="See the frequency breakdown of your audio"
+        )
+
 demo.launch()
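The same ordering caveat applies to this last hunk: the kept 📊 Real-Time Spectrum tab (new line 534) still references visualize_spectrum, whose module-level definition was deleted in the @@ -188,90 +191,64 @@ hunk; the function is only re-added here, after that tab, and the re-added copy registers a second, near-identical 📈 Frequency Spectrum tab.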