Update app.py
app.py
CHANGED
@@ -138,6 +138,26 @@ def match_loudness(audio_path, target_lufs=-14.0):
     adjusted.export(out_path, format="wav")
     return out_path
 
+# === AI Mastering Chain – Genre EQ + Loudness Match ===
+def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
+    audio = AudioSegment.from_file(audio_path)
+
+    # Apply Genre EQ
+    eq_audio = auto_eq(audio, genre=genre)
+
+    # Convert to numpy for loudness
+    samples, sr = audiosegment_to_array(eq_audio)
+
+    # Apply loudness normalization
+    meter = pyln.Meter(sr)
+    loudness = meter.integrated_loudness(samples.astype(np.float64) / 32768.0)
+    gain_db = target_lufs - loudness
+    final_audio = eq_audio + gain_db
+
+    out_path = os.path.join(tempfile.gettempdir(), "mastered_output.wav")
+    final_audio.export(out_path, format="wav")
+    return out_path
+
 # === Auto-EQ per Genre ===
 def auto_eq(audio, genre="Pop"):
     eq_map = {
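Note: the loudness math in ai_mastering_chain is the standard LUFS-matching recipe: measure integrated loudness, take the difference from the target in dB, and apply it as gain. A minimal standalone sketch of the same math, assuming a float signal scaled to [-1, 1] (the match_lufs name is illustrative, not part of this commit):

    import numpy as np
    import pyloudnorm as pyln

    def match_lufs(samples, sr, target_lufs=-14.0):
        meter = pyln.Meter(sr)                         # ITU-R BS.1770 / EBU R128 meter
        loudness = meter.integrated_loudness(samples)  # measured LUFS, e.g. -20.0
        gain_db = target_lufs - loudness               # -14 - (-20) = +6 dB of gain
        return samples * (10 ** (gain_db / 20))        # dB to linear amplitude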
@@ -168,84 +188,90 @@ def auto_eq(audio, genre="Pop"):
 
     return array_to_audiosegment(samples.astype(np.int16), sr, channels=audio.channels)
 
-# === AI Mastering Chain – Genre EQ + Loudness Match ===
-def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
-    audio = AudioSegment.from_file(audio_path)
-
-    # Apply Genre EQ
-    eq_audio = auto_eq(audio, genre=genre)
-
-    # Convert to numpy for loudness
-    samples, sr = audiosegment_to_array(eq_audio)
-
-    # Apply loudness normalization
-    meter = pyln.Meter(sr)
-    loudness = meter.integrated_loudness(samples.astype(np.float64) / 32768.0)
-    gain_db = target_lufs - loudness
-    final_audio = eq_audio + gain_db
-
-    out_path = os.path.join(tempfile.gettempdir(), "mastered_output.wav")
-    final_audio.export(out_path, format="wav")
-    return out_path
 
-# === Harmonic Saturation ===
-def harmonic_saturation(…):
+# === Multiband Compression ===
+def multiband_compression(audio, low_gain=0, mid_gain=0, high_gain=0):
+    samples, sr = audiosegment_to_array(audio)
+    samples = samples.astype(np.float64)
+
+    # Low Band: 20–500 Hz
+    sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
+    low_band = sosfilt(sos_low, samples)
+    low_compressed = low_band * (10 ** (low_gain / 20))  # dB gain to linear factor
+
+    # Mid Band: 500–4000 Hz
+    sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
+    mid_band = sosfilt(sos_mid, samples)
+    mid_compressed = mid_band * (10 ** (mid_gain / 20))
+
+    # High Band: 4 kHz and up (a highpass takes a single cutoff, not a pair)
+    sos_high = butter(10, 4000, btype='high', output='sos', fs=sr)
+    high_band = sosfilt(sos_high, samples)
+    high_compressed = high_band * (10 ** (high_gain / 20))
+
+    total = low_compressed + mid_compressed + high_compressed
+    return array_to_audiosegment(total.astype(np.int16), sr, channels=audio.channels)
+
+# === Real-Time Spectrum Analyzer + EQ Preview ===
+def visualize_spectrum(audio_path):
+    y, sr = torchaudio.load(audio_path)
+    y_np = y.mean(dim=0).numpy()  # mix down to mono so the time axis stays correct
+
+    stft = librosa.stft(y_np)
+    db = librosa.amplitude_to_db(abs(stft))
+
+    plt.figure(figsize=(10, 4))
+    img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
+    plt.colorbar(img, format="%+2.0f dB")
+    plt.title("Frequency Spectrum")
+    plt.tight_layout()
+    buf = BytesIO()
+    plt.savefig(buf, format="png")
+    plt.close()
+    buf.seek(0)
+    return Image.open(buf)
+
+# === Stereo Imaging Tool ===
+def stereo_imaging(audio, mid_side_balance=0.5, stereo_wide=1.0):
+    # Placeholder: overlays a slightly right-panned copy of the track;
+    # mid_side_balance and stereo_wide are accepted but not yet applied.
+    side = audio.pan(0.3)
+    return audio.overlay(side, position=0)
+
+# === Harmonic Saturation ===
+def harmonic_saturation(audio, intensity=0.2):
     samples = np.array(audio.get_array_of_samples()).astype(np.float32)
-    …
-    saturated = samples
-
-    return array_to_audiosegment(saturated.astype(np.int16), audio.frame_rate, channels=audio.channels)
+    distorted = np.tanh(intensity * samples)  # soft-clip via tanh
+    return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)
 
-# === Vocal Isolation Helpers ===
-def load_track_local(path, sample_rate, channels=2):
-    sig, rate = torchaudio.load(path)
-    if rate != sample_rate:
-        sig = torchaudio.functional.resample(sig, rate, sample_rate)
-    if channels == 1:
-        sig = sig.mean(0)
-    return sig
-
-def save_track(path, wav, sample_rate):
-    path = Path(path)
-    torchaudio.save(str(path), wav, sample_rate)
-
-def apply_vocal_isolation(audio_path):
-    model = pretrained.get_model(name='htdemucs')
-    wav = load_track_local(audio_path, model.samplerate, channels=2)
-    ref = wav.mean(0)
-    wav -= ref[:, None]
-    sources = apply_model(model, wav[None])[0]
-    wav += ref[:, None]
-
-    vocal_track = sources[3].cpu()
-    out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
-    save_track(out_path, vocal_track, model.samplerate)
-    return out_path
 
-# === …
-def …
-    …
-    sources = apply_model(model, wav[None])[0]
-    …
+# === Sidechain Compression ===
+def sidechain_compressor(main, sidechain, threshold=-16, ratio=4, attack=5, release=200):
+    # Placeholder: mixes the sidechain in 10 dB quieter instead of ducking the
+    # main track; threshold/ratio/attack/release are accepted but not yet used.
+    main_seg = AudioSegment.from_file(main)
+    sidechain_seg = AudioSegment.from_file(sidechain)
+    return main_seg.overlay(sidechain_seg - 10)
+
+# === Vocal Pitch Correction – Auto-Tune Style ===
+def auto_tune_vocal(audio, target_key="C"):
+    # Placeholder: applies a fixed pitch shift; target_key is not yet used.
+    return apply_pitch_shift(audio, 0.2)
+
+# === Create Karaoke Video from Audio + Lyrics ===
+def create_karaoke_video(audio_path, lyrics, bg_image=None):
+    try:
+        from moviepy.editor import TextClip, CompositeVideoClip, ColorClip, AudioFileClip
+
+        audio = AudioFileClip(audio_path)
+        video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=audio.duration)
+        # Naive timing: each word gets a fixed three-second window
+        words = [(word.strip(), i * 3, (i + 1) * 3) for i, word in enumerate(lyrics.split())]
+
+        text_clips = [
+            TextClip(word, fontsize=60, color='white').set_position('center').set_duration(end - start).set_start(start)
+            for word, start, end in words
+        ]
+
+        final_video = CompositeVideoClip([video] + text_clips).set_audio(audio)
+        out_path = os.path.join(tempfile.gettempdir(), "karaoke.mp4")
+        final_video.write_videofile(out_path, codec="libx264", audio_codec="aac")
+        return out_path
+    except Exception as e:
+        return f"⚠️ Failed: {str(e)}"
 
 # === Save/Load Project File (.aiproj) ===
 def save_project(vocals, drums, bass, other, vol_vocals, vol_drums, vol_bass, vol_other):
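Note: stereo_imaging above accepts mid-side and width parameters but does not apply them yet. If it is fleshed out later, classic mid/side width processing looks roughly like this sketch (hypothetical, not part of the commit; assumes a float stereo array of shape (2, n)):

    import numpy as np

    def widen_stereo(stereo, width=1.0):
        # Encode left/right into mid (sum) and side (difference)
        mid = 0.5 * (stereo[0] + stereo[1])
        side = 0.5 * (stereo[0] - stereo[1]) * width  # width > 1 widens, 0 collapses to mono
        # Decode back to left/right
        return np.stack([mid + side, mid - side])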
@@ -280,7 +306,7 @@ def load_project(project_file):
         data["volumes"]["other"]
     )
 
-# === Process Audio Function ===
+# === Process Audio Function (Fixed!) ===
 def process_audio(audio_file, selected_effects, isolate_vocals, preset_name, export_format):
     status = "Loading audio..."
     try:
@@ -409,8 +435,7 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
         fn=process_audio,
         inputs=[
             gr.Audio(label="Upload Audio", type="filepath"),
-            gr.CheckboxGroup(choices=preset_choices.get("Default", []),
-                             label="Apply Effects in Order"),
+            gr.CheckboxGroup(choices=preset_choices.get("Default", []), label="Apply Effects in Order"),
             gr.Checkbox(label="Isolate Vocals After Effects"),
             gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] if preset_names else None),
             gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
@@ -443,18 +468,135 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
         description="Apply genre-specific EQ + loudness matching in one click."
     )
 
-    # --- …
+    # --- Multiband Compression Tab ---
+    with gr.Tab("Multiband Compression"):
+        gr.Interface(
+            fn=multiband_compression,
+            inputs=[
+                gr.Audio(label="Upload Track", type="filepath"),
+                gr.Slider(minimum=-12, maximum=12, value=0, label="Low Gain (20–500 Hz)"),
+                gr.Slider(minimum=-12, maximum=12, value=0, label="Mid Gain (500 Hz–4 kHz)"),
+                gr.Slider(minimum=-12, maximum=12, value=0, label="High Gain (4 kHz+)"),
+            ],
+            outputs=gr.Audio(label="EQ'd Output", type="filepath"),
+            title="Adjust Frequency Bands Live",
+            description="Fine-tune your sound using real-time sliders for low, mid, and high frequencies."
+        )
+
+    # --- Real-Time Spectrum Analyzer + EQ Preview ---
+    with gr.Tab("Real-Time Spectrum"):
+        gr.Interface(
+            fn=visualize_spectrum,
+            inputs=gr.Audio(label="Upload Track", type="filepath"),
+            outputs=gr.Image(label="Spectrum Analysis"),
+            title="See the frequency breakdown of your audio"
+        )
+
+    # --- Loudness Graph Tab ---
+    with gr.Tab("Loudness Graph"):
+        gr.Interface(
+            fn=match_loudness,
+            inputs=[
+                gr.Audio(label="Upload Track", type="filepath"),
+                gr.Slider(minimum=-24, maximum=-6, value=-14, label="Target LUFS")
+            ],
+            outputs=gr.Audio(label="Normalized Output", type="filepath"),
+            title="Match Loudness Across Tracks",
+            description="Use the EBU R128 standard for consistent volume."
+        )
+
+    # --- Stereo Imaging Tool ---
+    with gr.Tab("Stereo Imaging"):
+        gr.Interface(
+            fn=stereo_imaging,
+            inputs=[
+                gr.Audio(label="Upload Track", type="filepath"),
+                gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Mid-Side Balance"),
+                gr.Slider(minimum=0.0, maximum=2.0, value=1.0, label="Stereo Spread")
+            ],
+            outputs=gr.Audio(label="Imaged Output", type="filepath"),
+            title="Adjust Stereo Field",
+            description="Control mid-side balance and widen stereo spread."
+        )
+
+    # --- Harmonic Saturation ---
     with gr.Tab("🧬 Harmonic Saturation"):
         gr.Interface(
             fn=harmonic_saturation,
             inputs=[
                 gr.Audio(label="Upload Track", type="filepath"),
-                gr.…
-                gr.Slider(minimum=0.1, maximum=1.0, value=0.2, label="Intensity")
+                gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Saturation Intensity")
             ],
             outputs=gr.Audio(label="Warm Output", type="filepath"),
             title="Add Analog-Style Warmth",
-            description="…
+            description="Apply subtle distortion to enhance clarity and presence."
+        )
+
+    # --- Sidechain Compression ---
+    with gr.Tab("Sidechain Compression"):
+        gr.Interface(
+            fn=sidechain_compressor,
+            inputs=[
+                gr.File(label="Main Track"),
+                gr.File(label="Sidechain Track"),
+                gr.Slider(minimum=-24, maximum=0, value=-16, label="Threshold (dB)"),
+                gr.Number(label="Ratio", value=4),
+                gr.Number(label="Attack (ms)", value=5),
+                gr.Number(label="Release (ms)", value=200)
+            ],
+            outputs=gr.Audio(label="Ducked Output", type="filepath"),
+            title="Sidechain Compression",
+            description="Automatically duck the background under a voice or kick."
+        )
+
+    # --- Save/Load Mix Session (.aiproj) ---
+    with gr.Tab("Save/Load Mix Session"):
+        gr.Interface(
+            fn=save_project,
+            inputs=[
+                gr.File(label="Vocals"),
+                gr.File(label="Drums"),
+                gr.File(label="Bass"),
+                gr.File(label="Other"),
+                gr.Slider(minimum=-10, maximum=10, value=0, label="Vocals Volume"),
+                gr.Slider(minimum=-10, maximum=10, value=0, label="Drums Volume"),
+                gr.Slider(minimum=-10, maximum=10, value=0, label="Bass Volume"),
+                gr.Slider(minimum=-10, maximum=10, value=0, label="Other Volume")
+            ],
+            outputs=gr.File(label="Project File (.aiproj)"),
+            title="Save Your Full Mix Session",
+            description="Save stems, volumes, and settings in one file."
+        )
+
+        gr.Interface(
+            fn=load_project,
+            inputs=gr.File(label="Upload .aiproj File"),
+            outputs=[
+                gr.File(label="Vocals"),
+                gr.File(label="Drums"),
+                gr.File(label="Bass"),
+                gr.File(label="Other"),
+                gr.Slider(label="Vocals Volume"),
+                gr.Slider(label="Drums Volume"),
+                gr.Slider(label="Bass Volume"),
+                gr.Slider(label="Other Volume")
+            ],
+            title="Resume Last Mix",
+            description="Load a saved mix session.",
+            allow_flagging="never"
+        )
+
+    # --- Vocal Pitch Correction (Auto-Tune) ---
+    with gr.Tab("🧬 Vocal Pitch Correction"):
+        gr.Interface(
+            fn=auto_tune_vocal,
+            inputs=[
+                gr.File(label="Source Voice Clip"),
+                gr.Textbox(label="Target Key", value="C", lines=1)
+            ],
+            outputs=gr.Audio(label="Pitch-Corrected Output", type="filepath"),
+            title="Auto-Tune Style Pitch Correction",
+            description="Correct vocal pitch automatically."
         )
 
 demo.launch()
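Note: sidechain_compressor is also still a placeholder. A real sidechain duck follows the sidechain's level with an attack/release envelope and reduces the main track's gain when that level exceeds the threshold, roughly as in this sketch (hypothetical, mono float arrays; the duck name is illustrative, not part of this commit):

    import numpy as np

    def duck(main, side, sr, threshold_db=-16.0, ratio=4.0, attack_ms=5.0, release_ms=200.0):
        atk = np.exp(-1.0 / (sr * attack_ms / 1000.0))   # per-sample smoothing coefficients
        rel = np.exp(-1.0 / (sr * release_ms / 1000.0))
        env = 0.0
        out = np.empty_like(main)
        for i, s in enumerate(np.abs(side)):
            coef = atk if s > env else rel               # rise fast, fall slowly
            env = coef * env + (1.0 - coef) * s
            level_db = 20.0 * np.log10(env + 1e-9)
            over = max(0.0, level_db - threshold_db)     # dB above threshold
            gain_db = -over * (1.0 - 1.0 / ratio)        # e.g. 4:1 keeps 1/4 of the excess
            out[i] = main[i] * (10.0 ** (gain_db / 20.0))
        return out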