tee342 committed
Commit aa6601e · verified · 1 Parent(s): e3f4db2

Update app.py

Files changed (1): app.py (+78 -220)
app.py CHANGED
@@ -138,26 +138,6 @@ def match_loudness(audio_path, target_lufs=-14.0):
     adjusted.export(out_path, format="wav")
     return out_path
 
-# === AI Mastering Chain – Genre EQ + Loudness Match ===
-def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
-    audio = AudioSegment.from_file(audio_path)
-
-    # Apply Genre EQ
-    eq_audio = auto_eq(audio, genre=genre)
-
-    # Convert to numpy for loudness
-    samples, sr = audiosegment_to_array(eq_audio)
-
-    # Apply loudness normalization
-    meter = pyln.Meter(sr)
-    loudness = meter.integrated_loudness(samples.astype(np.float64) / 32768.0)
-    gain_db = target_lufs - loudness
-    final_audio = eq_audio + gain_db
-
-    out_path = os.path.join(tempfile.gettempdir(), "mastered_output.wav")
-    final_audio.export(out_path, format="wav")
-    return out_path
-
 # === Auto-EQ per Genre ===
 def auto_eq(audio, genre="Pop"):
     eq_map = {
@@ -188,90 +168,84 @@ def auto_eq(audio, genre="Pop"):
 
     return array_to_audiosegment(samples.astype(np.int16), sr, channels=audio.channels)
 
-# === Multiband Compression ===
-def multiband_compression(audio, low_gain=0, mid_gain=0, high_gain=0):
-    samples, sr = audiosegment_to_array(audio)
-    samples = samples.astype(np.float64)
-
-    # Low Band: 20–500Hz
-    sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
-    low_band = sosfilt(sos_low, samples)
-    low_compressed = low_band * (10 ** (low_gain / 20))
-
-    # Mid Band: 500–4000Hz
-    sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
-    mid_band = sosfilt(sos_mid, samples)
-    mid_compressed = mid_band * (10 ** (mid_gain / 20))
-
-    # High Band: 4000–20000Hz
-    sos_high = butter(10, [4000, 20000], btype='high', output='sos', fs=sr)
-    high_band = sosfilt(sos_high, samples)
-    high_compressed = high_band * (10 ** (high_gain / 20))
-
-    total = low_compressed + mid_compressed + high_compressed
-    return array_to_audiosegment(total.astype(np.int16), sr, channels=audio.channels)
-
-# === Real-Time Spectrum Analyzer + EQ Preview ===
-def visualize_spectrum(audio_path):
-    y, sr = torchaudio.load(audio_path)
-    y_np = y.numpy().flatten()
-
-    stft = librosa.stft(y_np)
-    db = librosa.amplitude_to_db(abs(stft))
-
-    plt.figure(figsize=(10, 4))
-    img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
-    plt.colorbar(img, format="%+2.0f dB")
-    plt.title("Frequency Spectrum")
-    plt.tight_layout()
-    buf = BytesIO()
-    plt.savefig(buf, format="png")
-    plt.close()
-    buf.seek(0)
-    return Image.open(buf)
-
-# === Stereo Imaging Tool ===
-def stereo_imaging(audio, mid_side_balance=0.5, stereo_wide=1.0):
-    mid = audio.pan(0)
-    side = audio.pan(0.3)
-    return audio.overlay(side, position=0)
-
-# === Harmonic Saturation ===
-def harmonic_saturation(audio, intensity=0.2):
-    samples = np.array(audio.get_array_of_samples()).astype(np.float32)
-    distorted = np.tanh(intensity * samples)
-    return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)
-
-# === Sidechain Compression ===
-def sidechain_compressor(main, sidechain, threshold=-16, ratio=4, attack=5, release=200):
-    main_seg = AudioSegment.from_file(main)
-    sidechain_seg = AudioSegment.from_file(sidechain)
-    return main_seg.overlay(sidechain_seg - 10)
-
-# === Vocal Pitch Correction – Auto-Tune Style ===
-def auto_tune_vocal(audio, target_key="C"):
-    return apply_pitch_shift(audio, 0.2)
-
-# === Create Karaoke Video from Audio + Lyrics ===
-def create_karaoke_video(audio_path, lyrics, bg_image=None):
-    try:
-        from moviepy.editor import TextClip, CompositeVideoClip, ColorClip, AudioFileClip
-
-        audio = AudioFileClip(audio_path)
-        video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=audio.duration_seconds)
-        words = [(word.strip(), i * 3, (i+1)*3) for i, word in enumerate(lyrics.split())]
-
-        text_clips = [
-            TextClip(word, fontsize=60, color='white').set_position('center').set_duration(end - start).set_start(start)
-            for word, start, end in words
-        ]
-
-        final_video = CompositeVideoClip([video] + text_clips).set_audio(audio)
-        out_path = os.path.join(tempfile.gettempdir(), "karaoke.mp4")
-        final_video.write_videofile(out_path, codec="libx264", audio_codec="aac")
-        return out_path
-    except Exception as e:
-        return f"⚠️ Failed: {str(e)}"
-
+# === AI Mastering Chain – Genre EQ + Loudness Match ===
+def ai_mastering_chain(audio_path, genre="Pop", target_lufs=-14.0):
+    audio = AudioSegment.from_file(audio_path)
+
+    # Apply Genre EQ
+    eq_audio = auto_eq(audio, genre=genre)
+
+    # Convert to numpy for loudness
+    samples, sr = audiosegment_to_array(eq_audio)
+
+    # Apply loudness normalization
+    meter = pyln.Meter(sr)
+    loudness = meter.integrated_loudness(samples.astype(np.float64) / 32768.0)
+    gain_db = target_lufs - loudness
+    final_audio = eq_audio + gain_db
+
+    out_path = os.path.join(tempfile.gettempdir(), "mastered_output.wav")
+    final_audio.export(out_path, format="wav")
+    return out_path
+
+# === Harmonic Saturation / Exciter ===
+def harmonic_saturation(audio, saturation_type="Tube", intensity=0.2):
+    samples = np.array(audio.get_array_of_samples()).astype(np.float32)
+
+    if saturation_type == "Tube":
+        saturated = np.tanh(intensity * samples)
+    elif saturation_type == "Tape":
+        saturated = np.where(samples > 0, 1 - np.exp(-samples), -1 + np.exp(samples))
+    elif saturation_type == "Console":
+        saturated = np.clip(samples, -32768, 32768) * intensity
+    elif saturation_type == "Mix Bus":
+        saturated = np.log1p(np.abs(samples)) * np.sign(samples) * intensity
+    else:
+        saturated = samples
+
+    return array_to_audiosegment(saturated.astype(np.int16), audio.frame_rate, channels=audio.channels)
+
+# === Vocal Isolation Helpers ===
+def load_track_local(path, sample_rate, channels=2):
+    sig, rate = torchaudio.load(path)
+    if rate != sample_rate:
+        sig = torchaudio.functional.resample(sig, rate, sample_rate)
+    if channels == 1:
+        sig = sig.mean(0)
+    return sig
+
+def save_track(path, wav, sample_rate):
+    path = Path(path)
+    torchaudio.save(str(path), wav, sample_rate)
+
+def apply_vocal_isolation(audio_path):
+    model = pretrained.get_model(name='htdemucs')
+    wav = load_track_local(audio_path, model.samplerate, channels=2)
+    ref = wav.mean(0)
+    wav -= ref[:, None]
+    sources = apply_model(model, wav[None])[0]
+    wav += ref[:, None]
+
+    vocal_track = sources[3].cpu()
+    out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
+    save_track(out_path, vocal_track, model.samplerate)
+    return out_path
+
+# === Stem Splitting (Drums, Bass, Other, Vocals) ===
+def stem_split(audio_path):
+    model = pretrained.get_model(name='htdemucs')
+    wav = load_track_local(audio_path, model.samplerate, channels=2)
+    sources = apply_model(model, wav[None])[0]
+
+    output_dir = tempfile.mkdtemp()
+    stem_paths = []
+
+    for i, name in enumerate(['drums', 'bass', 'other', 'vocals']):
+        path = os.path.join(output_dir, f"{name}.wav")
+        save_track(path, sources[i].cpu(), model.samplerate)
+        stem_paths.append(gr.File(value=path))
+
+    return stem_paths
+
 # === Save/Load Project File (.aiproj) ===
 def save_project(vocals, drums, bass, other, vol_vocals, vol_drums, vol_bass, vol_other):
@@ -306,7 +280,7 @@ def load_project(project_file):
         data["volumes"]["other"]
     )
 
-# === Process Audio Function (Fixed!) ===
+# === Process Audio Function ===
 def process_audio(audio_file, selected_effects, isolate_vocals, preset_name, export_format):
     status = "🔊 Loading audio..."
     try:
@@ -435,7 +409,8 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
         fn=process_audio,
         inputs=[
             gr.Audio(label="Upload Audio", type="filepath"),
-            gr.CheckboxGroup(choices=preset_choices.get("Default", []), label="Apply Effects in Order"),
+            gr.CheckboxGroup(choices=preset_choices.get("Default", []),
+                             label="Apply Effects in Order"),
             gr.Checkbox(label="Isolate Vocals After Effects"),
             gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] if preset_names else None),
             gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
@@ -468,135 +443,18 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Apply genre-specific EQ + loudness matching in one click."
         )
 
-    # --- Multiband Compression Tab ===
-    with gr.Tab("🎛 Multiband Compression"):
-        gr.Interface(
-            fn=multiband_compression,
-            inputs=[
-                gr.Audio(label="Upload Track", type="filepath"),
-                gr.Slider(minimum=-12, maximum=12, value=0, label="Low Gain (20–500Hz)"),
-                gr.Slider(minimum=-12, maximum=12, value=0, label="Mid Gain (500Hz–4kHz)"),
-                gr.Slider(minimum=-12, maximum=12, value=0, label="High Gain (4kHz+)"),
-            ],
-            outputs=gr.Audio(label="EQ'd Output", type="filepath"),
-            title="Adjust Frequency Bands Live",
-            description="Fine-tune your sound using real-time sliders for low, mid, and high frequencies."
-        )
-
-    # --- Real-Time Spectrum Analyzer + EQ Preview ===
-    with gr.Tab("📊 Real-Time Spectrum"):
-        gr.Interface(
-            fn=visualize_spectrum,
-            inputs=gr.Audio(label="Upload Track", type="filepath"),
-            outputs=gr.Image(label="Spectrum Analysis"),
-            title="See the frequency breakdown of your audio"
-        )
-
-    # --- Loudness Graph Tab ===
-    with gr.Tab("📈 Loudness Graph"):
-        gr.Interface(
-            fn=match_loudness,
-            inputs=[
-                gr.Audio(label="Upload Track", type="filepath"),
-                gr.Slider(minimum=-24, maximum=-6, value=-14, label="Target LUFS")
-            ],
-            outputs=gr.Audio(label="Normalized Output", type="filepath"),
-            title="Match Loudness Across Tracks",
-            description="Use EBU R128 standard for consistent volume"
-        )
-
-    # --- Stereo Imaging Tool ===
-    with gr.Tab("🎚 Stereo Imaging"):
-        gr.Interface(
-            fn=stereo_imaging,
-            inputs=[
-                gr.Audio(label="Upload Track", type="filepath"),
-                gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Mid-Side Balance"),
-                gr.Slider(minimum=0.0, maximum=2.0, value=1.0, label="Stereo Spread")
-            ],
-            outputs=gr.Audio(label="Imaged Output", type="filepath"),
-            title="Adjust Stereo Field",
-            description="Control mid-side balance and widen stereo spread."
-        )
-
-    # --- Harmonic Saturation ===
+    # --- Harmonic Saturation / Exciter ===
     with gr.Tab("🧬 Harmonic Saturation"):
         gr.Interface(
             fn=harmonic_saturation,
             inputs=[
                 gr.Audio(label="Upload Track", type="filepath"),
-                gr.Slider(minimum=0.0, maximum=1.0, value=0.2, label="Saturation Intensity")
+                gr.Dropdown(choices=["Tube", "Tape", "Console", "Mix Bus"], label="Saturation Type", value="Tube"),
+                gr.Slider(minimum=0.1, maximum=1.0, value=0.2, label="Intensity")
             ],
             outputs=gr.Audio(label="Warm Output", type="filepath"),
             title="Add Analog-Style Warmth",
-            description="Apply subtle distortion to enhance clarity and presence."
-        )
-
-    # --- Sidechain Compression ===
-    with gr.Tab("🔁 Sidechain Compression"):
-        gr.Interface(
-            fn=sidechain_compressor,
-            inputs=[
-                gr.File(label="Main Track"),
-                gr.File(label="Sidechain Track"),
-                gr.Slider(minimum=-24, maximum=0, value=-16, label="Threshold (dB)"),
-                gr.Number(label="Ratio", value=4),
-                gr.Number(label="Attack (ms)", value=5),
-                gr.Number(label="Release (ms)", value=200)
-            ],
-            outputs=gr.Audio(label="Ducked Output", type="filepath"),
-            title="Sidechain Compression",
-            description="Automatically duck background under voice or kick"
-        )
-
-    # --- Save/Load Mix Session (.aiproj) ===
-    with gr.Tab("📁 Save/Load Mix Session"):
-        gr.Interface(
-            fn=save_project,
-            inputs=[
-                gr.File(label="Vocals"),
-                gr.File(label="Drums"),
-                gr.File(label="Bass"),
-                gr.File(label="Other"),
-                gr.Slider(minimum=-10, maximum=10, value=0, label="Vocals Volume"),
-                gr.Slider(minimum=-10, maximum=10, value=0, label="Drums Volume"),
-                gr.Slider(minimum=-10, maximum=10, value=0, label="Bass Volume"),
-                gr.Slider(minimum=-10, maximum=10, value=0, label="Other Volume")
-            ],
-            outputs=gr.File(label="Project File (.aiproj)"),
-            title="Save Your Full Mix Session",
-            description="Save stems, volumes, and settings in one file."
-        )
-
-        gr.Interface(
-            fn=load_project,
-            inputs=gr.File(label="Upload .aiproj File"),
-            outputs=[
-                gr.File(label="Vocals"),
-                gr.File(label="Drums"),
-                gr.File(label="Bass"),
-                gr.File(label="Other"),
-                gr.Slider(label="Vocals Volume"),
-                gr.Slider(label="Drums Volume"),
-                gr.Slider(label="Bass Volume"),
-                gr.Slider(label="Other Volume")
-            ],
-            title="Resume Last Mix",
-            description="Load saved mix session",
-            allow_flagging="never"
-        )
-
-    # --- Vocal Pitch Correction (Auto-Tune) ===
-    with gr.Tab("🧬 Vocal Pitch Correction"):
-        gr.Interface(
-            fn=auto_tune_vocal,
-            inputs=[
-                gr.File(label="Source Voice Clip"),
-                gr.Textbox(label="Target Key", value="C", lines=1)
-            ],
-            outputs=gr.Audio(label="Pitch-Corrected Output", type="filepath"),
-            title="Auto-Tune Style Pitch Correction",
-            description="Correct vocal pitch automatically"
-        )
+            description="Enhance clarity and presence using saturation styles like Tube, Tape, or Mix Bus."
+        )
 
 demo.launch()
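For illustration, here is a minimal sketch (not part of the commit) of exercising the reworked harmonic_saturation from a script. The "input.wav" path is hypothetical, and array_to_audiosegment is assumed to be the helper defined earlier in app.py:

    from pydub import AudioSegment

    seg = AudioSegment.from_file("input.wav")  # hypothetical input file
    # New signature: waveshaping curve chosen by name, plus an intensity knob
    warm = harmonic_saturation(seg, saturation_type="Tape", intensity=0.3)
    warm.export("warm.wav", format="wav")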
 
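Likewise, a minimal sketch of driving the new Demucs-based helpers added in this commit outside the Gradio UI; the "song.wav" path is hypothetical:

    # Writes vocals.wav to the temp dir using the htdemucs model
    vocals_path = apply_vocal_isolation("song.wav")

    # Saves drums/bass/other/vocals stems and returns them as gr.File components
    stem_files = stem_split("song.wav")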