Update app.py
app.py CHANGED
@@ -138,98 +138,82 @@ def match_loudness(audio_path, target_lufs=-14.0):
     adjusted.export(out_path, format="wav")
     return out_path
 
-# === Dynamic Compression Presets ===
-def apply_compression_preset(audio_path, preset):
-    ...
-    elif preset == "Speech":
-        return audio.compress_dynamic_range(threshold=-6, ratio=1.5)
-    return audio
-
-# === Auto-EQ per Genre ===
-def auto_eq(audio, genre="Pop"):
-    eq_map = {
-        "Pop": [(200, 500, -3), (2000, 4000, +4)],       # Cut muddiness, boost vocals
-        "EDM": [(60, 250, +6), (8000, 12000, +3)],       # Maximize bass & sparkle
-        "Rock": [(1000, 3000, +4), (7000, 10000, -3)],   # Punchy mids, reduce sibilance
-        "Hip-Hop": [(20, 100, +6), (7000, 10000, -4)],   # Deep lows, smooth highs
-        "Acoustic": [(100, 300, -3), (4000, 8000, +2)],  # Natural tone
-        "Metal": [(100, 500, -4), (2000, 5000, +6), (7000, 12000, -3)],  # Clear low-mids, crisp highs
-        "Trap": [(80, 120, +6), (3000, 6000, -4)],       # Sub-bass boost, cut harsh highs
-        "LoFi": [(20, 200, +3), (1000, 3000, -2)],       # Warmth, soft mids
-        "Default": []
-    }
-
-    from scipy.signal import butter, sosfilt
-
-    def band_eq(samples, sr, lowcut, highcut, gain):
-        sos = butter(10, [lowcut, highcut], btype='band', output='sos', fs=sr)
-        filtered = sosfilt(sos, samples)
-        return samples + gain * filtered
-
-    samples, sr = audiosegment_to_array(audio)
-    samples = samples.astype(np.float64)
-
-    for band in eq_map.get(genre, []):
-        low, high, gain = band
-        samples = band_eq(samples, sr, low, high, gain)
-
-    return array_to_audiosegment(samples.astype(np.int16), sr, channels=audio.channels)
-
-# === Prompt-Based Editing ===
-def process_prompt(audio_path, prompt):
-    audio = AudioSegment.from_file(audio_path)
-    ...
+# === AI Vocal Pitch Correction – Auto-Tune Style ===
+def auto_tune_vocal(audio_path, target_key="C"):
+    try:
+        # Placeholder for real-time pitch detection
+        semitones = 0.2
+        return apply_pitch_shift(AudioSegment.from_file(audio_path), semitones)
+    except Exception:
+        return None
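+# NOTE: target_key is accepted but not used yet; the fixed 0.2-semitone shift
+# is a placeholder until real pitch detection is wired in.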
+
+# === Real-Time EQ with Curve Drawing ===
+def draw_eq_curve(freqs, gains):
+    fig, ax = plt.subplots(figsize=(10, 4))
+    ax.plot(freqs, gains, color='blue', lw=2)
+    ax.set_xscale('log')
+    ax.set_title("EQ Curve")
+    ax.set_xlabel("Frequency (Hz)")
+    ax.set_ylabel("Gain (dB)")
+    buf = BytesIO()
+    plt.savefig(buf, format="png")
+    plt.close()
+    buf.seek(0)
+    return Image.open(buf)
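+# NOTE: this only renders the EQ curve as an image; it does not apply any
+# filtering to the audio itself.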
+
+# === Create Karaoke Video from Audio + Lyrics ===
+def create_karaoke_video(audio_path, lyrics, bg_image=None):
+    try:
+        from moviepy.editor import TextClip, CompositeVideoClip, ColorClip, AudioFileClip
+
+        audio = AudioFileClip(audio_path)
+        # moviepy clips expose .duration; .duration_seconds is pydub API
+        video = ColorClip(size=(1280, 720), color=(0, 0, 0), duration=audio.duration)
+        words = [(word.strip(), i * 3, (i + 1) * 3) for i, word in enumerate(lyrics.split())]
+
+        text_clips = [
+            TextClip(word, fontsize=60, color='white').set_position('center').set_duration(end - start).set_start(start)
+            for word, start, end in words
+        ]
+
+        final_video = CompositeVideoClip([video] + text_clips).set_audio(audio)
+        out_path = os.path.join(tempfile.gettempdir(), "karaoke.mp4")
+        final_video.write_videofile(out_path, codec="libx264", audio_codec="aac")
+        return out_path
+    except Exception as e:
+        return f"⚠️ Failed: {str(e)}"
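+# NOTE: each word is shown for a fixed 3 seconds rather than synced to the
+# audio, and TextClip requires ImageMagick to be installed.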
+
+# === Save/Load Project File (.aiproj) ===
+def save_project(audio_path, preset_name, effects):
+    project_data = {
+        "audio": AudioSegment.from_file(audio_path).raw_data,
+        "preset": preset_name,
+        "effects": effects
+    }
+    out_path = os.path.join(tempfile.gettempdir(), "project.aiproj")
+    with open(out_path, "wb") as f:
+        pickle.dump(project_data, f)
+    return out_path
+
+def load_project(project_file):
+    with open(project_file.name, "rb") as f:
+        data = pickle.load(f)
+    return data["preset"], data["effects"]
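+# NOTE: raw_data alone drops frame rate / sample width / channel metadata, and
+# pickle.load can execute arbitrary code, so only open .aiproj files you created.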
+
+# === Vocal Doubler / Harmonizer ===
+def vocal_doubler(audio):
+    shifted_up = apply_pitch_shift(audio, 0.3)
+    shifted_down = apply_pitch_shift(audio, -0.3)
+    return audio.overlay(shifted_up).overlay(shifted_down)
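+# NOTE: overlaying copies detuned by ±0.3 semitones gives a chorus-style
+# thickening rather than harmonies at musical intervals.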
+
-# ===
+# === Genre Detection + Preset Suggestions ===
 def suggest_preset_by_genre(audio_path):
     try:
         y, sr = torchaudio.load(audio_path)
         mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
         genre = "Pop"
-        return
+        return ["Vocal Clarity", "Limiter", "Stereo Expansion"]
     except Exception:
-        return "
+        return ["Default"]
-
-# === Vocal Pitch Correction – Auto-Tune Style ===
-def apply_pitch_correction(audio, target_key="C"):
-    # Placeholder: In real use, this would align pitch to the nearest key note
-    return apply_pitch_shift(audio, 0.2)
-
-# === Create Karaoke Video from Audio + Lyrics ===
-def create_karaoke_video(audio_path, lyrics, bg_image=None):
-    print(f"Creating karaoke video with lyrics: {lyrics}")
-    return apply_auto_gain(AudioSegment.from_file(audio_path)).export(
-        os.path.join(tempfile.gettempdir(), "karaoke_output.wav"), format="wav"
-    )
 
 # === Vocal Isolation Helpers ===
 def load_track_local(path, sample_rate, channels=2):
@@ -304,8 +288,6 @@ if not preset_choices:
     "ASMR Creator": ["Noise Gate", "Auto Gain", "Low-Pass Filter"],
     "Voiceover Pro": ["Vocal Isolation", "TTS", "EQ Match"],
     "8-bit Retro": ["Bitcrusher", "Echo", "Mono Downmix"],
-
-    # 🎤 Vocalist Presets
     "🎙 Clean Vocal": ["Noise Reduction", "Normalize", "High Pass Filter (80Hz)"],
     "🧪 Vocal Distortion": ["Vocal Distortion", "Reverb", "Compress Dynamic Range"],
     "🎶 Singer's Harmony": ["Harmony", "Stereo Widening", "Pitch Shift"],
@@ -450,114 +432,6 @@ def generate_tts(text):
     tts.tts_to_file(text=text, file_path=out_path)
     return out_path
 
-# === Save/Load Project File (.aiproj) ===
-def save_project(audio_path, preset_name, effects):
-    project_data = {
-        "audio": AudioSegment.from_file(audio_path).raw_data,
-        "preset": preset_name,
-        "effects": effects
-    }
-    out_path = os.path.join(tempfile.gettempdir(), "project.aiproj")
-    with open(out_path, "wb") as f:
-        pickle.dump(project_data, f)
-    return out_path
-
-def load_project(project_file):
-    with open(project_file.name, "rb") as f:
-        data = pickle.load(f)
-    return data["preset"], data["effects"]
-
-# === Trim Silence Automatically (VAD) ===
-def detect_silence(audio_file, silence_threshold=-50.0, min_silence_len=1000):
-    audio = AudioSegment.from_file(audio_file)
-
-    nonsilent_ranges = detect_nonsilent(
-        audio,
-        min_silence_len=int(min_silence_len),
-        silence_thresh=silence_threshold
-    )
-
-    if not nonsilent_ranges:
-        return audio.export(os.path.join(tempfile.gettempdir(), "trimmed.wav"), format="wav")
-
-    trimmed = audio[nonsilent_ranges[0][0]:nonsilent_ranges[-1][1]]
-    out_path = os.path.join(tempfile.gettempdir(), "trimmed.wav")
-    trimmed.export(out_path, format="wav")
-    return out_path
-
-# === Mix Two Tracks ===
-def mix_tracks(track1, track2, volume_offset=0):
-    a1 = AudioSegment.from_file(track1)
-    a2 = AudioSegment.from_file(track2)
-    mixed = a1.overlay(a2 - volume_offset)
-    out_path = os.path.join(tempfile.gettempdir(), "mixed.wav")
-    mixed.export(out_path, format="wav")
-    return out_path
-
-# === Dummy Voice Cloning Tab – Works Locally Only ===
-def clone_voice(*args):
-    return "⚠️ Voice cloning requires local install – use Python 3.9 or below"
-
-# === Speaker Diarization ("Who Spoke When?") ===
-try:
-    from pyannote.audio import Pipeline as DiarizationPipeline
-    from huggingface_hub import login
-
-    hf_token = os.getenv("HF_TOKEN")
-    if hf_token:
-        login(token=hf_token)
-    diarize_pipeline = DiarizationPipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=hf_token or True)
-except Exception as e:
-    diarize_pipeline = None
-    print(f"⚠️ Failed to load diarization: {e}")
-
-def diarize_and_transcribe(audio_path):
-    if not diarize_pipeline:
-        return "⚠️ Diarization pipeline not loaded – check HF token or install pyannote.audio"
-
-    # Run diarization
-    audio = AudioSegment.from_file(audio_path)
-    temp_wav = os.path.join(tempfile.gettempdir(), "diarize.wav")
-    audio.export(temp_wav, format="wav")
-
-    try:
-        diarization = diarize_pipeline(temp_wav)
-
-        result = whisper.transcribe(temp_wav)
-
-        segments = []
-        for turn, _, speaker in diarization.itertracks(yield_label=True):
-            text = " ".join([seg["text"] for seg in result["segments"] if seg["start"] >= turn.start and seg["end"] <= turn.end])
-            segments.append({
-                "speaker": speaker,
-                "start": turn.start,
-                "end": turn.end,
-                "text": text
-            })
-
-        return segments
-    except Exception as e:
-        return f"⚠️ Diarization failed: {str(e)}"
-
-# === Real-Time Spectrum Analyzer + EQ Visualizer ===
-def visualize_spectrum(audio_path):
-    y, sr = torchaudio.load(audio_path)
-    y_np = y.numpy().flatten()
-
-    stft = librosa.stft(y_np)
-    db = librosa.amplitude_to_db(abs(stft))
-
-    plt.figure(figsize=(10, 4))
-    img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
-    plt.colorbar(img, format="%+2.0f dB")
-    plt.title("Frequency Spectrum")
-    plt.tight_layout()
-    buf = BytesIO()
-    plt.savefig(buf, format="png")
-    plt.close()
-    buf.seek(0)
-    return Image.open(buf)
-
 # === UI ===
 effect_options = [
     "Noise Reduction",
@@ -660,57 +534,10 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Ensure consistent loudness across tracks using industry-standard normalization."
         )
 
-    # --- Dynamic Compression Presets ===
-    with gr.Tab("🎛 Dynamic Compression Presets"):
-        gr.Interface(
-            fn=apply_compression_preset,
-            inputs=[
-                gr.Audio(label="Upload Track", type="filepath"),
-                gr.Dropdown(choices=["Radio Ready", "Podcast Safe", "Club Mix", "Speech"], label="Preset")
-            ],
-            outputs=gr.Audio(label="Compressed Output", type="filepath"),
-            title="Apply Pre-Tuned Compression Settings",
-            description="Choose from compression presets used in radio, podcasting, club mixes, and speech editing."
-        )
-
-    # --- AI Suggest Preset Based on Genre ===
-    with gr.Tab("🧠 AI Suggest Preset"):
-        gr.Interface(
-            fn=suggest_preset_by_genre,
-            inputs=gr.Audio(label="Upload Track", type="filepath"),
-            outputs=gr.Dropdown(choices=preset_names, label="Recommended Preset"),
-            title="AI Recommends Best Preset",
-            description="Upload a track and let AI recommend the best preset based on detected genre."
-        )
-
-    # --- Real-Time Spectrum Analyzer + EQ ===
-    with gr.Tab("📊 Frequency Spectrum"):
-        gr.Interface(
-            fn=visualize_spectrum,
-            inputs=gr.Audio(label="Upload Track", type="filepath"),
-            outputs=gr.Image(label="Spectrum Analysis"),
-            title="Real-Time Spectrum Analyzer",
-            description="See the frequency breakdown of your audio"
-        )
-
-    # --- Prompt-Based Editing Tab ===
-    with gr.Tab("🧠 Prompt-Based Editing"):
-        gr.Interface(
-            fn=process_prompt,
-            inputs=[
-                gr.File(label="Upload Audio", type="filepath"),
-                gr.Textbox(label="Describe What You Want", lines=5)
-            ],
-            outputs=gr.Audio(label="Edited Output", type="filepath"),
-            title="Type Your Edits – AI Does the Rest",
-            description="Say what you want done and let AI handle it.",
-            allow_flagging="never"
-        )
-
-    # --- Vocal Pitch Correction (Auto-Tune) ===
+    # --- AI Vocal Pitch Correction (Auto-Tune) ===
     with gr.Tab("🧬 Vocal Pitch Correction"):
         gr.Interface(
-            fn=apply_pitch_correction,
+            fn=auto_tune_vocal,
            inputs=[
                 gr.Audio(label="Upload Vocal Clip", type="filepath"),
                 gr.Textbox(label="Target Key", value="C", lines=1)
@@ -720,6 +547,19 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Correct vocal pitch automatically"
         )
 
+    # --- Real-Time EQ Curve Drawing ===
+    with gr.Tab("🎛 Draw Custom EQ Curve"):
+        gr.Interface(
+            fn=draw_eq_curve,
+            inputs=[
+                gr.Slider(minimum=20, maximum=20000, value=[20, 20000], label="Freq Range (Hz)"),
+                gr.Slider(minimum=-12, maximum=12, value=0, label="Gain (dB)"),
+            ],
+            outputs=gr.Image(label="EQ Curve"),
+            title="Draw Your Own Frequency Curve",
+            description="Customize your sound with visual EQ curve drawing."
+        )
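+    # NOTE: two Sliders return single numbers, not the freqs/gains arrays that
+    # draw_eq_curve expects (and gr.Slider does not accept a list value), so a
+    # real curve-editor input is still needed for this tab to work as described.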
+
     # --- Create Karaoke Video from Audio + Lyrics ===
     with gr.Tab("📹 Create Karaoke Video"):
         gr.Interface(
@@ -734,111 +574,49 @@ with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
             description="Generate karaoke-style videos with real-time sync."
         )
 
-    # ---
-    with gr.Tab("
-        gr.Interface(
-            fn=
-            inputs=[
-                gr.
-                gr.
-                gr.
-                gr.Dropdown(choices=preset_names, label="Select Vocal Preset", value=preset_names[0]),
-                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
-            ],
-            outputs=[
-                gr.Audio(label="Processed Vocal", type="filepath"),
-                gr.Image(label="Waveform Preview"),
-                gr.Textbox(label="Session Log (JSON)", lines=5),
-                gr.Textbox(label="Detected Genre", lines=1),
-                gr.Textbox(label="Status", value="✅ Ready", lines=1)
-            ],
-            ...
-        )
+    # --- Save/Load Project File (.aiproj) ===
+    with gr.Tab("📁 Save/Load Project"):
+        gr.Interface(
+            fn=save_project,
+            inputs=[
+                gr.File(label="Original Audio"),
+                gr.Dropdown(choices=preset_names, label="Used Preset", value=preset_names[0]),
+                gr.CheckboxGroup(choices=effect_options, label="Applied Effects")
+            ],
+            outputs=gr.File(label="Project File (.aiproj)"),
+            title="Save Everything Together",
+            description="Save your session, effects, and settings in one file to reuse later."
+        )
 
-    # --- Voice Cloning (Local Only) ===
-    with gr.Tab("🎭 Voice Cloning (Local Only)"):
-        gr.Interface(
-            fn=clone_voice,
-            inputs=[
-                gr.
-                gr.
-            ],
-            description="Clone voice from source to target speaker using AI"
-        )
+        gr.Interface(
+            fn=load_project,
+            inputs=gr.File(label="Upload .aiproj File"),
+            outputs=[
+                gr.Dropdown(choices=preset_names, label="Loaded Preset"),
+                gr.CheckboxGroup(choices=effect_options, label="Loaded Effects")
+            ],
+            title="Resume Last Project",
+            description="Load your saved session"
+        )
 
-    # ---
-    with gr.Tab("🧏‍♂️ Who Spoke When?"):
-        gr.Interface(
-            fn=diarize_and_transcribe,
-            inputs=gr.Audio(label="Upload Interview/Podcast", type="filepath"),
-            outputs=gr.JSON(label="Diarized Transcript"),
-            title="Split By Speaker + Transcribe",
-            description="Detect speakers and transcribe their speech automatically."
-        )
-
-    # --- TTS Voice Generator ===
-    with gr.Tab("💬 TTS Voice Generator"):
-        gr.Interface(
-            fn=generate_tts,
-            inputs=gr.
-            outputs=gr.Audio(label="
-            title="
-            description="
-        )
-
-    # --- Auto-Save / Resume Sessions ===
-    session_state = gr.State()
-
-    def save_or_resume_session(audio, preset, effects, action="save"):
-        if action == "save":
-            return {"audio": audio, "preset": preset, "effects": effects}, None, None, None
-        elif action == "load" and isinstance(audio, dict):
-            return (
-                None,
-                audio.get("audio"),
-                audio.get("preset"),
-                audio.get("effects")
-            )
-        return None, None, None, None
-
-    with gr.Tab("🧾 Auto-Save & Resume"):
-        gr.Markdown("Save your current state and resume later.")
-
-        action_radio = gr.Radio(["save", "load"], label="Action", value="save")
-        audio_input = gr.Audio(label="Upload or Load Audio", type="filepath")
-        preset_dropdown = gr.Dropdown(choices=preset_names, label="Used Preset", value=preset_names[0] if preset_names else None)
-        effect_checkbox = gr.CheckboxGroup(choices=effect_options, label="Applied Effects")
-        action_btn = gr.Button("Save or Load Session")
-
-        session_data = gr.State()
-        loaded_audio = gr.Audio(label="Loaded Audio", type="filepath")
-        loaded_preset = gr.Dropdown(choices=preset_names, label="Loaded Preset")
-        loaded_effects = gr.CheckboxGroup(choices=effect_options, label="Loaded Effects")
-
-        action_btn.click(
-            fn=save_or_resume_session,
-            inputs=[audio_input, preset_dropdown, effect_checkbox, action_radio],
-            outputs=[session_data, loaded_audio, loaded_preset, loaded_effects]
-        )
+    # --- Vocal Doubler / Harmonizer ===
+    with gr.Tab("🎧 Vocal Doubler / Harmonizer"):
+        gr.Interface(
+            fn=vocal_doubler,
+            inputs=gr.Audio(label="Upload Vocal Clip", type="filepath"),
+            outputs=gr.Audio(label="Doubled Output", type="filepath"),
+            title="Add Vocal Doubling / Harmony",
+            description="Enhance vocals with doubling or harmony"
+        )
 
-    # ---
-    with gr.Tab("
-        gr.Interface(
-            fn=detect_silence,
-            inputs=[
-                gr.
-            ],
-            outputs=gr.File(label="Trimmed Output"),
-            title="Auto-Detect & Remove Silence",
-            description="Detect and trim silence at start/end or between words"
-        )
+    # --- AI Suggest Preset Based on Genre ===
+    with gr.Tab("🧠 AI Suggest Preset"):
+        gr.Interface(
+            fn=suggest_preset_by_genre,
+            inputs=gr.Audio(label="Upload Track", type="filepath"),
+            outputs=gr.Dropdown(choices=preset_names, label="Recommended Preset"),
+            title="AI Recommends Best Preset",
+            description="Upload a track and let AI recommend the best preset based on genre."
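+            # NOTE: the recommendation is currently static — suggest_preset_by_genre
+            # hard-codes genre = "Pop" and ignores the MFCC features it computes.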
         )
 
 demo.launch()