import gradio as gr
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
import numpy as np
import tempfile
import os
import noisereduce as nr
import json
import torch
from demucs import pretrained
from demucs.apply import apply_model
import torchaudio
from pathlib import Path
import matplotlib.pyplot as plt
from io import BytesIO
from PIL import Image
import zipfile
import datetime
import librosa
import librosa.display  # needed for librosa.display.specshow below
import warnings
from scipy.signal import butter, sosfilt  # used by the EQ functions below
from faster_whisper import WhisperModel
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, TIT2, TPE1, TALB, TYER
from TTS.api import TTS
import pickle

# Suppress warnings
warnings.filterwarnings("ignore")
# === Helper Functions ===
def audiosegment_to_array(audio):
    return np.array(audio.get_array_of_samples()), audio.frame_rate

def array_to_audiosegment(samples, frame_rate, channels=1):
    return AudioSegment(
        samples.tobytes(),
        frame_rate=frame_rate,
        sample_width=samples.dtype.itemsize,
        channels=channels
    )
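
# Hypothetical helper (not wired into the UI in this version) showing how the
# mutagen imports above could be used to tag exported MP3s; the function name
# and defaults are assumptions.
def tag_mp3(path, title="", artist="", album="", year=""):
    """Write basic ID3 tags to an MP3 file."""
    mp3 = MP3(path, ID3=ID3)
    if mp3.tags is None:
        mp3.add_tags()
    mp3.tags.add(TIT2(encoding=3, text=title))   # title
    mp3.tags.add(TPE1(encoding=3, text=artist))  # artist
    mp3.tags.add(TALB(encoding=3, text=album))   # album
    mp3.tags.add(TYER(encoding=3, text=year))    # year (ID3v2.3 frame)
    mp3.save()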
# === Effect Functions ===
def apply_normalize(audio):
    return audio.normalize()

def apply_noise_reduction(audio):
    samples, frame_rate = audiosegment_to_array(audio)
    # noisereduce returns floats; cast back so sample_width stays correct
    reduced = nr.reduce_noise(y=samples.astype(np.float32), sr=frame_rate)
    return array_to_audiosegment(reduced.astype(samples.dtype), frame_rate, channels=audio.channels)

def apply_compression(audio):
    return audio.compress_dynamic_range()

def apply_reverb(audio):
    # Simple reverb approximation: overlay a quieter copy delayed by 1 s
    reverb = audio - 10
    return audio.overlay(reverb, position=1000)
def apply_pitch_shift(audio, semitones=-2):
    # Speed-based pitch shift: resample by the pitch factor and keep the
    # original frame rate (duration changes as a side effect; stereo
    # channels are interleaved, so wide shifts may smear the image).
    factor = 2 ** (semitones / 12)
    samples = np.array(audio.get_array_of_samples())
    resampled = np.interp(
        np.arange(0, len(samples), factor),
        np.arange(len(samples)),
        samples
    ).astype(np.int16)
    return AudioSegment(
        resampled.tobytes(),
        frame_rate=audio.frame_rate,
        sample_width=audio.sample_width,
        channels=audio.channels
    )
def apply_echo(audio, delay_ms=500, decay=0.5):
    # Overlay a delayed, attenuated copy; higher decay keeps the echo louder
    echo = audio - int(20 * (1 - decay))
    return audio.overlay(echo, position=delay_ms)

def apply_stereo_widen(audio, pan_amount=0.3):
    # pan() returns stereo, so downmix each side to mono before recombining
    left = audio.pan(-pan_amount).set_channels(1)
    right = audio.pan(pan_amount).set_channels(1)
    return AudioSegment.from_mono_audiosegments(left, right)

def apply_bass_boost(audio, gain=10):
    # Boost the low band and mix it back into the original signal
    lows = audio.low_pass_filter(100).apply_gain(gain)
    return audio.overlay(lows)

def apply_treble_boost(audio, gain=10):
    # Boost the high band and mix it back into the original signal
    highs = audio.high_pass_filter(4000).apply_gain(gain)
    return audio.overlay(highs)
def apply_noise_gate(audio, threshold=-50.0):
    samples = np.array(audio.get_array_of_samples()).astype(np.float64)
    peak = np.max(np.abs(samples))
    if peak < 1:
        return audio
    normalized = samples / peak
    # Convert the dB threshold to a linear amplitude (-50 dB ≈ 0.003)
    thresh_lin = 10 ** (threshold / 20)
    gated = np.where(np.abs(normalized) > thresh_lin, normalized, 0)
    return array_to_audiosegment(
        (gated * np.iinfo(np.int16).max).astype(np.int16),
        audio.frame_rate,
        channels=audio.channels
    )

def apply_limiter(audio, limit_dB=-1):
    # Simple ceiling approximation: trim the overall gain toward the limit
    limiter = audio._spawn(audio.raw_data, overrides={"frame_rate": audio.frame_rate})
    return limiter.apply_gain(limit_dB)

def apply_auto_gain(audio, target_dB=-20):
    change = target_dB - audio.dBFS
    return audio.apply_gain(change)
def apply_vocal_distortion(audio, intensity=0.3):
    # Sine waveshaper; the shaping term is scaled to int16 range so the
    # intensity parameter has an audible effect, then clipped before casting
    samples = np.array(audio.get_array_of_samples()).astype(np.float32)
    distorted = samples + intensity * 32767 * np.sin(samples * 2 * np.pi / 32768)
    distorted = np.clip(distorted, -32768, 32767)
    return array_to_audiosegment(distorted.astype(np.int16), audio.frame_rate, channels=audio.channels)

def apply_harmony(audio, shift_semitones=4):
    # Layer pitch-shifted copies above and below the original
    shifted_up = apply_pitch_shift(audio, shift_semitones)
    shifted_down = apply_pitch_shift(audio, -shift_semitones)
    return audio.overlay(shifted_up).overlay(shifted_down)

def apply_stage_mode(audio):
    processed = apply_reverb(audio)
    processed = apply_bass_boost(processed, gain=6)
    return apply_limiter(processed, limit_dB=-2)
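
# The effect map in process_audio references apply_phaser and apply_bitcrush,
# which were missing from this file. These are minimal sketches, not tuned
# DSP: the "phaser" just mixes in a short-delayed copy (a true phaser would
# sweep all-pass filters), and the bitcrusher quantizes samples to a lower
# bit depth, assuming 16-bit source audio.
def apply_phaser(audio, rate=0.5, depth=0.5, mix=0.5):
    # rate is accepted for API compatibility but unused in this static version
    delay_ms = max(1, int(depth * 10))
    wet = audio - int((1 - mix) * 12)
    return audio.overlay(wet, position=delay_ms)

def apply_bitcrush(audio, bit_depth=8):
    # Quantize samples to the target bit depth for a lo-fi/8-bit character
    samples = np.array(audio.get_array_of_samples())
    step = 2 ** (16 - bit_depth)
    crushed = (samples // step) * step
    return array_to_audiosegment(crushed.astype(np.int16), audio.frame_rate, channels=audio.channels)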
# === Auto-EQ per Genre ===
def auto_eq(audio, genre="Pop"):
    # Frequency bands per genre: (low Hz, high Hz, gain dB)
    eq_map = {
        "Pop": [(200, 500, -3), (2000, 4000, +4)],       # Cut muddiness, boost vocals
        "EDM": [(60, 250, +6), (8000, 12000, +3)],       # Maximize bass & sparkle
        "Rock": [(1000, 3000, +4), (7000, 10000, -3)],   # Punchy mids, reduce sibilance
        "Hip-Hop": [(20, 100, +6), (7000, 10000, -4)],   # Deep lows, smooth highs
        "Acoustic": [(100, 300, -3), (4000, 8000, +2)],  # Natural tone
        "Metal": [(100, 500, -4), (2000, 5000, +6), (7000, 12000, -3)],  # Clear low-mids, crisp highs
        "Trap": [(80, 120, +6), (3000, 6000, -4)],       # Sub-bass boost, cut harsh highs
        "LoFi": [(20, 200, +3), (1000, 3000, -2)],       # Warmth, soft mids
        "Default": []
    }

    def band_eq(samples, sr, lowcut, highcut, gain):
        # Parametric boost/cut: add the isolated band scaled by the dB gain
        sos = butter(10, [lowcut, highcut], btype='band', output='sos', fs=sr)
        filtered = sosfilt(sos, samples)
        return samples + (10 ** (gain / 20) - 1) * filtered

    samples, sr = audiosegment_to_array(audio)
    samples = samples.astype(np.float64)
    for low, high, gain in eq_map.get(genre, []):
        samples = band_eq(samples, sr, low, high, gain)
    samples = np.clip(samples, np.iinfo(np.int16).min, np.iinfo(np.int16).max)
    return array_to_audiosegment(samples.astype(np.int16), sr, channels=audio.channels)
# === Prompt-Based Editing ===
def process_prompt(audio_path, prompt):
    audio = AudioSegment.from_file(audio_path)
    p = prompt.lower()
    if "noise" in p or "clean" in p:
        audio = apply_noise_reduction(audio)
    if "normalize" in p or "loud" in p:
        audio = apply_normalize(audio)
    if "bass" in p and "boost" in p:
        audio = apply_bass_boost(audio)
    if "treble" in p or "high" in p:
        audio = apply_treble_boost(audio)
    if "echo" in p or "reverb" in p:
        audio = apply_reverb(audio)
    if "pitch" in p and "correct" in p:
        audio = apply_pitch_shift(audio, 0)  # Placeholder: no-op shift
    if "harmony" in p or "double" in p:
        audio = apply_harmony(audio)
    out_path = os.path.join(tempfile.gettempdir(), "prompt_output.wav")
    audio.export(out_path, format="wav")
    return out_path
# === Real-Time EQ Sliders ===
def real_time_eq(audio, low_gain=0, mid_gain=0, high_gain=0):
    samples, sr = audiosegment_to_array(audio)
    samples = samples.astype(np.float64)
    # Sum the three gain-scaled bands into a fresh buffer instead of
    # overwriting the signal with the first band
    out = np.zeros_like(samples)
    # Low band: 20–500 Hz
    sos_low = butter(10, [20, 500], btype='band', output='sos', fs=sr)
    out += sosfilt(sos_low, samples) * (10 ** (low_gain / 20))
    # Mid band: 500 Hz–4 kHz
    sos_mid = butter(10, [500, 4000], btype='band', output='sos', fs=sr)
    out += sosfilt(sos_mid, samples) * (10 ** (mid_gain / 20))
    # High band: above 4 kHz (a highpass takes a single cutoff)
    sos_high = butter(10, 4000, btype='high', output='sos', fs=sr)
    out += sosfilt(sos_high, samples) * (10 ** (high_gain / 20))
    out = np.clip(out, np.iinfo(np.int16).min, np.iinfo(np.int16).max)
    return array_to_audiosegment(out.astype(np.int16), sr, channels=audio.channels)
# === AI Suggest Presets Based on Genre ===
genre_preset_map = {
    "Speech": ["Clean Podcast", "Normalize"],
    "Pop": ["Vocal Clarity", "Limiter", "Stereo Expansion"],
    "EDM": ["Heavy Bass", "Stereo Expansion", "Limiter", "Phaser"],
    "Rock": ["Distortion", "Punchy Mids", "Reverb"],
    "Hip-Hop": ["Deep Bass", "Vocal Presence", "Saturation"]
}

def suggest_preset_by_genre(audio_path):
    try:
        y, sr = torchaudio.load(audio_path)
        mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
        # Placeholder: a real classifier would map the MFCC features to a genre
        genre = "Pop"
        return genre_preset_map.get(genre, ["Default"])
    except Exception:
        return ["Default"]
# === Create Karaoke Video from Audio + Lyrics ===
def create_karaoke_video(audio_path, lyrics, bg_image=None):
    # Placeholder: video rendering is not implemented; this applies auto-gain
    # and returns the processed audio path instead of a real video.
    print(f"Creating karaoke video with lyrics: {lyrics}")
    out_path = os.path.join(tempfile.gettempdir(), "karaoke_output.wav")
    apply_auto_gain(AudioSegment.from_file(audio_path)).export(out_path, format="wav")
    return out_path
# === Vocal Isolation Helpers ===
def load_track_local(path, sample_rate, channels=2):
    sig, rate = torchaudio.load(path)
    if rate != sample_rate:
        sig = torchaudio.functional.resample(sig, rate, sample_rate)
    if channels == 1:
        sig = sig.mean(0)
    return sig

def save_track(path, wav, sample_rate):
    path = Path(path)
    torchaudio.save(str(path), wav, sample_rate)

def apply_vocal_isolation(audio_path):
    model = pretrained.get_model(name='htdemucs')
    wav = load_track_local(audio_path, model.samplerate, channels=2)
    # Standard Demucs normalization: zero-mean, unit-std via the mono reference
    ref = wav.mean(0)
    wav = (wav - ref.mean()) / ref.std()
    sources = apply_model(model, wav[None])[0]
    sources = sources * ref.std() + ref.mean()
    vocal_track = sources[3].cpu()  # htdemucs order: drums, bass, other, vocals
    out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
    save_track(out_path, vocal_track, model.samplerate)
    return out_path
# === Stem Splitting (Drums, Bass, Other, Vocals) ===
def stem_split(audio_path):
    model = pretrained.get_model(name='htdemucs')
    wav = load_track_local(audio_path, model.samplerate, channels=2)
    sources = apply_model(model, wav[None])[0]
    output_dir = tempfile.mkdtemp()
    stem_paths = []
    # htdemucs returns stems in this fixed order
    for i, name in enumerate(['drums', 'bass', 'other', 'vocals']):
        path = os.path.join(output_dir, f"{name}.wav")
        save_track(path, sources[i].cpu(), model.samplerate)
        stem_paths.append(path)
    return stem_paths
# === Preset Loader with Fallback ===
def load_presets():
    try:
        preset_files = [f for f in os.listdir("presets") if f.endswith(".json")]
        presets = {}
        for f in preset_files:
            path = os.path.join("presets", f)
            try:
                with open(path, "r") as infile:
                    data = json.load(infile)
                    if "name" in data and "effects" in data:
                        presets[data["name"]] = data["effects"]
            except json.JSONDecodeError:
                print(f"Invalid JSON: {f}")
        return presets
    except FileNotFoundError:
        print("Presets folder not found")
        return {}

preset_choices = load_presets()
if not preset_choices:
    preset_choices = {
        "Default": [],
        "Clean Podcast": ["Noise Reduction", "Normalize"],
        "Podcast Mastered": ["Noise Reduction", "Normalize", "Compress Dynamic Range"],
        "Radio Ready": ["Bass Boost", "Treble Boost", "Limiter"],
        "Music Production": ["Reverb", "Stereo Widening", "Pitch Shift"],
        "ASMR Creator": ["Noise Gate", "Auto Gain", "Low-Pass Filter"],
        "Voiceover Pro": ["Vocal Isolation", "TTS", "EQ Match"],
        "8-bit Retro": ["Bitcrusher", "Echo", "Mono Downmix"],
        # 🎤 Vocalist Presets
        "🎙 Clean Vocal": ["Noise Reduction", "Normalize", "High Pass Filter (80Hz)"],
        "🧪 Vocal Distortion": ["Vocal Distortion", "Reverb", "Compress Dynamic Range"],
        "🎶 Singer's Harmony": ["Harmony", "Stereo Widening", "Pitch Shift"],
        "🌫 ASMR Vocal": ["Auto Gain", "Low-Pass Filter (3000Hz)", "Noise Gate"],
        "🎼 Stage Mode": ["Reverb", "Bass Boost", "Limiter"],
        "🎵 Auto-Tune Style": ["Pitch Shift (+1 semitone)", "Normalize", "Treble Boost"]
    }

preset_names = list(preset_choices.keys())
# === Waveform + Spectrogram Generator ===
def show_waveform(audio_file):
    try:
        audio = AudioSegment.from_file(audio_file)
        samples = np.array(audio.get_array_of_samples())
        plt.figure(figsize=(10, 2))
        plt.plot(samples[:10000], color="blue")
        plt.axis("off")
        buf = BytesIO()
        plt.savefig(buf, format="png", bbox_inches="tight", dpi=100)
        plt.close()
        buf.seek(0)
        return Image.open(buf)
    except Exception:
        return None

def detect_genre(audio_path):
    try:
        y, sr = torchaudio.load(audio_path)
        mfccs = librosa.feature.mfcc(y=y.numpy().flatten(), sr=sr, n_mfcc=13).mean(axis=1).reshape(1, -1)
        # Placeholder: a real model would classify the MFCC features
        return "Speech"
    except Exception:
        return "Unknown"
# === Session Info Export ===
def generate_session_log(audio_path, effects, isolate_vocals, export_format, genre):
    log = {
        "timestamp": str(datetime.datetime.now()),
        "filename": os.path.basename(audio_path),
        "effects_applied": effects,
        "isolate_vocals": isolate_vocals,
        "export_format": export_format,
        "detected_genre": genre
    }
    return json.dumps(log, indent=2)
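
# Illustrative sample of the session log JSON (values are made up):
# {
#   "timestamp": "2025-01-01 12:00:00.000000",
#   "filename": "vocals_take1.wav",
#   "effects_applied": ["Noise Reduction", "Normalize"],
#   "isolate_vocals": false,
#   "export_format": "MP3",
#   "detected_genre": "Speech"
# }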
# === Main Processing Function with Status Updates ===
def process_audio(audio_file, selected_effects, isolate_vocals, preset_name, export_format):
    status = "🔊 Loading audio..."
    try:
        audio = AudioSegment.from_file(audio_file)
        status = "🛠 Applying effects..."
        effect_map = {
            "Noise Reduction": apply_noise_reduction,
            "Compress Dynamic Range": apply_compression,
            "Add Reverb": apply_reverb,
            "Reverb": apply_reverb,  # alias used by several presets
            "Pitch Shift": lambda x: apply_pitch_shift(x),
            "Echo": apply_echo,
            "Stereo Widening": apply_stereo_widen,
            "Bass Boost": apply_bass_boost,
            "Treble Boost": apply_treble_boost,
            "Normalize": apply_normalize,
            "Noise Gate": lambda x: apply_noise_gate(x, threshold=-50.0),
            "Limiter": lambda x: apply_limiter(x, limit_dB=-1),
            "Phaser": lambda x: apply_phaser(x),
            "Flanger": lambda x: apply_phaser(x, rate=1.2, depth=0.9, mix=0.7),
            "Bitcrusher": lambda x: apply_bitcrush(x, bit_depth=8),
            "Auto Gain": lambda x: apply_auto_gain(x, target_dB=-20),
            "Vocal Distortion": lambda x: apply_vocal_distortion(x),
            "Harmony": lambda x: apply_harmony(x),
            "Stage Mode": apply_stage_mode
        }
        # A selected preset overrides the manual checkboxes; effect names
        # without a mapping (e.g. "Stereo Expansion") are silently skipped
        effects_to_apply = preset_choices.get(preset_name, selected_effects)
        for effect_name in effects_to_apply:
            if effect_name in effect_map:
                audio = effect_map[effect_name](audio)
        status = "💾 Saving final audio..."
        with tempfile.NamedTemporaryFile(delete=False, suffix=f".{export_format.lower()}") as f:
            if isolate_vocals:
                temp_input = os.path.join(tempfile.gettempdir(), "input.wav")
                audio.export(temp_input, format="wav")
                vocal_path = apply_vocal_isolation(temp_input)
                final_audio = AudioSegment.from_wav(vocal_path)
            else:
                final_audio = audio
            output_path = f.name
            final_audio.export(output_path, format=export_format.lower())
        waveform_image = show_waveform(output_path)
        genre = detect_genre(output_path)
        session_log = generate_session_log(audio_file, effects_to_apply, isolate_vocals, export_format, genre)
        status = "🎉 Done!"
        return output_path, waveform_image, session_log, genre, status
    except Exception as e:
        status = f"❌ Error: {str(e)}"
        return None, None, status, "", status
# === Batch Processing Function ===
def batch_process_audio(files, selected_effects, isolate_vocals, preset_name, export_format):
    try:
        output_dir = tempfile.mkdtemp()
        results = []
        session_logs = []
        for file in files:
            # gr.File may hand back a path string or an object with .name
            path = file if isinstance(file, str) else file.name
            processed_path, _, log, _, _ = process_audio(path, selected_effects, isolate_vocals, preset_name, export_format)
            if processed_path is None:  # skip files that failed to process
                continue
            results.append(processed_path)
            session_logs.append(log)
        zip_path = os.path.join(output_dir, "batch_output.zip")
        with zipfile.ZipFile(zip_path, 'w') as zipf:
            for i, res in enumerate(results):
                filename = f"processed_{i}.{export_format.lower()}"
                zipf.write(res, filename)
                zipf.writestr(f"session_info_{i}.json", session_logs[i])
        return zip_path, "📦 ZIP created successfully!"
    except Exception as e:
        return None, f"❌ Batch processing failed: {str(e)}"
# === Transcribe & Edit Tab ===
whisper_model = WhisperModel("base")

def transcribe_audio(audio_path):
    segments, _ = whisper_model.transcribe(audio_path, beam_size=5)
    return " ".join(seg.text for seg in segments)

# === TTS Tab ===
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False)

def generate_tts(text):
    out_path = os.path.join(tempfile.gettempdir(), "tts_output.wav")
    tts.tts_to_file(text=text, file_path=out_path)
    return out_path
# === Save/Load Project File (.aiproj) ===
def save_project(audio_path, preset_name, effects):
    # Note: stores only raw PCM bytes plus settings; frame rate and channel
    # layout are not preserved in this simple format
    project_data = {
        "audio": AudioSegment.from_file(audio_path).raw_data,
        "preset": preset_name,
        "effects": effects
    }
    out_path = os.path.join(tempfile.gettempdir(), "project.aiproj")
    with open(out_path, "wb") as f:
        pickle.dump(project_data, f)
    return out_path

def load_project(project_file):
    # Caution: pickle can execute arbitrary code; only load trusted files
    path = project_file if isinstance(project_file, str) else project_file.name
    with open(path, "rb") as f:
        data = pickle.load(f)
    return data["preset"], data["effects"]
# === Trim Silence Automatically (VAD) ===
def detect_silence(audio_file, silence_threshold=-50.0, min_silence_len=1000):
    audio = AudioSegment.from_file(audio_file)
    nonsilent_ranges = detect_nonsilent(
        audio,
        min_silence_len=int(min_silence_len),
        silence_thresh=silence_threshold
    )
    out_path = os.path.join(tempfile.gettempdir(), "trimmed.wav")
    if not nonsilent_ranges:
        # Nothing above the threshold: return the audio unchanged
        audio.export(out_path, format="wav")
        return out_path
    trimmed = audio[nonsilent_ranges[0][0]:nonsilent_ranges[-1][1]]
    trimmed.export(out_path, format="wav")
    return out_path
# === Mix Two Tracks ===
def mix_tracks(track1, track2, volume_offset=0):
    a1 = AudioSegment.from_file(track1)
    a2 = AudioSegment.from_file(track2)
    mixed = a1.overlay(a2 - volume_offset)
    out_path = os.path.join(tempfile.gettempdir(), "mixed.wav")
    mixed.export(out_path, format="wav")
    return out_path

# === Dummy Voice Cloning Tab – Works Locally Only ===
def clone_voice(*args):
    # Stub: returns a status message rather than audio
    return "⚠️ Voice cloning requires a local install – use Python 3.9 or below"
# === Speaker Diarization ("Who Spoke When?") ===
try:
    from pyannote.audio import Pipeline as DiarizationPipeline
    from huggingface_hub import login
    hf_token = os.getenv("HF_TOKEN")
    if hf_token:
        login(token=hf_token)
    diarize_pipeline = DiarizationPipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=hf_token or True)
except Exception as e:
    diarize_pipeline = None
    print(f"⚠️ Failed to load diarization: {e}")

def diarize_and_transcribe(audio_path):
    if not diarize_pipeline:
        return "⚠️ Diarization pipeline not loaded – check HF token or install pyannote.audio"
    # Convert to WAV for both pipelines
    audio = AudioSegment.from_file(audio_path)
    temp_wav = os.path.join(tempfile.gettempdir(), "diarize.wav")
    audio.export(temp_wav, format="wav")
    try:
        diarization = diarize_pipeline(temp_wav)
        # faster-whisper returns segment objects with .start/.end/.text;
        # materialize the generator so it can be scanned per speaker turn
        whisper_segments, _ = whisper_model.transcribe(temp_wav)
        whisper_segments = list(whisper_segments)
        segments = []
        for turn, _, speaker in diarization.itertracks(yield_label=True):
            text = " ".join(
                seg.text for seg in whisper_segments
                if seg.start >= turn.start and seg.end <= turn.end
            )
            segments.append({
                "speaker": speaker,
                "start": turn.start,
                "end": turn.end,
                "text": text
            })
        return segments
    except Exception as e:
        return f"⚠️ Diarization failed: {str(e)}"
# === Real-Time Spectrum Analyzer + EQ Visualizer ===
def visualize_spectrum(audio_path):
    y, sr = torchaudio.load(audio_path)
    y_np = y.numpy().flatten()
    stft = librosa.stft(y_np)
    db = librosa.amplitude_to_db(abs(stft))
    plt.figure(figsize=(10, 4))
    img = librosa.display.specshow(db, sr=sr, x_axis="time", y_axis="hz", cmap="magma")
    plt.colorbar(img, format="%+2.0f dB")
    plt.title("Frequency Spectrum")
    plt.tight_layout()
    buf = BytesIO()
    plt.savefig(buf, format="png")
    plt.close()
    buf.seek(0)
    return Image.open(buf)

# === Real-Time EQ Slider Wrapper ===
def real_time_eq_slider(audio_path, low_gain, mid_gain, high_gain):
    # The UI passes a file path, so load, process, and export here
    audio = AudioSegment.from_file(audio_path)
    processed = real_time_eq(audio, low_gain, mid_gain, high_gain)
    out_path = os.path.join(tempfile.gettempdir(), "eq_output.wav")
    processed.export(out_path, format="wav")
    return out_path
# === Cloud Project Sync (Premium Feature) ===
def cloud_save_project(audio, preset, effects, project_name, project_id=""):
    # Stub: "cloud" storage is simulated with a local temp file
    project_data = {
        "audio": audio,
        "preset": preset,
        "effects": effects
    }
    project_path = os.path.join(tempfile.gettempdir(), f"{project_name}.aiproj")
    with open(project_path, "wb") as f:
        pickle.dump(project_data, f)
    return project_path, f"✅ '{project_name}' saved to cloud"

def cloud_load_project(project_id):
    if not project_id:
        return None, None, None
    try:
        with open(project_id, "rb") as f:
            data = pickle.load(f)
        return data["audio"], data["preset"], data["effects"]
    except Exception:
        return None, None, None
# === UI ===
effect_options = [
    "Noise Reduction",
    "Compress Dynamic Range",
    "Add Reverb",
    "Pitch Shift",
    "Echo",
    "Stereo Widening",
    "Bass Boost",
    "Treble Boost",
    "Normalize",
    "Noise Gate",
    "Limiter",
    "Phaser",
    "Flanger",
    "Bitcrusher",
    "Auto Gain",
    "Vocal Distortion",
    "Harmony",
    "Stage Mode"
]
with gr.Blocks(title="AI Audio Studio", css="style.css") as demo:
    gr.Markdown("## 🎧 Ultimate AI Audio Studio\nUpload, edit, export — powered by AI!")

    # --- Single File Studio ---
    with gr.Tab("🎵 Single File Studio"):
        gr.Interface(
            fn=process_audio,
            inputs=[
                gr.Audio(label="Upload Audio", type="filepath"),
                gr.CheckboxGroup(choices=effect_options, label="Apply Effects in Order"),
                gr.Checkbox(label="Isolate Vocals After Effects"),
                gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0] if preset_names else None),
                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
            ],
            outputs=[
                gr.Audio(label="Processed Audio", type="filepath"),
                gr.Image(label="Waveform Preview"),
                gr.Textbox(label="Session Log (JSON)", lines=5),
                gr.Textbox(label="Detected Genre", lines=1),
                gr.Textbox(label="Status", value="✅ Ready", lines=1)
            ],
            title="Edit One File at a Time",
            description="Apply effects, preview the waveform, and get a full session log.",
            flagging_mode="never",
            submit_btn="Process Audio",
            clear_btn=None
        )
    # --- Batch Processing ---
    with gr.Tab("🔊 Batch Processing"):
        gr.Interface(
            fn=batch_process_audio,
            inputs=[
                gr.File(label="Upload Multiple Files", file_count="multiple"),
                gr.CheckboxGroup(choices=effect_options, label="Apply Effects in Order"),
                gr.Checkbox(label="Isolate Vocals After Effects"),
                gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0]),
                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
            ],
            outputs=[
                gr.File(label="Download ZIP of All Processed Files"),
                gr.Textbox(label="Status", value="✅ Ready", lines=1)
            ],
            title="Batch Audio Processor",
            description="Upload multiple files, apply effects in bulk, and download all results in a single ZIP.",
            flagging_mode="never",
            submit_btn="Process All Files",
            clear_btn=None
        )
    # --- Remix Mode ---
    with gr.Tab("🎛 Remix Mode"):
        gr.Interface(
            fn=stem_split,
            inputs=gr.Audio(label="Upload Music Track", type="filepath"),
            outputs=[
                # Labels follow the htdemucs output order
                gr.File(label="Drums"),
                gr.File(label="Bass"),
                gr.File(label="Other"),
                gr.File(label="Vocals")
            ],
            title="Split Into Drums, Bass, Vocals, and More",
            description="Use AI to separate musical elements like vocals, drums, and bass.",
            flagging_mode="never",
            clear_btn=None
        )
    # --- Genre Mastering ---
    def master_by_genre(audio_path, genre):
        # Load from the file path, EQ, and export so Gradio gets a path back
        processed = auto_eq(AudioSegment.from_file(audio_path), genre)
        out_path = os.path.join(tempfile.gettempdir(), "mastered.wav")
        processed.export(out_path, format="wav")
        return out_path

    with gr.Tab("🎧 Genre Mastering"):
        gr.Interface(
            fn=master_by_genre,
            inputs=[
                gr.Audio(label="Upload Track", type="filepath"),
                gr.Dropdown(
                    # Genres that auto_eq actually defines bands for
                    choices=["Pop", "EDM", "Rock", "Hip-Hop", "Acoustic", "Metal", "Trap", "LoFi"],
                    label="Select Genre", value="Pop"
                )
            ],
            outputs=gr.Audio(label="Mastered Output", type="filepath"),
            title="Genre-Specific Mastering",
            description="Apply professionally tuned mastering settings for popular music genres."
        )
    # --- Real-Time EQ ---
    with gr.Tab("🎛 Real-Time EQ"):
        gr.Interface(
            fn=real_time_eq_slider,
            inputs=[
                gr.Audio(label="Upload Track", type="filepath"),
                gr.Slider(minimum=-12, maximum=12, value=0, label="Low Gain (20–500Hz)"),
                gr.Slider(minimum=-12, maximum=12, value=0, label="Mid Gain (500Hz–4kHz)"),
                gr.Slider(minimum=-12, maximum=12, value=0, label="High Gain (4kHz+)"),
            ],
            outputs=gr.Audio(label="EQ'd Output", type="filepath"),
            title="Adjust Frequency Bands Live",
            description="Fine-tune your sound using real-time sliders for low, mid, and high frequencies."
        )
    # --- Spectrum Visualizer ---
    with gr.Tab("📊 Frequency Spectrum"):
        gr.Interface(
            fn=visualize_spectrum,
            inputs=gr.Audio(label="Upload Track", type="filepath"),
            outputs=gr.Image(label="Spectrum Analysis"),
            title="Real-Time Spectrum Analyzer",
            description="See the frequency breakdown of your audio."
        )
    # --- Prompt-Based Editing ---
    with gr.Tab("🧠 Prompt-Based Editing"):
        gr.Interface(
            fn=process_prompt,
            inputs=[
                gr.File(label="Upload Audio", type="filepath"),
                gr.Textbox(label="Describe What You Want", lines=5)
            ],
            outputs=gr.Audio(label="Edited Output", type="filepath"),
            title="Type Your Edits – AI Does the Rest",
            description="Say what you want done and let AI handle it.",
            flagging_mode="never"
        )
    # --- Vocal Presets for Singers ---
    with gr.Tab("🎤 Vocal Presets for Singers"):
        gr.Interface(
            fn=process_audio,
            inputs=[
                gr.Audio(label="Upload Vocal Track", type="filepath"),
                gr.CheckboxGroup(choices=[
                    "Noise Reduction",
                    "Normalize",
                    "Compress Dynamic Range",
                    "Bass Boost",
                    "Treble Boost",
                    "Reverb",
                    "Auto Gain",
                    "Vocal Distortion",
                    "Harmony",
                    "Stage Mode"
                ], label="Apply Effects in Order"),
                gr.Checkbox(label="Isolate Vocals After Effects"),
                gr.Dropdown(choices=preset_names, label="Select Vocal Preset", value=preset_names[0]),
                gr.Dropdown(choices=["MP3", "WAV"], label="Export Format", value="MP3")
            ],
            outputs=[
                gr.Audio(label="Processed Vocal", type="filepath"),
                gr.Image(label="Waveform Preview"),
                gr.Textbox(label="Session Log (JSON)", lines=5),
                gr.Textbox(label="Detected Genre", lines=1),
                gr.Textbox(label="Status", value="✅ Ready", lines=1)
            ],
            title="Create Studio-Quality Vocal Tracks",
            description="Apply singer-friendly presets and effects to enhance vocals.",
            flagging_mode="never"
        )
    # --- Voice Cloning (Local Only) ---
    with gr.Tab("🎭 Voice Cloning (Local Only)"):
        gr.Interface(
            fn=clone_voice,
            inputs=[
                gr.File(label="Source Voice Clip"),
                gr.File(label="Target Voice Clip"),
                gr.Textbox(label="Text to Clone", lines=5)
            ],
            # clone_voice is a stub that returns a message, so show a textbox
            outputs=gr.Textbox(label="Status"),
            title="Replace One Voice With Another",
            description="Clone voice from source to target speaker using AI."
        )
    # --- Speaker Diarization ("Who Spoke When?") ---
    if diarize_pipeline:
        with gr.Tab("🧏‍♂️ Who Spoke When?"):
            gr.Interface(
                fn=diarize_and_transcribe,
                inputs=gr.Audio(label="Upload Interview/Podcast", type="filepath"),
                outputs=gr.JSON(label="Diarized Transcript"),
                title="Split By Speaker + Transcribe",
                description="Detect speakers and transcribe their speech automatically."
            )
    # --- TTS Voice Generator ---
    with gr.Tab("💬 TTS Voice Generator"):
        gr.Interface(
            fn=generate_tts,
            inputs=gr.Textbox(label="Enter Text", lines=5),
            outputs=gr.Audio(label="Generated Speech", type="filepath"),
            title="Text-to-Speech Generator",
            description="Type anything and turn it into natural-sounding speech."
        )
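
    # Assumed wiring (not in the original file): the "Transcribe & Edit"
    # section above defines transcribe_audio but never exposes it in the UI,
    # so this minimal tab hooks it up; the tab name and labels are placeholders.
    with gr.Tab("📝 Transcribe"):
        gr.Interface(
            fn=transcribe_audio,
            inputs=gr.Audio(label="Upload Audio", type="filepath"),
            outputs=gr.Textbox(label="Transcript", lines=10),
            title="Speech-to-Text",
            description="Transcribe speech using faster-whisper."
        )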
    # --- Auto-Save / Resume Sessions ---
    def save_or_resume_session(audio, preset, effects, action, session):
        if action == "save":
            # Store the current state; leave the "loaded" outputs empty
            return {"audio": audio, "preset": preset, "effects": effects}, None, None, None
        elif action == "load" and isinstance(session, dict):
            # Restore the previously saved state from the session State
            return session, session.get("audio"), session.get("preset"), session.get("effects")
        return None, None, None, None

    with gr.Tab("🧾 Auto-Save & Resume"):
        gr.Markdown("Save your current state and resume later.")
        action_radio = gr.Radio(["save", "load"], label="Action", value="save")
        audio_input = gr.Audio(label="Upload or Load Audio", type="filepath")
        preset_dropdown = gr.Dropdown(choices=preset_names, label="Used Preset", value=preset_names[0] if preset_names else None)
        effect_checkbox = gr.CheckboxGroup(choices=effect_options, label="Applied Effects")
        action_btn = gr.Button("Save or Load Session")
        session_data = gr.State()
        loaded_audio = gr.Audio(label="Loaded Audio", type="filepath")
        loaded_preset = gr.Dropdown(choices=preset_names, label="Loaded Preset")
        loaded_effects = gr.CheckboxGroup(choices=effect_options, label="Loaded Effects")
        action_btn.click(
            fn=save_or_resume_session,
            # session_data must be an input so "load" can read the saved state
            inputs=[audio_input, preset_dropdown, effect_checkbox, action_radio, session_data],
            outputs=[session_data, loaded_audio, loaded_preset, loaded_effects]
        )
    # --- VAD – Detect & Remove Silence ---
    with gr.Tab("✂️ Trim Silence Automatically"):
        gr.Interface(
            fn=detect_silence,
            inputs=[
                gr.File(label="Upload Track"),
                gr.Slider(minimum=-100, maximum=-10, value=-50, label="Silence Threshold (dB)"),
                gr.Number(label="Min Silence Length (ms)", value=1000)
            ],
            outputs=gr.File(label="Trimmed Output"),
            title="Auto-Detect & Remove Silence",
            description="Detect and trim silence at the start/end or between words."
        )
    # --- Save/Load Project File (.aiproj) ---
    with gr.Tab("📁 Save/Load Project"):
        gr.Interface(
            fn=save_project,
            inputs=[
                gr.File(label="Original Audio"),
                gr.Dropdown(choices=preset_names, label="Used Preset", value=preset_names[0]),
                gr.CheckboxGroup(choices=effect_options, label="Applied Effects")
            ],
            outputs=gr.File(label="Project File (.aiproj)"),
            title="Save Everything Together",
            description="Save your session, effects, and settings in one file to reuse later."
        )
        gr.Interface(
            fn=load_project,
            inputs=gr.File(label="Upload .aiproj File"),
            outputs=[
                gr.Dropdown(choices=preset_names, label="Loaded Preset"),
                gr.CheckboxGroup(choices=effect_options, label="Loaded Effects")
            ],
            title="Resume Last Project",
            description="Load your saved session."
        )
    # --- Cloud Project Sync (Premium Feature) ---
    with gr.Tab("☁️ Cloud Project Sync"):
        gr.Markdown("Save your projects online and resume them from any device.")
        gr.Interface(
            fn=cloud_save_project,
            inputs=[
                gr.File(label="Upload Audio", type="filepath"),
                gr.Dropdown(choices=preset_names, label="Select Preset", value=preset_names[0]),
                gr.CheckboxGroup(choices=effect_options, label="Effects Applied"),
                gr.Textbox(label="Project Name"),
                gr.Textbox(label="Project ID (Optional)")
            ],
            outputs=[
                gr.File(label="Downloadable Project File"),
                gr.Textbox(label="Status", value="✅ Ready", lines=1)
            ],
            title="Save to Cloud",
            description="Save your project online and share it across devices."
        )
        gr.Interface(
            fn=cloud_load_project,
            inputs=gr.Textbox(label="Enter Project ID"),
            outputs=[
                gr.Audio(label="Loaded Audio", type="filepath"),
                gr.Dropdown(choices=preset_names, label="Loaded Preset"),
                gr.CheckboxGroup(choices=effect_options, label="Loaded Effects")
            ],
            title="Load from Cloud",
            description="Resume a project from the cloud.",
            flagging_mode="never"
        )
    # --- AI Suggest Presets Based on Genre ---
    with gr.Tab("🧠 AI Suggest Preset"):
        gr.Interface(
            fn=suggest_preset_by_genre,
            inputs=gr.Audio(label="Upload Track", type="filepath"),
            # The function returns a list of suggested names, some of which
            # are not in preset_names, so show them as JSON rather than a Dropdown
            outputs=gr.JSON(label="Recommended Presets"),
            title="Let AI Recommend the Best Preset",
            description="Upload a track and let AI recommend the best preset based on genre."
        )
    # --- Create Karaoke Video from Audio + Lyrics ---
    with gr.Tab("📹 Create Karaoke Video"):
        gr.Interface(
            fn=create_karaoke_video,
            inputs=[
                gr.Audio(label="Upload Track", type="filepath"),
                gr.Textbox(label="Lyrics", lines=10),
                gr.File(label="Background (Optional)"),
            ],
            # create_karaoke_video currently returns processed audio, not video
            outputs=gr.Audio(label="Karaoke Audio", type="filepath"),
            title="Make Karaoke Videos from Audio + Lyrics",
            description="Generate karaoke-style audio (video rendering not yet implemented)."
        )

if __name__ == "__main__":
    demo.launch()