import gradio as gr
import torch
import torchaudio
from df import enhance, init_df

# Load the DeepFilterNet model and its filter state once at startup.
model, df_state, _ = init_df()

def denoise_audio(audio):
    # Gradio passes the uploaded file as a filepath string.
    waveform, sample_rate = torchaudio.load(audio)

    # DeepFilterNet expects audio at the model's sample rate (df_state.sr(),
    # typically 48 kHz), so resample if the upload uses a different rate.
    if sample_rate != df_state.sr():
        waveform = torchaudio.functional.resample(waveform, sample_rate, df_state.sr())

    # Run the denoising pass.
    enhanced_audio = enhance(model, df_state, waveform)

    # Write the result to disk so Gradio can serve it as a downloadable file.
    output_file = "enhanced_output.wav"
    torchaudio.save(output_file, enhanced_audio, df_state.sr())

    return output_file

# Build a simple Gradio UI: upload a noisy file, get the denoised file back.
iface = gr.Interface(
    fn=denoise_audio,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs="file",
    title="DeepFilterNet Audio Denoising",
    description="Upload an audio file to remove noise using DeepFilterNet.",
)

iface.launch()
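
Two assumptions worth noting if you run this yourself: the source="upload" argument follows the Gradio 3.x gr.Audio API (Gradio 4 and later renamed it to sources=["upload"]), and the script expects the DeepFilterNet Python package (published on PyPI as deepfilternet) along with gradio and torchaudio to be installed.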