|
import torch |
|
import torchaudio |
|
from sgmse.model import ScoreModel |
|
import gradio as gr |
|
from sgmse.util.other import pad_spec |
|
|
|
|
|
class Args:
    """Static inference configuration for the SGMSE sampler (stands in for CLI args)."""

    # Run on CPU; this demo assumes no GPU is available.
    device = 'cpu'
    # Corrector type and how many corrector steps follow each predictor step.
    corrector = 'langevin'
    corrector_steps = 1
    # Number of reverse-diffusion (predictor) steps.
    N = 50
    # SNR parameter controlling the Langevin corrector step size.
    snr = 0.1
    # Padding mode applied when padding the spectrogram to a valid size.
    pad_mode = 'reflect'


# Single shared configuration instance used throughout this script.
args = Args()
|
|
|
|
|
model = ScoreModel.load_from_checkpoint("https://huggingface.co./sp-uhh/speech-enhancement-sgmse/resolve/main/train_vb_29nqe0uh_epoch%3D115.ckpt") |
|
|
|
def enhance_speech(audio_file):
    """Enhance a noisy speech recording with the SGMSE diffusion model.

    Args:
        audio_file: Path to the input audio file (any format torchaudio can load).

    Returns:
        Path to the enhanced audio, written to 'enhanced_output.wav' at the
        input's original sample rate.
    """
    y, sr = torchaudio.load(audio_file)
    T_orig = y.size(1)

    # Peak-normalize so the model sees a consistent dynamic range.
    # Guard against an all-zero (silent) input, which would divide by zero.
    norm_factor = y.abs().max()
    if norm_factor > 0:
        y = y / norm_factor

    # Complex spectrogram via the model's own transforms; add a batch dim,
    # then pad to a spectrogram size the network accepts.
    Y = torch.unsqueeze(model._forward_transform(model._stft(y.to(args.device))), 0)
    Y = pad_spec(Y, mode=args.pad_mode)

    # Predictor-corrector sampling: reverse-diffusion predictor with the
    # configured corrector (see Args for N / steps / snr).
    sampler = model.get_pc_sampler(
        'reverse_diffusion', args.corrector, Y.to(args.device), N=args.N,
        corrector_steps=args.corrector_steps, snr=args.snr)
    sample, _ = sampler()

    # Back to the time domain, trimmed to the original length, and undo
    # the peak normalization.
    x_hat = model.to_audio(sample.squeeze(), T_orig)
    x_hat = x_hat * norm_factor

    output_file = 'enhanced_output.wav'
    # torchaudio.save expects a (channels, time) torch.Tensor; the original
    # code passed a NumPy array (`.numpy()`), which raises a TypeError.
    # Keep it a tensor and make sure it is 2-D before saving.
    wav = x_hat.detach().cpu()
    if wav.dim() == 1:
        wav = wav.unsqueeze(0)
    torchaudio.save(output_file, wav, sr)

    return output_file
|
|
|
|
|
# --- Gradio UI wiring ----------------------------------------------------
title = "Speech Enhancement using SGMSE"
description = "This Gradio demo uses the SGMSE model for speech enhancement. Upload your audio file to enhance it."
article = "<p style='text-align: center'><a href='https://huggingface.co./SP-UHH/speech-enhancement-sgmse' target='_blank'>Model Card</a></p>"

# Both sides of the interface exchange audio as file paths on disk.
inputs = gr.Audio(label="Input Audio", type="filepath")
outputs = gr.Audio(label="Output Audio", type="filepath")

demo = gr.Interface(
    fn=enhance_speech,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    article=article,
)
demo.launch()
|
|