@article{4702, title = {Speech Denoising with Deep Feature Losses}, journal = {arXiv:1806.10522}, year = {2018}, month = {06/2018}, type = {arXiv eprint}, abstract = {

We present an end-to-end deep learning approach to denoising speech signals by processing the raw waveform directly. Given input audio containing speech corrupted by an additive background signal, the system aims to produce a processed signal that contains only the speech content. Recent approaches have shown promising results using various deep network architectures. In this paper, we propose to train a fully-convolutional context aggregation network using a deep feature loss. That loss is based on comparing the internal feature activations in a different network, trained for acoustic environment detection and domestic audio tagging. Our approach outperforms the state of the art in objective speech quality metrics and in large-scale perceptual experiments with human listeners. It also outperforms an identical network trained using traditional regression losses. The advantage of the new approach is particularly pronounced for the hardest data with the most intrusive background noise, for which denoising is most needed and most challenging.

}, url = {https://arxiv.org/abs/1806.10522}, author = {Fran{\c c}ois G. Germain and Qifeng Chen and Vladlen Koltun} }
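
A minimal sketch of the deep feature loss described in the abstract, written in PyTorch. This is an illustration of the general technique, not the authors' released implementation: the frozen loss network (standing in for the acoustic environment detection / audio tagging network), its `blocks` attribute, the number of compared layers, and the unweighted L1 distance are all assumptions made for this example.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DeepFeatureLoss(nn.Module):
    """Compare internal activations of a fixed, pretrained audio
    classifier (the 'loss network') on the denoised and clean
    waveforms; the denoiser is trained to minimize this distance."""

    def __init__(self, loss_net: nn.Module, num_layers: int = 6):
        super().__init__()
        self.loss_net = loss_net.eval()          # frozen feature extractor
        for p in self.loss_net.parameters():
            p.requires_grad_(False)              # gradients reach the denoiser only
        self.num_layers = num_layers

    def _features(self, wav: torch.Tensor) -> list:
        # Collect activations from the first `num_layers` conv blocks.
        # Assumes `loss_net` exposes an iterable `blocks` attribute
        # (hypothetical; adapt to the actual loss network's structure).
        feats, x = [], wav
        for block in list(self.loss_net.blocks)[: self.num_layers]:
            x = block(x)
            feats.append(x)
        return feats

    def forward(self, denoised: torch.Tensor, clean: torch.Tensor) -> torch.Tensor:
        # Sum of per-layer L1 distances between the two activation stacks.
        loss = denoised.new_zeros(())
        for fd, fc in zip(self._features(denoised), self._features(clean)):
            loss = loss + F.l1_loss(fd, fc)
        return loss

In a training loop, one would compute loss = DeepFeatureLoss(loss_net)(denoiser(noisy), clean) and backpropagate into the denoiser (in the paper, a fully-convolutional context aggregation network operating on the raw waveform) while the loss network stays fixed.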