@inproceedings{136a1f81dfc9422baddc315070b8cd94,
title = "Audiovisual vocal outburst classification in noisy conditions",
abstract = "In this study, we investigate an audiovisual approach for classification of vocal outbursts (non-linguistic vocalisations) in noisy conditions using Long Short-Term Memory (LSTM) Recurrent Neural Networks and Support Vector Machines. Fusion of geometric shape features and acoustic low-level descriptors is performed on the feature level. Three different types of acoustic noise are considered: babble, office and street noise. Experiments are conducted on every noise type to asses the benefit of the fusion in each case. As database for evaluations serves the INTERSPEECH 2010 Paralinguistic Challenge{\textquoteright}s Audiovisual Interest Corpus of human-to-human natural conversation. The results show that even when training is performed on noise corrupted audio which matches the test conditions the addition of visual features is still beneficial.",
keywords = "EWI-23055, METIS-296292, IR-84320, HMI-MI: MULTIMODAL INTERACTIONS",
author = "Florian Eyben and Stavros Petridis and Bj{\"o}rn Schuller and Maja Pantic",
note = "10.1109/ICASSP.2012.6289067 ; null ; Conference date: 25-03-2012 Through 30-03-2012",
year = "2012",
month = mar,
day = "25",
doi = "10.1109/ICASSP.2012.6289067",
language = "Undefined",
isbn = "978-1-4673-0045-2",
publisher = "IEEE Computer Society",
pages = "5097--5100",
booktitle = "Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2012",
address = "United States",
}