@inproceedings{5af4b25842f046ae9cb20404a628972f,
title = "Supporting Engagement and Floor Control in Hybrid Meetings",
abstract = "Remote participants in hybrid meetings often have problems to follow what is going on in the (physical) meeting room they are connected with. This paper describes a videoconferencing system for participation in hybrid meetings. The system has been developed as a research vehicle to see how technology based on automatic real-time recognition of conversational behavior in meetings can be used to improve engagement and floor control by remote participants. The system uses modules for online speech recognition, real-time visual focus of attention as well as a module that signals who is being addressed by the speaker. A built-in keyword spotter allows an automatic meeting assistant to call the remote participant{\textquoteright}s attention when a topic of interest is raised, pointing at the transcription of the fragment to help him catch-up.",
keywords = "HMI-MI: MULTIMODAL INTERACTIONS, EC Grant Agreement nr.: FP6/0033812, METIS-265736, IR-67693, EWI-14716",
author = "{op den Akker}, {Hendrikus J.A.} and D.H.W. Hofs and G.H.W. Hondorp and {op den Akker}, Harm and Jakob Zwiers and Antinus Nijholt",
note = "10.1007/978-3-642-03320-9_26 ; null ; Conference date: 14-07-2009",
year = "2009",
month = jul,
day = "14",
doi = "10.1007/978-3-642-03320-9_26",
language = "Undefined",
isbn = "978-3-642-03319-3",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "276--290",
editor = "Anna Esposito and Robert Vich",
booktitle = "Cross-Modal Analysis of Speech, Gestures, Gaze and Facial Expressions",
address = "Netherlands",
}