@book{3b9b48016c554ffeae81e5d984ebdffe,
title = "Development of Multimodal Interfaces: Active Listening and Synchrony",
abstract = "This volume brings together, through a peer-revision process, the advanced research results obtained by the European COST Action 2102: Cross-Modal Analysis of Verbal and Nonverbal Communication, primarily discussed for the first time at the Second COST 2102 International Training School on “Development of Multimodal Interfaces: Active Listening and Synchrony��? held in Dublin, Ireland, March 23–27 2009. The school was sponsored by COST (European Cooperation in the Field of Scientific and Technical Research, www.cost.esf.org ) in the domain of Information and Communication Technologies (ICT) for disseminating the advances of the research activities developed within the COST Action 2102: “Cross-Modal Analysis of Verbal and Nonverbal Communication��? (cost2102.cs.stir.ac.uk) COST Action 2102 in its third year of life brought together about 60 European and 6 overseas scientific laboratories whose aim is to develop interactive dialogue systems and intelligent virtual avatars graphically embodied in a 2D and/or 3D interactive virtual world, capable of interacting intelligently with the environment, other avatars, and particularly with human users. The main focus of the school was the development of multimodal interfaces. Traditional approaches to multimodal interface design tend to assume a “ping-pong��? or “push-to-talk��? approach to speech interaction wherein either the system or the human interlocutor is active at any one time. This is contrary to many recent findings in conversation and discourse analysis, where the definition of a “turn��? or even an “utterance��? is found to be very complex. People don{\textquoteright}t “take turns��? to talk in a typical conversational interaction, but they each contribute actively to the joint emergence of a “common understanding.��? The sub-theme of the school was “Synchrony and Active Listening��? selected with the idea to identify contributions that actively give support to the ongoing research into the dynamics of human spoken interaction, to the production of multimodal conversation data and to the subsequent analysis and modelling of interaction dynamics, with the dual goal of appropriately designing multimodal interfaces, as well as providing new approaches and developmental paradigms.",
keywords = "active listening, Multi-modal interaction, METIS-270732, EWI-17439, IR-70724, HMI-MI: MULTIMODAL INTERACTIONS, Synchrony, cross-modality",
author = "Anna Esposito and Carl Vogel and Amir Hussain",
editor = "Nick Campbell and Antinus Nijholt",
note = "10.1007/978-3-642-12397-9 ",
year = "2010",
month = mar,
day = "27",
doi = "10.1007/978-3-642-12397-9",
language = "Undefined",
isbn = "978-3-642-12396-2",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
}