@inproceedings{3eaad527548347d58588fd5ebcc5bd0a,
title = "Classifying motor imagery in presence of speech",
abstract = "In the near future, brain-computer interface (BCI) applications for non-disabled users will require multimodal interaction and tolerance to dynamic environments. However, this conflicts with the highly sensitive recording techniques used for BCIs, such as electroencephalography (EEG). Advanced machine learning and signal processing techniques are required to decorrelate the desired brain signals from the rest. This paper proposes a signal processing pipeline and two classification methods suitable for multiclass EEG analysis. The methods were tested in an experiment on separating left/right hand imagery in the presence/absence of speech. The analyses showed that the presence of speech during motor imagery did not significantly affect the classification accuracy and that, regardless of the presence of speech, the proposed methods were able to separate left and right hand imagery with an accuracy of 60%. The best overall accuracy achieved for the 5-class separation of all the tasks was 47%, and both proposed methods performed equally well. In addition, the analysis of event-related spectral power changes revealed characteristics related to motor imagery and speech.",
author = "Hayrettin G{\"u}rk{\"o}k and Mannes Poel and Jakob Zwiers",
note = "2010 IEEE International Joint Conference on Neural Networks, IJCNN 2010 ; Conference date: 18-07-2010 through 23-07-2010",
year = "2010",
month = oct,
day = "14",
doi = "10.1109/IJCNN.2010.5595733",
language = "English",
isbn = "978-1-4244-6916-1",
publisher = "IEEE",
pages = "1235--1242",
booktitle = "The 2010 International Joint Conference on Neural Networks (IJCNN)",
address = "United States",
}