% Bibliography database (exported reference list; web-scrape banner removed).
@article{1, title = {Fundamentals of {EEG} measurement}, author = {M. Teplan}, journal = {Meas. Sci. Rev.}, volume = {2}, number = {2}, pages = {1--11}, year = {2002}}
@book{2, title = {Niedermeyer's Electroencephalography: Basic Principles, Clinical Applications, and Related Fields}, editor = {D. L. Schomer and F. H. Lopes da Silva}, edition = {7}, year = {2017}, publisher = {Oxford University Press}, doi = {10.1093/med/9780190228484.001.0001}}
@article{3, title = {Review of challenges associated with the {EEG} artifact removal methods}, author = {W. Mumtaz and S. Rasheed and A. Irfan}, journal = {Biomed. Signal Process. Control}, volume = {68}, pages = {102741}, year = {2021}, doi = {10.1016/j.bspc.2021.102741}}
@article{4, title = {{EEG} signal complexity measurements to enhance {BCI}-based stroke patients' rehabilitation}, author = {N. K. Al-Qazzaz and A. A. Aldoori and S. H. B. M. Ali and S. A. Ahmad and A. K. Mohammed and M. I. Mohyee}, journal = {Sensors}, volume = {23}, number = {8}, pages = {3889}, year = {2023}, doi = {10.3390/s23083889}}
@article{5, title = {Deep learning for electroencephalogram ({EEG}) classification tasks: {A} review}, author = {A. Craik and Y. He and J. L. Contreras-Vidal}, journal = {J. Neural Eng.}, volume = {16}, number = {3}, pages = {031001}, year = {2019}, doi = {10.1088/1741-2552/ab0ab5}}
@article{6, title = {Deep learning-based electroencephalography analysis: {A} systematic review}, author = {Y. Roy and H. Banville and I. Albuquerque and A. Gramfort and T. H. Falk and J. Faubert}, journal = {J. Neural Eng.}, volume = {16}, number = {5}, pages = {051001}, year = {2019}, doi = {10.1088/1741-2552/ab260c}}
@article{7, title = {Deep learning with convolutional neural networks for {EEG} decoding and visualization}, author = {R. T. Schirrmeister and J. T. Springenberg and L. D. J. Fiederer and M. Glasstetter and K. Eggensperger and M. Tangermann and F. Hutter and W. Burgard and T. Ball}, journal = {Hum. Brain Mapp.}, volume = {38}, number = {11}, pages = {5391--5420}, year = {2017}, doi = {10.1002/hbm.23730}}
@article{8, title = {The applied principles of {EEG} analysis methods in neuroscience and clinical neurology}, author = {H. Zhang and Q. Q. Zhou and H. Chen and X. Q. Hu and W. G. Li and Y. Bai and J. X. Han and Y. Wang and Z. H. Liang and D. Chen and F. Y. Cong and J. Q. Yan and X. L. Li}, journal = {Mil. Med. Res.}, volume = {10}, number = {1}, pages = {67}, year = {2023}, doi = {10.1186/s40779-023-00502-7}}
@article{9, title = {Neural decoding of {EEG} signals with machine learning: {A} systematic review}, author = {M. Saeidi and W. Karwowski and F. V. Farahani and K. Fiok and R. Taiar and P. A. Hancock and A. Al-Juaid}, journal = {Brain Sci.}, volume = {11}, number = {11}, pages = {1525}, year = {2021}, doi = {10.3390/brainsci11111525}}
@article{10, title = {{EEG} Signal Processing for {Alzheimer's} Disorders Using Discrete Wavelet Transform and Machine Learning Approaches}, author = {K. AlSharabi and Y. Bin Salamah and A. M. Abdurraqeeb and M. Aljalal and F. A. Alturki}, journal = {IEEE Access}, volume = {10}, pages = {89781--89797}, year = {2022}, doi = {10.1109/ACCESS.2022.3198988}}
@article{11, title = {{EEG}-based clinical decision support system for {Alzheimer's} disorders diagnosis using {EMD} and deep learning techniques}, author = {K. AlSharabi and Y. B. Salamah and M. Aljalal and A. M. Abdurraqeeb and F. A. Alturki}, journal = {Front. Hum. Neurosci.}, volume = {17}, pages = {1190203}, year = {2023}, doi = {10.3389/fnhum.2023.1190203}}
@article{12, title = {Retraining and evaluation of machine learning and deep learning models for seizure classification from {EEG} data}, author = {J. P. Carvajal-Dossman and L. Guio and D. Garc{\'i}a-Orjuela and J. J. Guzm{\'a}n-Porras and K. Garces and A. Naranjo and S. J. Maradei-Anaya and J. Duitama}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {15345}, year = {2025}, doi = {10.1038/s41598-025-98389-y}}
@article{13, title = {The combination of brain-computer interfaces and artificial intelligence: Applications and challenges}, author = {X. Y. Zhang and Z. Y. Ma and H. J. Zheng and T. K. Li and K. X. Chen and X. Wang and C. T. Liu and L. X. Xu and X. H. Wu and D. R. Lin and H. Lin}, journal = {Ann. Transl. Med.}, volume = {8}, number = {11}, pages = {712}, year = {2020}, doi = {10.21037/atm.2019.11.109}}
@book{14, title = {{EEG} Normal Waveforms}, author = {C. S. Nayak and A. C. Anilkumar}, year = {2025}, publisher = {StatPearls Publishing}, url = {http://www.ncbi.nlm.nih.gov/books/NBK539805/}}
@article{15, title = {Removal of Artifacts from {EEG} Signals: {A} Review}, author = {X. Jiang and G. B. Bian and Z. Tian}, journal = {Sensors}, volume = {19}, number = {5}, pages = {987}, year = {2019}, doi = {10.3390/s19050987}}
@article{16, title = {Protocol for semi-automatic {EEG} preprocessing incorporating independent component analysis and principal component analysis}, author = {G. Ouyang and Y. Li}, journal = {STAR Protoc.}, volume = {6}, number = {1}, pages = {103682}, year = {2025}, doi = {10.1016/j.xpro.2025.103682}}
@article{17, title = {Electroencephalographic imaging of higher brain function}, author = {A. Gevins and M. E. Smith and L. K. McEvoy and H. Leong and J. Le}, journal = {Phil. Trans. R. Soc. Lond. B}, volume = {354}, number = {1387}, pages = {1125--1134}, year = {1999}, doi = {10.1098/rstb.1999.0468}}
@article{18, title = {Electroencephalography ({EEG}) and event-related potentials ({ERPs}) with human participants}, author = {G. A. Light and L. E. Williams and F. Minow and J. Sprock and A. Rissling and R. Sharp and D. L. Braff}, journal = {Curr. Protoc. Neurosci.}, volume = {52}, number = {1}, pages = {6--25}, year = {2010}, doi = {10.1002/0471142301.ns0625s52}}
@article{19, title = {Neural biomarker diagnosis and prediction to mild cognitive impairment and {Alzheimer's} disease using {EEG} technology}, author = {B. Jiao and R. H. Li and H. Zhou and K. Q. Qing and H. Liu and H. F. Pan and Y. Q. Lei and W. J. Fu and X. A. Wang and X. W. Xiao and X. X. Liu and Q. J. Yang and X. X. Liao and Y. F. Zhou and L. J. Fang and Y. B. Dong and Y. H. Yang and H. Y. Jiang and S. Huang and L. Shen}, journal = {Alz. Res. Therapy}, volume = {15}, pages = {32}, year = {2023}, doi = {10.1186/s13195-023-01181-1}}
@article{20, title = {Effects of Fatigue Based on Electroencephalography Signal during Laparoscopic Surgical Simulation}, author = {N. Z. Ndaro and S.-Y. Wang}, journal = {Minim. Invasive Surg.}, volume = {2018}, number = {1}, pages = {2389158}, year = {2018}, doi = {10.1155/2018/2389158}}
@article{21, title = {Emotion detection using electroencephalography signals and a zero-time windowing-based epoch estimation and relevant electrode identification}, author = {S. Gannouni and A. Aledaily and K. Belwafi and H. Aboalsamh}, journal = {Sci. Rep.}, volume = {11}, number = {1}, pages = {7071}, year = {2021}, doi = {10.1038/s41598-021-86345-5}}
@article{22, title = {{EEG} power spectral measures of cognitive workload: {A} meta-analysis}, author = {S. Chikhi and N. Matton and S. Blanchet}, journal = {Psychophysiology}, volume = {59}, number = {6}, pages = {e14009}, year = {2022}, doi = {10.1111/psyp.14009}}
@article{23, title = {Spotlight on sleep stage classification based on {EEG}}, author = {I. Lambert and L. Peter-Derex}, journal = {Nat. Sci. Sleep}, volume = {15}, pages = {479--490}, year = {2023}, doi = {10.2147/NSS.S401270}}
@incollection{24, title = {Physiology, sleep stages}, author = {A. K. Patel and V. Reddy and K. R. Shumway and J. F. Araujo}, booktitle = {StatPearls}, year = {2024}, publisher = {StatPearls Publishing}, url = {http://www.ncbi.nlm.nih.gov/books/NBK526132/}}
@article{25, title = {Current Status, Challenges, and Possible Solutions of {EEG}-Based Brain-Computer Interface: {A} Comprehensive Review}, author = {M. Rashid and N. Sulaiman and A. P. P. Abdul Majeed and R. M. Musa and A. F. Ab. Nasir and B. S. Bari and S. Khatun}, journal = {Front. Neurorob.}, volume = {14}, pages = {25}, year = {2020}, doi = {10.3389/fnbot.2020.00025}}
@article{26, title = {Neurofeedback: {A} comprehensive review on system design, methodology and clinical applications}, author = {H. Marzbani and H. R. Marateb and M. Mansourian}, journal = {Basic Clin. Neurosci.}, volume = {7}, number = {2}, pages = {143--158}, year = {2016}, doi = {10.15412/J.BCN.03070208}}
@article{27, title = {Advances in {P300} brain--computer interface spellers: Toward paradigm design and performance evaluation}, author = {J. H. Pan and X. N. Chen and N. M. Ban and J. S. He and J. Y. Chen and H. Y. Huang}, journal = {Front. Hum. Neurosci.}, volume = {16}, pages = {1077717}, year = {2022}, doi = {10.3389/fnhum.2022.1077717}}
@article{28, title = {{EEG}-Based {BCIs} on Motor Imagery Paradigm Using Wearable Technologies: {A} Systematic Review}, author = {A. Saibene and M. Caglioni and S. Corchs and F. Gasparini}, journal = {Sensors}, volume = {23}, number = {5}, pages = {2798}, year = {2023}, doi = {10.3390/s23052798}}
@article{29, title = {A review of classification algorithms for {EEG}-based brain--computer interfaces}, author = {F. Lotte and M. Congedo and A. L{\'e}cuyer and F. Lamarche and B. Arnaldi}, journal = {J. Neural Eng.}, volume = {4}, number = {2}, pages = {R1}, year = {2007}, doi = {10.1088/1741-2560/4/2/R01}}
@article{30, title = {Support-vector networks}, author = {C. Cortes and V. Vapnik}, journal = {Mach. Learn.}, volume = {20}, number = {3}, pages = {273--297}, year = {1995}, doi = {10.1007/BF00994018}}
@article{31, title = {Optimal spatial filtering of single trial {EEG} during imagined hand movement}, author = {H. Ramoser and J. Muller-Gerking and G. Pfurtscheller}, journal = {IEEE Trans. Rehab. Eng.}, volume = {8}, number = {4}, pages = {441--446}, year = {2000}, doi = {10.1109/86.895946}}
@article{32, title = {{LEDPatNet19}: Automated Emotion Recognition Model based on Nonlinear {LED} Pattern Feature Extraction Function using {EEG} Signals}, author = {T. Tuncer and S. Dogan and A. Subasi}, journal = {Cogn. Neurodyn.}, volume = {16}, number = {4}, pages = {779--790}, year = {2022}, doi = {10.1007/s11571-021-09748-0}}
@article{33, title = {{EEG} signal classification using wavelet feature extraction and a mixture of expert model}, author = {A. Subasi}, journal = {Expert Syst. Appl.}, volume = {32}, number = {4}, pages = {1084--1093}, year = {2007}, doi = {10.1016/j.eswa.2006.02.005}}
@article{34, title = {A tutorial review of functional connectivity analysis methods and their interpretational pitfalls}, author = {A. M. Bastos and J.-M. Schoffelen}, journal = {Front. Syst. Neurosci.}, volume = {9}, pages = {175}, year = {2016}, doi = {10.3389/fnsys.2015.00175}}
@article{35, title = {Random forests}, author = {L. Breiman}, journal = {Mach. Learn.}, volume = {45}, number = {1}, pages = {5--32}, year = {2001}, doi = {10.1023/A:1010933404324}}
@article{36, title = {Gradient-based learning applied to document recognition}, author = {Y. Lecun and L. Bottou and Y. Bengio and P. Haffner}, journal = {Proc. IEEE}, volume = {86}, number = {11}, pages = {2278--2324}, year = {1998}, doi = {10.1109/5.726791}}
@inproceedings{37, title = {Chrono{N}et: {A} Deep Recurrent Neural Network for Abnormal {EEG} Identification}, author = {S. Roy and I. Kiral-Kornek and S. Harrer}, editor = {D. Ria{\~n}o and S. Wilk and A. ten Teije}, booktitle = {Artificial Intelligence in Medicine}, series = {Lecture Notes in Computer Science}, volume = {11526}, pages = {47--56}, year = {2019}, address = {Cham, Switzerland}, doi = {10.1007/978-3-030-21642-9_8}}
@article{38, title = {{EEGNet}: {A} compact convolutional neural network for {EEG}-based brain--computer interfaces}, author = {V. J. Lawhern and A. J. Solon and N. R. Waytowich and S. M. Gordon and C. P. Hung and B. J. Lance}, journal = {J. Neural Eng.}, volume = {15}, number = {5}, pages = {056013}, year = {2018}, doi = {10.1088/1741-2552/aace8c}}
@article{39, title = {Learning Representations from {EEG} with Deep Recurrent-Convolutional Neural Networks}, author = {P. Bashivan and I. Rish and M. Yeasin and N. Codella}, journal = {arXiv}, year = {2015}, doi = {10.48550/arXiv.1511.06448}, eprint = {1511.06448}, eprinttype = {arXiv}}
@article{40, title = {Long short-term memory}, author = {S. Hochreiter and J. Schmidhuber}, journal = {Neural Comput.}, volume = {9}, number = {8}, pages = {1735--1780}, year = {1997}, doi = {10.1162/neco.1997.9.8.1735}}
@article{41, title = {On the Properties of Neural Machine Translation: Encoder-Decoder Approaches}, author = {K. Cho and B. van Merrienboer and D. Bahdanau and Y. Bengio}, journal = {arXiv}, year = {2014}, doi = {10.48550/arXiv.1409.1259}, eprint = {1409.1259}, eprinttype = {arXiv}}
@article{42, title = {Enhancing {EEG} signals classification using {LSTM-CNN} architecture}, author = {S. M. Omar and M. Kimwele and A. Olowolayemo and D. M. Kaburu}, journal = {Eng. Rep.}, volume = {6}, number = {9}, pages = {e12827}, year = {2024}, doi = {10.1002/eng2.12827}}
@article{43, title = {{EEGformer}: {A} transformer-based brain activity classification method using {EEG} signal}, author = {Z. J. Wan and M. Y. Li and S. C. Liu and J. J. Huang and H. Tan and W. F. Duan}, journal = {Front. Neurosci.}, volume = {17}, pages = {1148855}, year = {2023}, doi = {10.3389/fnins.2023.1148855}}
@article{44, title = {Attention Is All You Need}, author = {A. Vaswani and N. Shazeer and N. Parmar and J. Uszkoreit and L. Jones and A. N. Gomez and {\L}. Kaiser and I. Polosukhin}, journal = {arXiv}, year = {2017}, doi = {10.48550/arXiv.1706.03762}, eprint = {1706.03762}, eprinttype = {arXiv}}
@article{45, title = {Transformers in {EEG} Analysis: {A} review of architectures and applications in motor imagery, seizure, and emotion classification}, author = {E. Vafaei and M. Hosseini}, journal = {Sensors}, volume = {25}, number = {5}, pages = {1293}, year = {2025}, doi = {10.3390/s25051293}}
@article{46, title = {{MP-SeizNet}: {A} multi-path {CNN} {Bi-LSTM} Network for seizure-type classification using {EEG}}, author = {H. Albaqami and G. M. Hassan and A. Datta}, journal = {Biomed. Signal Process. Control}, volume = {84}, pages = {104780}, year = {2023}, doi = {10.1016/j.bspc.2023.104780}}
@article{47, title = {{EEG}-based Brain-Computer Interfaces: {An} Overview of Basic Concepts and Clinical Applications in Neurorehabilitation}, author = {S. Machado and F. Ara{\'u}jo and F. Paes and B. Velasques and M. Cunha and H. Budde and L. F. Basile and R. Anghinah and O. Arias-Carri{\'o}n and M. Cagy and R. Piedade and T. A. de Graaf and A. T. Sack and P. Ribeiro}, journal = {Rev. Neurosci.}, volume = {21}, number = {6}, pages = {451--468}, year = {2010}, doi = {10.1515/REVNEURO.2010.21.6.451}}
@article{48, title = {A survey on robots controlled by motor imagery brain-computer interfaces}, author = {J. Zhang and M. Wang}, journal = {Cogn. Robot.}, volume = {1}, pages = {12--24}, year = {2021}, doi = {10.1016/j.cogr.2021.02.001}}
@article{49, title = {Optimizing Motor Imagery Parameters for Robotic Arm Control by Brain-Computer Interface}, author = {{\"U}. Hayta and D. C. Irimia and C. Guger and {\.I}. Erkutlu and {\.I}. H. G{\"u}zelbey}, journal = {Brain Sci.}, volume = {12}, number = {7}, pages = {833}, year = {2022}, doi = {10.3390/brainsci12070833}}
@article{50, title = {Trends in {EEG} signal feature extraction applications}, author = {A. K. Singh and S. Krishnan}, journal = {Front. Artif. Intell.}, volume = {5}, pages = {1072801}, year = {2023}, doi = {10.3389/frai.2022.1072801}}
@article{51, title = {Machine learning techniques for electroencephalogram based brain-computer interface: {A} systematic literature review}, author = {Pawan and R. Dhiman}, journal = {Meas. Sens.}, volume = {28}, pages = {100823}, year = {2023}, doi = {10.1016/j.measen.2023.100823}}
@article{52, title = {Development of real-time brain-computer interface control system for robot}, author = {Y. An and J. Wong and S. H. Ling}, journal = {Appl. Soft Comput.}, volume = {159}, pages = {111648}, year = {2024}, doi = {10.1016/j.asoc.2024.111648}}
@article{53, title = {Explainable artificial intelligence approaches for brain-computer interfaces: {A} review and design space}, author = {P. Rajpura and H. Cecotti and Y. K. Meena}, journal = {J. Neural Eng.}, volume = {21}, number = {4}, pages = {0401003}, year = {2024}, doi = {10.1088/1741-2552/ad6593}}
@article{54, title = {Interpretable and robust {AI} in {EEG} systems: {A} survey}, author = {X. L. Zhou and C. Y. Liu and J. N. Zhou and Z. R. Wang and L. M. Zhai and Z. Y. Jia and C. T. Guan and Y. Liu}, journal = {arXiv}, year = {2023}, doi = {10.48550/arXiv.2304.10755}, eprint = {2304.10755}, eprinttype = {arXiv}}
@article{55, title = {A review of epileptic seizure detection using machine learning classifiers}, author = {M. K. Siddiqui and R. Morales-Menendez and X. Huang and N. Hussain}, journal = {Brain Inf.}, volume = {7}, number = {1}, pages = {5}, year = {2020}, doi = {10.1186/s40708-020-00105-1}}
@article{56, title = {A p300-detection method based on logistic regression and a convolutional neural network}, author = {Q. Li and Y. Wu and Y. Yu and D. Zhao and M. Q. Sun and Z. L. Zhang and J. L. Wu}, journal = {Front. Comput. Neurosci.}, volume = {16}, pages = {909553}, year = {2022}, doi = {10.3389/fncom.2022.909553}}
@article{57, title = {Evaluation of machine learning algorithms for classification of {EEG} signals}, author = {F. J. Ram{\'i}rez-Arias and E. E. Garc{\'i}a-Guerrero and E. Tlelo-Cuautle and J. M. Colores-Vargas and E. Garc{\'i}a-Canseco and O. R. L{\'o}pez-Bonilla and G. M. Galindo-Aldana and E. Inzunza-Gonz{\'a}lez}, journal = {Technologies}, volume = {10}, number = {4}, pages = {79}, year = {2022}, doi = {10.3390/technologies10040079}}
@article{58, title = {{EEG} datasets for seizure detection and prediction---{A} review}, author = {S. Wong and A. Simmons and J. Rivera-Villicana and S. Barnett and S. Sivathamboo and P. Perucca and Z. Y. Ge and P. Kwan and L. Kuhlmann and R. Vasa and K. Mouzakis and T. J. O'Brien}, journal = {Epilepsia Open}, volume = {8}, number = {2}, pages = {252--267}, year = {2023}, doi = {10.1002/epi4.12704}}
@article{59, title = {A comprehensive review of deep learning in {EEG}-based emotion recognition: Classifications, trends, and practical implications}, author = {W. Z. Ma and Y. J. Zheng and T. H. Li and Z. P. Li and Y. Li and L. J. Wang}, journal = {PeerJ Comput. Sci.}, volume = {10}, pages = {e2065}, year = {2024}, doi = {10.7717/peerj-cs.2065}}
@article{60, title = {A novel {AI}-driven {EEG} generalized classification model for cross-subject and cross-scene analysis}, author = {J. J. Li and C. H. Lee and Y. H. Zhou and T. G. Liu and T. P. Jung and X. L. Wan and D. N. Duan and D. Wen}, journal = {Adv. Eng. Inform.}, volume = {63}, pages = {102971}, year = {2025}, doi = {10.1016/j.aei.2024.102971}}
@article{61, title = {A Depth of Anaesthesia Index from Linear Regression of {EEG} Parameters}, author = {A. Kumar and S. Anand}, journal = {J. Clin. Monit. Comput.}, volume = {20}, number = {2}, pages = {67--73}, year = {2006}, doi = {10.1007/s10877-005-9004-x}}
@article{62, title = {A regression method for {EEG}-based cross-dataset fatigue detection}, author = {D. Y. Yuan and J. W. Yue and X. F. Xiong and Y. B. Jiang and P. Zan and C. Y. Li}, journal = {Front. Physiol.}, volume = {14}, pages = {1196919}, year = {2023}, doi = {10.3389/fphys.2023.1196919}}
@article{63, title = {Comparison of {LSTM}- and {GRU}-Type {RNN} Networks for Attention and Meditation Prediction on Raw {EEG} Data from Low-Cost Headsets}, author = {F. Rivas and J. E. Sierra-Garcia and J. M. Camara}, journal = {Electronics}, volume = {14}, number = {4}, pages = {707}, year = {2025}, doi = {10.3390/electronics14040707}}
@inproceedings{64, title = {Generative Adversarial Networks}, author = {I. J. Goodfellow and J. Pouget-Abadie and M. Mirza and B. Xu and D. Warde-Farley and S. Ozair and A. Courville and Y. Bengio}, booktitle = {Advances in Neural Information Processing Systems}, year = {2014}, url = {https://proceedings.neurips.cc/paper_files/paper/2014/file/f033ed80deb0234979a61f95710dbe25-Paper.pdf}}
@article{65, title = {Auto-Encoding Variational Bayes}, author = {D. P. Kingma and M. Welling}, journal = {arXiv}, year = {2013}, doi = {10.48550/arXiv.1312.6114}, eprint = {1312.6114}, eprinttype = {arXiv}}
@article{66, title = {Generative adversarial networks in {EEG} analysis: An overview}, author = {A. G. Habashi and A. M. Azab and S. Eldawlatly and G. M. Aly}, journal = {J. Neuroeng. Rehabil.}, volume = {20}, number = {1}, pages = {40}, year = {2023}, doi = {10.1186/s12984-023-01169-w}}
@article{67, title = {Virtual Electroencephalogram Acquisition: {A} Review on Electroencephalogram Generative Methods}, author = {Z. S. You and Y. Z. Guo and X. L. Zhang and Y. F. Zhao}, journal = {Sensors}, volume = {25}, number = {10}, pages = {3178}, year = {2025}, doi = {10.3390/s25103178}}
@article{68, title = {{EEGGAN-Net}: Enhancing {EEG} signal classification through data augmentation}, author = {J. X. Song and Q. Zhai and C. Wang and J. Z. Liu}, journal = {Front. Hum. Neurosci.}, volume = {18}, pages = {1430086}, year = {2024}, doi = {10.3389/fnhum.2024.1430086}}
@article{69, title = {{ATGAN}: Attention-based temporal {GAN} for {EEG} data augmentation in personal identification}, author = {S. Zhang and L. Sun and X. Q. Mao and M. Zhao and Y. D. Hu}, journal = {EURASIP J. Adv. Signal Process.}, volume = {2024}, number = {1}, pages = {94}, year = {2024}, doi = {10.1186/s13634-024-01188-2}}
@article{70, title = {Synthetic {ALS-EEG} Data Augmentation for {ALS} Diagnosis Using Conditional {WGAN} with Weight Clipping}, author = {A. Mutlu and {\c{S}}. Do{\u{g}}an and T. Tuncer}, journal = {arXiv}, year = {2025}, doi = {10.48550/arXiv.2506.16243}, eprint = {2506.16243}, eprinttype = {arXiv}}
@article{71, title = {Domain-Specific Denoising Diffusion Probabilistic Models for Brain Dynamics}, author = {Y. Q. Duan and J. Z. Zhou and Z. Wang and Y.-C. Chang and Y.-K. Wang and C.-T. Lin}, journal = {arXiv}, year = {2023}, doi = {10.48550/arXiv.2305.04200}, eprint = {2305.04200}, eprinttype = {arXiv}}
@article{72, title = {Survey on the research direction of {EEG}-based signal processing}, author = {C. Z. Sun and C. Z. Mou}, journal = {Front. Neurosci.}, volume = {17}, pages = {1203059}, year = {2023}, doi = {10.3389/fnins.2023.1203059}}
@article{73, title = {Cross-subject {EEG} emotion recognition using multi-source domain manifold feature selection}, author = {Q. S. She and X. S. Shi and F. Fang and Y. L. Ma and Y. C. Zhang}, journal = {Comput. Biol. Med.}, volume = {159}, pages = {106860}, year = {2023}, doi = {10.1016/j.compbiomed.2023.106860}}
@article{74, title = {Data augmentation for {EEG}-based emotion recognition using generative adversarial networks}, author = {G. C. Bao and B. Yan and L. Tong and J. Shu and L. Y. Wang and K. Yang and Y. Zeng}, journal = {Front. Comput. Neurosci.}, volume = {15}, pages = {723843}, year = {2021}, doi = {10.3389/fncom.2021.723843}}
@article{75, title = {Data augmentation strategies for {EEG}-based motor imagery decoding}, author = {O. George and R. Smith and P. Madiraju and N. Yahyasoltani and S. I. Ahamed}, journal = {Heliyon}, volume = {8}, number = {8}, pages = {e10240}, year = {2022}, doi = {10.1016/j.heliyon.2022.e10240}}
@article{76, title = {Generating realistic neurophysiological time series with denoising diffusion probabilistic models}, author = {J. Vetter and J. H. Macke and R. Gao}, journal = {Patterns}, volume = {5}, number = {9}, pages = {101047}, year = {2024}, doi = {10.1016/j.patter.2024.101047}}
@article{77, title = {{EEGDfus}: {A} Conditional Diffusion Model for Fine-Grained {EEG} Denoising}, author = {X. Y. Huang and C. Li and A. P. Liu and R. B. Qian and X. Chen}, journal = {IEEE J. Biomed. Health Inform.}, volume = {29}, number = {4}, pages = {2557--2569}, year = {2025}, doi = {10.1109/JBHI.2024.3504717}}
@article{78, title = {Improving {EEG} Classification Through Randomly Reassembling Original and Generated Data with Transformer-based Diffusion Models}, author = {M. Z. Chen and Y. Y. Gui and Y. Q. Su and Y. S. Zhu and G. B. Luo and Y. C. Yang}, journal = {arXiv}, year = {2024}, doi = {10.48550/arXiv.2407.20253}, eprint = {2407.20253}, eprinttype = {arXiv}}
@article{79, title = {Neurophysiological data augmentation for {EEG-fNIRS} multimodal features based on a denoising diffusion probabilistic model}, author = {L. Chen and Z. Yin and X. L. Gu and X. W. Zhang and X. S. Cao and C. J. Zhang and X. O. Li}, journal = {Comput. Methods Programs Biomed.}, volume = {261}, pages = {108594}, year = {2025}, doi = {10.1016/j.cmpb.2025.108594}}
@article{80, title = {Synthesizing {EEG} Signals from Event-Related Potential Paradigms with Conditional Diffusion Models}, author = {G. Klein and P. Guetschel and G. Silvestri and M. Tangermann}, journal = {arXiv}, year = {2024}, doi = {10.3217/978-3-99161-014-4-077}}
@article{81, title = {{EEGDM}: {EEG} Representation Learning via Generative Diffusion Model}, author = {J. H. Puah and S. K. Goh and Z. Zhang and Z. Ye and C. K. Chan and K. S. Lim and S. L. Fong and K. S. Woon and C. T. Guan}, journal = {arXiv}, year = {2025}, doi = {10.48550/arXiv.2508.14086}, eprint = {2508.14086}, eprinttype = {arXiv}}
@article{82, title = {{EEG} microstate sequences from different clustering algorithms are information-theoretically invariant}, author = {F. Von Wegner and P. Knaut and H. Laufs}, journal = {Front. Comput. Neurosci.}, volume = {12}, pages = {70}, year = {2018}, doi = {10.3389/fncom.2018.00070}}
@article{83, title = {A comprehensive study of auto-encoders for anomaly detection: Efficiency and trade-offs}, author = {A. A. Neloy and M. Turgeon}, journal = {Mach. Learn. Appl.}, volume = {17}, pages = {100572}, year = {2024}, doi = {10.1016/j.mlwa.2024.100572}}
@article{84, title = {{EEG} signal classification using {PCA}, {ICA}, {LDA} and support vector machines}, author = {A. Subasi and M. Ismail Gursoy}, journal = {Expert Syst. Appl.}, volume = {37}, number = {12}, pages = {8659--8666}, year = {2010}, doi = {10.1016/j.eswa.2010.06.065}}
@inproceedings{85, title = {{kNN} and {SVM} Classification for {EEG}: {A} Review}, author = {M. N. A. H. Sha'abani and N. Fuad and N. Jamal and M. F. Ismail}, booktitle = {InECCE2019}, series = {Lecture Notes in Electrical Engineering}, volume = {632}, pages = {555--565}, year = {2020}, address = {Kuantan, Malaysia}, doi = {10.1007/978-981-15-2317-5_47}}
@article{86, title = {Applying machine learning {EEG} signal classification to emotion-related brain anticipatory activity}, author = {M. Bilucaglia and G. M. Duma and G. Mento and L. Semenzato and P. E. Tressoldi}, journal = {F1000Res.}, volume = {9}, pages = {173}, year = {2021}, doi = {10.12688/f1000research.22202.3}}
@article{87, title = {{EEG}-based emotion recognition using tunable {Q} wavelet transform and rotation forest ensemble classifier}, author = {A. Subasi and T. Tuncer and S. Dogan and D. Tanko and U. Sakoglu}, journal = {Biomed. Signal Process. Control}, volume = {68}, pages = {102648}, year = {2021}, doi = {10.1016/j.bspc.2021.102648}}
@article{88, title = {{MI-EEGNET}: {A} novel convolutional neural network for motor imagery classification}, author = {M. Riyad and M. Khalil and A. Adib}, journal = {J. Neurosci. Methods}, volume = {353}, pages = {109037}, year = {2021}, doi = {10.1016/j.jneumeth.2020.109037}}
@article{89, title = {{EEG}-based epilepsy detection using {CNN-SVM} and {DNN-SVM} with feature dimensionality reduction by {PCA}}, author = {Y. Berrich and Z. Guennoun}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {14313}, year = {2025}, doi = {10.1038/s41598-025-95831-z}}
@article{90, title = {{GCNs-Net}: {A} Graph Convolutional Neural Network Approach for Decoding Time-Resolved {EEG} Motor Imagery Signals}, author = {Y. M. Hou and S. Y. Jia and X. M. Lun and Z. Q. Hao and Y. Shi and Y. Li and R. Zeng and J. L. Lv}, journal = {IEEE Trans. Neural Netw. Learn. Syst.}, volume = {35}, number = {6}, pages = {7312--7323}, year = {2022}, doi = {10.1109/TNNLS.2022.3202569}}
@article{91, title = {{EEG} temporal--spatial transformer for person identification}, author = {Y. Du and Y. Xu and X. Wang and L. Liu and P. Ma}, journal = {Sci. Rep.}, volume = {12}, number = {1}, pages = {14378}, year = {2022}, doi = {10.1038/s41598-022-18502-3}}
@inproceedings{92, title = {An image is worth 16x16 words: Transformers for image recognition at scale}, author = {A. Dosovitskiy and L. Beyer and A. Kolesnikov and D. Weissenborn and X. Zhai and T. Unterthiner and M. Dehghani and M. Minderer and G. Heigold and S. Gelly and J. Uszkoreit and N. Houlsby}, booktitle = {International Conference on Learning Representations}, year = {2021}, url = {https://openreview.net/forum?id=YicbFdNTTy}}
@inproceedings{93, title = {{EEG-Transformer}: Self-attention from Transformer Architecture for Decoding {EEG} of Imagined Speech}, author = {Y.-E. Lee and S.-H. Lee}, booktitle = {2022 10th International Winter Conference on Brain-Computer Interface (BCI)}, pages = {1--4}, year = {2022}, doi = {10.1109/BCI53720.2022.9735124}}
@article{94, title = {{MVGT}: {A} Multi-view Graph Transformer Based on Spatial Relations for {EEG} Emotion Recognition}, author = {Y. Cui and X. Liu and J. Liang and Y. Fu}, journal = {arXiv}, year = {2024}, doi = {10.48550/ARXIV.2407.03131}, url = {},}@inproceedings{95, title = {{EEG} Signal Denoising Using Beta-Variational Autoencoder}, author = {B. Mahaseni and N. M. Khan}, booktitle = {2024 46th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, pages = {1--4}, year = {2024}, address = {Orlando, USA}, doi = {10.1109/EMBC53108.2024.10782962}, url = {https://doi.org/10.1109/EMBC53108.2024.10782962}}. [Crossref]
@article{96, title = {{VAEEG}: Variational auto-encoder for extracting {EEG} representation}, author = {T. Zhao and Y. Cui and T. Y. Ji and J. J. Luo and W. L. Li and J. Jiang and Z. F. Gao and W. G. Hu and Y. X. Yan and Y. W. Jiang and B. Hong}, journal = {NeuroImage}, volume = {304}, pages = {120946}, year = {2024}, doi = {10.1016/j.neuroimage.2024.120946}}
@article{97, title = {A fast learning algorithm for deep belief nets}, author = {G. E. Hinton and S. Osindero and Y. W. Teh}, journal = {Neural Comput.}, volume = {18}, number = {7}, pages = {1527--1554}, year = {2006}, doi = {10.1162/neco.2006.18.7.1527}}
@article{98, title = {Deep Belief Networks for Electroencephalography: {A} Review of Recent Contributions and Future Outlooks}, author = {F. Movahedi and J. L. Coyle and E. Sejdic}, journal = {IEEE J. Biomed. Health Inform.}, volume = {22}, number = {3}, pages = {642--652}, year = {2017}, doi = {10.1109/JBHI.2017.2727218}}
@article{99, title = {The Graph Neural Network Model}, author = {F. Scarselli and M. Gori and A. C. Tsoi and M. Hagenbuchner and G. Monfardini}, journal = {IEEE Trans. Neural Netw.}, volume = {20}, number = {1}, pages = {61--80}, year = {2008}, doi = {10.1109/TNN.2008.2005605}}
@misc{100, title = {Semi-supervised classification with graph convolutional networks}, author = {T. N. Kipf and M. Welling}, year = {2016}, eprint = {1609.02907}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.1609.02907}}
@misc{101, title = {Graph attention networks}, author = {P. Veli{\v{c}}kovi{\'c} and G. Cucurull and A. Casanova and A. Romero and P. Li{\`o} and Y. Bengio}, year = {2017}, eprint = {1710.10903}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.1710.10903}}
@article{102, title = {Graph-generative neural network for {EEG}-based epileptic seizure detection via discovery of dynamic brain functional connectivity}, author = {Z. D. Li and K. Hwang and K. Q. Li and J. Wu and T. K. Ji}, journal = {Sci. Rep.}, volume = {12}, number = {1}, pages = {18998}, year = {2022}, doi = {10.1038/s41598-022-23656-1}}
@article{103, title = {Graph neural network-based {EEG} classification: {A} Survey}, author = {D. Klepl and M. Wu and F. He}, journal = {IEEE Trans. Neural Syst. Rehabil. Eng.}, volume = {32}, pages = {493--503}, year = {2024}, doi = {10.1109/TNSRE.2024.3355750}}
@inproceedings{104, title = {Contrastive self-supervised {EEG} representation learning for emotion classification}, author = {K. Hu and R. J. Dai and W. T. Chen and H. L. Yin and B. L. Lu and W. L. Zheng}, booktitle = {2024 46th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, pages = {1--4}, year = {2024}, doi = {10.1109/EMBC53108.2024.10781579}}
@misc{105, title = {Pre-training graph contrastive masked autoencoders are strong distillers for {EEG}}, author = {X. X. Wei and K. H. Zhao and Y. Jiao and H. Xie and L. F. He and Y. Zhang}, year = {2025}, eprint = {2411.19230}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2411.19230}}
@misc{106, title = {{ViT2EEG}: Leveraging hybrid pretrained vision transformers for {EEG} data}, author = {R. Q. Yang and E. Modesitt}, year = {2023}, eprint = {2308.00454}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2308.00454}}
@article{107, title = {Machine learning-based epileptic seizure detection methods using wavelet and {EMD}-based decomposition techniques: {A} Review}, author = {R. G. Thangarajoo and M. B. I. Reaz and G. Srivastava and F. Haque and S. H. M. Ali and A. A. A. Bakar and M. A. S. Bhuiyan}, journal = {Sensors}, volume = {21}, number = {24}, pages = {8485}, year = {2021}, doi = {10.3390/s21248485}}
@article{108, title = {Effective Epileptic Seizure Detection with Hybrid Feature Selection and {SMOTE}-Based Data Balancing Using {SVM} Classifier}, author = {H. F. Atlam and G. E. Aderibigbe and M. S. Nadeem}, journal = {Appl. Sci.}, volume = {15}, number = {9}, pages = {4690}, year = {2025}, doi = {10.3390/app15094690}}
@article{109, title = {Real-time epilepsy seizure detection based on {EEG} using tunable-{Q} wavelet transform and convolutional neural network}, author = {M. Shen and P. Wen and B. Song and Y. Li}, journal = {Biomed. Signal Process. Control}, volume = {82}, pages = {104566}, year = {2023}, doi = {10.1016/j.bspc.2022.104566}}
@article{110, title = {A hybrid optimization-enhanced {1D-ResCNN} framework for epileptic spike detection in scalp {EEG} signals}, author = {P. Kumar and P. K. Upadhyay}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {5707}, year = {2025}, doi = {10.1038/s41598-025-90164-3}}
@article{111, title = {Current trends, challenges, and future research directions of hybrid and deep learning techniques for motor imagery brain-computer interface}, author = {E. Lionakis and K. Karampidis and G. Papadourakis}, journal = {Multimodal Technol. Interact.}, volume = {7}, number = {10}, pages = {95}, year = {2023}, doi = {10.3390/mti7100095}}
@article{112, title = {{CLTNet}: {A} hybrid deep learning model for motor imagery classification}, author = {H. Gu and T. W. Chen and X. Ma and M. Y. Zhang and Y. Sun and J. Zhao}, journal = {Brain Sci.}, volume = {15}, number = {2}, pages = {124}, year = {2025}, doi = {10.3390/brainsci15020124}}
@article{113, title = {Multi-scale convolutional transformer network for motor imagery brain-computer interface}, author = {W. Zhao and B. C. Zhang and H. F. Zhou and D. Z. Wei and C. X. Huang and Q. Lan}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {12935}, year = {2025}, doi = {10.1038/s41598-025-96611-5}}
@article{114, title = {Emotion classification based on transformer and {CNN} for {EEG} spatial-temporal feature learning}, author = {X. Z. Yao and T. W. Li and P. Ding and F. Wang and L. Zhao and A. M. Gong and W. Y. Nan and Y. F. Fu}, journal = {Brain Sci.}, volume = {14}, number = {3}, pages = {268}, year = {2024}, doi = {10.3390/brainsci14030268}}
@article{115, title = {Epileptic seizures detection using deep learning techniques: {A} review}, author = {A. Shoeibi and M. Khodatars and N. Ghassemi and M. Jafari and P. Moridian and R. Alizadehsani and M. Panahiazar and F. Khozeimeh and A. Zare and H. Hosseini-Nejad and A. Khosravi and A. F. Atiya and D. Aminshahidi and S. Hussain and M. Rouhani and S. Nahavandi and U. R. Acharya}, journal = {Int. J. Environ. Res. Public Health}, volume = {18}, number = {11}, pages = {5780}, year = {2021}, doi = {10.3390/ijerph18115780}}
@article{116, title = {Accuracy of machine learning in detecting pediatric epileptic seizures: {S}ystematic review and meta-analysis}, author = {Z. Zou and B. Chen and D. Q. Xiao and F. J. Tang and X. H. Li}, journal = {J. Med. Internet Res.}, volume = {26}, pages = {e55986}, year = {2024}, doi = {10.2196/55986}}
@misc{117, title = {{CHB-MIT} scalp {EEG} database}, author = {J. Guttag}, year = {2010}, doi = {10.13026/C2K01R}, note = {PhysioNet}}
@article{118, title = {Deep-learning-based seizure detection and prediction from electroencephalography signals}, author = {F. E. Ibrahim and H. M. Emara and W. El-Shafai and M. Elwekeil and M. Rihan and I. M. Eldokany and E. T. Taha and A. S. El-Fishawy and E.-S. M. El-Rabaie and E. Abdellatef and F. E. Abd El-Samie}, journal = {Int. J. Numer. Methods Biomed. Eng.}, volume = {38}, number = {6}, pages = {e3573}, year = {2022}, doi = {10.1002/cnm.3573}}
@article{119, title = {An explainable {EEG} epilepsy detection model using friend pattern}, author = {T. Tuncer and S. Dogan}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {16951}, year = {2025}, doi = {10.1038/s41598-025-01747-z}}
@article{120, title = {A dataset of neonatal {EEG} recordings with seizure annotations}, author = {N. J. Stevenson and K. Tapani and L. Lauronen and S. Vanhatalo}, journal = {Sci. Data}, volume = {6}, number = {1}, pages = {190039}, year = {2019}, doi = {10.1038/sdata.2019.39}}
@article{121, title = {{TATPat} based explainable {EEG} model for neonatal seizure detection}, author = {Tuncer, T. and Dogan, S. and Tasci, I. and Tasci, B. and Hajiyeva, R.}, journal = {Sci. Rep.}, volume = {14}, number = {1}, pages = {26688}, year = {2024}, doi = {10.1038/s41598-024-77609-x}}
@misc{122, title = {An end-to-end deep learning approach for epileptic seizure prediction}, author = {Xu, Y. K. and Yang, J. and Zhao, S. Q. and Wu, H. M. and Sawan, M.}, year = {2021}, eprint = {2108.07453}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2108.07453}}
@misc{123, title = {{EEG}-Based Epileptic Seizure Prediction Using Temporal Multi-Channel Transformers}, author = {Godoy, R. V. and Reis, T. J. and Polegato, P. H. and Lahr, G. J. and Saute, R. L. and Nakano, F. N. and Machado, H. R. and Sakamoto, A. C. and Becker, M. and Caurin, G. A.}, year = {2022}, eprint = {2209.11172}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2209.11172}}
@article{124, title = {Multi-channel vision transformer for epileptic seizure prediction}, author = {Hussein, R. and Lee, S. and Ward, R.}, journal = {Biomedicines}, volume = {10}, number = {7}, pages = {1551}, year = {2022}, doi = {10.3390/biomedicines10071551}}
@article{125, title = {Preictal period optimization for deep learning-based epileptic seizure prediction}, author = {Koutsouvelis, P. and Chybowski, B. and Gonzalez-Sulser, A. and Abdullateef, S. and Escudero, J.}, journal = {J. Neural Eng.}, volume = {21}, number = {6}, pages = {066040}, year = {2024}, doi = {10.1088/1741-2552/ad9ad0}}
@article{126, title = {Assessing the potential of {EEG} in early detection of Alzheimer's disease: {A} systematic comprehensive review (2000--2023)}, author = {Ehteshamzad, S.}, journal = {J. Alzheimer's Dis. Rep.}, volume = {8}, number = {1}, pages = {1153--1169}, year = {2024}, doi = {10.3233/ADR-230159}}
@article{127, title = {Unlocking the potential of {EEG} in Alzheimer's disease research: {C}urrent status and pathways to precision detection}, author = {Akbar, F. and Taj, I. and Usman, S. M. and Imran, A. S. and Khalid, S. and Ihsan, I. and Ali, A. and Yasin, A.}, journal = {Brain Res. Bull.}, volume = {223}, pages = {111281}, year = {2025}, doi = {10.1016/j.brainresbull.2025.111281}}
@article{128, title = {Data-driven retrieval of population-level {EEG} features and their role in neurodegenerative diseases}, author = {Li, W. and Varatharajah, Y. and Dicks, E. and Barnard, L. and Brinkmann, B. H. and Crepeau, D. and Worrel, G. and Fan, W. and Kremers, W. and Boeve, B. and Botha, H. and Gogineni, V. and Jones, D. T.}, journal = {Brain Commun.}, volume = {6}, number = {4}, pages = {fcae227}, year = {2024}, doi = {10.1093/braincomms/fcae227}}
@article{129, title = {Use of {EEG} to Diagnose {ADHD}}, author = {Lenartowicz, A. and Loo, S. K.}, journal = {Curr. Psychiatry Rep.}, volume = {16}, number = {11}, pages = {498}, year = {2014}, doi = {10.1007/s11920-014-0498-0}}
@article{130, title = {Machine learning in attention-deficit/hyperactivity disorder: {N}ew approaches toward understanding the neural mechanisms}, author = {Cao, M. and Martin, E. and Li, X.}, journal = {Transl. Psychiatry}, volume = {13}, number = {1}, pages = {236}, year = {2023}, doi = {10.1038/s41398-023-02536-w}}
@article{131, title = {Monitoring burst suppression in critically ill patients: {M}ulti-centric evaluation of a novel method}, author = {F{\"u}rbass, F. and Herta, J. and Koren, J. and Westover, M. B. and Hartmann, M. M. and Gruber, A. and Baumgartner, C. and Kluge, T.}, journal = {Clin. Neurophysiol.}, volume = {127}, number = {4}, pages = {2038--2046}, year = {2016}, doi = {10.1016/j.clinph.2016.02.001}}
@article{132, title = {Research and application of deep learning-based sleep staging: {D}ata, modeling, validation, and clinical practice}, author = {Yue, H. J. and Chen, Z. Q. and Guo, W. B. and Sun, L. and Dai, Y. D. and Wang, Y. M. and Ma, W. J. and Fan, X. M. and Wen, W. P. and Lei, W. B.}, journal = {Sleep Med. Rev.}, volume = {74}, pages = {101897}, year = {2024}, doi = {10.1016/j.smrv.2024.101897}}
@article{133, title = {The sleep heart health study: {D}esign, rationale, and methods}, author = {Quan, S. F. and Howard, B. V. and Iber, C. and Kiley, J. P. and Nieto, F. J. and O'Connor, G. T. and Rapoport, D. M. and Redline, S. and Robbins, J. and Samet, J. M. and Wahl, P. W.}, journal = {Sleep}, volume = {20}, number = {12}, pages = {1077--1085}, year = {1997}, doi = {10.1093/sleep/20.12.1077}}
@article{134, title = {Analysis of a sleep-dependent neuronal feedback loop: {T}he slow-wave microcontinuity of the {EEG}}, author = {Kemp, B. and Zwinderman, A. H. and Tuk, B. and Kamphuisen, H. A. C. and Oberye, J. J. L.}, journal = {IEEE Trans. Biomed. Eng.}, volume = {47}, number = {9}, pages = {1185--1194}, year = {2000}, doi = {10.1109/10.867928}}
@misc{135, title = {The {Sleep-EDF} Database [Expanded]}, author = {Kemp, B. and Zwinderman, A. and Tuk, B. and Kamphuisen, H. and Oberye, J.}, year = {2013}, doi = {10.13026/C2X676}, url = {https://physionet.org/content/sleep-edf/1.0.0/}, note = {PhysioNet}}
@article{136, title = {The National Sleep Research Resource: {T}owards a sleep data commons}, author = {Zhang, G. Q. and Cui, L. C. and Mueller, R. and Tao, S. Q. and Kim, M. and Rueschman, M. and Mariani, S. and Mobley, D. and Redline, S.}, journal = {J. Am. Med. Inform. Assoc.}, volume = {25}, number = {10}, pages = {1351--1358}, year = {2018}, doi = {10.1093/jamia/ocy064}}
@article{137, title = {{ZleepAnlystNet}: {A} novel deep learning model for automatic sleep stage scoring based on single-channel raw {EEG} data using separating training}, author = {Jirakittayakorn, N. and Wongsawat, Y. and Mitrirattanakul, S.}, journal = {Sci. Rep.}, volume = {14}, number = {1}, pages = {9859}, year = {2024}, doi = {10.1038/s41598-024-60796-y}}
@misc{138, title = {An interpretable and efficient sleep staging algorithm: {DetectsleepNet}}, author = {Guo, S.}, year = {2024}, eprint = {2406.19246}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2406.19246}}
@article{139, title = {Towards interpretable sleep stage classification using cross-modal transformers}, author = {Pradeepkumar, J. and Anandakumar, M. and Kugathasan, V. and Suntharalingham, D. and Kappel, S. L. and De Silva, A. C. and Edussooriya, C. U.}, journal = {IEEE Trans. Neural Syst. Rehabil. Eng.}, year = {2024}, doi = {10.1109/TNSRE.2024.3438610}}
@misc{140, title = {A multi constrained transformer-{BiLSTM} guided network for automated sleep stage classification from single-channel {EEG}}, author = {Sadik, F. and Raihan, M. T. and Rashid, R. B. and Rahman, M. and Abdal, S. M. and Ahmed, S. and Mahmud, T. I.}, year = {2023}, eprint = {2309.10542}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2309.10542}}
@inproceedings{141, title = {Siamese sleep transformer for robust sleep stage scoring with self-knowledge distillation and selective batch sampling}, author = {Kwak, H.-G. and Kweon, Y.-S. and Shin, G.-H.}, booktitle = {2023 11th International Winter Conference on Brain-Computer Interface (BCI)}, pages = {1--5}, year = {2023}, doi = {10.1109/BCI57258.2023.10078532}}
@article{142, title = {Hybrid deep learning model based on transformer encoder for sleep stages classification}, author = {Al-akkam, O. A. A.-S. M.}, journal = {Bilad Alrafidain J. Eng. Sci. Technol.}, volume = {4}, number = {1}, pages = {113--126}, year = {2025}, doi = {10.56990/bajest/2025.040110}}
@article{143, title = {Real-time segmentation of burst suppression patterns in critical care {EEG} monitoring}, author = {Westover, M. B. and Shafi, M. M. and Ching, S. and Chemali, J. J. and Purdon, P. L. and Cash, S. S. and Brown, E. N.}, journal = {J. Neurosci. Methods}, volume = {219}, number = {1}, pages = {131--141}, year = {2013}, doi = {10.1016/j.jneumeth.2013.07.003}}
@article{144, title = {Electroencephalogram based detection of deep sedation in {ICU} patients using atomic decomposition}, author = {Nagaraj, S. B. and McClain, L. M. and Boyle, E. J. and Zhou, D. W. and Ramaswamy, S. M. and Biswal, S. and Akeju, O. and Purdon, P. L. and Westover, M. B.}, journal = {IEEE Trans. Biomed. Eng.}, volume = {65}, number = {12}, pages = {2684--2691}, year = {2018}, doi = {10.1109/TBME.2018.2813265}}
@article{145, title = {One-dimensional convolutional neural networks combined with channel selection strategy for seizure prediction using long-term intracranial {EEG}}, author = {Wang, X. S. and Zhang, G. H. and Wang, Y. and Wang, L. and Yang, Z. H. and Liang, F. Y. and Cong, F. Y.}, journal = {Int. J. Neur. Syst.}, volume = {32}, number = {2}, pages = {2150048}, year = {2022}, doi = {10.1142/S0129065721500489}}
@article{146, title = {Automated tracking of level of consciousness and delirium in critical illness using deep learning}, author = {Sun, H. Q. and Kimchi, E. and Akeju, O. and Nagaraj, S. B. and McClain, L. M. and Zhou, D. W. and Boyle, E. and Zheng, W. L. and Ge, W. D. and Westover, M. B.}, journal = {NPJ Digit. Med.}, volume = {2}, number = {1}, pages = {89}, year = {2019}, doi = {10.1038/s41746-019-0167-0}}
@article{147, title = {Etiology of Burst Suppression {EEG} Patterns}, author = {Shanker, A. and Abel, J. H. and Schamberg, G. and Brown, E. N.}, journal = {Front. Psychol.}, volume = {12}, pages = {673529}, year = {2021}, doi = {10.3389/fpsyg.2021.673529}}
@article{149,title={Investigating critical frequency bands and channels for {EEG}-based emotion recognition with deep neural networks},author={Zheng, Wei-Long and Lu, Bao-Liang},journal={IEEE Trans. Auton. Mental Dev.},volume={7},number={3},pages={162-175},year={2015},doi={10.1109/TAMD.2015.2431497},url={}}. [Crossref]
@article{150,title={Exploration of effective electroencephalography features for the recognition of different valence emotions},author={Yang, K. and Tong, L. and Zeng, Y. and Lu, R. and Zhang, R. and Gao, Y. and Yan, B.},journal={Front. Neurosci.},volume={16},pages={1010951},year={2022},doi={10.3389/fnins.2022.1010951},url={}}. [Crossref]
@article{151,title={{AMDET}: {A}ttention based multiple dimensions {EEG} transformer for emotion recognition},author={Xu, Y. L. and Du, Y. and Li, L. and Lai, H. H. and Zou, J. and Zhou, T. Y. and Xiao, L. S. and Liu, L. and Ma, P. C.},journal={IEEE Trans. Affect. Comput.},volume={15},number={3},pages={1067-1077},year={2023},doi={10.1109/TAFFC.2023.3318321},url={}}. [Crossref]
@article{152,title={{TPRO-NET}: {A}n {EEG}-based emotion recognition method reflecting subtle changes in emotion},author={Zhang, X. Y. and Cheng, X. K. and Liu, H.},journal={Sci. Rep.},volume={14},number={1},pages={13491},year={2024},doi={10.1038/s41598-024-62990-4},url={}}. [Crossref]
@article{153,title={Cross-Subject {EEG} Emotion Recognition With Self-Organized Graph Neural Network},author={Li, J. C. and Li, S. Q. and Li, J. H. and Pan, J. H. and Wang, F.},journal={Front. Neurosci.},volume={15},pages={611653},year={2021},doi={10.3389/fnins.2021.611653},url={}}. [Crossref]
@article{154,title={{DAGAM}: {A} domain adversarial graph attention model for subject-independent EEG-based emotion recognition},author={Xu, T. and Dang, W. and Wang, J. B. and Zhou, Y.},journal={J. Neural Eng.},volume={20},number={1},pages={016022},year={2023},doi={10.1088/1741-2552/acae06},url={}}. [Crossref]
@article{155,title={{EEG} emotion recognition based on federated learning framework},author={Xu, C. and Liu, H. and Qi, W.},journal={Electronics},volume={11},number={20},pages={3316},year={2022},doi={10.3390/electronics11203316},url={}}. [Crossref]
@article{156,title={Semi-supervised dual-stream self-attentive adversarial graph contrastive learning for cross-subject eeg-based emotion recognition},author={Ye, W. S. and Zhang, Z. G. and Teng, F. and Zhang, M. and Wang, J. H. and Ni, D. and Li, F. L. and Xu, P. and Liang, Z.},journal={IEEE Trans. Affect. Comput.},year={2024},doi={10.1109/TAFFC.2024.3433470},url={}}@inproceedings{157,title={Adaptive federated learning for {EEG} emotion recognition},author={Chan, C. and Zheng, Q. Q. and Xu, C. J. and Wang, Q. and Heng, P. A.},booktitle={2024 International Joint Conference on Neural Networks (IJCNN)},pages={1-8},year={2024},organization={IEEE},doi={10.1109/IJCNN60899.2024.10650004},url={https://doi.org/10.1109/IJCNN60899.2024.10650004}}. [Crossref]
@misc{158, title = {{FBCNet}: {A} Multi-view Convolutional Neural Network for Brain-Computer Interface}, author = {Mane, R. and Chew, E. and Chua, K. and Ang, K. K. and Robinson, N. and Vinod, A. P. and Lee, S.-W. and Guan, C. T.}, year = {2021}, eprint = {2104.01233}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2104.01233}}
@article{159, title = {A robust multi-branch multi-attention-mechanism {EEGNet} for motor imagery {BCI} decoding}, author = {Deng, H. D. and Li, M. F. and Li, J. D. and Li, M. M. and Guo, M. M. and Xu, G. Z.}, journal = {J. Neurosci. Methods}, volume = {405}, pages = {110108}, year = {2024}, doi = {10.1016/j.jneumeth.2024.110108}}
@article{160, title = {{CTNet}: {A} convolutional transformer network for {EEG}-based motor imagery classification}, author = {Zhao, W. and Jiang, X. L. and Zhang, B. C. and Xiao, S. X. and Weng, S. J.}, journal = {Sci. Rep.}, volume = {14}, number = {1}, pages = {20237}, year = {2024}, doi = {10.1038/s41598-024-71118-7}}
@article{161, title = {{CCLNet}: {M}ulticlass motor imagery {EEG} decoding through extended common spatial patterns and {CNN-LSTM} hybrid network}, author = {Singh, K. and Singha, N. and Bhalaik, S.}, journal = {J. Supercomput.}, volume = {81}, number = {7}, pages = {805}, year = {2025}, doi = {10.1007/s11227-025-07319-2}}
@article{162, title = {An efficient deep learning framework for {P}300 evoked related potential detection in {EEG} signal}, author = {Havaei, P. and Zekri, M. and Mahmoudzadeh, E. and Rabbani, H.}, journal = {Comput. Methods Programs Biomed.}, volume = {229}, pages = {107324}, year = {2023}, doi = {10.1016/j.cmpb.2022.107324}}
@inproceedings{163, title = {{LSTM}-based classification of multiflicker-{SSVEP} in single channel dry-{EEG} for low-power/high-accuracy quadcopter-{BMI} system}, author = {Kobayashi, N. and Ishizuka, K.}, booktitle = {2019 IEEE International Conference on Systems, Man and Cybernetics (SMC)}, pages = {2160--2165}, year = {2019}, doi = {10.1109/SMC.2019.8914015}}
@article{164, title = {Transfer learning and {S}pec{A}ugment applied to {SSVEP} based {BCI} classification}, author = {Bassi, P. R. A. S. and Rampazzo, W. and Attux, R.}, journal = {Biomed. Signal Process. Control}, volume = {67}, pages = {102542}, year = {2021}, doi = {10.1016/j.bspc.2021.102542}}
@article{165, title = {Silent {EEG}-speech recognition using convolutional and recurrent neural network with 85\% accuracy of 9 words classification}, author = {Vorontsova, D. and Menshikov, I. and Zubov, A. and Orlov, K. and Rikunov, P. and Zvereva, E. and Flitman, L. and Lanikin, A. and Sokolova, A. and Markov, S. and Bernadotte, A.}, journal = {Sensors}, volume = {21}, number = {20}, pages = {6744}, year = {2021}, doi = {10.3390/s21206744}}
@article{166, title = {Status of deep learning for {EEG}-based brain--computer interface applications}, author = {Hossain, K. M. and Islam, Md. A. and Hossain, S. and Nijholt, A. and Ahad, M. A. R.}, journal = {Front. Comput. Neurosci.}, volume = {16}, pages = {1006763}, year = {2023}, doi = {10.3389/fncom.2022.1006763}}
@article{167, title = {{TFAC-Net}: {A} temporal-frequential attentional convolutional network for driver drowsiness recognition with single-channel {EEG}}, author = {Gong, P. and Wang, Y. and Zhou, X. and Wen, X. and Zhang, D.}, journal = {IEEE Trans. Intell. Transport. Syst.}, volume = {25}, number = {7}, pages = {7004--7016}, year = {2024}, doi = {10.1109/TITS.2023.3347075}}
@inproceedings{168, title = {Neural networks meet neural activity: {U}tilizing {EEG} for mental workload estimation}, author = {Siddhad, G. and Roy, P. P. and Kim, B. G.}, booktitle = {International Conference on Pattern Recognition}, pages = {325--339}, year = {2025}, address = {Cham, Switzerland}, doi = {10.1007/978-3-031-78195-7_22}}
@article{169, title = {{EEG-CogNet}: {A} deep learning framework for cognitive state assessment using {EEG} brain connectivity}, author = {Panwar, N. and Pandey, V. and Roy, P. P.}, journal = {Biomed. Signal Process. Control}, volume = {98}, pages = {106770}, year = {2024}, doi = {10.1016/j.bspc.2024.106770}}
@article{170, title = {Optimized driver fatigue detection method using multimodal neural networks}, author = {Cao, S. L. and Feng, P. H. and Kang, W. and Chen, Z. Y. and Wang, B.}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {12240}, year = {2025}, doi = {10.1038/s41598-025-86709-1}}
@article{171, title = {{DeepSleepNet}: {A} Model for Automatic Sleep Stage Scoring Based on Raw Single-Channel {EEG}}, author = {Supratak, A. and Dong, H. and Wu, C. and Guo, Y.}, journal = {IEEE Trans. Neural Syst. Rehabil. Eng.}, volume = {25}, number = {11}, pages = {1998--2008}, year = {2017}, doi = {10.1109/TNSRE.2017.2721116}}
@article{172, title = {{SleepTransformer}: {A}utomatic sleep staging with interpretability and uncertainty quantification}, author = {Phan, H. and Mikkelsen, K. and Chen, O. Y. and Koch, A. M. and Mertins, P. and De Vos, M.}, journal = {IEEE Trans. Biomed. Eng.}, volume = {69}, number = {8}, pages = {2456--2467}, year = {2022}, doi = {10.1109/TBME.2022.3147187}}
@article{173, title = {{FlexSleepTransformer}: {A} transformer-based sleep staging model with flexible input channel configurations}, author = {Guo, Y. and Nowakowski, M. and Dai, W.}, journal = {Sci. Rep.}, volume = {14}, number = {1}, pages = {26312}, year = {2024}, doi = {10.1038/s41598-024-76197-0}}
@article{174, title = {{CareSleepNet}: {A} hybrid deep learning network for automatic sleep staging}, author = {Wang, J. Q. and Zhao, S. and Jiang, H. T. and Zhou, Y. X. and Yu, Z. H. and Li, T. and Li, S. J. and Pan, G.}, journal = {IEEE J. Biomed. Health Inform.}, volume = {28}, number = {12}, pages = {7392--7405}, year = {2024}, doi = {10.1109/JBHI.2024.3426939}}
@article{175, title = {An improved feature extraction algorithms of {EEG} signals based on motor imagery brain-computer interface}, author = {Geng, X. Z. and Li, D. Z. and Chen, H. L. and Yu, P. and Yan, H. and Yue, M. Z.}, journal = {Alexandria Eng. J.}, volume = {61}, number = {6}, pages = {4807--4820}, year = {2022}, doi = {10.1016/j.aej.2021.10.034}}
@article{176, title = {The {BCI} competition {III}: {V}alidating alternative approaches to actual {BCI} problems}, author = {Blankertz, B. and Muller, K. R. and Krusienski, D. J. and Schalk, G. and Wolpaw, J. R. and Schlogl, A. and Pfurtscheller, G. and Millan, Jd.R. and Schroder, M. and Birbaumer, N.}, journal = {IEEE Trans. Neural Syst. Rehabil. Eng.}, volume = {14}, number = {2}, pages = {153--159}, year = {2006}, doi = {10.1109/TNSRE.2006.875642}}
@article{177, title = {The non-invasive Berlin Brain--Computer Interface: {F}ast acquisition of effective performance in untrained subjects}, author = {Blankertz, B. and Dornhege, G. and Krauledat, M. and Muller, K.-R. and Curio, G.}, journal = {NeuroImage}, volume = {37}, number = {2}, pages = {539--550}, year = {2007}, doi = {10.1016/j.neuroimage.2007.01.051}}
@misc{178, title = {{BCI} Competition 2008--{Graz} data set {A}}, author = {Brunner, C. and Leeb, R. and M{\"u}ller-Putz, G. and Schl{\"o}gl, A. and Pfurtscheller, G.}, howpublished = {Institute for knowledge discovery (laboratory of brain-computer interfaces), Graz University of Technology}, year = {2008}, url = {https://lampz.tugraz.at/~bci/database/001-2014/description.pdf}}
@misc{179, title = {{BCI} Competition 2008--{Graz} data set {B}}, author = {Leeb, R. and Brunner, C. and M{\"u}ller-Putz, G. and Schl{\"o}gl, A. and Pfurtscheller, G.}, howpublished = {Graz University of Technology, Austria}, year = {2008}, url = {https://lampx.tugraz.at/~bci/database/004-2014/description.pdf}}
@misc{180, title = {Prediction of finger flexion: 4th brain-computer interface data competition}, author = {Miller, K. J. and Schalk, G.}, howpublished = {BCI Competition IV}, pages = {1--2}, year = {2008}}
@article{181, title = {{BNCI} {H}orizon 2020: {T}owards a roadmap for the {BCI} community}, author = {Brunner, C. and Birbaumer, N. and Blankertz, B. and Guger, C. and K{\"u}bler, A. and Mattia, D. and Mill{\'a}n, Jd. R. and Miralles, F. and Nijholt, A. and Opisso, E. and Ramsey, N. and Salomon, P. and M{\"u}ller-Putz, G. R.}, journal = {Brain-Comput. Interfaces}, volume = {2}, number = {1}, pages = {1--10}, year = {2015}, doi = {10.1080/2326263X.2015.1008956}}
@article{182, title = {The {T}emple {U}niversity {H}ospital {EEG} data corpus}, author = {Obeid, I. and Picone, J.}, journal = {Front. Neurosci.}, volume = {10}, pages = {196}, year = {2016}, doi = {10.3389/fnins.2016.00196}}
@article{183, title = {The {T}emple {U}niversity {H}ospital seizure detection corpus}, author = {Shah, V. and von Weltin, E. and Lopez, S. and McHugh, J. R. and Veloso, L. and Golmohammadi, M. and Obeid, I. and Picone, J.}, journal = {Front. Neuroinform.}, volume = {12}, pages = {83}, year = {2018}, doi = {10.3389/fninf.2018.00083}}
@article{184, title = {Indications of nonlinear deterministic and finite-dimensional structures in time series of brain electrical activity: {D}ependence on recording region and brain state}, author = {Andrzejak, R. G. and Lehnertz, K. and Mormann, F. and Rieke, C. and David, P. and Elger, C. E.}, journal = {Phys. Rev. E}, volume = {64}, number = {6}, pages = {061907}, year = {2001}, doi = {10.1103/PhysRevE.64.061907}}
@inproceedings{185, title = {Differential entropy feature for {EEG}-based emotion classification}, author = {Duan, R.-N. and Zhu, J.-Y. and Lu, B.-L.}, booktitle = {2013 6th International IEEE/EMBS Conference on Neural Engineering (NER)}, pages = {81--84}, year = {2013}, doi = {10.1109/NER.2013.6695876}}
@article{186, title = {{DREAMER}: {A} Database for Emotion Recognition Through {EEG} and {ECG} Signals From Wireless Low-cost Off-the-Shelf Devices}, author = {Katsigiannis, S. and Ramzan, N.}, journal = {IEEE J. Biomed. Health Inform.}, volume = {22}, number = {1}, pages = {98--107}, year = {2018}, doi = {10.1109/JBHI.2017.2688239}}
@article{187, title = {{BCI2000}: {A} General-Purpose Brain-Computer Interface {(BCI)} System}, author = {Schalk, G. and McFarland, D. J. and Hinterberger, T. and Birbaumer, N. and Wolpaw, J. R.}, journal = {IEEE Trans. Biomed. Eng.}, volume = {51}, number = {6}, pages = {1034--1043}, year = {2004}, doi = {10.1109/TBME.2004.827072}}
@article{188, title = {{ERP CORE}: {A}n open resource for human event-related potential research}, author = {Kappenman, E. S. and Farrens, J. L. and Zhang, W. and Stewart, A. X. and Luck, S. J.}, journal = {NeuroImage}, volume = {225}, pages = {117465}, year = {2021}, doi = {10.1016/j.neuroimage.2020.117465}}
@article{189, title = {The {O}pen{N}euro resource for sharing of neuroscience data}, author = {Markiewicz, C. J. and Gorgolewski, K. J. and Feingold, F. and Blair, R. and Halchenko, Y. O. and Miller, E. and Hardcastle, N. and Wexler, J. and Esteban, O. and Goncavles, M. and Jwa, A. and Poldrack, R.}, journal = {eLife}, volume = {10}, pages = {e71774}, year = {2021}, doi = {10.7554/eLife.71774}}
@article{190, title = {{EEG-BIDS}, an extension to the brain imaging data structure for electroencephalography}, author = {Pernet, C. R. and Appelhoff, S. and Gorgolewski, K. J. and Flandin, G. and Phillips, C. and Delorme, A. and Oostenveld, R.}, journal = {Sci. Data}, volume = {6}, number = {1}, pages = {103}, year = {2019}, doi = {10.1038/s41597-019-0104-8}}
@article{191, title = {NEMAR: {A}n open access data, tools, and compute resource operating on {N}euro{E}lectro{M}agnetic data}, author = {Delorme, A. and Truong, D. and Youn, C. and Sivagnanam, S. and Stirm, C. and Yoshimoto, K. and Poldrack, R. A. and Majumdar, A. and Makeig, S.}, journal = {Database}, volume = {2022}, pages = {baac096}, year = {2022}, doi = {10.1093/database/baac096}}
@article{192,
  title   = {{HBN-EEG}: The {FAIR} implementation of the {H}ealthy {B}rain {N}etwork ({HBN}) electroencephalography dataset},
  author  = {Shirazi, S. Y. and Franco, A. and Scopel Hoffmann, M. and Esper, N. B. and Truong, D. and Delorme, A. and Milham, M. P. and Makeig, S.},
  journal = {bioRxiv},
  year    = {2024},
  doi     = {10.1101/2024.10.03.615261},
}
@article{193,
  title   = {{SeizeIT2}: {W}earable dataset of patients with focal epilepsy},
  author  = {Bhagubai, M. and Chatzichristos, C. and Swinnen, L. and Macea, J. and Zhang, J. and Lagae, L. and Jansen, K. and Schulze-Bonhage, A. and Sales, F. and Mahler, B. and Weber, Y. and Paesschen, W. V. and De Vos, M.},
  journal = {Sci. Data},
  volume  = {12},
  number  = {1},
  pages   = {1228},
  year    = {2025},
  doi     = {10.1038/s41597-025-05580-x},
}
@article{194,
  title   = {Open access dataset integrating {EEG} and {fNIRS} during Stroop tasks},
  author  = {Chen, Z. M. and Gao, C. Y. and Li, T. and Ji, X. and Liu, S. Y. and Xiao, M.},
  journal = {Sci. Data},
  volume  = {10},
  number  = {1},
  pages   = {618},
  year    = {2023},
  doi     = {10.1038/s41597-023-02524-1},
}
@article{195,
  title   = {A simultaneous {EEG}-{fNIRS} dataset of the visual cognitive motivation study in healthy adults},
  author  = {Phukhachee, T. and Angsuwatanakul, T. and Iramina, K. and Kaewkamnerdpong, B.},
  journal = {Data Brief},
  volume  = {53},
  pages   = {110260},
  year    = {2024},
  doi     = {10.1016/j.dib.2024.110260},
}
@article{196,
  title   = {A large-scale {MEG} and {EEG} dataset for object recognition in naturalistic scenes},
  author  = {Zhang, G. H. and Zhou, M. and Zhen, S. Y. and Tang, S. H. and Li, Z. and Zhen, Z. L.},
  journal = {Sci. Data},
  volume  = {12},
  number  = {1},
  pages   = {857},
  year    = {2025},
  doi     = {10.1038/s41597-025-05174-7},
}
@article{197,
  title   = {Removing electroencephalographic artifacts by blind source separation},
  author  = {Jung, T. P. and Makeig, S. and Humphries, C. and Lee, T. W. and McKeown, M. J. and Iragui, V. and Sejnowski, T. J.},
  journal = {Psychophysiology},
  volume  = {37},
  number  = {2},
  pages   = {163--178},
  year    = {2000},
  doi     = {10.1111/1469-8986.3720163},
}
@article{198,
  title   = {Learning a robust unified domain adaptation framework for cross-subject {EEG}-based emotion recognition},
  author  = {Jim{\'e}nez-Guarneros, M. and Fuentes-Pineda, G.},
  journal = {Biomed. Signal Process. Control},
  volume  = {86},
  pages   = {105138},
  year    = {2023},
  doi     = {10.1016/j.bspc.2023.105138},
}
@article{199,
  title   = {{QuadTPat}: {Q}uadruple Transition Pattern-based explainable feature engineering model for stress detection using {EEG} signals},
  author  = {Cambay, V. Y. and Tasci, I. and Tasci, G. and Hajiyeva, R. and Dogan, S. and Tuncer, T.},
  journal = {Sci. Rep.},
  volume  = {14},
  number  = {1},
  pages   = {27320},
  year    = {2024},
  doi     = {10.1038/s41598-024-78222-8},
}
@article{200,
  title   = {Discrepancy between inter- and intra-subject variability in {EEG}-based motor imagery brain-computer interface: Evidence from multiple perspectives},
  author  = {Huang, G. and Zhao, Z. H. and Zhang, S. R. and Hu, Z. X. and Fan, J. M. and Fu, M. S. and Chen, J. L. and Xiao, Y. Q. and Wang, J. and Dan, G.},
  journal = {Front. Neurosci.},
  volume  = {17},
  pages   = {1122661},
  year    = {2023},
  doi     = {10.3389/fnins.2023.1122661},
}
@article{201,
  title   = {{META-EEG}: {M}eta-learning-based class-relevant {EEG} representation learning for zero-calibration brain–computer interfaces},
  author  = {Han, J. W. and Bak, S. and Kim, J. M. and Choi, W. and Shin, D. H. and Son, Y. H. and Kam, T. E.},
  journal = {Expert Syst. Appl.},
  volume  = {238},
  pages   = {121986},
  year    = {2024},
  doi     = {10.1016/j.eswa.2023.121986},
}
@article{202,
  title   = {On the effects of data normalization for domain adaptation on {EEG} data},
  author  = {Apicella, A. and Isgr{\`o}, F. and Pollastro, A. and Prevete, R.},
  journal = {Eng. Appl. Artif. Intell.},
  volume  = {123},
  pages   = {106205},
  year    = {2023},
  doi     = {10.1016/j.engappai.2023.106205},
}
@article{203,
  title   = {{SMOTE}: {S}ynthetic Minority Over-sampling Technique},
  author  = {Chawla, N. V. and Bowyer, K. W. and Hall, L. O. and Kegelmeyer, W. P.},
  journal = {J. Artif. Intell. Res.},
  volume  = {16},
  pages   = {321--357},
  year    = {2002},
  doi     = {10.1613/jair.953},
}
@inproceedings{204,
  title     = {{IDA-GAN}: {A} novel imbalanced data augmentation {GAN}},
  author    = {Yang, H. and Zhou, Y.},
  booktitle = {2020 25th International Conference on Pattern Recognition (ICPR)},
  pages     = {8299--8305},
  year      = {2021},
  address   = {Milan, Italy},
  doi       = {10.1109/ICPR48806.2021.9411996},
}
@article{205,
  title   = {Generative {AI} with {WGAN-GP} for boosting seizure detection accuracy},
  author  = {Abou-Abbas, L. and Henni, K. and Jemal, I. and Mezghani, N.},
  journal = {Front. Artif. Intell.},
  volume  = {7},
  pages   = {1437315},
  year    = {2024},
  doi     = {10.3389/frai.2024.1437315},
}
@article{206,
  title   = {Explainable {AI}: {A} review of applications to neuroimaging data},
  author  = {Farahani, F. V. and Fiok, K. and Lahijanian, B. and Karwowski, W. and Douglas, P. K.},
  journal = {Front. Neurosci.},
  volume  = {16},
  pages   = {906290},
  year    = {2022},
  doi     = {10.3389/fnins.2022.906290},
}
@article{207,
  title         = {Interpretable and robust {AI} in {EEG} systems: A survey},
  author        = {Zhou, X. L. and Liu, C. Y. and Zhou, J. N. and Wang, Z. R. and Zhai, L. M. and Jia, Z. Y. and Guan, C. T. and Liu, Y.},
  journal       = {arXiv},
  year          = {2025},
  eprint        = {2304.10755},
  archiveprefix = {arXiv},
  doi           = {10.48550/arXiv.2304.10755},
}
@article{208,
  title   = {Perturbing {BEAMs}: {EEG} adversarial attack to deep learning models for epilepsy diagnosing},
  author  = {Yu, J. and Qiu, K. and Wang, P. and Su, C. and Fan, Y. and Cao, Y.},
  journal = {BMC Med. Inform. Decis. Mak.},
  volume  = {23},
  number  = {1},
  pages   = {115},
  year    = {2023},
  doi     = {10.1186/s12911-023-02212-5},
}
@article{209,
  title   = {An Efficient Model-Compressed {EEGNet} Accelerator for Generalized Brain-Computer Interfaces With Near Sensor Intelligence},
  author  = {Feng, L. C. and Shan, H. W. and Zhang, Y. Q. and Zhu, Z. M.},
  journal = {IEEE Trans. Biomed. Circuits Syst.},
  volume  = {16},
  number  = {6},
  pages   = {1239--1249},
  year    = {2022},
  doi     = {10.1109/TBCAS.2022.3215962},
}
@inproceedings{210,
  title     = {On-device learning of {EEGNet}-based network for wearable motor imagery brain-computer interface},
  author    = {Bian, S. and Kang, P. and Moosmann, J. and Liu, M. and Bonazzi, P. and Rosipal, R. and Magno, M.},
  booktitle = {Proceedings of the 2024 ACM International Symposium on Wearable Computers},
  pages     = {9--16},
  year      = {2024},
  address   = {Melbourne, Australia},
  doi       = {10.1145/3675095.3676607},
}
@article{211,
  title   = {Reproducible machine learning research in mental workload classification using {EEG}},
  author  = {Demirezen, G. and Ta{\c{s}}kaya Temizel, T. and Brouwer, A.-M.},
  journal = {Front. Neuroerg.},
  volume  = {5},
  pages   = {1346794},
  year    = {2024},
  doi     = {10.3389/fnrgo.2024.1346794},
}
@inproceedings{212,
  title     = {Achieving Reproducibility in {EEG}-Based Machine Learning},
  author    = {Kinahan, S. and Saidi, P. and Daliri, A. and Liss, J. and Berisha, V.},
  booktitle = {Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {1464--1474},
  year      = {2024},
  address   = {Rio de Janeiro, Brazil},
  doi       = {10.1145/3630106.3658983},
}
@article{213,
  title   = {A systematic survey on the application of federated learning in mental state detection and human activity recognition},
  author  = {Grataloup, A. and Kurpicz-Briki, M.},
  journal = {Front. Digit. Health},
  volume  = {6},
  pages   = {1495999},
  year    = {2024},
  doi     = {10.3389/fdgth.2024.1495999},
}
@article{214,
  title   = {{AFLEMP}: {A}ttention-based federated learning for emotion recognition using multi-modal physiological data},
  author  = {Gahlan, N. and Sethia, D.},
  journal = {Biomed. Signal Process. Control},
  volume  = {94},
  pages   = {106353},
  year    = {2024},
  doi     = {10.1016/j.bspc.2024.106353},
}
@inproceedings{215,
  title        = {{EEG}-based emotion recognition with prototype-based data representation},
  author       = {Wang, Y. X. and Qiu, S. and Zhao, C. and Yang, W. J. and Li, J. P. and Ma, X. L. and He, H. G.},
  booktitle    = {2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)},
  pages        = {684--689},
  year         = {2019},
  organization = {IEEE},
  address      = {Berlin, Germany},
  doi          = {10.1109/EMBC.2019.8857340},
}
@article{216,
  title   = {Multi-layer prototype learning with Dirichlet mixup for open-set {EEG} recognition},
  author  = {Han, D.-K. and Lee, M. and Lee, S.-W.},
  journal = {Expert Syst. Appl.},
  volume  = {266},
  pages   = {126047},
  year    = {2025},
  doi     = {10.1016/j.eswa.2024.126047},
}
@article{217,
  title         = {{ProtoEEGNet}: {A}n interpretable approach for detecting interictal epileptiform discharges},
  author        = {Tang, D. and Willard, F. and Tegerdine, R. and Triplett, L. and Donnelly, J. and Moffett, L. and Semenova, L. and Barnett, A. J. and Jing, J. and Rudin, C. and Westover, B.},
  journal       = {arXiv},
  year          = {2023},
  eprint        = {2312.10056},
  archiveprefix = {arXiv},
  doi           = {10.48550/arXiv.2312.10056},
}
@article{218,
  title   = {Exploration of an intrinsically explainable self-attention based model for prototype generation on single-channel {EEG} sleep stage classification},
  author  = {Adey, B. and Habib, A. and Karmakar, C.},
  journal = {Sci. Rep.},
  volume  = {14},
  number  = {1},
  pages   = {27612},
  year    = {2024},
  doi     = {10.1038/s41598-024-79139-y},
}
@article{219,
  title   = {An empirical comparison of deep learning explainability approaches for {EEG} using simulated ground truth},
  author  = {Sujatha Ravindran, A. and Contreras-Vidal, J.},
  journal = {Sci. Rep.},
  volume  = {13},
  number  = {1},
  pages   = {17709},
  year    = {2023},
  doi     = {10.1038/s41598-023-43871-8},
}
@article{220,
  title   = {Towards best practice of interpreting deep learning models for {EEG}-based brain computer interfaces},
  author  = {Cui, J. and Yuan, L. Q. and Wang, Z. X. and Li, R. L. and Jiang, T. Z.},
  journal = {Front. Comput. Neurosci.},
  volume  = {17},
  pages   = {1232925},
  year    = {2023},
  doi     = {10.3389/fncom.2023.1232925},
}
@article{221,
  title   = {Strategic integration: {A} cross-disciplinary review of the {fNIRS}-{EEG} dual-modality imaging system for delivering multimodal neuroimaging to applications},
  author  = {Chen, J. F. and Chen, K. W. and Yu, Y. F. and Bi, X. and Ji, X. and Zhang, D. W.},
  journal = {Brain Sci.},
  volume  = {14},
  number  = {10},
  pages   = {1022},
  year    = {2024},
  doi     = {10.3390/brainsci14101022},
}
@article{222,
  title   = {Adaptive extreme edge computing for wearable devices},
  author  = {Covi, E. and Donati, E. and Liang, X. P. and Kappel, D. and Heidari, H. and Payvand, M. and Wang, W.},
  journal = {Front. Neurosci.},
  volume  = {15},
  pages   = {611300},
  year    = {2021},
  doi     = {10.3389/fnins.2021.611300},
}
@article{223,
  title   = {Wearable {EEG} electronics for a brain–{AI} closed-loop system to enhance autonomous machine decision-making},
  author  = {Shin, J. H. and Kwon, J. and Kim, J. U. and Ryu, H. and Ok, J. and Kwon, S. J. and Park, H. and Kim, T. I.},
  journal = {npj Flex. Electron.},
  volume  = {6},
  number  = {1},
  pages   = {32},
  year    = {2022},
  doi     = {10.1038/s41528-022-00164-w},
}
@misc{224,
  title        = {Mother of all {BCI} Benchmarks},
  author       = {Aristimunha, B. and Carrara, I. and Guetschel, P. and Sedlar, S. and Rodrigues, P. and Sosulski, J. and Narayanan, D. and Bjareholt, E. and Quentin, B. and Schirrmeister, R. T. and Kalunga, E. and Darmet, L. and Gregoire, C. and Hussain, A. and Gatti, R. and Goncharenko, V. and Thielen, J. and Moreau, T. and Roy, Y. and Jayaram, V. and Barachant, A. and Chevallier, S.},
  howpublished = {Zenodo},
  year         = {2023},
  doi          = {10.5281/ZENODO.10034224},
}
Search
Open Access
Research article

Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications

Abdulvahap Mutlu,
Şengül Doğan*,
Türker Tuncer
Department of Digital Forensics Engineering, Technology Faculty, Firat University, 23119 Elazig, Turkey
Acadlore Transactions on AI and Machine Learning
|
Volume 4, Issue 3, 2025
|
Pages 157-189
Received: 07-21-2025,
Revised: 08-31-2025,
Accepted: 09-11-2025,
Available online: 09-15-2025
View Full Article|Download PDF

Abstract:

Electroencephalography (EEG) provides a non-invasive approach for capturing brain dynamics and has become a cornerstone in clinical diagnostics, cognitive neuroscience, and neuroengineering. The inherent complexity, low signal-to-noise ratio, and variability of EEG signals have historically posed substantial challenges for interpretation. In recent years, artificial intelligence (AI), encompassing both classical machine learning (ML) and advanced deep learning (DL) methodologies, has transformed EEG analysis by enabling automatic feature extraction, robust classification, regression-based state estimation, and synthetic data generation. This survey synthesizes developments up to 2025, structured along three dimensions. The first dimension is task category, e.g., classification, regression, generation and augmentation, clustering and anomaly detection. The second dimension is the methodological framework, e.g., shallow learners, Convolutional Neural Networks (CNNs), Recurrent Neural Networks (RNNs), Transformers, Graph Neural Networks (GNNs), and hybrid approaches. The third dimension is application domain, e.g., neurological disease diagnosis, brain-computer interfaces (BCIs), affective computing, cognitive workload monitoring, and specialized tasks such as sleep staging and artifact removal. Publicly available EEG datasets and benchmarking initiatives that have catalyzed progress were reviewed in this study. The strengths and limitations of current AI models were critically evaluated, including constraints related to data scarcity, inter-subject variability, noise sensitivity, limited interpretability, and challenges of real-world deployment. 
Future research directions were highlighted, including federated learning (FL) and privacy-preserving learning, self-supervised pretraining of Transformer-based architectures, explainable artificial intelligence (XAI) tailored to neurophysiological signals, multimodal fusion with complementary biosignals, and the integration of lightweight on-device AI for continuous monitoring. By bridging historical foundations with cutting-edge innovations, this survey aims to provide a comprehensive reference for advancing the development of accurate, robust, and transparent AI-driven EEG systems.
Keywords: Electroencephalography, Artificial intelligence, Deep learning, Machine learning, Brain-computer interface, Seizure detection, Emotion recognition, Explainable artificial intelligence

1. Introduction

EEG is a non-invasive technique that records the brain’s electrical activity via electrodes placed on the scalp [1], [2]. Due to its portability and relative simplicity, EEG has become central to both clinical neuroscience and BCI applications. However, raw EEG signals are complex and noisy, posing significant challenges for interpretation by human experts [3], [4]. In recent years, AI techniques—particularly ML and DL—have been increasingly employed to automatically analyze EEG data and uncover subtle patterns beyond the reach of traditional analyses [5], [6]. These AI-driven advances promise improved diagnostic tools and new interactive technologies that utilize brain signals [7]. The rapid development of AI for EEG analysis warrants a comprehensive survey to consolidate recent findings. Research in this area has grown explosively, especially in the last five years, fueled by larger EEG datasets and more powerful algorithms. A survey can illuminate the state of the art, from early ML approaches to modern deep neural networks, and highlight how these techniques are transforming EEG-based research and applications. This study aims to bridge historical context with cutting-edge developments, providing researchers a clear roadmap of how AI methods have evolved to meet EEG analysis challenges.

EEG is widely used to study brain function and to diagnose neurological conditions (e.g., epilepsy and dementia) because it directly reflects brain dynamics in real time [8]. However, the interpretation of EEG traditionally requires laborious manual inspection or hand-crafted feature extraction by experts. AI offers a powerful alternative: algorithms can learn complex EEG patterns and make accurate classifications or predictions, improving both speed and accuracy of the analysis [9]. For example, modern ML can sift through thousands of EEG recordings to detect signatures of Alzheimer’s disease (AD) that clinicians might miss [10], [11]. DL models, by automatically learning features, avoid the biases of manual feature selection and have the potential to make EEG a more robust tool for clinical diagnosis [12]. Beyond medicine, AI-powered EEG analysis is enabling new BCI systems where computers interpret users’ mental commands or emotional states for assistive technology and human-computer interaction [13].

This study provides a comprehensive survey of AI techniques applied to EEG signals. It covers both classical ML and contemporary DL methods, emphasizing developments through 2025. After introducing a background on EEG signals and AI methodologies, a multi-faceted categorization of AI techniques was proposed for EEG analysis: task category (e.g., classification vs. regression), methodological framework, e.g., Support Vector Machines (SVMs) vs. CNNs, and application domain (e.g., medical diagnosis, emotion recognition, seizure prediction, and cognitive workload assessment). For each category, representative methods were summarized, comparing performance on common datasets (with tables of benchmarks and metrics) and discussing strengths and weaknesses. Then the major public EEG datasets and benchmarks that have driven progress in the field were reviewed. Finally, current challenges (such as data quality, variability, and interpretability issues) were analyzed and promising future directions were outlined. In particular, emphasis was placed on Transformers for long-range temporal modeling, FL and privacy-preserving learning to enable collaborative yet secure EEG analysis across institutions, and XAI tailored to neurophysiological signals. That could shape the next generation of EEG-based technologies. The objective of this study is to provide an authoritative reference that both newcomers and experienced researchers can use to understand the landscape of AI in EEG analysis and to identify opportunities for future research in this interdisciplinary domain.

2. Background and Preliminaries

EEG measures voltage fluctuations generated by ionic currents in neurons, captured at the scalp. EEG signals are characterized by multiple frequency components corresponding to different brain rhythms. They are conventionally divided into frequency bands—delta (approximately 0.5–4 Hz), theta (approximately 4–8 Hz), alpha (approximately 8–12 Hz), beta (approximately 12–30 Hz), and gamma (approximately 30–50+ Hz)—each associated with different brain states [14]. These frequency bands are not only physiologically distinct but also serve as crucial biomarkers in AI-based EEG applications. For example, delta rhythms dominate in deep sleep and are leveraged in sleep stage classification models. Theta activity is linked with memory and drowsiness, making it useful in cognitive workload and driver-fatigue monitoring. Alpha rhythms reflect relaxation and attentional states, and asymmetry in alpha power is widely used in affective computing for emotion recognition. Beta activity is associated with motor control and concentration, forming the basis of motor imagery (MI) BCIs. Gamma oscillations, linked with higher-order cognition, are explored in studies of working memory and perceptual binding. EEG recordings typically have low amplitude (tens of microvolts) and can be recorded from dozens of electrodes placed according to the international 10–20 system across the scalp. During EEG acquisition, signals are often sampled at rates from 128 Hz up to 1000 Hz or more and require amplification and filtering. However, EEG is highly prone to noise and artifacts from muscle activity, eye blinks, electrical interference, etc. [15]. This inherent noisiness and non-stationarity make analysis difficult—preprocessing steps like filtering, artifact removal, e.g., via independent component analysis (ICA), and segmentation are usually applied to isolate meaningful neural signals [16]. 
Despite these challenges, EEG remains a key tool for its millisecond-level temporal resolution and its ability to reflect cognitive or clinical states in real time [17], [18]. Figure 1 illustrates an example of a raw EEG segment and its decomposition into these canonical sub-bands. Each band exhibits a characteristic oscillatory pattern that serves as a biomarker in AI-based EEG applications such as sleep staging, emotion recognition, and MI BCIs.

EEG is used in a wide range of applications in neuroscience and engineering. In clinical settings, EEG is indispensable for neurological diagnosis—for example, detecting epileptic seizures and abnormal brain waves, assessing depth of anesthesia, or aiding diagnosis of neurodegenerative diseases [19]. In cognitive and affective research, EEG allows monitoring of mental states such as workload, fatigue, or emotion [20], [21], [22]. In sleep medicine, EEG-based sleep staging classifies sleep phases such as Rapid Eye Movement (REM) and Non-Rapid Eye Movement (NREM) stages for disorders like insomnia or sleep apnea [23], [24]. EEG is also the backbone of BCIs, enabling direct communication or control of external devices via brain signals [25]. Classic BCI paradigms include MI (where users imagine movements to control prosthetics or cursors), P300 spellers using event-related potentials (ERPs) for communication, and neurofeedback systems [26], [27], [28]. These diverse applications each impose different requirements on EEG analysis methods—for instance, real-time responsiveness for BCIs, or high specificity for medical diagnosis—which in turn influence the choice of AI techniques.

The application of AI to EEG is not new—even early EEG studies employed pattern recognition algorithms [29]. Traditional ML approaches in EEG analysis often involved manual feature extraction followed by classifiers like Linear Discriminant Analysis (LDA) or SVM [30]. Handcrafted feature pipelines remain valuable, especially when data are limited [31], [32]. Common handcrafted EEG features include power spectral densities in specific bands, wavelet transform coefficients, common spatial patterns (CSP) for BCIs, and connectivity or coherence measures [33], [34]. These features, once extracted, are fed into ML classifiers (SVM, LDA, k-nearest neighbors, random forests, etc.) to perform tasks like detection of a condition or classification of mental states [29], [35]. Such pipelines have shown success in many studies; for example, LDA and SVM were long employed as reliable classifiers for EEG-based BCI systems. However, a limitation of this “feature engineering” approach is that it relies on human expertise to choose the right features and may miss complex patterns [6].

Figure 1. Examples of EEG waveforms in different frequency bands (delta, theta, alpha, beta, and gamma)

The rise of DL has shifted the paradigm towards end-to-end feature learning from raw data. In the last decade, as larger EEG datasets became available and computational power increased, researchers started training deep neural networks to automatically learn features directly from EEG signals. Notably, CNNs and RNNs have become prevalent [36], [37], [38]. CNNs are adept at learning spatial and temporal filters from multichannel EEG; they can be applied either on raw time-series or on time-frequency images of EEG (e.g., spectrograms) [39]. RNNs, including popular variants like Long Short-Term Memory (LSTM) and Gated Recurrent Units (GRUs), excel at modeling the temporal dynamics of EEG sequences [40], [41]. CNNs and RNNs can also be combined (e.g., a CNN for feature extraction per time window, feeding into an LSTM for sequence modeling) [42]. More recently, advanced architectures such as Transformers (which use self-attention mechanisms) have been introduced to EEG, showing promise in capturing long-range dependencies in the data [43], [44], [45]. In parallel, hybrid models have emerged—for instance, combining DL with traditional methods or combining multiple neural network types—to use complementary strengths [46]. Throughout this survey, various model types and their adaptations for EEG were encountered.

Since many AI-on-EEG advancements were driven by BCI research, this study briefly notes the basics of a BCI system [47]. A BCI acquires brain signals (often EEG due to its non-invasive nature), processes them to extract features, then classifies or translates them into commands that drive an external device or computer interface. For example, in an MI BCI, when a user imagines moving their left hand, the EEG rhythms (typically mu and beta rhythms over the motor cortex) desynchronize in a characteristic way; a trained AI model can recognize this pattern and cause a robotic arm to move left [48], [49]. Early BCI implementations used simple ML classifiers on a few handcrafted features (like band power changes), but modern BCIs increasingly rely on DL to improve accuracy and reduce the need for user-specific calibration [50], [51]. The requirement for BCIs to operate in real time with high reliability underscores the importance of robust and computationally efficient AI algorithms for EEG [52]. Additionally, because BCIs directly affect user experience or safety (in assistive devices, for instance), interpretability of the AI’s decisions and generalization across users are critical considerations—issues discussed in later sections of this study [53].

In summary, EEG signals present unique opportunities and challenges for AI. The rest of this study delves into how various AI techniques are categorized and applied to EEG, what benchmarks and datasets are advancing the field, and how researchers are tackling the inherent hurdles while pushing toward more powerful and generalizable EEG-based AI systems.

3. Categorization of AI Techniques for EEG Analysis

AI techniques applied to EEG can be categorized from multiple perspectives. In this study, the methods were categorized into three complementary dimensions: (a) task category (what kind of problem is being solved with EEG data), (b) methodological framework (which type of AI/ML model is used), and (c) application domain (the functional area or purpose of the analysis). This multi-axis categorization helps clarify how certain algorithms align with specific tasks and applications [54]. For each category, typical methods were summarized, providing examples (including datasets and metrics where applicable) and discussing the strengths and weaknesses of the approaches. Comparative tables were included to highlight key methods and performance on benchmark datasets.

3.1 Task Category

EEG analysis tasks can be broadly divided by the nature of the output that the AI model produces or the problem being addressed.

3.1.1 Classification tasks

The majority of EEG studies formulate a classification problem—the AI model assigns the EEG data to one of several discrete classes or categories. Examples include diagnosing a condition (seizure vs. non-seizure EEG, or patient vs. healthy control), recognizing a mental state (e.g., “imagining left hand movement” vs. “right hand movement” in MI BCI, or high vs. low cognitive workload), or detecting an event (like the presence of a P300 ERP vs. no event) [55], [56], [57], [58]. Robust automatic classification of EEG signals is crucial for practical applications [51]. Historically, classification tasks were tackled with traditional ML on extracted features, but recent DL models have reported improved accuracy [59]. For instance, CNNs or RNNs can classify emotional states from EEG with higher accuracy than earlier methods. Classification performance is typically measured by accuracy, precision/recall, or area under the curve (AUC) for each class, depending on the application. A strength of classification formulations is their simplicity and direct relevance to decision-making (e.g., a seizure happening or not). A weakness, however, is that complex brain states may not fall into clear-cut categories (e.g., gradations of emotion or cognitive load), and training classifiers requires sufficient labeled examples of each class [60].

3.1.2 Regression and continuous estimation tasks

In some cases, the goal is to predict a continuous value or time series from EEG such as estimating a person’s level of drowsiness or cognitive load on a continuous scale, tracking the depth of anesthesia, or reconstructing a continuous hand trajectory from EEG for a prosthetic device [61], [62]. Regression tasks output a numerical value (or a set of values). They are less common than classification in EEG research, but important for scenarios where brain states vary along a spectrum rather than in distinct categories. Common approaches include linear regression models on EEG features, as well as neural networks that output continuous values (e.g., using a linear output layer) [63]. Recurrent networks (LSTM/GRU) are particularly suited for continuous sequence prediction from EEG, as they can learn temporal mappings (e.g., predicting a future EEG trend or an external signal like respiratory rate from EEG). The performance of regression models is measured by metrics like mean squared error (MSE), correlation coefficient ($r$), or mean absolute error [62]. One challenge in regression with EEG is obtaining ground truth for continuous mental states, which often rely on subjective scales or indirect proxies.

3.1.3 Generation and data augmentation tasks

An emerging task category involves generating or synthesizing EEG signals using AI, typically with generative models. The motivation is often to augment limited EEG datasets or to simulate EEG for scenarios where data are hard to collect. Techniques like Generative Adversarial Networks (GANs) and Variational Autoencoders (VAEs) have been applied to create realistic EEG-like signals or to enhance data variability [64], [65], [66], [67]. For example, GANs have been used to generate artificial EEG segments to balance class distributions (such as creating “fake” seizure examples to supplement real data in training) and have shown improvements in classifier performance [68], [69], [70]. Recently, diffusion models—which have revolutionized image generation—have been explored for EEG. One study has used a diffusion model to remove artifacts from EEG and improve cross-subject generalization [71], [72]. Another application of generation is style transfer or transformation of EEG (e.g., converting one subject’s EEG style to that of another to facilitate domain adaptation) [73]. While generative models can produce highly realistic EEG patterns, a challenge is ensuring physiological plausibility and that synthetic data cover meaningful variations [74]. Data augmentation through simpler means is also widely used. Sliding windows, signal segmentation and recombination, and noise injection are common strategies to enlarge training data [75]. These conventional augmentations are easier to implement but limited in diversity; by contrast, deep generative augmentations (GAN/VAE) can introduce novel and plausible variations.

The strength of generation tasks is in addressing data scarcity and improving model robustness; the weakness lies in the risk of generating artifacts or overfitting to synthetic patterns that don’t generalize to real data. Recent innovations have applied diffusion-based models to EEG tasks. For instance, a flexible Denoising Diffusion Probabilistic Model (DDPM) framework synthesized realistic EEG/electrocorticography (ECoG)/Local Field Potential (LFP) signals with preserved spectral and coupling characteristics [76]. EEGDfus, a conditional diffusion architecture combining CNN and Transformer branches, achieved state-of-the-art denoising (correlations up to 0.992) [77]. Transformer-based diffusion models utilizing generated vicinal data enhanced classification accuracy across diverse datasets by 2.5–14% [78]. Multimodal augmentation methods, e.g., EEG functional near-infrared spectroscopy (fNIRS) EFDA CDG, yield significant performance gains such as approximately 98% accuracy on motor tasks and drug-addiction discrimination [79]. ERP-focused conditional diffusion models enabled subject- and session-specific signal generation with domain-aware fidelity metrics [80]. Finally, EEGDM, a diffusion-pretrained representation learning model, achieved superior results on Temple University Hospital (TUH) EEG tasks with a lightweight architecture [81].

While generative models such as GANs, VAEs, and diffusion networks offer promising solutions for data scarcity and augmentation, they also introduce ethical and safety risks. Synthetic EEG signals may be difficult to distinguish from authentic recordings, raising concerns about data provenance and verification in shared repositories. In clinical contexts, reliance on artificially generated data without proper validation could increase the risk of misdiagnosis or inappropriate treatment recommendations, especially if models learn spurious features present only in synthetic data. Furthermore, adversarial misuse—such as intentionally generating deceptive EEG to manipulate diagnostic systems—is a potential threat. To mitigate these risks, researchers emphasize the need for clear labeling of synthetic vs. real data, rigorous physiological plausibility checks, and transparent reporting standards when using generative EEG in scientific or clinical applications. Ethical frameworks and auditing mechanisms should therefore complement technical advances in generative EEG research to ensure responsible deployment.

3.1.4 Clustering and anomaly detection

In unsupervised or semi-supervised settings, one may cluster EEG data into groups or detect anomalous EEG segments without explicit labels. For example, clustering could be used to find recurring EEG microstates or to group patients by EEG features [82]. Anomaly detection is relevant in monitoring scenarios (flagging an abnormal EEG pattern that might indicate an impending seizure or equipment malfunction) [83]. Techniques include clustering algorithms (k-means on EEG features, or dimensionality reduction + clustering) and autoencoder (AE)-based anomaly detectors that learn to reconstruct normal EEG and raise an alert when reconstruction error is high (indicating an out-of-distribution pattern). While not as prominent in literature as supervised tasks, these methods are valuable for exploratory analysis and situations with few labels. Their performance is harder to quantify, often requiring qualitative validation or use of surrogate labels.

3.1.5 Summary of task categories

Table 1 provides an overview of common EEG task formulations with examples. Classification is the most prevalent task in literature, but other task types address important needs like continuous monitoring and data scarcity.

Table 1. Summary of EEG analysis tasks and examples
Task Category | Typical Objectives | Example Applications | Common Metrics
Classification | Assign a label to the EEG segment/trial | Seizure vs. non-seizure detection; mental state (e.g., emotion) classification; MI command recognition; sleep stage classification | Accuracy, F1-score, AUC, and kappa
Regression | Predict continuous value or time series | Workload level estimation, drowsiness level, and reconstructing movement kinematics from EEG | MSE, correlation ( $r$ ), and mean absolute error (MAE)
Generation (augmentation) | Produce synthetic EEG or transform EEG (often for data augmentation or denoising) | Creating additional training data for rare events (e.g., seizures); removing artifacts (e.g., via diffusion model); simulating EEG for BCI training | Quality judged by realism (visual inspection) or improvement in downstream task performance
Clustering / unsupervised | Discover structure without labels; anomaly detection | Grouping EEG features into clusters (e.g., EEG microstates); detecting abnormal EEG epochs in long recordings | Clustering validity indices; anomaly detection Receiver Operating Characteristic (ROC) (if ground truth anomalies are available)

Each task type may demand different algorithmic solutions—for instance, classification often relies on discriminative models, while generation uses generative models. Next, this study examines AI techniques by methodological framework, noting how they align with these tasks.

3.2 Methodological Framework

AI algorithms applied to EEG range from established shallow learners to cutting-edge deep architectures. This study categorizes them into subgroups and highlights key characteristics, strengths, and weaknesses of each.

3.2.1 Traditional ML (shallow models)

This group includes methods like LDA, SVM, naive Bayes, decision trees, random forests, k-Nearest Neighbors (k-NN), and ensemble methods [84], [85], [86]. These algorithms typically require a feature extraction step from EEG signals (e.g., mean band power, coherence between channels, and entropy measures). They are often fast to train and can work well with limited data, which was advantageous in early EEG studies where datasets were small. For example, SVM was popular in EEG classification and could achieve high accuracy in BCI tasks when combined with well-chosen features. A major benefit of shallow models is their interpretability (somewhat) and lower risk of overfitting on small data—e.g., decision tree ensembles can highlight which features are important, and linear models give weight coefficients. Shallow ensembles remain surprisingly competitive with deep networks on several EEG problems when paired with well-crafted features [87]. However, their capacity to model complex nonlinear patterns is limited compared to deep networks. They also rely heavily on the quality of input features; if the handcrafted features omit critical information, the model cannot recover it. In summary, traditional ML methods serve as a strong baseline and are still used in scenarios with very scarce data or when model interpretability is paramount. However, as data sizes and problem complexity grew, their performance plateaued in comparison to DL approaches.

3.2.2 Artificial Neural Networks (ANNs) and DL

This category encompasses multi-layer neural networks capable of automatic feature learning. The simplest form, Multilayer Perceptrons (MLPs), consisting of fully connected layers, were applied to EEG in the past but found limited use alone (they often underperformed feature-based methods on raw EEG). The revolution came with specialized architectures.

CNNs

Figure 2. Deep ConvNet for BCI [7]

CNNs are now a dominant model for EEG signal classification. They apply learned convolution filters across the input, which in EEG can capture local temporal patterns or spatial combinations of channels. CNNs are adept at learning translation-invariant features and have been extensively used for tasks like seizure detection, where a CNN can learn waveform patterns of seizures, or for MI, where CNNs can learn spatial filters akin to CSP. Notably, a survey found that CNNs (often with various architectural tweaks) constituted over 90% of DL papers in EEG classification in recent years [72]. The strengths of CNNs are their ability to automatically extract relevant features (avoiding manual feature engineering) and their efficiency due to weight sharing (making them faster to train than fully connected networks with similar layers). Some CNN architectures for EEG incorporate domain knowledge—for instance, filters that first convolve across time and then across channels to separate temporal and spatial filtering [7]. Figure 2 shows the Deep ConvNet architecture proposed by Schirrmeister et al. [7] for BCI, which applies an initial temporal convolution to capture frequency-specific patterns, followed by spatial filters across electrodes. Subsequent convolution-pooling blocks learn increasingly abstract representations of EEG, and a final dense layer performs classification. This design mimics classical preprocessing steps (band-pass filtering and spatial filtering) while enabling end-to-end feature learning.

CNNs have achieved state-of-the-art performance on many benchmarks, sometimes rivaling human expert accuracy in tasks like sleep stage scoring or detecting specific EEG patterns [88], [89]. However, CNNs require sufficient training data to learn effectively; with very small EEG datasets, they risk overfitting. They also act as “black boxes,” making it hard to interpret what features have been learned (though techniques exist to visualize CNN filters or use class activation maps). Additionally, many CNNs treat EEG as an image or grid of channels, which may not optimally account for the true 3D geometry of the head. Some newer models address this via Graph Convolution Neural Networks (GCNNs), as shown in Figure 3. Overall, CNNs are powerful for spatial-temporal pattern recognition in EEG and remain an active area of model innovation (with variations like deep vs. shallow CNNs, residual connections, separable convolutions for efficiency, etc.).

Figure 3. GCNs-Net for EEG MI [90]

RNNs

RNNs, especially LSTM and GRU networks, have been widely used for sequential modeling of EEG data. EEG is inherently time-series, and RNNs can maintain an internal memory to accumulate information over time. For example, an LSTM can integrate evidence over several seconds of EEG to decide if a cognitive event has occurred, potentially improving detection of subtle events that unfold over time. LSTMs and GRUs address the vanishing gradient problem of basic RNNs, enabling learning of longer temporal dependencies. RNNs naturally handle variable-length sequences and can model temporal dynamics like oscillatory phase or ERP shapes. They have been used for tasks like predicting upcoming seizures (by analyzing temporal patterns preceding a seizure) or for continuous sleep stage tracking. More recently, however, their use has declined due to the rise of attention mechanisms and Transformers, which often outperform RNNs in capturing long-range dependencies. Moreover, RNNs can be slower to train (due to sequential processing) and may struggle with very long sequences unless truncated or hierarchical approaches are used. In practice, many EEG studies combine CNNs and RNNs (using CNNs to extract per-window features and then RNNs to capture sequences of those features). Pure RNN architectures have somewhat fallen out of favor for some EEG tasks.

Transformers and attention-based models

Transformers are a newer class of models that rely on self-attention mechanisms instead of recurrent connections to process sequential data [91]. In the last couple of years, Transformers have rapidly influenced EEG research. Their ability to attend to relationships between any two time points (or channels) in the sequence is advantageous for EEG, where important patterns (like spike-wave discharges in epilepsy or ERP components) may be dispersed in time. Transformers can also integrate information from multi-channel data effectively by treating the multi-channel EEG as a sequence of combined feature vectors. Studies have proposed adaptations like the time-series Transformer (treating EEG time points similarly to words in a sentence), the Vision Transformer applied to EEG time-frequency images, and even the Graph Transformer for EEG where nodes represent electrodes [92], [93], [94]. Transformers excel at capturing long-range dependencies and complex interactions in the data. They have achieved state-of-the-art performance in some application benchmarks, reportedly outperforming CNN/RNN in MI classification, emotion recognition, and seizure detection tasks. They are also highly parallelizable (allowing faster training on GPUs than RNNs which are sequential). However, a key drawback is the need for large training datasets—Transformers have many more parameters and can easily overfit to smaller EEG datasets. EEG datasets are often smaller than the big data regimes where Transformers shine (e.g., ImageNet or large text corpora). Therefore, without careful regularization or pretraining, Transformers might not realize their full potential on EEG. Another issue is interpretability. While the attention weights in Transformers can sometimes be visualized to infer which time periods or channels the model deemed important, interpreting them in a neurophysiological sense is non-trivial. 
Nonetheless, the emergence of Transformers in EEG analysis is a notable trend, and researchers are actively exploring optimized Transformer architectures for EEG (such as combining convolutional front-ends with Transformer back-ends or using Transformer encoders for spatial filtering).

Despite their strengths, Transformers are particularly sensitive to EEG artifacts and non-stationarity. Because attention mechanisms treat all time points as potential contributors to prediction, spurious events such as eye blinks, muscle contractions, or electrode drifts can disproportionately influence the learned attention weights. Unlike CNNs that can localize features or RNNs that smooth temporal context, Transformers may overemphasize transient noise if not explicitly constrained. Moreover, EEG’s non-stationary nature—variability across sessions, subjects, or recording conditions—can cause Transformers to overfit to session-specific noise patterns, reducing cross-subject generalization. Large model capacity also exacerbates this risk when training data are limited. Recent work attempts to address these issues by incorporating artifact-aware preprocessing, attention regularization, and hybrid CNN-Transformer designs that first extract robust local features before global modeling. Nevertheless, the challenge of ensuring Transformer robustness to real-world EEG noise remains an open problem and a key consideration for future research.

Other DL models

There are additional architectures worth mentioning. AEs have been used in unsupervised learning on EEG such as learning low-dimensional representations or denoising signals [95], [96]. Stacked AEs and deep belief networks (DBNs) were some of the earlier deep models applied to EEG, with moderate success in feature learning [97], [98]. GNNs, including Graph Convolutional Networks (GCN) and graph attention networks, treat EEG channels as nodes in a graph (edges might represent physical adjacency or functional connectivity) and perform learning on this graph structure [99], [100], [101], [102], [103]. GNNs can better exploit the non-grid topology of electrodes and have shown promise in emotion recognition and seizure detection by modeling relationships between electrode signals. Hybrid models combine modalities or algorithmic approaches. For instance, a model might use a CNN to extract features and then an SVM as the final classifier (a convolutional+deep hybrid), or combine EEG with other signals like fNIRS or electrooculography (EOG) in a multi-modal network. Another growing area is transfer learning models—due to the variability in EEG, researchers have explored pretraining a deep model on a large dataset and fine-tuning it on a smaller target dataset, or using domain adaptation networks to transfer knowledge across subjects or tasks. The hybrid and transfer approaches are attempts to boost performance when data are limited or when a single model type alone is insufficient.

Beyond supervised architectures such as CNNs, RNNs, and Transformers, a growing line of work leverages self-supervised learning (SSL) to exploit large amounts of unlabeled EEG data. SSL frameworks, often built upon deep encoders, pretrain feature representations through contrastive or predictive objectives before fine-tuning on downstream tasks. For example, contrastive SSL has improved emotion recognition under limited labeled data and enabled cross-subject transfer in MI classification [104]. Graph-based SSL methods (e.g., EEG-DisGCMAE) further adapt to varying electrode densities, facilitating knowledge transfer between high- and low-density EEG setups [105]. These approaches consistently demonstrate greater label efficiency, robustness to inter-subject variability, and enhanced generalization compared to purely supervised training. Integrating SSL into EEG analysis thus expands the applicability of DL to domains where data scarcity is a critical bottleneck, complementing the advances achieved by supervised and semi-supervised models.

3.2.3 Strengths vs. weaknesses

To summarize the algorithmic families, Table 2 provides a comparison, highlighting their typical pros and cons in the EEG context.

Table 2. Comparison of algorithmic approaches for EEG analysis

Algorithm class

Examples

Strengths

Weaknesses

Traditional (shallow)

LDA, SVM, k-NN, random forest, etc.

Simple, fast training; interpretable (for some); works on small data; a well-established theory

Requires manual feature extraction; limited modeling capacity for complex patterns; may not fully exploit raw data

CNN-based networks

Deep ConvNets, EEGNet, etc.

Learns spatial-temporal features automatically; state-of-the-art accuracy in many tasks; efficient convolution operations

Needs large data; can overfit if not regularized; acts as a black box (features not directly interpretable)

RNN-based networks

LSTM, GRU, etc.

Captures temporal dependencies; suitable for sequence data; can model context over time (useful for ERP or trend analysis)

Training can be slow; there is difficulty with very long sequences; older RNNs can suffer vanishing gradients; and they are less used now compared to attention models

Transformer attention

EEGformer, EEGViT [106], Vision Transformer, etc.

Models long-range interactions effectively; often highest performance when data abundant; highly parallelizable; flexible input representations

Very data-hungry; large model size; interpretability of attention weights isn't straightforward; computationally heavy if not optimized

Hybrid / specialized models

GNNs, hybrid CNN-RNN, and ensemble methods combining deep and shallow

Can incorporate domain knowledge (e.g., electrode layout in GNN), utilize strengths of multiple methods, and often improve generalization

Added complexity in design; harder to analyze; may require tuning multiple components; not as widely adopted yet

While Table 2 summarizes general strengths and weaknesses, several direct comparisons on benchmark EEG datasets illustrate performance gaps more concretely. For example, on the CHB-MIT pediatric epilepsy dataset, classical ML pipelines—e.g., Least Squares Support Vector Machine (LS-SVM) with Radial Basis Function (RBF) kernels using handcrafted time-frequency features—achieve approximately 97–98% accuracy with sensitivity around 97% and specificity around 99% [107]. Meanwhile, modern DL models such as hybrid CNN-LSTM architectures reported accuracy near 94.8%, sensitivity around 90.2%, and specificity up to 99.5% [108]; other methods push above 97.5% accuracy with high sensitivity (approximately 98.9%) and low false positive rates (approximately 2%) [109]; and a recent 1D Residual Convolutional Neural Network (ResCNN) model achieves near-perfect performance (accuracy approximately 99.7% and sensitivity approximately 99.6%, specificity approximately 99.6%) alongside extremely low false-positive rates [110].

In MI BCIs (BCI Competition IV-2a), classical CSP-based pipelines typically yield four-class accuracy in the range of approximately 60–75%. For instance, baseline models often hover around 70% accuracy [111]. DL pipelines such as Filter Bank Convolutional Network (FBCNet)—a CNN variant—achieve around 76.2% accuracy; Transformer-infused architectures like EEG-TCNet reach approximately 77.35%; larger hybrid CNN-Transformer models such as CLTNet obtain around 83.0% accuracy [112]; and state-of-the-art multi-scale Transformer models, e.g., Multi-Scale Convolutional Transformer (MSCFormer), reach approximately 83.0% average accuracy, with some reports reaching up to approximately 88% in enhanced configurations [113].

Similarly, in emotion recognition using the DEAP benchmark, traditional subject-independent pipelines using handcrafted features (e.g., differential entropy) generally reach moderate accuracy levels—often around 70% or lower; although explicit SVM-based numbers weren’t located, these rates are in line with historical baselines. In contrast, deep architectures show striking improvements: CNN-LSTM networks with feature matrices achieve valence/arousal classification accuracy of 96.9% and 97.4%, respectively (presumably subject-dependent) [114], and Transformer-based systems in binary classification exceed 95% accuracy in subject-dependent settings.

These examples highlight that while traditional methods remain useful for smaller datasets or interpretable pipelines, DL architectures—especially with CNNs, hybrid models, and Transformers—generally dominate when sufficient training data and model capacity are available. In practice, the choice of algorithm depends on the task requirements and available data. For example, a hospital deploying an EEG-based diagnostic tool might favor an interpretable model or one tested for robustness, whereas a competition for BCI accuracy might push for complex deep ensembles to squeeze out extra percentage points of accuracy. The next section looks at EEG-focused AI methods by application domain, which often dictates which algorithms and task formulations are suitable.

3.3 Application Domain

Another way to organize AI in EEG research is by application or analytical purpose. Different applications emphasize different aspects of EEG signals and pose distinct challenges. This study highlights several major application domains where AI has been applied to EEG, noting the typical approaches and performance in each.

3.3.1 Medical diagnosis and neurological disorders

Using AI to interpret clinical EEGs is a rapidly growing area. EEG is critical in diagnosing conditions like epilepsy, where neurologists examine EEG for epileptiform spikes or seizures, and it’s useful in assessing neurodegenerative diseases (dementia, Parkinson's, etc.), detecting encephalopathy, and more. AI methods are being developed to assist or automate these interpretations.

Epileptic seizure detection and prediction

This has been a flagship problem for EEG-based AI [115]. Many studies have segmented EEG into short windows and classified each as “seizure” or “non-seizure.” Classical approaches extract features such as band power, entropy or wavelet coefficients and feed them into SVMs or tree-based classifiers, yielding average accuracies around 75–80%. DL, particularly CNNs, has substantially outperformed these methods. A December 2024 meta-analysis reported pooled DL accuracy of $\approx$ 89% (sensitivity $\approx$ 89% and specificity $\approx$ 91%) versus $\approx$ 78% accuracy for traditional ML on validation sets [116]. Moreover, state-of-the-art single-study CNNs on the CHB-MIT pediatric dataset routinely exceed 95%, with some fully convolutional networks achieving >99% accuracy, >99.6% specificity, and false-alarm rates below 0.5/h [117], [118]. Alongside deep models, hybrid feature-engineering pipelines have also achieved near-perfect detection in cross-validation. For example, the explainable FriendPat-centric XFE model attained 99.61% accuracy using 10-fold Cross-Validation (CV) (and 79.92% with leave-one-subject-out CV) on a 10,356-signal Turkish epilepsy EEG dataset [119]. Similarly, the neonatal-focused TATPat ensemble obtained 99.1% 10-fold-CV accuracy on the open Helsinki NICU corpus while retaining full interpretability [120], [121]. Beyond detection, seizure prediction—forecasting events minutes to hours in advance—remains more challenging but has also seen rapid progress. Early-warning CNNs evaluated on CHB-MIT reported AUCs of 98.8% with sensitivity >93.5% and false-prediction rates as low as 0.074/h [122], [123]. More recent hybrid CNN-Transformer architectures push sensitivities to approximately 99.3% with >95% specificity and average warning times exceeding one hour [124], [125].

While AI models for epilepsy detection often report high accuracy in controlled experimental settings, translation into real-world clinical workflows remains non-trivial. Regulatory approval processes, such as U.S. Food and Drug Administration (FDA) clearance, CE marking in Europe, and equivalent certifications in other regions, impose stringent requirements on safety, reliability, reproducibility, and interpretability. In particular, FDA’s Software as a Medical Device (SaMD) framework demands not only proof of accuracy but also robustness across diverse populations, transparent documentation of training datasets, and post-market performance monitoring. Furthermore, dataset limitations (e.g., imbalance between ictal vs. interictal events and scarcity of rare seizure types) and generalizability concerns (across different acquisition hardware, electrode setups, or patient demographics) hinder clinical reliability. Another bottleneck is the “black-box” nature of DL models, which makes regulatory reviewers cautious unless interpretability modules (e.g., saliency maps linked to electrode activity) are integrated.

Neurodegenerative and cognitive disorders

EEG changes have been observed in conditions like AD, Parkinson’s disease, mild cognitive impairment (MCI), and attention-deficit/hyperactivity disorder (ADHD). AI is used to detect these changes which might be hard to quantify by eye. For example, Alzheimer’s patients often show shifts in EEG power spectra (more delta/theta and less alpha) and altered connectivity [126], [127]. ML models trained on resting-state EEG have been developed to distinguish Alzheimer’s or MCI patients from healthy aging individuals. A recent study analyzed over 12,000 routine EEG recordings with ML and could identify distinct EEG patterns corresponding to stages of cognitive decline, successfully differentiating patients with Alzheimer’s and Lewy body dementia from normal controls [128]. Impressively, their AI method did this without requiring manual selection of channels or frequency bands, reducing human bias. The ability to learn directly from raw EEG is a strength of modern AI—it may surface novel biomarkers. AI-based diagnosis in this domain is still in exploratory phases but shows potential as a cheap, accessible screening tool (since EEG is widely available and non-invasive). Similarly, for ADHD or autism, EEG ML models have been tested to detect developmental differences (often using EEG during cognitive tasks) [129], [130]. One challenge in these applications is variability—diseases like AD have heterogeneous EEG presentations. Therefore, large training datasets are needed for robustness. The Mayo Clinic study above is a good example of utilizing a huge dataset to achieve generalizable results.

Other clinical uses

EEG is routinely used to monitor depth of anesthesia, detect cerebral ischemia, and diagnose sleep disorders, and AI has been applied successfully across all these domains [131], [132]. In automated sleep staging, end-to-end DL models now routinely match or exceed expert consensus: DetectsleepNet—a lightweight, interpretable CNN built on sleep-expert priors—achieved 80.9% accuracy on SHHS and 88.0% on Physio2018 without any hand-crafted preprocessing, while ZleepAnlystNet reported 87.0% overall accuracy and 82.1% macro-F1 on Sleep-EDF-13 [133], [134], [135], [136], [137], [138]. Although overall accuracy remains in the high 80s, Transformer-enhanced architectures and hybrid designs combining CNN with Bidirectional Long Short-Term Memory (BiLSTM) are beginning to push per-stage F1-scores higher and improve cross-cohort robustness [139], [140], [141], [142].

In the Intensive Care Unit (ICU), AI‐driven EEG alarms are streamlining clinicians’ workflows by delivering fatigue-free, preliminary analyses. Real-time burst-suppression segmentation algorithms—based on adaptive thresholding of local voltage variance—achieved agreement rates with expert readers ($\kappa \approx$ 0.90) [143]. DL models combining CNNs and RNNs for detecting levels of sedation in ICU patients yielded test AUCs of approximately 0.80 [144]. Patient-specific intracranial-EEG seizure detectors—using features such as the Hilbert-Huang transform with Bayesian network classifiers—achieved 96.6% sensitivity at just 0.21 false alarms per hour [145]. Together with emerging EEG-based delirium tracking systems, these tools automatically flag critical events—burst suppression, seizure onsets, or delirium—enabling faster, more reliable alerts [146], [147].

The primary strengths of AI in medical EEG are consistency (no fatigue or intra-reader bias) and the ability to uncover subtle multivariate patterns directly from raw signals. However, high-reliability clinical deployment demands rigorous validation, regulatory approval, and, critically, model explainability. Interpretable designs—such as the expert-inspired modules in DetectsleepNet that highlight salient channels and temporal features—are essential to build clinician trust. Overall, AI in medical EEG is poised to improve early detection and personalized monitoring. But these applications demand high reliability and interpretability. Black-box models are less acceptable in medicine, prompting research into XAI for EEG (as addressed in Sections 5 and 6).

3.3.2 Emotion recognition and affective computing

Using EEG to infer human affective states has become a cornerstone of affective computing. Emotional arousal and valence manifest in EEG rhythms—for example, frontal alpha asymmetry reliably indexes positive versus negative affect [148]. Public benchmarks such as DEAP and SEED have catalyzed this field, with participants’ EEG recorded while watching music videos or film clips annotated along valence-arousal dimensions [149]. Early approaches extracted hand-crafted features (e.g., differential entropy in canonical bands) and fed them into SVMs or similar classifiers, yielding modest subject-dependent accuracies of approximately 60–70% on DEAP [150]. Deep CNNs operating on time-frequency or 3D temporal-spectral-spatial representations later pushed subject-dependent performance above 90%: the dual-scale EEG-Mixer attained >95% on DEAP and approximately 93.7% on SEED, and the Attention-based Multiple Dimensions EEG Transformer (AMDET) model reached 97.48% (arousal) and 96.85% (valence) on DEAP plus 97.17% on SEED [151], [152].

However, real-world use demands subject-independent generalization. In leave-one-subject-out tests on DEAP, accuracies dropped to approximately 65.9% for valence and 69.5% for arousal and to approximately 76.7% for binary positive-negative classification [153]. This gap highlights the challenge of inter-subject variability. Emerging methods—including FL frameworks that achieved 90.7% on DEAP in a distributed setup and graph- or attention-augmented hybrids—are actively tackling cross-subject robustness [154], [155], [156], [157]. In summary, while modern CNN and Transformer models can exceed 95% accuracy in subject-dependent settings, subject-independent EEG emotion recognition typically remains in the 65–80% range, underscoring the need for larger, more diverse training cohorts and novel architectures to bridge this generalization gap.

Emotion EEG signals can be subtle and easily confounded by unrelated mental activity or noise. There is also considerable individual variability—people’s EEG responses to emotional stimuli differ. Therefore, models may overfit to specific participants. To address this, researchers have explored subject-independent models (trained on data from multiple participants and evaluated on unseen individuals) and transfer learning. Another challenge is the reliability of labels. Emotions are often self-reported, which can be subjective or inconsistent. Despite these issues, progress is steady. The strength of DL is in capturing complex, distributed patterns (perhaps a combination of frequency changes across multiple regions) that correlate with emotional states, something difficult to do with manual features alone. Emotion recognition from EEG has applications in adaptive user interfaces (e.g., games that respond to a player’s frustration level), mental health (monitoring mood), and marketing (measuring engagement). However, performance in real-life settings (outside lab stimuli) is still an open question—thus research often continues on improving the robustness of these models (through techniques like FL to gather diverse data without violating privacy).

3.3.3 BCIs and MI

Brain-computer interface (BCI) applications form a substantial portion of EEG-AI research. In MI BCIs, users imagine limb movements, eliciting characteristic desynchronization in the $\mu$ (8–12 Hz) and $\beta$ (13–30 Hz) bands over the sensorimotor cortex. Traditional pipelines combined CSP with LDA or SVM classifiers. DL quickly overtook these. The Deep ConvNet proposed by Schirrmeister et al. [7] reached approximately 75% accuracy on the four-class BCI Competition IV-2a task under a within-subject protocol, and the compact EEGNet model matched CSP+LDA baselines while remaining lightweight. Specialized architectures then set new state-of-the-art benchmarks: FBCNet achieved 76.20% four-class accuracy on IV-2a [158]. MBMANet further improved to 83.18% ($\kappa$ = 0.776) on the same dataset [159]. CTNet, a convolutional-Transformer hybrid, reached 78.66% [160]. CCLNet reported within-subject accuracies up to 95.87% on IV-2a [161]. Despite these gains, subject-independent MI still lags—cross-subject accuracies typically fall to 60–80%, highlighting inter-subject variability as a major hurdle.

BCI paradigms beyond MI include ERPs and steady-state visual evoked potentials (SSVEPs). For P300 spellers, CNN+LSTM ensembles achieved >98.7% single-trial accuracy on BCI Competition II data, vastly outpacing manual averaging methods [162]. In SSVEP-based BCIs, an LSTM model decoding drone-control signals reached 96.8% accuracy, while compact CNNs decoding 12-class asynchronous SSVEP achieve approximately 80% cross-subject accuracy (chance = 8.3%), with subject-specific fine-tuning pushing some reports above 97% [163], [164]. These rapid advances—in specialized CNNs, Transformer hybrids, and end-to-end feature learning—are driving real-time, robust BCI closer to both clinical and assistive-technology deployment.

A major advantage of DL in BCI is reducing calibration time—some deep models can be pre-trained on large datasets and adapt to new users with minimal additional data (through transfer learning), addressing a long-standing BCI issue of lengthy per-user training. In addition, deep models can integrate multiple modalities of input (e.g., EEG + EOG) to simultaneously handle artifacts, as one hybrid approach did by feeding both EEG and an eye-blink channel into a network to make it robust to ocular artifacts. The weakness is that BCIs demand real-time performance; large models with high computation or latency might not be feasible in an online setting. Researchers have tackled this by model compression, using smaller networks (like SSVEP-Net and EEGNet) or optimizing code for deployment. Another challenge is subject variability—a model trained on one set of subjects may perform poorly on a new user due to differences in skull anatomy, electrode contact, etc. Domain adaptation techniques (calibrating the model with a few minutes of data from the new user, or using adversarial training to make the model invariant to subject-specific features) are being explored to mitigate this.

Nonetheless, DL has revitalized BCI research by substantially improving the accuracy and reliability of decoding intentions from EEG. This brings BCIs closer to practical use in assisting disabled users. For example, a recent study combined a Residual Network (ResNet) (for spatial feature extraction) with a GRU (for temporal smoothing) to let severely motor-impaired users control a robotic arm; they achieved reliable device control without the need for a handcrafted feature pipeline [165]. As BCI applications expand (from medical to gaming to virtual reality), the flexibility of AI models to adapt and learn complex mappings will be increasingly important.

3.3.4 Cognitive workload and attention monitoring

EEG is often used to monitor cognitive states such as workload, fatigue, or attention in high-stakes settings (e.g., an operator in a control room or a drowsy driver). AI models have evolved from using simple $\theta$ / $\beta$ or $\alpha$ / $\theta$ power‐ratio thresholds and SVMs to deep networks that capture subtler, multivariate patterns. CNNs, for example, can learn combined slow‐wave increases and blink-artifact signatures of fatigue, while RNNs track the temporal evolution of attention dips. Hossain et al. [166] specifically demonstrated deep‐learning models for driver-attention recognition using short (2–5 s) EEG windows. Time sensitivity is critical—models must flag cognitive decline with minimal delay—and researchers have mitigated inter-subject variability via transfer learning or by fusing EEG with modalities like eye-tracking or heart rate.

Traditional pipelines achieved roughly 70–85% accuracy in driver-fatigue and workload classification. Today’s state-of-the-art deep models routinely exceed 95% on benchmark datasets. TFAC-Net, a single-channel temporal-frequency attention CNN, delivered approximately 95.3% real-time fatigue detection without bulky setups [167]. A ConvNeXt adaptation on the STEW dataset attained 95.76% accuracy for binary workload and 95.11% for three-class load estimation [168]. EEG-CogNet, which combines multi-band features in a deep framework, reported 96.53% accuracy for mental workload, 98.40% for attention state, and 97.86% for fatigue detection [169]. Multimodal fusion, i.e., EEG + electrocardiography (ECG) + facial video, on the DROZY dataset achieves 98.41% accuracy (F1 98.38%) [170]. These models were used to process 1–5 s windows to minimize detection delay, making them viable for real-time alerts. Nevertheless, environmental confounders and subject variability still challenge subject-independent deployment, spurring ongoing work in transfer learning and robust multimodal integration. Real-world deployment remains nascent, but one can envision smart vehicles or workstations that adapt dynamically—e.g., prompting a break when driver fatigue is detected.

Beyond driving and operator vigilance, cognitive workload monitoring with EEG has begun to influence diverse domains such as aviation, education, and gaming. In aviation, EEG-driven workload indices are being integrated into pilot training simulators and cockpit monitoring systems, enabling adaptive autopilot engagement during periods of high workload or fatigue. In education, real-time EEG-based attention tracking has been used to personalize digital learning platforms, dynamically adjusting difficulty levels or presenting breaks when students show declining focus. Similarly, in immersive environments like virtual reality (VR) and augmented reality (AR), EEG-informed adaptive interfaces help prevent simulator sickness and optimize engagement by monitoring mental effort and attentional load. In professional e-sports and gaming, EEG is leveraged for both performance enhancement and neurofeedback, offering players feedback on focus maintenance and stress regulation. These cross-domain applications highlight how advances in lightweight headsets, mobile EEG, and multimodal AI pipelines are making cognitive monitoring more practical outside the laboratory. Despite promising results, challenges remain in ecological validity—classifiers trained on controlled datasets often degrade in noisy, real-world conditions—necessitating further progress in domain adaptation, XAI, and seamless integration with unobtrusive wearable sensors.

3.3.5 Other notable applications

There are many niche but important areas where EEG and AI intersect. A few examples are as follows:

•Sleep stage classification: As touched on earlier, this involves classifying EEG, often along with EOG and electromyography (EMG) channels, into sleep stages (wake, N1, N2, N3, and REM). It’s a critical task in sleep medicine and traditionally done manually by experts. Deep CNNs have been very successful in this aspect, achieving human-level accuracy on large sleep datasets by learning characteristic waveforms like spindles or K-complexes for stage N2, delta waves for N3, etc. [171]. More recently, hybrid CNN-Transformer models and Transformer-based models have emerged, and hybrid models use small convolutional front-ends to extract local spectral features before feeding 30 s epochs as “tokens” into self-attention layers that model long-range stage transitions in parallel [172], [173], [174]. These Transformer-based architectures not only match or exceed CNN+LSTM performance but also provide natural attention-based interpretability and greater flexibility when integrating multiple polysomnography (PSG) channels. Together, they represent the current state of the art in automatic sleep staging.

•Event detection in neuroscience experiments: Researchers have used AI to detect specific neural events in EEG, such as detecting error-related potentials (brain signals when a person makes a mistake), or markers of memory encoding. These help in brain-state-triggered experiments or neurofeedback. For instance, real-time closed-loop experiments might use a classifier to detect when a subject’s brain indicates detection of a rare stimulus (P300) and then respond accordingly. Such event-based decoding is being extended to experiments on decision-making, emotion recognition, and learning, where AI enables millisecond-level detection and precise intervention. This allows researchers to probe causal brain-behavior relationships that were previously inaccessible.

•Artifact recognition and removal: While not an “application” to an end-user, an important use of AI is to automatically identify artifacts in EEG (eye blinks and muscle noise) and remove or correct them [175]. Methods like AEs or GANs have been used to filter out artifacts without losing underlying brain signals. This automation reduces the need for manual preprocessing, accelerating research pipelines and improving the reliability of clinical monitoring. In mobile or wearable EEG setups—where noise is more prevalent—AI-based artifact correction is particularly valuable for enabling real-time applications such as BCI or fatigue detection.

•Neurofeedback and rehabilitation: EEG-driven AI can personalize neurofeedback (training individuals to modulate their own brain activity). For example, AI could adaptively determine which brain patterns to reinforce for a patient recovering from stroke, based on EEG, and adjust the feedback stimuli accordingly. Recent work has explored deep reinforcement learning to optimize training schedules and feedback modalities, making sessions more effective and patient-specific. Beyond stroke, neurofeedback has shown promise in managing ADHD, anxiety, and chronic pain, where AI-driven EEG adaptation improves both user engagement and therapeutic outcomes.

Each application domain tends to use a tailored set of techniques and has unique benchmarks. Table 3 summarizes a few key application areas, their common model types, and representative outcomes reported. Performance figures are approximate from recent literature and can vary with dataset and setup.

Table 3. Representative applications of EEG AI with typical methods and performance

Application

Typical AI Methods

Example Dataset / Benchmark

Performance (Reported)

Epileptic seizure detection

CNN on raw or spectrogram; CNN-LSTM hybrids; ensemble of CNN+SVM; Transformers emerging

TUH EEG Seizure Corpus; CHB-MIT; Bonn EEG

90–99% accuracy in controlled tests (e.g., >95% sensitivity on CHB-MIT); real-time systems <1 s delay with few false alarms

Alzheimer’s/MCI diagnosis

Feature-based ML (e.g., SVM on bandpower); 1D CNN on EEG; GCNN on coherence networks

Local clinical EEG datasets (e.g., the Mayo Clinic dataset of 12k EEGs)

Distinguish AD/MCI vs. healthy with approximately 80–90% accuracy in research settings (needs external validation for clinical use)

Emotion recognition

CNN on topographic feature maps, RNN on time-series, GCNN, and Transformers (recent)

DEAP (emotion EEG); SEED; DREAMER

Approximately 95–100% accuracy for binary high/low arousal or valence; approximately 90–95% for three or more emotion classes (harder task)

MI BCI

Filter-bank CSP + SVM (traditional); Deep ConvNet and EEGNet (CNN); CNN+LSTM; Transformer-based MI networks

BCI Competition IV (2a, 2b); PhysioNet EEG Motor Movement dataset

Approximately 76–93% classification accuracy for four-class MI; near 95–100% accuracy for two-class tasks in some subjects; advanced deep models outperform traditional Filter Bank Common Spatial Pattern (FBCSP) + SVM pipelines by up to approximately 20%

Cognitive load / drowsiness

Shallow ANN/SVM; random forest / XGBoost ensembles; DBNs; 1D CNN and Temporal Convolutional Networks (TCNs); LSTM ($\pm$ attention); graph neural nets; self-supervised pretraining

NASA-Mental Workload dataset; Driving fatigue EEG set; MAT and STEW cognitive tasks

Approximately 84–98.7% accuracy distinguishing low vs. high workload or alert vs. fatigued (e.g., 98.7% with random forest ensembles; 97.4% on MAT; 96.1% on STEW; DBNs $\approx$ 92%); typical SVM/ANN/1D CNN/LSTM models (80–95%)

Sleep stage classification

Deep CNNs (DeepSleepNet and U-Sleep); CNN + BiLSTM sequence models; SeqSleepNet and SleepTransformer; fully convolutional U-Nets (U-Time); TCNs; self-supervised pretraining

Sleep-EDF; MASS dataset; SHHS; CAP Sleep

Approximately 82–88% overall accuracy for five-stage classification on Sleep-EDF / MASS; advanced models reach approximately 87–89% on Sleep-EDF and approximately 88–90% on SHHS with Cohen’s $\kappa$ $\approx$ 0.75–0.82; cascaded RNN pipelines report approximately 95% on six-class CAP Sleep

In summary, AI techniques have permeated virtually every application area of EEG. The success in each domain depends on the interplay between algorithm capabilities and the nature of the EEG signatures for that domain (e.g., distinct large transient events like seizures are easier to detect than diffuse mood states). Across domains, one trend is clear: DL now often provides state-of-the-art results given sufficient data, while integrating domain knowledge (through hybrid models or specialized architectures) and addressing generalization remain active research themes. The next section focuses on the data itself—the public datasets and benchmarks that have catalyzed progress in EEG AI.

4. Public Datasets and Benchmarks

Availability of high-quality EEG datasets has been a crucial factor in advancing AI research on EEG. In the past, many studies used private or small datasets, limiting generalization. Over the last decade, numerous public EEG datasets have been released, covering a variety of tasks from BCI to clinical diagnostics, and several benchmark competitions have been organized. These shared resources enable reproducible research and head-to-head comparison of algorithms. The following is a summary of major EEG datasets and benchmarks, along with their characteristics:

•BCI competition datasets: A cornerstone of BCI research has been the series of international BCI competitions (I, II, III, IV) that released datasets for tasks like MI, P300 spellers, SSVEP, and more [176-180]. For example, BCI Competition IV-2a is a popular MI dataset (9 subjects, 22-channel EEG, and four classes of imagined movements) often used to benchmark classification algorithms.

These datasets typically come with standardized train/test splits, allowing fair comparisons. Researchers continue to use them to report performance of new algorithms, making them de facto benchmarks (e.g., DeepConvNet was evaluated on BCI competition IV-2a, etc.). Performance on these has steadily improved, demonstrating algorithmic progress. The BNCI Horizon 2020 initiative aggregated many BCI datasets in a unified repository, including competition data and other BCI experiment data (like error potentials, etc.) [181].

•Clinical EEG databases: One of the largest is the TUH EEG Corpus, which contains over 60,000 EEG recordings, with a subset labeled for seizures (TUH Seizure Corpus) [182], [183]. This is a game-changer for training deep models in a clinical context. TUH has been used in contests, e.g., the seizure detection challenge of the Institute of Electrical and Electronics Engineers (IEEE), and in studies to train robust seizure detectors. Another notable one is the CHB-MIT dataset (Children’s Hospital Boston), 23 pediatric patients (24 cases) with epilepsy, each with many hours of EEG and annotated seizures; it’s smaller (CHB-MIT: approximately 969 hours and 173 seizures) but widely used in proof-of-concept algorithm tests [117]. The Freiburg EEG dataset provides long-term EEG from epilepsy patients, including intracranial recordings (useful for seizure prediction research). There’s also the Bonn EEG dataset—small, with segments of EEG labeled normal vs. seizure—often used in early ML papers for quick testing, though it’s very limited in size and diversity [184]. For sleep, the Sleep-EDF and expanded sleep datasets (like MASS) provide many nights of PSG for training sleep stage classifiers [134].

•Affective and cognitive datasets: As mentioned, DEAP (32 subjects, 32-channel EEG + peripheral signals, and watching music videos) is a benchmark for emotion recognition. SEED (Chinese dataset: 15 subjects, 62-channel EEG, and film clips for positive/neutral/negative emotion) is another [185]. DREAMER is a smaller one with EEG and other signals for emotion [186]. The PhysioNet EEG Motor Movement/Imagery Dataset is essentially an MI dataset with 109 subjects (from the Wadsworth Center) performing various tasks—it’s a valuable resource for MI and has been used to pretrain networks that are later fine-tuned on smaller BCI data [187]. There are also datasets for specialized tasks like ERP CORE (a recent set of datasets for different evoked potential paradigms, useful for testing AI on ERPs) [188].

•Open data repositories: Platforms such as OpenNeuro and IEEE DataPort host EEG datasets contributed by labs worldwide, ranging from resting-state EEG recordings to task-based EEG [189]. The field has moved toward sharing data in standardized formats (e.g., EEG-BIDS format) to facilitate reuse [190]. The Neuroelectromagnetic Data Archive and Tools Resource (NEMAR), built on OpenNeuro, specifically supports EEG/MEG data sharing and analysis in the cloud [191].

In addition to the widely used datasets summarized above, several large-scale and multimodal resources have been released in recent years (2023–2025) that broaden the benchmark landscape:

•Expanded TUH sub-libraries: TUH continues to grow with modular releases (TUSZ v2.0.3 seizures; TUAB v3.0.1 abnormal; TUAR v3.0.1 artifacts; TUEP v2.0.1 epilepsy; TUEV v2.0.1 events; TUSL v2.0.1 slowing). These standardized subsets support task-specific benchmarking in clinical EEG.

•Population-scale EEG (HBN-EEG): The Healthy Brain Network has released more than 2,600 pediatric/young adult EEG sessions (BIDS, OpenNeuro/NEMAR), with deep phenotyping. A 2025 EEG Challenge built on these data promotes generalizable and fairness-aware benchmarks [192].

•Wearable epilepsy data (SeizeIT2, 2025): More than 11,000 hours of behind-the-ear EEG plus ECG/EMG/IMU, with 886 focal seizures, enabling real-world ambulatory seizure detection research [193].

•EEG-fNIRS multimodal sets: New open datasets include a Stroop task, a visual motivation dataset, and multi-joint MI, supporting hybrid BCI and fusion learning [194], [195].

•Naturalistic Multimodal (NOD, 2025): EEG/MEG/fMRI from the same participants viewing 57k natural images, shared in BIDS with OpenNeuro access, enabling cross-modal representation learning [196].

•Repository Growth: OpenNeuro now hosts approximately 1,400 public datasets (more than 62k participants), including new EEG/iEEG releases, while NEMAR provides browser-based access and compute tools.

These resources extend traditional benchmarks with greater scale, multimodality, and ecological validity and are expected to shape the next wave of EEG-AI research. To give a quick reference, Table 4 lists a selection of widely used EEG datasets and their key features.

Table 4. Selected public EEG datasets and corresponding characteristics

Dataset (Year)

Description

Subjects

Channels

Use case

BCI Competition IV2a (2008)

MI (left hand, right hand, feet, and tongue), 9 subjects, and 4 classes

9

22 EEG + 3 EOG

BCI (MI classification)

PhysioNet EEG Motor Movement/Imagery (2009)

Various MI tasks (open/close fist, etc.)

109

64

BCI/MI, widely used as a baseline for transfer learning

CHB-MIT Scalp EEG (PhysioNet, 2010)

Pediatric epilepsy and continuous EEG with seizures

22

23

Seizure detection (clinical)

TUH EEG Corpus (TUEG: v2.0.1 (2002–2017))

Large clinical EEG database and diverse pathologies

26,846 from more than 10,000 unique patients

24-36 EEG per session

General EEG analysis, seizure subset widely used, and baseline for DL models on real-world clinical data

DEAP (2012)

EEG + peripheral signals during music videos (emotion labels)

32

32 EEG + 8 peripheral physiological channels

Emotion classification (valence/arousal)

SEED (2015)

EEG during film clips (3 emotion classes)

15

62

Emotion classification

Sleep-EDF (Expanded, 2013)

Whole-night sleep EEG with hypnograms (sleep stages)

42

2 EEG + other

Sleep stage classification

BNCI Horizon 2020 (Launched 2014)

Repository of 28 open-access BCI datasets (MI, SSVEP, P300, etc.)

8–109 (various)

8-64 (various)

BCI benchmark suite

EEG Eye State (UCI, 2013)

EEG during continuous recording with eye open/closed labels

1

14

Simple binary classification (open vs. closed)

Using these datasets, the community has established certain benchmarks—for example, classification accuracy on DEAP two-class emotion, or kappa score on Sleep-EDF for sleep staging, or AUC on TUH for seizure detection—that new AI models strive to improve. It’s important to note that raw performance numbers are not the only consideration; robustness across datasets is also valued. There is a push towards evaluating models on multiple datasets to ensure they generalize (e.g., testing a seizure detector trained on TUH on CHB-MIT data). This has revealed that some models that do well on a single dataset might not generalize without adaptation, highlighting the need for techniques like transfer learning. Benchmarks also include computational efficiency and latency, especially relevant for real-time BCI. For instance, an algorithm might be benchmarked by information transfer rate (bits/min) rather than just accuracy to account for how quickly and confidently it makes predictions. In research papers, however, accuracy and F1-score remain common comparative metrics.

In summary, the availability of rich EEG datasets and organized challenges has accelerated progress. As data resources continue to grow (with initiatives encouraging data sharing), it is expected that AI models will further improve by learning from diverse EEG data encompassing many subjects and conditions. With benchmarks in place, the field can track improvements in a concrete way. Next, this study discusses the persistent challenges that even the best current methods face and then explores future directions aimed at overcoming those challenges.

Despite their widespread use, many public EEG datasets exhibit demographic and geographic biases that constrain the generalizability of trained models. For example, the CHB-MIT Scalp EEG Database is one of the most cited resources for seizure detection, yet it is derived almost exclusively from pediatric patients at a single U.S. hospital, limiting its representativeness across adult populations or non-Western cohorts. Similarly, several emotion recognition and BCI datasets disproportionately sample Western, educated, industrialized, rich, and democratic (WEIRD) populations, overlooking cultural, ethnic, and neurophysiological diversity. Such biases risk producing models that perform well on benchmark datasets but fail to generalize in clinical or global applications. For instance, variations in electrode montages, genetic backgrounds, lifestyle factors, and clinical practices may all affect EEG signals, yet these factors are underrepresented in current datasets. To mitigate these issues, researchers are increasingly advocating for:

•Cross-cultural and multi-site data collection,

•Incorporation of age- and sex-balanced cohorts,

•Development of open multimodal datasets (e.g., EEG-fNIRS and EEG-MRI) that cover broader populations.

Recognizing and addressing this “Western population bias” is essential for building EEG-AI systems that are equitable, clinically reliable, and globally deployable.

5. Challenges and Limitations

Despite considerable advancements, applying AI to EEG data still faces a number of fundamental challenges and limitations. These issues stem both from the nature of EEG signals and from the characteristics of AI models. Recognizing these challenges is important for properly interpreting current results and for directing future research. Key challenges are as follows:

•Noise, artifacts, and data quality: EEG signals are notoriously noisy. They are susceptible to interference from muscle activity (EMG), eye blinks/movements (EOG), electrical noise from equipment, and environmental artifacts. Poor contact or impedance can introduce drift and noise. AI models trained on data with certain noise characteristics may not generalize if the noise changes (for instance, a model may misclassify if the test data have more muscle artifact than the training data). While preprocessing can attenuate artifacts, complete elimination is hard, and aggressive filtering can distort true brain signals. This makes building robust AI difficult. Signal processing methods like ICA or wavelet denoising are commonly used as a front-end to AI models to isolate artifacts [197]. Some DL approaches incorporate artifact removal within the model (e.g., an AE learning to separate brain vs. noise). Nevertheless, data quality remains a bottleneck. Ensuring high-quality EEG (through good hardware and acquisition protocols) and using data augmentation to teach models to handle noise are active areas of work.

•Inter-subject and inter-session variability: Perhaps the biggest challenge for generalized EEG AI is the variability across individuals and even across sessions for the same individual. Each person’s brain anatomy and functional patterns differ, meaning that the same mental state can manifest differently in EEG features across individuals. Moreover, electrode placement variations or slight impedance differences in each session can change the signal. This variability often causes a model trained on one group to perform poorly on another—the classic domain shift problem [198]. Designing subject-independent models is difficult. For example, a deep network might latch onto idiosyncratic features of the training subjects that don’t transfer (e.g., the sharp drop from approximately 93% (10-fold) to approximately 74% (LOSO) in QuadTPat stress detection and from 99.6% to 79.9% in FriendPat epilepsy highlights how subject-specific patterns still dominate handcrafted descriptors, underscoring the need for stronger domain adaptation) [199]. A universal classifier that works for all users without calibration remains unsolved in BCI and other areas. Techniques to address this include adaptive learning (fine-tuning the model on a new subject with a small amount of data), domain adaptation (aligning feature distributions between source and target via adversarial training or transfer components), and meta-learning (training models that can quickly adapt to new subjects) [200], [201]. Some non-deep approaches use normalization of EEG per subject (z-scoring features within each subject) to remove global differences [202]. While many studies have reported high accuracy, it’s often observed in within-subject cross-validation; the drop in cross-subject testing is significant. Overcoming this remains a major research focus because practical EEG applications need models that can be deployed to new users or patients reliably. 
Low cross-subject accuracy has been pointed out as a key limitation hindering real-life use of EEG DL.

•Data scarcity and class imbalance: High-quality labeled EEG data is expensive and time-consuming to collect. Many medical applications suffer from limited data—e.g., collecting thousands of EEGs for a rare disease is challenging, and labeling events like seizures requires expert clinicians. DL models thrive on big data and can otherwise overfit on small datasets. The community has tackled this through data augmentation (as discussed, using techniques like sliding windows and synthetic data generation) and by pooling data (creating consortia for multi-center EEG data). Still, some domains have inherent scarcity (e.g., EEG during a very specific cognitive task). Class imbalance is also common. For example, in seizure detection, most of the EEG is non-seizure, with only brief seizure segments. Models can get biased towards the majority class. Methods like oversampling, focal loss, Synthetic Minority Over-sampling Technique (SMOTE), or generating more minority samples with GANs have been used to mitigate this [203], [204], [205]. Transfer learning from larger unrelated EEG datasets is another approach (e.g., pretrained on a large EEG dataset with surrogate tasks, then fine-tuned on the small target dataset). While these strategies help, limited data remain a core limitation, especially compared to fields like image or text where enormous labeled datasets exist. In addition, regulatory and privacy issues can limit data sharing in clinical contexts, though FL aims to address that. As discussed in Section 6, emerging directions such as FL and self-supervised pretraining aim to mitigate data scarcity by enabling collaborative learning without centralizing data and by utilizing large unlabeled EEG corpora.

•Model interpretability: Most high-performing AI models for EEG (deep neural networks and Transformers) are complex black boxes. This raises concerns in scientific, clinical, and user-facing contexts. For instance, a neurologist might hesitate to trust AI's diagnosis suggestion if the model cannot explain which EEG features led to that conclusion. Lack of interpretability also hampers scientific insight—people might care why a certain brain-state classification is working (to discover neural correlates), not just that it works. Currently, understanding deep EEG models is difficult. There have been attempts such as visualizing CNN filters to see if they align with known EEG rhythms or using saliency maps to highlight parts of EEG that influenced a decision. Some models incorporate attention mechanisms which can be analyzed to infer the importance of time segments or channels. But these are indirect explanations. As it stands, the opaque nature of deep models is a limitation, and interpretability is “particularly crucial” for user trust and ethical use of EEG AI [206], [207]. The black-box issue is also closely linked to generalization: if it is unclear what the model has learned, it cannot be determined whether it is capturing biologically meaningful signals or merely exploiting trivial biases or noise. For example, a model might latch onto power line noise patterns that coincidentally correlate with a class in the training set—it would perform well on similar data but fail elsewhere. Without interpretability, such failure modes are hard to anticipate. This challenge is being increasingly recognized, and research on XAI specific to EEG is emerging (Section 6).

•Robustness and reliability: Beyond accuracy, robustness to various factors is a limitation. AI models can be sensitive to slight changes. For example, a classifier might perform poorly if electrodes are re-referenced differently or if there's a slight timing shift in signals. Adversarial robustness is also a consideration—studies have shown that adding small perturbations to EEG (within a physiological noise range) can fool a deep classifier, which is problematic for security/safety if someone could maliciously interfere with EEG-based systems [207-208]. Models are also often narrow in scope: a network trained to detect seizures might not know how to handle EEG with some other pathology (out-of-distribution data), leading to unpredictable outputs. For BCIs, reliability over time is key—changes in the user’s condition or headset position day to day can degrade performance (the “non-stationarity” problem). Continual learning or periodic recalibration might be needed, but many current models do not support efficient updates without retraining from scratch (if retrained, they might forget old data—the catastrophic forgetting issue). Thus, maintaining performance over time and across conditions is a practical limitation.

•Computational demands: Some advanced models (like Transformers or very deep CNNs) are computationally heavy, with millions of parameters. Training them can require GPUs and a lengthy time, and running them in real time on portable devices (e.g., a wearable EEG) may be infeasible without significant power. While this is a technical limitation that tends to diminish as hardware improves, it’s still relevant. Researchers are investigating model compression (pruning and quantization) and more efficient architectures (like TinyML approaches for EEG) to allow on-device processing, especially for BCIs or mobile health applications [209], [210]. For example, using separable convolutions and smaller kernels (as in EEGNet) can drastically reduce parameter count while keeping good accuracy. Nonetheless, a complicated model might have latency that's too high for an interactive BCI (which often needs less than 100 ms from brain event to action). Therefore, there’s a trade-off between model complexity and deployment feasibility.

•Evaluation and reproducibility issues: Lastly, a meta-challenge is that comparing models across studies can be difficult due to differing evaluation protocols. Some works use within-subject cross-validation, others use leave-one-subject-out, and some use different preprocessing. If not standardized, claims of improvement might be hard to verify. There’s a push for open-source code and standardized benchmarks (Section 4) to ensure fair comparison [211], [212]. Reproducibility is improving thanks to shared datasets, but sometimes details like how EEG is filtered or how deep models are initialized can affect outcomes. This is a community challenge to address by adhering to rigorous validation (e.g., reporting results on held-out datasets, not just on the same data used for training/tuning).

In summary, the challenges outlined above are not isolated obstacles but active drivers of current research. Each limitation maps directly onto the future directions discussed in Section 6. For example, data scarcity motivates FL and SSL, inter-subject variability calls for domain adaptation and meta-learning, interpretability concerns drive XAI, and robustness issues are addressed through adversarial training, multimodal fusion, and continual learning. Establishing this logical loop strengthens the survey’s coherence by showing how today’s obstacles shape tomorrow’s innovations.

6. Future Directions

Looking ahead, the intersection of AI and EEG is poised to continue evolving rapidly. Researchers have been actively exploring new methodologies to tackle the challenges outlined and to open up novel applications. This study highlights several promising future directions and emerging trends that are likely to shape the field in the coming years as follows:

(a) Transformers and advanced deep architectures: Transformers have made a big splash in EEG analysis recently, but this is likely just the beginning. Future work will refine Transformer architectures to better suit EEG. This could involve hybrid models that combine CNNs (for local feature extraction) with Transformer encoders (for global context), or the development of lightweight Transformers that can handle smaller data regimes. There is also interest in pretraining Transformers on large unlabeled EEG datasets using SSL which is analogous to Bidirectional Encoder Representations from Transformers (BERT) in Natural Language Processing (NLP)—for example, forcing it to learn general EEG representations by masking parts of an EEG sequence and training the model to reconstruct them. Such pretraining could then be fine-tuned for specific tasks (seizure detection, BCI, etc.), potentially improving performance when labeled data is scarce. Given Transformers’ success in other domains, it is expected that they will become a staple for EEG tasks requiring long-range temporal modeling or integration of multimodal data. However, making them data-efficient and computationally feasible (perhaps through sparsity, factorized attention, or smaller patch-based models) will be key areas of innovation.

(b) FL and privacy-preserving learning: In clinical EEG especially, privacy is a major concern—patient data cannot always be centralized for training an AI model. FL offers a solution by allowing models to be trained collaboratively across multiple hospitals or devices, without sharing raw data. In FL, each center computes model updates on its local data and only those updates (not the EEG signals) are sent to a central server to be aggregated. This preserves privacy while tapping on data volume. Initial studies have shown that FL can be effective for EEG-based emotion recognition, such as training on data from multiple users or institutions to get a better general model. More applications of FL for building robust EEG models (e.g., a federated network for seizure prediction that learns from patients across different clinics) can be foreseen. There are challenges like handling non-independent and identically distributed (non-iid) data (EEG from different sites may have different characteristics) and communication overhead, but active research is making FL more efficient and robust [213]. In addition, techniques like differential privacy could be applied to ensure that models do not inadvertently leak personal information (like an outlier EEG pattern unique to one individual) [214]. The drive for privacy will also encourage on-device AI for EEG (like running a model directly on a wearable or mobile device), which ties into making models efficient.

(c) Interpretable and XAI for EEG: As highlighted, interpretability is crucial, and a surge in methods to make EEG AI more transparent is expected. This includes developing visualization tools and metrics specifically for EEG networks. For example, techniques to map a deep network’s features back to EEG time-frequency space could help reveal if the model is focusing on known physiological patterns (like alpha oscillations or sleep spindles). A recent direction is concept-based interpretability, where one defines human-understandable concepts (e.g., “delta wave activity” or “frontal asymmetry”) and tests how strongly those concepts influence the model’s decisions. Another is training inherently interpretable models: one idea is a network that outputs intermediate representations that correspond to features experts recognize (such as detecting spikes, then using those to classify epilepsy). Prototype learning is another promising approach—e.g., a network can be designed to learn prototypical EEG patterns for each class, and then classification is based on similarity to these prototypes (which can be visualized as representative signals) [215], [216]. Attention mechanisms also naturally lend some interpretability by indicating which parts of the signal are important. Zhou et al. [207] focused on interpretable and robust AI for EEG, indicating growing interest. It can be predicted that future models, especially for clinical use, will come with “explanation modules”—perhaps using methods like Local Interpretable Model-agnostic Explanations (LIME) or SHapley Additive exPlanations (SHAP) adapted to time-series—to provide reasons for their outputs. This not only builds trust but can potentially lead to scientific discovery (e.g., an AI model “discovering” an EEG biomarker that was not obvious to humans by consistently pointing to a particular pattern that indicates a disease).

A clarification is necessary between interpretability and explainability, which are often used interchangeably in AI literature but differ in scope—particularly for EEG-based applications. Interpretability refers to the degree to which the internal logic of a model is transparent and traceable, for example, whether a linear classifier's decision boundary or a CNN's learned filters can be directly examined. In contrast, explainability emphasizes whether the outputs of a (possibly complex) model can be made understandable to humans, often through post hoc methods such as saliency maps, feature attribution, or scalp-topography visualizations. In EEG-AI, this distinction is crucial: interpretability ensures that the computational process itself can be audited for validity (e.g., verifying whether spectral features used by the model correspond to physiologically plausible patterns), while explainability focuses on translating complex outputs into forms that clinicians, neuroscientists, or end-users can readily comprehend. Maintaining this distinction helps prevent conflating technical model transparency with user-facing understanding, thereby aligning methodological rigor with practical usability.

Beyond generic XAI methods such as saliency maps or SHAP values, several interpretation techniques have emerged that tap into EEG’s spatio-temporal and neurophysiological structure. For example, deep models employing prototype-based methods—like ProtoEEGNet—store representative EEG waveforms and perform classification by comparing new inputs to learned prototypes [217]. Similarly, adaptations of self-attention prototype methods (originally from ECG) applied to EEG for sleep-stage classification reveal prototypical components such as alpha spindles and slow waves, hinting at interpretable biomarkers [218]. Additionally, CNN-based feature visualization pipelines for Multivariate Pattern Analysis (MVPA) allow examination of discriminative activations on a trial-by-trial basis, revealing when and where certain spatial-temporal EEG patterns drive decisions [219]. A broader evaluation of XAI techniques—including saliency mapping, guided backpropagation, integrated gradients, Layer-wise Relevance Propagation (LRP), and more—has led to recommended best practices for presenting model interpretations in EEG-based BCI settings. Further, GNN architectures—which model electrode layouts as graphs—enhance neuroscientific interpretability by illuminating how different electrode regions contribute to decisions [220]. While these EEG-adapted interpretability strategies significantly enhance transparency and trust, several intuitive extensions remain underexplored. Explicit scalp-topography visualizations of model attention or weight maps, frequency-domain attribution (e.g., LRP identifying delta vs. alpha contributions), and ERP-component-based interpretability (e.g., highlighting reliance on P300 or N400 signals) have not yet been robustly demonstrated in the literature. Nonetheless, integrating domain knowledge into XAI remains a crucial frontier—vital for fostering clinically viable and scientifically insightful EEG-AI systems.

(d) Robustness and domain adaptation: Future research will continue tackling the generalization issue. Techniques from domain adaptation and transfer learning will be further refined. One likely direction is meta-learning (learning to learn) where a model is trained on a variety of tasks or subjects and learns a good initialization that can quickly adapt to new ones. This has shown promise in a few BCI studies. Another approach is synthetic-to-real adaptation: using generated EEG data to augment training and then adapting models to real data. People might see more use of adversarial training—not only to defend against malicious attacks but also as a way to make models robust to noise/artifact variations (train the model with adversarial examples of noise so it learns to be invariant). In addition, continuous learning algorithms could allow a deployed EEG model to keep improving as it sees more data from a user, without forgetting past knowledge. Research into architectures that support incremental updates will be valuable (for instance, dynamically expanding networks, or using Bayesian approaches to update weights with new data while preserving old performance).

(e) Multimodal and context-aware EEG analysis: The brain does not operate in isolation, and neither should models if additional data is available. Future AI for EEG may increasingly incorporate other data streams: combining EEG with fNIRS in hybrid BCIs, or with eye-tracking data, or physiological signals like ECG/GSR for emotion recognition [221]. Multimodal models can compensate for weaknesses of one modality (e.g., EEG might be noisy but another signal isn’t, or EEG gives high temporal resolution while fNIRS gives more spatial information). Recent advances have moved beyond simple feature concatenation. Attention-based fusion architectures now allow modality-specific encoders (e.g., CNNs for EEG and temporal convolutions for fNIRS) to interact through cross-attention layers, dynamically weighting each modality according to signal quality. Transformer-based fusion aligns temporal embeddings from EEG and EOG, enabling context-aware selection of relevant signals for tasks such as emotion recognition. Meanwhile, GNNs have been adapted for multimodal learning, where nodes represent EEG channels and fNIRS optodes, and edges encode inter-modality correlations—capturing richer spatial-temporal dependencies. These approaches demonstrate how architectural innovations can explicitly capture complementary information across modalities. DL frameworks are well-suited to learn from multimodal data, and research has shown improved results in emotion recognition when fusing EEG with peripheral signals. Contextual information is another modality of sorts—for instance, knowing the timing of external events (stimuli) can help in analyzing EEG. Future systems might incorporate context through techniques like encoding stimulus features and feeding them alongside EEG into a model (a rudimentary example: combining EEG with a driving simulator’s context to better predict if a brain pattern indicates hazard response or just random distraction). 
Essentially, moving beyond treating EEG in isolation could significantly enhance AI’s interpretative power.

(f) Emerging hardware and on-chip AI: On the hardware side, as dry EEG electrodes and wearable EEG devices become more common, there will be a drive to implement AI on hardware (e.g., chips in headbands). Neuromorphic computing and dedicated EEG processing chips could run simplified spiking neural networks or compressed models in real time with low power [222], [223]. This ties into edge computing for EEG—rather than sending data to the cloud for analysis (which can be slow and raises privacy concerns), the analysis might happen locally on the device. Collaborations between hardware designers and AI researchers might be seen to create models that are co-optimized with EEG sensors (for example, selecting channel subsets on the fly that maximize model confidence, reducing data dimensions). While somewhat outside the traditional scope of AI algorithms, this direction ensures that the sophisticated models being developed can actually be deployed in mobile neurotechnology.

(g) Domain-specific innovations: Various subfields will push their own specialized innovations. For instance, there is interest in adaptive BCIs that can not only decode but also actively query the user or adjust parameters based on confidence (an application of reinforcement learning or active learning). In clinical AI, integrating EEG analysis with electronic health records or imaging could provide more holistic diagnostic models (multimodal in a different sense: EEG + MRI, etc., where AI finds correlations). In addition, the concept of brain state decoding might shift from discrete classification to more fluid tracking, and AI might be used to drive neurofeedback (i.e., closed-loop systems where AI both reads and helps modulate brain activity in real time, perhaps using generative models to suggest stimuli that move the brain towards a desired state).

(h) Standardization and reproducibility efforts: A somewhat meta future direction is the continued development of standards (like EEG-BIDS for data and common frameworks for training/testing models). Tools and libraries (for example, MOABB—mother of all BCI benchmarks—a package that lets people evaluate multiple algorithms on many BCI datasets easily) will be refined, making it easier for researchers to test new models across a battery of datasets [224]. This will encourage the field to favor models that are not just tuned to one dataset but work broadly. It is expected that winning approaches will be those that generalize well, as evidenced by performance on community benchmark platforms (PapersWithCode trends already reflect this as people report state-of-the-art across benchmarks).

In summary, the future of AI in EEG analysis is very promising. More accurate, robust, and transparent models that can be deployed in real-world settings can be foreseen. The synergy between new algorithmic developments (like Transformers and FL) and the unique demands of EEG (nonstationarity, individual differences, and need for interpretability) will define the next wave of research. If successful, these future advances will not only solve technical challenges but also unlock new applications, ranging from brain-monitoring wearables for wellness to brain-controlled smart environments and AI-assisted neurological diagnosis that is faster and more personalized. The ultimate vision is a new generation of cognitive neurodynamic technologies—powered by AI—that seamlessly integrate with human brain function for health and enhancement, all while being reliable and understood by their users.

7. Conclusion

AI has become an indispensable tool for EEG signal analysis, bringing significant improvements in automation and accuracy to a domain historically reliant on manual inspection and handcrafted methods. This survey traced the development of AI techniques for EEG, from early ML classifiers to state-of-the-art DL models. In addition, this study reviewed how these techniques are applied across various tasks (classification, regression, and generation) and algorithms (SVMs, neural networks, Transformers, etc.) and examined their use in key application areas, including medical diagnosis, emotion recognition, BCIs, and beyond. Recent years have seen DL approaches achieve remarkable success on benchmark datasets—for example, convolutional networks detecting seizures or classifying MI with high accuracy—demonstrating the potential for AI to decode the complex patterns in EEG that underpin cognitive and clinical phenomena.

However, this review also emphasized that challenges remain. EEG data pose unique difficulties such as noise, inter-subject variability, and limited availability of labeled data, which can constrain model performance and generalization. Furthermore, issues of interpretability and trust in AI decisions are especially pronounced in neurophysiological contexts. These challenges underscore that raw performance metrics, while important, are not the sole determinant of a method’s utility in practice. Robustness, transparency, and the ability to handle real-world variability are equally critical benchmarks that the next generation of EEG AI methods must meet. Encouragingly, the field is actively responding to these challenges. Emerging trends such as Transformer-based architectures are opening new frontiers in performance, FL is addressing data privacy and scarcity by enabling collaborative model training, and XAI techniques are beginning to peel back the curtain on black-box models to reveal the brain features driving their decisions. At the same time, the continued expansion of public EEG datasets and competitions is fostering a more rigorous and comparative research environment, accelerating progress. Future advances may well come from interdisciplinary collaboration—combining insights from neuroscience (e.g., knowledge of brain networks) with novel ML strategies (e.g., GNNs or SSL) to create models that are both powerful and physiologically interpretable.

In conclusion, the synergy of AI and EEG has greatly advanced the ability to interpret the electrical language of the brain. What began with modest ML experiments has evolved into sophisticated DL systems that can discern intricate neural patterns, often in real time. This progress holds immense promise: more effective clinical diagnostics for neurological conditions, more immersive and reliable brain-controlled devices, and deeper scientific understanding of brain function through data-driven discovery. Achieving these promises will require ongoing efforts to surmount current limitations and ensure that AI models are used judiciously and transparently in sensitive applications. The trajectory of recent research is highly encouraging—with each passing year, AI algorithms become more adept at handling EEG’s complexities, and EEG data science becomes more integrated with mainstream AI developments.

Looking ahead, new opportunities may arise beyond conventional clinical and cognitive domains. Extreme environments such as space missions, deep-sea exploration, or high-risk industrial operations demand continuous monitoring of human performance under stress, fatigue, or altered physiology. AI-powered EEG systems could provide real-time neurocognitive assessment for astronauts adapting to microgravity, divers facing high-pressure conditions, or workers in hazardous industries where safety depends on vigilance. Developing such systems will require robust models that can withstand noisy conditions, adapt across individuals, and operate on portable hardware. These cross-domain applications highlight the potential of EEG-AI not only as a clinical or research tool but also as a cornerstone of human-machine integration in the most challenging contexts. It is expected that the coming years will solidify the role of AI-powered EEG analysis as a cornerstone of cognitive neurodynamics research and its translational applications, ultimately enabling technologies that can beneficially interface human brains with the digital world in ways once confined to science fiction.

Data Availability

The data used to support the findings of this study are available from the corresponding author upon request.

Conflicts of Interest

The authors declare that they have no conflicts of interest.

References
@article{1,
  author  = {M. Teplan},
  title   = {Fundamentals of {EEG} measurement},
  journal = {Meas. Sci. Rev.},
  volume  = {2},
  number  = {2},
  pages   = {1--11},
  year    = {2002},
}

@book{2,
  editor    = {Schomer, D. L. and {Lopes da Silva}, F. H.},
  title     = {Niedermeyer's Electroencephalography: Basic Principles, Clinical Applications, and Related Fields},
  edition   = {7},
  publisher = {Oxford University Press},
  year      = {2017},
  doi       = {10.1093/med/9780190228484.001.0001},
}

@article{3,
  author  = {W. Mumtaz and S. Rasheed and A. Irfan},
  title   = {Review of challenges associated with the {EEG} artifact removal methods},
  journal = {Biomed. Signal Process. Control},
  volume  = {68},
  pages   = {102741},
  year    = {2021},
  doi     = {10.1016/j.bspc.2021.102741},
}

@article{4,
  author  = {N. K. Al-Qazzaz and A. A. Aldoori and S. H. B. M. Ali and S. A. Ahmad and A. K. Mohammed and M. I. Mohyee},
  title   = {{EEG} signal complexity measurements to enhance {BCI}-based stroke patients' rehabilitation},
  journal = {Sensors},
  volume  = {23},
  number  = {8},
  pages   = {3889},
  year    = {2023},
  doi     = {10.3390/s23083889},
}

@article{5,
  author  = {A. Craik and Y. He and J. L. Contreras-Vidal},
  title   = {Deep learning for electroencephalogram ({EEG}) classification tasks: {A} review},
  journal = {J. Neural Eng.},
  volume  = {16},
  number  = {3},
  pages   = {031001},
  year    = {2019},
  doi     = {10.1088/1741-2552/ab0ab5},
}

@article{6,
  author  = {Y. Roy and H. Banville and I. Albuquerque and A. Gramfort and T. H. Falk and J. Faubert},
  title   = {Deep learning-based electroencephalography analysis: {A} systematic review},
  journal = {J. Neural Eng.},
  volume  = {16},
  number  = {5},
  pages   = {051001},
  year    = {2019},
  doi     = {10.1088/1741-2552/ab260c},
}

@article{7,
  author  = {R. T. Schirrmeister and J. T. Springenberg and L. D. J. Fiederer and M. Glasstetter and K. Eggensperger and M. Tangermann and F. Hutter and W. Burgard and T. Ball},
  title   = {Deep learning with convolutional neural networks for {EEG} decoding and visualization},
  journal = {Hum. Brain Mapp.},
  volume  = {38},
  number  = {11},
  pages   = {5391--5420},
  year    = {2017},
  doi     = {10.1002/hbm.23730},
}
@article{8,
  author  = {H. Zhang and Q. Q. Zhou and H. Chen and X. Q. Hu and W. G. Li and Y. Bai and J. X. Han and Y. Wang and Z. H. Liang and D. Chen and F. Y. Cong and J. Q. Yan and X. L. Li},
  title   = {The applied principles of {EEG} analysis methods in neuroscience and clinical neurology},
  journal = {Mil. Med. Res.},
  volume  = {10},
  number  = {1},
  pages   = {67},
  year    = {2023},
  doi     = {10.1186/s40779-023-00502-7},
}

@article{9,
  author  = {M. Saeidi and W. Karwowski and F. V. Farahani and K. Fiok and R. Taiar and P. A. Hancock and A. Al-Juaid},
  title   = {Neural decoding of {EEG} signals with machine learning: {A} systematic review},
  journal = {Brain Sci.},
  volume  = {11},
  number  = {11},
  pages   = {1525},
  year    = {2021},
  doi     = {10.3390/brainsci11111525},
}

@article{10,
  author  = {K. AlSharabi and Y. Bin Salamah and A. M. Abdurraqeeb and M. Aljalal and F. A. Alturki},
  title   = {{EEG} Signal Processing for {Alzheimer's} Disorders Using Discrete Wavelet Transform and Machine Learning Approaches},
  journal = {IEEE Access},
  volume  = {10},
  pages   = {89781--89797},
  year    = {2022},
  doi     = {10.1109/ACCESS.2022.3198988},
}

@article{11,
  author  = {K. AlSharabi and Y. B. Salamah and M. Aljalal and A. M. Abdurraqeeb and F. A. Alturki},
  title   = {{EEG}-based clinical decision support system for {Alzheimer's} disorders diagnosis using {EMD} and deep learning techniques},
  journal = {Front. Hum. Neurosci.},
  volume  = {17},
  pages   = {1190203},
  year    = {2023},
  doi     = {10.3389/fnhum.2023.1190203},
}

@article{12,
  author  = {J. P. Carvajal-Dossman and L. Guio and D. García-Orjuela and J. J. Guzmán-Porras and K. Garces and A. Naranjo and S. J. Maradei-Anaya and J. Duitama},
  title   = {Retraining and evaluation of machine learning and deep learning models for seizure classification from {EEG} data},
  journal = {Sci. Rep.},
  volume  = {15},
  number  = {1},
  pages   = {15345},
  year    = {2025},
  doi     = {10.1038/s41598-025-98389-y},
}

@article{13,
  author  = {X. Y. Zhang and Z. Y. Ma and H. J. Zheng and T. K. Li and K. X. Chen and X. Wang and C. T. Liu and L. X. Xu and X. H. Wu and D. R. Lin and H. Lin},
  title   = {The combination of brain-computer interfaces and artificial intelligence: Applications and challenges},
  journal = {Ann. Transl. Med.},
  volume  = {8},
  number  = {11},
  pages   = {712},
  year    = {2020},
  doi     = {10.21037/atm.2019.11.109},
}

@book{14,
  author    = {C. S. Nayak and A. C. Anilkumar},
  title     = {{EEG} Normal Waveforms},
  publisher = {StatPearls Publishing},
  year      = {2025},
  url       = {http://www.ncbi.nlm.nih.gov/books/NBK539805/},
}
@article{15,
  author  = {X. Jiang and G. B. Bian and Z. Tian},
  title   = {Removal of Artifacts from {EEG} Signals: {A} Review},
  journal = {Sensors},
  volume  = {19},
  number  = {5},
  pages   = {987},
  year    = {2019},
  doi     = {10.3390/s19050987},
}

@article{16,
  author  = {G. Ouyang and Y. Li},
  title   = {Protocol for semi-automatic {EEG} preprocessing incorporating independent component analysis and principal component analysis},
  journal = {STAR Protoc.},
  volume  = {6},
  number  = {1},
  pages   = {103682},
  year    = {2025},
  doi     = {10.1016/j.xpro.2025.103682},
}

@article{17,
  author  = {A. Gevins and M. E. Smith and L. K. McEvoy and H. Leong and J. Le},
  title   = {Electroencephalographic imaging of higher brain function},
  journal = {Phil. Trans. R. Soc. Lond. B},
  volume  = {354},
  number  = {1387},
  pages   = {1125--1134},
  year    = {1999},
  doi     = {10.1098/rstb.1999.0468},
}

@article{18,
  author  = {G. A. Light and L. E. Williams and F. Minow and J. Sprock and A. Rissling and R. Sharp and D. L. Braff},
  title   = {Electroencephalography ({EEG}) and event-related potentials ({ERPs}) with human participants},
  journal = {Curr. Protoc. Neurosci.},
  volume  = {52},
  number  = {1},
  pages   = {6--25},
  year    = {2010},
  doi     = {10.1002/0471142301.ns0625s52},
}

@article{19,
  author  = {B. Jiao and R. H. Li and H. Zhou and K. Q. Qing and H. Liu and H. F. Pan and Y. Q. Lei and W. J. Fu and X. A. Wang and X. W. Xiao and X. X. Liu and Q. J. Yang and X. X. Liao and Y. F. Zhou and L. J. Fang and Y. B. Dong and Y. H. Yang and H. Y. Jiang and S. Huang and L. Shen},
  title   = {Neural biomarker diagnosis and prediction to mild cognitive impairment and {Alzheimer's} disease using {EEG} technology},
  journal = {Alz. Res. Therapy},
  volume  = {15},
  pages   = {32},
  year    = {2023},
  doi     = {10.1186/s13195-023-01181-1},
}

@article{20,
  author  = {N. Z. Ndaro and S.-Y. Wang},
  title   = {Effects of Fatigue Based on Electroencephalography Signal during Laparoscopic Surgical Simulation},
  journal = {Minim. Invasive Surg.},
  volume  = {2018},
  number  = {1},
  pages   = {2389158},
  year    = {2018},
  doi     = {10.1155/2018/2389158},
}

@article{21,
  author  = {S. Gannouni and A. Aledaily and K. Belwafi and H. Aboalsamh},
  title   = {Emotion detection using electroencephalography signals and a zero-time windowing-based epoch estimation and relevant electrode identification},
  journal = {Sci. Rep.},
  volume  = {11},
  number  = {1},
  pages   = {7071},
  year    = {2021},
  doi     = {10.1038/s41598-021-86345-5},
}

@article{22,
  author  = {S. Chikhi and N. Matton and S. Blanchet},
  title   = {{EEG} power spectral measures of cognitive workload: {A} meta-analysis},
  journal = {Psychophysiology},
  volume  = {59},
  number  = {6},
  pages   = {e14009},
  year    = {2022},
  doi     = {10.1111/psyp.14009},
}

@article{23,
  author  = {I. Lambert and L. Peter-Derex},
  title   = {Spotlight on sleep stage classification based on {EEG}},
  journal = {Nat. Sci. Sleep},
  volume  = {15},
  pages   = {479--490},
  year    = {2023},
  doi     = {10.2147/NSS.S401270},
}

@incollection{24,
  author    = {A. K. Patel and V. Reddy and K. R. Shumway and J. F. Araujo},
  title     = {Physiology, sleep stages},
  booktitle = {StatPearls},
  publisher = {StatPearls Publishing},
  year      = {2024},
  url       = {http://www.ncbi.nlm.nih.gov/books/NBK526132/},
}
@article{25, title = {Current Status, Challenges, and Possible Solutions of {EEG}-Based Brain-Computer Interface: {A} Comprehensive Review}, author = {M. Rashid and N. Sulaiman and A. PP Abdul Majeed and R. M. Musa and A. F. Ab. Nasir and B. S. Bari and S. Khatun}, journal = {Front. Neurorob.}, volume = {14}, pages = {25}, year = {2020}, doi = {10.3389/fnbot.2020.00025}}
@article{26, title = {Neurofeedback: {A} comprehensive review on system design, methodology and clinical applications}, author = {H. Marzbani and H. R. Marateb and M. Mansourian}, journal = {Basic Clin. Neurosci.}, volume = {7}, number = {2}, pages = {143--158}, year = {2016}, doi = {10.15412/J.BCN.03070208}}
@article{27, title = {Advances in {P300} brain--computer interface spellers: Toward paradigm design and performance evaluation}, author = {J. H. Pan and X. N. Chen and N. M. Ban and J. S. He and J. Y. Chen and H. Y. Huang}, journal = {Front. Hum. Neurosci.}, volume = {16}, pages = {1077717}, year = {2022}, doi = {10.3389/fnhum.2022.1077717}}
@article{28, title = {{EEG}-Based {BCIs} on Motor Imagery Paradigm Using Wearable Technologies: {A} Systematic Review}, author = {A. Saibene and M. Caglioni and S. Corchs and F. Gasparini}, journal = {Sensors}, volume = {23}, number = {5}, pages = {2798}, year = {2023}, doi = {10.3390/s23052798}}
@article{29, title = {A review of classification algorithms for {EEG}-based brain--computer interfaces}, author = {F. Lotte and M. Congedo and A. L{\'e}cuyer and F. Lamarche and B. Arnaldi}, journal = {J. Neural Eng.}, volume = {4}, number = {2}, pages = {R1}, year = {2007}, doi = {10.1088/1741-2560/4/2/R01}}
@article{30, title = {Support-vector networks}, author = {C. Cortes and V. Vapnik}, journal = {Mach. Learn.}, volume = {20}, number = {3}, pages = {273--297}, year = {1995}, doi = {10.1007/BF00994018}}
@article{31, title = {Optimal spatial filtering of single trial {EEG} during imagined hand movement}, author = {H. Ramoser and J. Muller-Gerking and G. Pfurtscheller}, journal = {IEEE Trans. Rehab. Eng.}, volume = {8}, number = {4}, pages = {441--446}, year = {2000}, doi = {10.1109/86.895946}}
@article{32, title = {{LEDPatNet19}: Automated Emotion Recognition Model based on Nonlinear {LED} Pattern Feature Extraction Function using {EEG} Signals}, author = {T. Tuncer and S. Dogan and A. Subasi}, journal = {Cogn. Neurodyn.}, volume = {16}, number = {4}, pages = {779--790}, year = {2022}, doi = {10.1007/s11571-021-09748-0}}
@article{33, title = {{EEG} signal classification using wavelet feature extraction and a mixture of expert model}, author = {A. Subasi}, journal = {Expert Syst. Appl.}, volume = {32}, number = {4}, pages = {1084--1093}, year = {2007}, doi = {10.1016/j.eswa.2006.02.005}}
@article{34, title = {A tutorial review of functional connectivity analysis methods and their interpretational pitfalls}, author = {A. M. Bastos and J.-M. Schoffelen}, journal = {Front. Syst. Neurosci.}, volume = {9}, pages = {175}, year = {2016}, doi = {10.3389/fnsys.2015.00175}}
@article{35, title = {Random forests}, author = {L. Breiman}, journal = {Mach. Learn.}, volume = {45}, number = {1}, pages = {5--32}, year = {2001}, doi = {10.1023/A:1010933404324}}
@article{36, title = {Gradient-based learning applied to document recognition}, author = {Y. Lecun and L. Bottou and Y. Bengio and P. Haffner}, journal = {Proc. IEEE}, volume = {86}, number = {11}, pages = {2278--2324}, year = {1998}, doi = {10.1109/5.726791}}
@inproceedings{37, title = {{ChronoNet}: {A} Deep Recurrent Neural Network for Abnormal {EEG} Identification}, author = {S. Roy and I. Kiral-Kornek and S. Harrer}, editor = {D. Ria{\~n}o and S. Wilk and A. ten Teije}, booktitle = {Artificial Intelligence in Medicine}, series = {Lecture Notes in Computer Science}, volume = {11526}, pages = {47--56}, year = {2019}, address = {Cham, Switzerland}, doi = {10.1007/978-3-030-21642-9_8}}
@article{38, title = {{EEGNet}: {A} compact convolutional neural network for {EEG}-based brain--computer interfaces}, author = {V. J. Lawhern and A. J. Solon and N. R. Waytowich and S. M. Gordon and C. P. Hung and B. J. Lance}, journal = {J. Neural Eng.}, volume = {15}, number = {5}, pages = {056013}, year = {2018}, doi = {10.1088/1741-2552/aace8c}}
@article{39, title = {Learning Representations from {EEG} with Deep Recurrent-Convolutional Neural Networks}, author = {P. Bashivan and I. Rish and M. Yeasin and N. Codella}, journal = {arXiv}, year = {2015}, doi = {10.48550/arXiv.1511.06448}}
@article{40, title = {Long short-term memory}, author = {S. Hochreiter and J. Schmidhuber}, journal = {Neural Comput.}, volume = {9}, number = {8}, pages = {1735--1780}, year = {1997}, doi = {10.1162/neco.1997.9.8.1735}}
@article{41, title = {On the Properties of Neural Machine Translation: Encoder-Decoder Approaches}, author = {K. Cho and B. van Merrienboer and D. Bahdanau and Y. Bengio}, journal = {arXiv}, year = {2014}, doi = {10.48550/arXiv.1409.1259}}
@article{42, title = {Enhancing {EEG} signals classification using {LSTM-CNN} architecture}, author = {S. M. Omar and M. Kimwele and A. Olowolayemo and D. M. Kaburu}, journal = {Eng. Rep.}, volume = {6}, number = {9}, pages = {e12827}, year = {2024}, doi = {10.1002/eng2.12827}}
@article{43, title = {{EEGformer}: {A} transformer-based brain activity classification method using {EEG} signal}, author = {Z. J. Wan and M. Y. Li and S. C. Liu and J. J. Huang and H. Tan and W. F. Duan}, journal = {Front. Neurosci.}, volume = {17}, pages = {1148855}, year = {2023}, doi = {10.3389/fnins.2023.1148855}}
@article{44, title = {Attention Is All You Need}, author = {A. Vaswani and N. Shazeer and N. Parmar and J. Uszkoreit and L. Jones and A. N. Gomez and {\L}. Kaiser and I. Polosukhin}, journal = {arXiv}, year = {2017}, doi = {10.48550/arXiv.1706.03762}}
@article{45, title = {Transformers in {EEG} Analysis: {A} review of architectures and applications in motor imagery, seizure, and emotion classification}, author = {E. Vafaei and M. Hosseini}, journal = {Sensors}, volume = {25}, number = {5}, pages = {1293}, year = {2025}, doi = {10.3390/s25051293}}
@article{46, title = {{MP-SeizNet}: {A} multi-path {CNN} {Bi-LSTM} Network for seizure-type classification using {EEG}}, author = {H. Albaqami and G. M. Hassan and A. Datta}, journal = {Biomed. Signal Process. Control}, volume = {84}, pages = {104780}, year = {2023}, doi = {10.1016/j.bspc.2023.104780}}
@article{47, title = {{EEG}-based Brain-Computer Interfaces: {An} Overview of Basic Concepts and Clinical Applications in Neurorehabilitation}, author = {S. Machado and F. Ara{\'u}jo and F. Paes and B. Velasques and M. Cunha and H. Budde and L. F. Basile and R. Anghinah and O. Arias-Carri{\'o}n and M. Cagy and R. Piedade and T. A. de Graaf and A. T. Sack and P. Ribeiro}, journal = {Rev. Neurosci.}, volume = {21}, number = {6}, pages = {451--468}, year = {2010}, doi = {10.1515/REVNEURO.2010.21.6.451}}
@article{48, title = {A survey on robots controlled by motor imagery brain-computer interfaces}, author = {J. Zhang and M. Wang}, journal = {Cogn. Robot.}, volume = {1}, pages = {12--24}, year = {2021}, doi = {10.1016/j.cogr.2021.02.001}}
@article{49, title = {Optimizing Motor Imagery Parameters for Robotic Arm Control by Brain-Computer Interface}, author = {{\"U}. Hayta and D. C. Irimia and C. Guger and {\.{I}}. Erkutlu and {\.{I}}. H. G{\"u}zelbey}, journal = {Brain Sci.}, volume = {12}, number = {7}, pages = {833}, year = {2022}, doi = {10.3390/brainsci12070833}}
@article{50, title = {Trends in {EEG} signal feature extraction applications}, author = {A. K. Singh and S. Krishnan}, journal = {Front. Artif. Intell.}, volume = {5}, pages = {1072801}, year = {2023}, doi = {10.3389/frai.2022.1072801}}
@article{51, title = {Machine learning techniques for electroencephalogram based brain-computer interface: {A} systematic literature review}, author = {Pawan and R. Dhiman}, journal = {Meas. Sens.}, volume = {28}, pages = {100823}, year = {2023}, doi = {10.1016/j.measen.2023.100823}}
@article{52, title = {Development of real-time brain-computer interface control system for robot}, author = {Y. An and J. Wong and S. H. Ling}, journal = {Appl. Soft Comput.}, volume = {159}, pages = {111648}, year = {2024}, doi = {10.1016/j.asoc.2024.111648}}
@article{53, title = {Explainable artificial intelligence approaches for brain-computer interfaces: {A} review and design space}, author = {P. Rajpura and H. Cecotti and Y. K. Meena}, journal = {J. Neural Eng.}, volume = {21}, number = {4}, pages = {041003}, year = {2024}, doi = {10.1088/1741-2552/ad6593}}
@article{54, title = {Interpretable and robust {AI} in {EEG} systems: {A} survey}, author = {X. L. Zhou and C. Y. Liu and J. N. Zhou and Z. R. Wang and L. M. Zhai and Z. Y. Jia and C. T. Guan and Y. Liu}, journal = {arXiv}, year = {2023}, doi = {10.48550/arXiv.2304.10755}}
@article{55, title = {A review of epileptic seizure detection using machine learning classifiers}, author = {M. K. Siddiqui and R. Morales-Menendez and X. Huang and N. Hussain}, journal = {Brain Inf.}, volume = {7}, number = {1}, pages = {5}, year = {2020}, doi = {10.1186/s40708-020-00105-1}}
@article{56, title = {A p300-detection method based on logistic regression and a convolutional neural network}, author = {Q. Li and Y. Wu and Y. Yu and D. Zhao and M. Q. Sun and Z. L. Zhang and J. L. Wu}, journal = {Front. Comput. Neurosci.}, volume = {16}, pages = {909553}, year = {2022}, doi = {10.3389/fncom.2022.909553}}
@article{57, title = {Evaluation of machine learning algorithms for classification of {EEG} signals}, author = {F. J. Ram{\'i}rez-Arias and E. E. Garc{\'i}a-Guerrero and E. Tlelo-Cuautle and J. M. Colores-Vargas and E. Garc{\'i}a-Canseco and O. R. L{\'o}pez-Bonilla and G. M. Galindo-Aldana and E. Inzunza-Gonz{\'a}lez}, journal = {Technologies}, volume = {10}, number = {4}, pages = {79}, year = {2022}, doi = {10.3390/technologies10040079}}
@article{58, title = {{EEG} datasets for seizure detection and prediction---{A} review}, author = {S. Wong and A. Simmons and J. Rivera-Villicana and S. Barnett and S. Sivathamboo and P. Perucca and Z. Y. Ge and P. Kwan and L. Kuhlmann and R. Vasa and K. Mouzakis and T. J. O'Brien}, journal = {Epilepsia Open}, volume = {8}, number = {2}, pages = {252--267}, year = {2023}, doi = {10.1002/epi4.12704}}
@article{59, title = {A comprehensive review of deep learning in {EEG}-based emotion recognition: Classifications, trends, and practical implications}, author = {W. Z. Ma and Y. J. Zheng and T. H. Li and Z. P. Li and Y. Li and L. J. Wang}, journal = {PeerJ Comput. Sci.}, volume = {10}, pages = {e2065}, year = {2024}, doi = {10.7717/peerj-cs.2065}}
@article{60, title = {A novel {AI}-driven {EEG} generalized classification model for cross-subject and cross-scene analysis}, author = {J. J. Li and C. H. Lee and Y. H. Zhou and T. G. Liu and T. P. Jung and X. L. Wan and D. N. Duan and D. Wen}, journal = {Adv. Eng. Inform.}, volume = {63}, pages = {102971}, year = {2025}, doi = {10.1016/j.aei.2024.102971}}
@article{61, title = {A Depth of Anaesthesia Index from Linear Regression of {EEG} Parameters}, author = {A. Kumar and S. Anand}, journal = {J. Clin. Monit. Comput.}, volume = {20}, number = {2}, pages = {67--73}, year = {2006}, doi = {10.1007/s10877-005-9004-x}}
@article{62, title = {A regression method for {EEG}-based cross-dataset fatigue detection}, author = {D. Y. Yuan and J. W. Yue and X. F. Xiong and Y. B. Jiang and P. Zan and C. Y. Li}, journal = {Front. Physiol.}, volume = {14}, pages = {1196919}, year = {2023}, doi = {10.3389/fphys.2023.1196919}}
@article{63, title = {Comparison of {LSTM}- and {GRU}-Type {RNN} Networks for Attention and Meditation Prediction on Raw {EEG} Data from Low-Cost Headsets}, author = {F. Rivas and J. E. Sierra-Garcia and J. M. Camara}, journal = {Electronics}, volume = {14}, number = {4}, pages = {707}, year = {2025}, doi = {10.3390/electronics14040707}}
@inproceedings{64, title = {Generative Adversarial Networks}, author = {I. J. Goodfellow and J. Pouget-Abadie and M. Mirza and B. Xu and D. Warde-Farley and S. Ozair and A. Courville and Y. Bengio}, booktitle = {Advances in Neural Information Processing Systems}, year = {2014}, url = {https://proceedings.neurips.cc/paper_files/paper/2014/file/f033ed80deb0234979a61f95710dbe25-Paper.pdf}}
@article{65, title = {Auto-Encoding Variational Bayes}, author = {D. P. Kingma and M. Welling}, journal = {arXiv}, year = {2013}, doi = {10.48550/arXiv.1312.6114}}
@article{66, title = {Generative adversarial networks in {EEG} analysis: An overview}, author = {A. G. Habashi and A. M. Azab and S. Eldawlatly and G. M. Aly}, journal = {J. Neuroeng. Rehabil.}, volume = {20}, number = {1}, pages = {40}, year = {2023}, doi = {10.1186/s12984-023-01169-w}}
@article{67, title = {Virtual Electroencephalogram Acquisition: {A} Review on Electroencephalogram Generative Methods}, author = {Z. S. You and Y. Z. Guo and X. L. Zhang and Y. F. Zhao}, journal = {Sensors}, volume = {25}, number = {10}, pages = {3178}, year = {2025}, doi = {10.3390/s25103178}}
@article{68, title = {{EEGGAN-Net}: Enhancing {EEG} signal classification through data augmentation}, author = {J. X. Song and Q. Zhai and C. Wang and J. Z. Liu}, journal = {Front. Hum. Neurosci.}, volume = {18}, pages = {1430086}, year = {2024}, doi = {10.3389/fnhum.2024.1430086}}
@article{69, title = {{ATGAN}: Attention-based temporal {GAN} for {EEG} data augmentation in personal identification}, author = {S. Zhang and L. Sun and X. Q. Mao and M. Zhao and Y. D. Hu}, journal = {EURASIP J. Adv. Signal Process.}, volume = {2024}, number = {1}, pages = {94}, year = {2024}, doi = {10.1186/s13634-024-01188-2}}
@article{70, title = {Synthetic {ALS-EEG} Data Augmentation for {ALS} Diagnosis Using Conditional {WGAN} with Weight Clipping}, author = {A. Mutlu and {\c{S}}. Do{\u{g}}an and T. Tuncer}, journal = {arXiv}, year = {2025}, doi = {10.48550/arXiv.2506.16243}}
@article{71, title = {Domain-Specific Denoising Diffusion Probabilistic Models for Brain Dynamics}, author = {Y. Q. Duan and J. Z. Zhou and Z. Wang and Y.-C. Chang and Y.-K. Wang and C.-T. Lin}, journal = {arXiv}, year = {2023}, doi = {10.48550/arXiv.2305.04200}}
@article{72, title = {Survey on the research direction of {EEG}-based signal processing}, author = {C. Z. Sun and C. Z. Mou}, journal = {Front. Neurosci.}, volume = {17}, pages = {1203059}, year = {2023}, doi = {10.3389/fnins.2023.1203059}}
@article{73, title = {Cross-subject {EEG} emotion recognition using multi-source domain manifold feature selection}, author = {Q. S. She and X. S. Shi and F. Fang and Y. L. Ma and Y. C. Zhang}, journal = {Comput. Biol. Med.}, volume = {159}, pages = {106860}, year = {2023}, doi = {10.1016/j.compbiomed.2023.106860}}
@article{74, title = {Data augmentation for {EEG}-based emotion recognition using generative adversarial networks}, author = {G. C. Bao and B. Yan and L. Tong and J. Shu and L. Y. Wang and K. Yang and Y. Zeng}, journal = {Front. Comput. Neurosci.}, volume = {15}, pages = {723843}, year = {2021}, doi = {10.3389/fncom.2021.723843}}
@article{75, title = {Data augmentation strategies for {EEG}-based motor imagery decoding}, author = {O. George and R. Smith and P. Madiraju and N. Yahyasoltani and S. I. Ahamed}, journal = {Heliyon}, volume = {8}, number = {8}, pages = {e10240}, year = {2022}, doi = {10.1016/j.heliyon.2022.e10240}}
@article{76, title = {Generating realistic neurophysiological time series with denoising diffusion probabilistic models}, author = {J. Vetter and J. H. Macke and R. Gao}, journal = {Patterns}, volume = {5}, number = {9}, pages = {101047}, year = {2024}, doi = {10.1016/j.patter.2024.101047}}
@article{77, title = {{EEGDfus}: {A} Conditional Diffusion Model for Fine-Grained {EEG} Denoising}, author = {X. Y. Huang and C. Li and A. P. Liu and R. B. Qian and X. Chen}, journal = {IEEE J. Biomed. Health Inform.}, volume = {29}, number = {4}, pages = {2557--2569}, year = {2025}, doi = {10.1109/JBHI.2024.3504717}}
@article{78, title = {Improving {EEG} Classification Through Randomly Reassembling Original and Generated Data with Transformer-based Diffusion Models}, author = {M. Z. Chen and Y. Y. Gui and Y. Q. Su and Y. S. Zhu and G. B. Luo and Y. C. Yang}, journal = {arXiv}, year = {2024}, doi = {10.48550/arXiv.2407.20253}}
@article{79, title = {Neurophysiological data augmentation for {EEG-fNIRS} multimodal features based on a denoising diffusion probabilistic model}, author = {L. Chen and Z. Yin and X. L. Gu and X. W. Zhang and X. S. Cao and C. J. Zhang and X. O. Li}, journal = {Comput. Methods Programs Biomed.}, volume = {261}, pages = {108594}, year = {2025}, doi = {10.1016/j.cmpb.2025.108594}}
@article{80, title = {Synthesizing {EEG} Signals from Event-Related Potential Paradigms with Conditional Diffusion Models}, author = {G. Klein and P. Guetschel and G. Silvestri and M. Tangermann}, journal = {arXiv}, year = {2024}, doi = {10.3217/978-3-99161-014-4-077}}
@article{81, title = {{EEGDM}: {EEG} Representation Learning via Generative Diffusion Model}, author = {J. H. Puah and S. K. Goh and Z. Zhang and Z. Ye and C. K. Chan and K. S. Lim and S. L. Fong and K. S. Woon and C. T. Guan}, journal = {arXiv}, year = {2025}, doi = {10.48550/arXiv.2508.14086}}
@article{82, title = {{EEG} microstate sequences from different clustering algorithms are information-theoretically invariant}, author = {F. Von Wegner and P. Knaut and H. Laufs}, journal = {Front. Comput. Neurosci.}, volume = {12}, pages = {70}, year = {2018}, doi = {10.3389/fncom.2018.00070}}
@article{83, title = {A comprehensive study of auto-encoders for anomaly detection: Efficiency and trade-offs}, author = {A. A. Neloy and M. Turgeon}, journal = {Mach. Learn. Appl.}, volume = {17}, pages = {100572}, year = {2024}, doi = {10.1016/j.mlwa.2024.100572}}
@article{84, title = {{EEG} signal classification using {PCA}, {ICA}, {LDA} and support vector machines}, author = {A. Subasi and M. Ismail Gursoy}, journal = {Expert Syst. Appl.}, volume = {37}, number = {12}, pages = {8659--8666}, year = {2010}, doi = {10.1016/j.eswa.2010.06.065}}
@inproceedings{85, title = {{kNN} and {SVM} Classification for {EEG}: {A} Review}, author = {M. N. A. H. Sha'abani and N. Fuad and N. Jamal and M. F. Ismail}, booktitle = {InECCE2019}, series = {Lecture Notes in Electrical Engineering}, volume = {632}, pages = {555--565}, year = {2020}, publisher = {Springer}, doi = {10.1007/978-981-15-2317-5_47}}
@article{86, title = {Applying machine learning {EEG} signal classification to emotion-related brain anticipatory activity}, author = {M. Bilucaglia and G. M. Duma and G. Mento and L. Semenzato and P. E. Tressoldi}, journal = {F1000Res.}, volume = {9}, pages = {173}, year = {2021}, doi = {10.12688/f1000research.22202.3}}
@article{87, title = {{EEG}-based emotion recognition using tunable {Q} wavelet transform and rotation forest ensemble classifier}, author = {A. Subasi and T. Tuncer and S. Dogan and D. Tanko and U. Sakoglu}, journal = {Biomed. Signal Process. Control}, volume = {68}, pages = {102648}, year = {2021}, doi = {10.1016/j.bspc.2021.102648}}
@article{88, title = {{MI-EEGNET}: {A} novel convolutional neural network for motor imagery classification}, author = {M. Riyad and M. Khalil and A. Adib}, journal = {J. Neurosci. Methods}, volume = {353}, pages = {109037}, year = {2021}, doi = {10.1016/j.jneumeth.2020.109037}}
@article{89, title = {{EEG}-based epilepsy detection using {CNN-SVM} and {DNN-SVM} with feature dimensionality reduction by {PCA}}, author = {Y. Berrich and Z. Guennoun}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {14313}, year = {2025}, doi = {10.1038/s41598-025-95831-z}}
@article{90, title = {{GCNs-Net}: {A} Graph Convolutional Neural Network Approach for Decoding Time-Resolved {EEG} Motor Imagery Signals}, author = {Y. M. Hou and S. Y. Jia and X. M. Lun and Z. Q. Hao and Y. Shi and Y. Li and R. Zeng and J. L. Lv}, journal = {IEEE Trans. Neural Netw. Learn. Syst.}, volume = {35}, number = {6}, pages = {7312--7323}, year = {2022}, doi = {10.1109/TNNLS.2022.3202569}}
@article{91, title = {{EEG} temporal--spatial transformer for person identification}, author = {Y. Du and Y. Xu and X. Wang and L. Liu and P. Ma}, journal = {Sci. Rep.}, volume = {12}, number = {1}, pages = {14378}, year = {2022}, doi = {10.1038/s41598-022-18502-3}}
@inproceedings{92, title = {An image is worth 16x16 words: Transformers for image recognition at scale}, author = {A. Dosovitskiy and L. Beyer and A. Kolesnikov and D. Weissenborn and X. Zhai and T. Unterthiner and M. Dehghani and M. Minderer and G. Heigold and S. Gelly and J. Uszkoreit and N. Houlsby}, booktitle = {International Conference on Learning Representations}, year = {2021}, url = {https://openreview.net/forum?id=YicbFdNTTy}}
@inproceedings{93, title = {{EEG-Transformer}: Self-attention from Transformer Architecture for Decoding {EEG} of Imagined Speech}, author = {Y.-E. Lee and S.-H. Lee}, booktitle = {2022 10th International Winter Conference on Brain-Computer Interface (BCI)}, pages = {1--4}, year = {2022}, doi = {10.1109/BCI53720.2022.9735124}}
@article{94, title = {{MVGT}: {A} Multi-view Graph Transformer Based on Spatial Relations for {EEG} Emotion Recognition}, author = {Y. Cui and X. Liu and J. Liang and Y. Fu}, journal = {arXiv}, year = {2024}, doi = {10.48550/ARXIV.2407.03131}}
@inproceedings{95, title = {{EEG} Signal Denoising Using Beta-Variational Autoencoder}, author = {B. Mahaseni and N. M. Khan}, booktitle = {2024 46th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, pages = {1--4}, year = {2024}, address = {Orlando, USA}, doi = {10.1109/EMBC53108.2024.10782962}}
@article{96, title = {{VAEEG}: Variational auto-encoder for extracting {EEG} representation}, author = {T. Zhao and Y. Cui and T. Y. Ji and J. J. Luo and W. L. Li and J. Jiang and Z. F. Gao and W. G. Hu and Y. X. Yan and Y. W. Jiang and B. Hong}, journal = {NeuroImage}, volume = {304}, pages = {120946}, year = {2024}, doi = {10.1016/j.neuroimage.2024.120946}}
@article{97, title = {A fast learning algorithm for deep belief nets}, author = {G. E. Hinton and S. Osindero and Y. W. Teh}, journal = {Neural Comput.}, volume = {18}, number = {7}, pages = {1527--1554}, year = {2006}, doi = {10.1162/neco.2006.18.7.1527}}
@article{98, title = {Deep Belief Networks for Electroencephalography: {A} Review of Recent Contributions and Future Outlooks}, author = {F. Movahedi and J. L. Coyle and E. Sejdic}, journal = {IEEE J. Biomed. Health Inform.}, volume = {22}, number = {3}, pages = {642--652}, year = {2017}, doi = {10.1109/JBHI.2017.2727218}}
@article{99, title = {The Graph Neural Network Model}, author = {F. Scarselli and M. Gori and A. C. Tsoi and M. Hagenbuchner and G. Monfardini}, journal = {IEEE Trans. Neural Netw.}, volume = {20}, number = {1}, pages = {61--80}, year = {2008}, doi = {10.1109/TNN.2008.2005605}}
@article{100, title = {Semi-supervised classification with graph convolutional networks}, author = {T. N. Kipf and M. Welling}, journal = {arXiv}, year = {2016}, doi = {10.48550/ARXIV.1609.02907}}
@article{101, title = {Graph attention networks}, author = {P. Veli{\v{c}}kovi{\'c} and G. Cucurull and A. Casanova and A. Romero and P. Li{\`o} and Y. Bengio}, journal = {arXiv}, year = {2017}, doi = {10.48550/ARXIV.1710.10903}}
@article{102, title = {Graph-generative neural network for {EEG}-based epileptic seizure detection via discovery of dynamic brain functional connectivity}, author = {Z. D. Li and K. Hwang and K. Q. Li and J. Wu and T. K. Ji}, journal = {Sci. Rep.}, volume = {12}, number = {1}, pages = {18998}, year = {2022}, doi = {10.1038/s41598-022-23656-1}}
@article{103, title = {Graph neural network-based {EEG} classification: {A} Survey}, author = {D. Klepl and M. Wu and F. He}, journal = {IEEE Trans. Neural Syst. Rehabil. Eng.}, volume = {32}, pages = {493--503}, year = {2024}, doi = {10.1109/TNSRE.2024.3355750}}
@inproceedings{104, title = {Contrastive self-supervised {EEG} representation learning for emotion classification}, author = {K. Hu and R. J. Dai and W. T. Chen and H. L. Yin and B. L. Lu and W. L. Zheng}, booktitle = {2024 46th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, pages = {1--4}, year = {2024}, doi = {10.1109/EMBC53108.2024.10781579}}
@article{105, title = {Pre-training graph contrastive masked autoencoders are strong distillers for {EEG}}, author = {X. X. Wei and K. H. Zhao and Y. Jiao and H. Xie and L. F. He and Y. Zhang}, journal = {arXiv}, year = {2025}, doi = {10.48550/arXiv.2411.19230}}
@article{106, title = {{ViT2EEG}: Leveraging hybrid pretrained vision transformers for {EEG} data}, author = {R. Q. Yang and E. Modesitt}, journal = {arXiv}, year = {2023}, doi = {10.48550/ARXIV.2308.00454}}
@article{107, title = {Machine learning-based epileptic seizure detection methods using wavelet and {EMD}-based decomposition techniques: {A} Review}, author = {R. G. Thangarajoo and M. B. I. Reaz and G. Srivastava and F. Haque and S. H. M. Ali and A. A. A. Bakar and M. A. S. Bhuiyan}, journal = {Sensors}, volume = {21}, number = {24}, pages = {8485}, year = {2021}, doi = {10.3390/s21248485}}
@article{108, title = {Effective Epileptic Seizure Detection with Hybrid Feature Selection and {SMOTE}-Based Data Balancing Using {SVM} Classifier}, author = {H. F. Atlam and G. E. Aderibigbe and M. S. Nadeem}, journal = {Appl. Sci.}, volume = {15}, number = {9}, pages = {4690}, year = {2025}, doi = {10.3390/app15094690}}
@article{109, title = {Real-time epilepsy seizure detection based on {EEG} using tunable-{Q} wavelet transform and convolutional neural network}, author = {M. Shen and P. Wen and B. Song and Y. Li}, journal = {Biomed. Signal Process. Control}, volume = {82}, pages = {104566}, year = {2023}, doi = {10.1016/j.bspc.2022.104566}}
@article{110, title = {A hybrid optimization-enhanced {1D-ResCNN} framework for epileptic spike detection in scalp {EEG} signals}, author = {P. Kumar and P. K. Upadhyay}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {5707}, year = {2025}, doi = {10.1038/s41598-025-90164-3}}
@article{111, title = {Current trends, challenges, and future research directions of hybrid and deep learning techniques for motor imagery brain-computer interface}, author = {E. Lionakis and K. Karampidis and G. Papadourakis}, journal = {Multimodal Technol. Interact.}, volume = {7}, number = {10}, pages = {95}, year = {2023}, doi = {10.3390/mti7100095}}
@article{112, title = {{CLTNet}: {A} hybrid deep learning model for motor imagery classification}, author = {H. Gu and T. W. Chen and X. Ma and M. Y. Zhang and Y. Sun and J. Zhao}, journal = {Brain Sci.}, volume = {15}, number = {2}, pages = {124}, year = {2025}, doi = {10.3390/brainsci15020124}}
@article{113, title = {Multi-scale convolutional transformer network for motor imagery brain-computer interface}, author = {W. Zhao and B. C. Zhang and H. F. Zhou and D. Z. Wei and C. X. Huang and Q. Lan}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {12935}, year = {2025}, doi = {10.1038/s41598-025-96611-5}}
@article{114, title = {Emotion classification based on transformer and {CNN} for {EEG} spatial-temporal feature learning}, author = {X. Z. Yao and T. W. Li and P. Ding and F. Wang and L. Zhao and A. M. Gong and W. Y. Nan and Y. F. Fu}, journal = {Brain Sci.}, volume = {14}, number = {3}, pages = {268}, year = {2024}, doi = {10.3390/brainsci14030268}}
@article{115, title = {Epileptic seizures detection using deep learning techniques: {A} review}, author = {A. Shoeibi and M. Khodatars and N. Ghassemi and M. Jafari and P. Moridian and R. Alizadehsani and M. Panahiazar and F. Khozeimeh and A. Zare and H. Hosseini-Nejad and A. Khosravi and A. F. Atiya and D. Aminshahidi and S. Hussain and M. Rouhani and S. Nahavandi and U. R. Acharya}, journal = {Int. J. Environ. Res. Public Health}, volume = {18}, number = {11}, pages = {5780}, year = {2021}, doi = {10.3390/ijerph18115780}}
@article{116, title = {Accuracy of machine learning in detecting pediatric epileptic seizures: {S}ystematic review and meta-analysis}, author = {Z. Zou and B. Chen and D. Q. Xiao and F. J. Tang and X. H. Li}, journal = {J. Med. Internet Res.}, volume = {26}, pages = {e55986}, year = {2024}, doi = {10.2196/55986}}
@misc{117, title = {{CHB-MIT} scalp {EEG} database}, author = {J. Guttag}, year = {2010}, doi = {10.13026/C2K01R}, note = {PhysioNet}}
@article{118, title = {Deep-learning-based seizure detection and prediction from electroencephalography signals}, author = {F. E. Ibrahim and H. M. Emara and W. El-Shafai and M. Elwekeil and M. Rihan and I. M. Eldokany and E. T. Taha and A. S. El-Fishawy and E.-S. M. El-Rabaie and E. Abdellatef and F. E. Abd El-Samie}, journal = {Int. J. Numer. Methods Biomed. Eng.}, volume = {38}, number = {6}, pages = {e3573}, year = {2022}, doi = {10.1002/cnm.3573}}
@article{119, title = {An explainable {EEG} epilepsy detection model using friend pattern}, author = {T. Tuncer and S. Dogan}, journal = {Sci. Rep.}, volume = {15}, number = {1}, pages = {16951}, year = {2025}, doi = {10.1038/s41598-025-01747-z}}
@article{120, title = {A dataset of neonatal {EEG} recordings with seizure annotations}, author = {N. J. Stevenson and K. Tapani and L. Lauronen and S. Vanhatalo}, journal = {Sci. Data}, volume = {6}, number = {1}, pages = {190039}, year = {2019}, doi = {10.1038/sdata.2019.39}}
@article{121, title = {{TATPat} based explainable {EEG} model for neonatal seizure detection}, author = {Tuncer, T. and Dogan, S. and Tasci, I. and Tasci, B. and Hajiyeva, R.}, journal = {Sci. Rep.}, volume = {14}, number = {1}, pages = {26688}, year = {2024}, doi = {10.1038/s41598-024-77609-x}}
@article{122, title = {An end-to-end deep learning approach for epileptic seizure prediction}, author = {Xu, Y. K. and Yang, J. and Zhao, S. Q. and Wu, H. M. and Sawan, M.}, journal = {arXiv}, year = {2021}, doi = {10.48550/ARXIV.2108.07453}, url = {https://arxiv.org/abs/2108.07453}}
@article{123, title = {{EEG}-Based Epileptic Seizure Prediction Using Temporal Multi-Channel Transformers}, author = {Godoy, R. V. and Reis, T. J. and Polegato, P. H. and Lahr, G. J. and Saute, R. L. and Nakano, F. N. and Machado, H. R. and Sakamoto, A. C. and Becker, M. and Caurin, G. A.}, journal = {arXiv}, year = {2022}, doi = {10.48550/ARXIV.2209.11172}, url = {https://arxiv.org/abs/2209.11172}}
@article{124, title = {Multi-channel vision transformer for epileptic seizure prediction}, author = {Hussein, R. and Lee, S. and Ward, R.}, journal = {Biomedicines}, volume = {10}, number = {7}, pages = {1551}, year = {2022}, doi = {10.3390/biomedicines10071551}}
@article{125, title = {Preictal period optimization for deep learning-based epileptic seizure prediction}, author = {Koutsouvelis, P. and Chybowski, B. and Gonzalez-Sulser, A. and Abdullateef, S. and Escudero, J.}, journal = {J. Neural Eng.}, volume = {21}, number = {6}, pages = {066040}, year = {2024}, doi = {10.1088/1741-2552/ad9ad0}}
@article{126,title={Assessing the potential of {EEG} in early detection of Alzheimer’s disease: {A} systematic comprehensive review (2000–2023)},author={Ehteshamzad, S.},journal={J. Alzheimer's Dis. Rep.},volume={8},number={1},pages={1153-1169},year={2024},doi = {10.3233/ADR-230159},url = { },}. [Crossref]
@article{127,title={Unlocking the potential of {EEG} in Alzheimer’s disease research: {C}urrent status and pathways to precision detection},author={Akbar, F. and Taj, I. and Usman, S. M. and Imran, A. S. and Khalid, S. and Ihsan, I. and Ali, A. and Yasin, A.},journal={Brain Res. Bull.},volume={223},pages={111281},year={2025},doi = {10.1016/j.brainresbull.2025.111281},url = { },}. [Crossref]
@article{128,title={Data-driven retrieval of population-level {EEG} features and their role in neurodegenerative diseases},author={Li, W. and Varatharajah, Y. and Dicks, E. and Barnard, L. and Brinkmann, B. H. and Crepeau, D. and Worrel, G. and Fan, W. and Kremers, W. and Boeve, B. and Botha, H. and Gogineni, V. and Jones, D. T.},journal={Brain Commun.},volume={6},number={4},pages={fcae227},year={2024},doi={10.1093/braincomms/fcae227}}
@article{129,title={Use of {EEG} to Diagnose {ADHD}},author={Lenartowicz, A. and Loo, S. K.},journal={Curr. Psychiatry Rep.},volume={16},number={11},pages={498},year={2014},doi={10.1007/s11920-014-0498-0}}
@article{130,title={Machine learning in attention-deficit/hyperactivity disorder: {N}ew approaches toward understanding the neural mechanisms},author={Cao, M. and Martin, E. and Li, X.},journal={Transl. Psychiatry},volume={13},number={1},pages={236},year={2023},doi={10.1038/s41398-023-02536-w}}
@article{131,title={Monitoring burst suppression in critically ill patients: {M}ulti-centric evaluation of a novel method},author={Fürbass, F. and Herta, J. and Koren, J. and Westover, M. B. and Hartmann, M. M. and Gruber, A. and Baumgartner, C. and Kluge, T.},journal={Clin. Neurophysiol.},volume={127},number={4},pages={2038--2046},year={2016},doi={10.1016/j.clinph.2016.02.001}}
@article{132,title={Research and application of deep learning-based sleep staging: {D}ata, modeling, validation, and clinical practice},author={Yue, H. J. and Chen, Z. Q. and Guo, W. B. and Sun, L. and Dai, Y. D. and Wang, Y. M. and Ma, W. J. and Fan, X. M. and Wen, W. P. and Lei, W. B.},journal={Sleep Med. Rev.},volume={74},pages={101897},year={2024},doi={10.1016/j.smrv.2024.101897}}
@article{133,title={The sleep heart health study: {D}esign, rationale, and methods},author={Quan, S. F. and Howard, B. V. and Iber, C. and Kiley, J. P. and Nieto, F. J. and O’Connor, G. T. and Rapoport, D. M. and Redline, S. and Robbins, J. and Samet, J. M. and Wahl, P. W.},journal={Sleep},volume={20},number={12},pages={1077--1085},year={1997},doi={10.1093/sleep/20.12.1077}}
@article{134,title={Analysis of a sleep-dependent neuronal feedback loop: {T}he slow-wave microcontinuity of the {EEG}},author={Kemp, B. and Zwinderman, A. H. and Tuk, B. and Kamphuisen, H. A. C. and Oberye, J. J. L.},journal={IEEE Trans. Biomed. Eng.},volume={47},number={9},pages={1185--1194},year={2000},doi={10.1109/10.867928}}
@article{135,title={The {S}leep-{EDF} {Database} [Expanded]},author={Kemp, B. and Zwinderman, A. and Tuk, B. and Kamphuisen, H. and Oberye, J.},journal={PhysioNet},year={2013},doi={10.13026/C2X676},url={https://physionet.org/content/sleep-edf/1.0.0/}}
@article{136,title={The National Sleep Research Resource: {T}owards a sleep data commons},author={Zhang, G. Q. and Cui, L. C. and Mueller, R. and Tao, S. Q. and Kim, M. and Rueschman, M. and Mariani, S. and Mobley, D. and Redline, S.},journal={J. Am. Med. Inform. Assoc.},volume={25},number={10},pages={1351--1358},year={2018},doi={10.1093/jamia/ocy064}}
@article{137,title={ZleepAnlystNet: A novel deep learning model for automatic sleep stage scoring based on single-channel raw {EEG} data using separating training},author={Jirakittayakorn, N. and Wongsawat, Y. and Mitrirattanakul, S.},journal={Sci. Rep.},volume={14},number={1},pages={9859},year={2024},doi={10.1038/s41598-024-60796-y}}
@article{138,title={An interpretable and efficient sleep staging algorithm: {D}etectsleep{N}et},author={Guo, S.},journal={arXiv},eprinttype={arXiv},eprint={2406.19246},year={2024},doi={10.48550/arXiv.2406.19246},url={https://arxiv.org/abs/2406.19246}}
@article{139,title={Towards interpretable sleep stage classification using cross-modal transformers},author={Pradeepkumar, J. and Anandakumar, M. and Kugathasan, V. and Suntharalingham, D. and Kappel, S. L. and De Silva, A. C. and Edussooriya, C. U.},journal={IEEE Trans. Neural Syst. Rehabil. Eng.},year={2024},doi={10.1109/TNSRE.2024.3438610}}
@article{140,title={A multi constrained transformer-{B}i{LSTM} guided network for automated sleep stage classification from single-channel {EEG}},author={Sadik, F. and Raihan, M. T. and Rashid, R. B. and Rahman, M. and Abdal, S. M. and Ahmed, S. and Mahmud, T. I.},journal={arXiv},eprinttype={arXiv},eprint={2309.10542},year={2023},doi={10.48550/arXiv.2309.10542}}
@inproceedings{141,title={Siamese sleep transformer for robust sleep stage scoring with self-knowledge distillation and selective batch sampling},author={Kwak, H.-G. and Kweon, Y.-S. and Shin, G.-H.},booktitle={2023 11th International Winter Conference on Brain-Computer Interface (BCI)},pages={1--5},year={2023},doi={10.1109/BCI57258.2023.10078532}}
@article{142,title={Hybrid deep learning model based on transformer encoder for sleep stages classification},author={Al-akkam, O. A. A.-S. M.},journal={Bilad Alrafidain J. Eng. Sci. Technol.},volume={4},number={1},pages={113--126},year={2025},doi={10.56990/bajest/2025.040110}}
@article{143,title={Real-time segmentation of burst suppression patterns in critical care {EEG} monitoring},author={Westover, M. B. and Shafi, M. M. and Ching, S. and Chemali, J. J. and Purdon, P. L. and Cash, S. S. and Brown, E. N.},journal={J. Neurosci. Methods},volume={219},number={1},pages={131--141},year={2013},doi={10.1016/j.jneumeth.2013.07.003}}
@article{144,title={Electroencephalogram based detection of deep sedation in {ICU} patients using atomic decomposition},author={Nagaraj, S. B. and McClain, L. M. and Boyle, E. J. and Zhou, D. W. and Ramaswamy, S. M. and Biswal, S. and Akeju, O. and Purdon, P. L. and Westover, M. B.},journal={IEEE Trans. Biomed. Eng.},volume={65},number={12},pages={2684--2691},year={2018},doi={10.1109/TBME.2018.2813265}}
@article{145,title={One-dimensional convolutional neural networks combined with channel selection strategy for seizure prediction using long-term intracranial {EEG}},author={Wang, X. S. and Zhang, G. H. and Wang, Y. and Wang, L. and Yang, Z. H. and Liang, F. Y. and Cong, F. Y.},journal={Int. J. Neur. Syst.},volume={32},number={02},pages={2150048},year={2022},doi={10.1142/S0129065721500489}}
@article{146,title={Automated tracking of level of consciousness and delirium in critical illness using deep learning},author={Sun, H. Q. and Kimchi, E. and Akeju, O. and Nagaraj, S. B. and McClain, L. M. and Zhou, D. W. and Boyle, E. and Zheng, W. L. and Ge, W. D. and Westover, M. B.},journal={NPJ Digit. Med.},volume={2},number={1},pages={89},year={2019},doi={10.1038/s41746-019-0167-0}}
@article{147,title={Etiology of Burst Suppression {EEG} Patterns},author={Shanker, A. and Abel, J. H. and Schamberg, G. and Brown, E. N.},journal={Front. Psychol.},volume={12},pages={673529},year={2021},doi={10.3389/fpsyg.2021.673529}}
@article{148,title={{DEAP}: {A} database for emotion analysis; Using physiological signals},author={Koelstra, S. and Muhl, C. and Soleymani, M. and Lee, J.-S. and Yazdani, A. and Ebrahimi, T. and Pun, T. and Nijholt, A. and Patras, I.},journal={IEEE Trans. Affect. Comput.},volume={3},number={1},pages={18--31},year={2012},doi={10.1109/T-AFFC.2011.15}}
@article{149,title={Investigating critical frequency bands and channels for {EEG}-based emotion recognition with deep neural networks},author={Zheng, Wei-Long and Lu, Bao-Liang},journal={IEEE Trans. Auton. Mental Dev.},volume={7},number={3},pages={162--175},year={2015},doi={10.1109/TAMD.2015.2431497}}
@article{150,title={Exploration of effective electroencephalography features for the recognition of different valence emotions},author={Yang, K. and Tong, L. and Zeng, Y. and Lu, R. and Zhang, R. and Gao, Y. and Yan, B.},journal={Front. Neurosci.},volume={16},pages={1010951},year={2022},doi={10.3389/fnins.2022.1010951}}
@article{151,title={{AMDET}: {A}ttention based multiple dimensions {EEG} transformer for emotion recognition},author={Xu, Y. L. and Du, Y. and Li, L. and Lai, H. H. and Zou, J. and Zhou, T. Y. and Xiao, L. S. and Liu, L. and Ma, P. C.},journal={IEEE Trans. Affect. Comput.},volume={15},number={3},pages={1067--1077},year={2023},doi={10.1109/TAFFC.2023.3318321}}
@article{152,title={{TPRO-NET}: {A}n {EEG}-based emotion recognition method reflecting subtle changes in emotion},author={Zhang, X. Y. and Cheng, X. K. and Liu, H.},journal={Sci. Rep.},volume={14},number={1},pages={13491},year={2024},doi={10.1038/s41598-024-62990-4}}
@article{153,title={Cross-Subject {EEG} Emotion Recognition With Self-Organized Graph Neural Network},author={Li, J. C. and Li, S. Q. and Li, J. H. and Pan, J. H. and Wang, F.},journal={Front. Neurosci.},volume={15},pages={611653},year={2021},doi={10.3389/fnins.2021.611653}}
@article{154,title={{DAGAM}: {A} domain adversarial graph attention model for subject-independent EEG-based emotion recognition},author={Xu, T. and Dang, W. and Wang, J. B. and Zhou, Y.},journal={J. Neural Eng.},volume={20},number={1},pages={016022},year={2023},doi={10.1088/1741-2552/acae06}}
@article{155,title={{EEG} emotion recognition based on federated learning framework},author={Xu, C. and Liu, H. and Qi, W.},journal={Electronics},volume={11},number={20},pages={3316},year={2022},doi={10.3390/electronics11203316}}
@article{156,title={Semi-supervised dual-stream self-attentive adversarial graph contrastive learning for cross-subject eeg-based emotion recognition},author={Ye, W. S. and Zhang, Z. G. and Teng, F. and Zhang, M. and Wang, J. H. and Ni, D. and Li, F. L. and Xu, P. and Liang, Z.},journal={IEEE Trans. Affect. Comput.},year={2024},doi={10.1109/TAFFC.2024.3433470}}
@inproceedings{157,title={Adaptive federated learning for {EEG} emotion recognition},author={Chan, C. and Zheng, Q. Q. and Xu, C. J. and Wang, Q. and Heng, P. A.},booktitle={2024 International Joint Conference on Neural Networks (IJCNN)},pages={1--8},year={2024},organization={IEEE},doi={10.1109/IJCNN60899.2024.10650004}}
@article{158,title={{FBCN}et: {A} Multi-view Convolutional Neural Network for Brain-Computer Interface},author={Mane, R. and Chew, E. and Chua, K. and Ang, K. K. and Robinson, N. and Vinod, A. P. and Lee, S.-W. and Guan, C. T.},journal={arXiv},eprinttype={arXiv},eprint={2104.01233},year={2021},doi={10.48550/arXiv.2104.01233}}
@article{159,title={A robust multi-branch multi-attention-mechanism {EEGN}et for motor imagery {BCI} decoding},author={Deng, H. D. and Li, M. F. and Li, J. D. and Li, M. M. and Guo, M. M. and Xu, G. Z.},journal={J. Neurosci. Methods},volume={405},pages={110108},year={2024},doi={10.1016/j.jneumeth.2024.110108}}
@article{160,title={{CTN}et: {A} convolutional transformer network for {EEG}-based motor imagery classification},author={Zhao, W. and Jiang, X. L. and Zhang, B. C. and Xiao, S. X. and Weng, S. J.},journal={Sci. Rep.},volume={14},number={1},pages={20237},year={2024},doi={10.1038/s41598-024-71118-7}}
@article{161,title={{CCLN}et: {M}ulticlass motor imagery {EEG} decoding through extended common spatial patterns and CNN-LSTM hybrid network},author={Singh, K. and Singha, N. and Bhalaik, S.},journal={J. Supercomput.},volume={81},number={7},pages={805},year={2025},doi={10.1007/s11227-025-07319-2}}
@article{162,title={An efficient deep learning framework for {P}300 evoked related potential detection in {EEG} signal},author={Havaei, P. and Zekri, M. and Mahmoudzadeh, E. and Rabbani, H.},journal={Comput. Methods Programs Biomed.},volume={229},pages={107324},year={2023},doi={10.1016/j.cmpb.2022.107324}}
@inproceedings{163,title={{LSTM}-based classification of multiflicker-{SSVEP} in single channel dry-{EEG} for low-power/high-accuracy quadcopter-BMI system},author={Kobayashi, N. and Ishizuka, K.},booktitle={2019 IEEE International Conference on Systems, Man and Cybernetics (SMC)},pages={2160--2165},year={2019},doi={10.1109/SMC.2019.8914015}}
@article{164,title={Transfer learning and {S}pec{A}ugment applied to {SSVEP} based {BCI} classification},author={Bassi, P. R. A. S. and Rampazzo, W. and Attux, R.},journal={Biomed. Signal Process. Control},volume={67},pages={102542},year={2021},doi={10.1016/j.bspc.2021.102542}}
@article{165,title={Silent {EEG}-speech recognition using convolutional and recurrent neural network with 85\% accuracy of 9 words classification},author={Vorontsova, D. and Menshikov, I. and Zubov, A. and Orlov, K. and Rikunov, P. and Zvereva, E. and Flitman, L. and Lanikin, A. and Sokolova, A. and Markov, S. and Bernadotte, A.},journal={Sensors},volume={21},number={20},pages={6744},year={2021},doi={10.3390/s21206744}}
@article{166,title={Status of deep learning for {EEG}-based brain–computer interface applications},author={Hossain, K. M. and Islam, Md. A. and Hossain, S. and Nijholt, A. and Ahad, M. A. R.},journal={Front. Comput. Neurosci.},volume={16},pages={1006763},year={2023},doi={10.3389/fncom.2022.1006763}}
@article{167,title={{TFAC}-{N}et: {A} temporal-frequential attentional convolutional network for driver drowsiness recognition with single-channel {EEG}},author={Gong, P. and Wang, Y. and Zhou, X. and Wen, X. and Zhang, D.},journal={IEEE Trans. Intell. Transport. Syst.},volume={25},number={7},pages={7004--7016},year={2024},doi={10.1109/TITS.2023.3347075}}
@inproceedings{168,title={Neural networks meet neural activity: {U}tilizing {EEG} for mental workload estimation},author={Siddhad, G. and Roy, P. P. and Kim, B. G.},booktitle={International Conference on Pattern Recognition},pages={325--339},year={2025},address={Cham, Switzerland},doi={10.1007/978-3-031-78195-7_22}}
@article{169,title={{EEG}-{C}og{N}et: {A} deep learning framework for cognitive state assessment using {EEG} brain connectivity},author={Panwar, N. and Pandey, V. and Roy, P. P.},journal={Biomed. Signal Process. Control},volume={98},pages={106770},year={2024},doi={10.1016/j.bspc.2024.106770}}
@article{170,title={Optimized driver fatigue detection method using multimodal neural networks},author={Cao, S. L. and Feng, P. H. and Kang, W. and Chen, Z. Y. and Wang, B.},journal={Sci. Rep.},volume={15},number={1},pages={12240},year={2025},doi={10.1038/s41598-025-86709-1}}
@article{171,title={Deep{S}leep{N}et: {A} Model for Automatic Sleep Stage Scoring Based on Raw Single-Channel {EEG}},author={Supratak, A. and Dong, H. and Wu, C. and Guo, Y.},journal={IEEE Trans. Neural Syst. Rehabil. Eng.},volume={25},number={11},pages={1998--2008},year={2017},doi={10.1109/TNSRE.2017.2721116}}
@article{172,title={Sleep{T}ransformer: {A}utomatic sleep staging with interpretability and uncertainty quantification},author={Phan, H. and Mikkelsen, K. and Chen, O. Y. and Koch, A. M. and Mertins, P. and De Vos, M.},journal={IEEE Trans. Biomed. Eng.},volume={69},number={8},pages={2456--2467},year={2022},doi={10.1109/TBME.2022.3147187}}
@article{173,title={Flex{S}leep{T}ransformer: {A} transformer-based sleep staging model with flexible input channel configurations},author={Guo, Y. and Nowakowski, M. and Dai, W.},journal={Sci. Rep.},volume={14},number={1},pages={26312},year={2024},doi={10.1038/s41598-024-76197-0}}
@article{174,title={Care{S}leep{N}et: {A} hybrid deep learning network for automatic sleep staging},author={Wang, J. Q. and Zhao, S. and Jiang, H. T. and Zhou, Y. X. and Yu, Z. H. and Li, T. and Li, S. J. and Pan, G.},journal={IEEE J. Biomed. Health Inform.},volume={28},number={12},pages={7392--7405},year={2024},doi={10.1109/JBHI.2024.3426939}}
@article{175,title={An improved feature extraction algorithms of {EEG} signals based on motor imagery brain-computer interface},author={Geng, X. Z. and Li, D. Z. and Chen, H. L. and Yu, P. and Yan, H. and Yue, M. Z.},journal={Alexandria Eng. J.},volume={61},number={6},pages={4807--4820},year={2022},doi={10.1016/j.aej.2021.10.034}}
@article{176,title={The {BCI} competition {III}: {V}alidating alternative approaches to actual {BCI} problems},author={Blankertz, B. and Muller, K. R. and Krusienski, D. J. and Schalk, G. and Wolpaw, J. R. and Schlogl, A. and Pfurtscheller, G. and Millan, Jd.R. and Schroder, M. and Birbaumer, N.},journal={IEEE Trans. Neural Syst. Rehabil. Eng.},volume={14},number={2},pages={153--159},year={2006},doi={10.1109/TNSRE.2006.875642}}
@article{177,title={The non-invasive Berlin Brain–Computer Interface: {F}ast acquisition of effective performance in untrained subjects},author={Blankertz, B. and Dornhege, G. and Krauledat, M. and Muller, K.-R. and Curio, G.},journal={NeuroImage},volume={37},number={2},pages={539--550},year={2007},doi={10.1016/j.neuroimage.2007.01.051}}
@misc{178,title={{BCI} {C}ompetition 2008–{G}raz data set {A}},author={Brunner, C. and Leeb, R. and Muller-Putz, G. and Schlögl, A. and Pfurtscheller, G.},howpublished={Institute for knowledge discovery (laboratory of brain-computer interfaces), Graz University of Technology},volume={16},number={1-6},pages={34},year={2008},url={https://lampz.tugraz.at/~bci/database/001-2014/description.pdf}}
@misc{179,title={{BCI} {C}ompetition 2008–{G}raz data set {B}},author={Leeb, R. and Brunner, C. and Muller-Putz, G. and Schlögl, A. and Pfurtscheller, G.},howpublished={Graz University of Technology, Austria},volume={16},number={1-6},year={2008},url={https://lampx.tugraz.at/~bci/database/004-2014/description.pdf}}
@misc{180,title={Prediction of finger flexion: 4th brain-computer interface data competition},author={Miller, K. J. and Schalk, G.},howpublished={BCI Competition IV},volume={1},pages={1--2},year={2008}}
@article{181,title={{BNCI} {H}orizon 2020: {T}owards a roadmap for the {BCI} community},author={Brunner, C. and Birbaumer, N. and Blankertz, B. and Guger, C. and Kübler, A. and Mattia, D. and Millán, Jd. R. and Miralles, F. and Nijholt, A. and Opisso, E. and Ramsey, N. and Salomon, P. and Müller-Putz, G. R.},journal={Brain-Comput. Interfaces},volume={2},number={1},pages={1--10},year={2015},doi={10.1080/2326263X.2015.1008956}}
@article{182,title={The {T}emple {U}niversity {H}ospital {EEG} data corpus},author={Obeid, I. and Picone, J.},journal={Front. Neurosci.},volume={10},pages={196},year={2016},doi={10.3389/fnins.2016.00196}}
@article{183,title={The {T}emple {U}niversity {H}ospital seizure detection corpus},author={Shah, V. and von Weltin, E. and Lopez, S. and McHugh, J. R. and Veloso, L. and Golmohammadi, M. and Obeid, I. and Picone, J.},journal={Front. Neuroinform.},volume={12},pages={83},year={2018},doi={10.3389/fninf.2018.00083}}
@article{184,title={Indications of nonlinear deterministic and finite-dimensional structures in time series of brain electrical activity: {D}ependence on recording region and brain state},author={Andrzejak, R. G. and Lehnertz, K. and Mormann, F. and Rieke, C. and David, P. and Elger, C. E.},journal={Phys. Rev. E},volume={64},number={6},pages={061907},year={2001},doi={10.1103/PhysRevE.64.061907}}
@inproceedings{185,title={Differential entropy feature for {EEG}-based emotion classification},author={Duan, R.-N. and Zhu, J.-Y. and Lu, B.-L.},booktitle={2013 6th International IEEE/EMBS Conference on Neural Engineering (NER)},pages={81--84},year={2013},doi={10.1109/NER.2013.6695876}}
@article{186,title={{DREAMER}: {A} Database for Emotion Recognition Through {EEG} and {ECG} Signals From Wireless Low-cost Off-the-Shelf Devices},author={Katsigiannis, S. and Ramzan, N.},journal={IEEE J. Biomed. Health Inform.},volume={22},number={1},pages={98--107},year={2018},doi={10.1109/JBHI.2017.2688239}}
@article{187,title={{BCI2000}: {A} General-Purpose Brain-Computer Interface {(BCI)} System},author={Schalk, G. and McFarland, D. J. and Hinterberger, T. and Birbaumer, N. and Wolpaw, J. R.},journal={IEEE Trans. Biomed. Eng.},volume={51},number={6},pages={1034--1043},year={2004},doi={10.1109/TBME.2004.827072}}
@article{188,title={{ERP CORE}: {A}n open resource for human event-related potential research},author={Kappenman, E. S. and Farrens, J. L. and Zhang, W. and Stewart, A. X. and Luck, S. J.},journal={NeuroImage},volume={225},pages={117465},year={2021},doi={10.1016/j.neuroimage.2020.117465}}
@article{189,title={The {O}pen{N}euro resource for sharing of neuroscience data},author={Markiewicz, C. J. and Gorgolewski, K. J. and Feingold, F. and Blair, R. and Halchenko, Y. O. and Miller, E. and Hardcastle, N. and Wexler, J. and Esteban, O. and Goncavles, M. and Jwa, A. and Poldrack, R.},journal={eLife},volume={10},pages={e71774},year={2021},doi={10.7554/eLife.71774}}
@article{190,title={{EEG-BIDS}, an extension to the brain imaging data structure for electroencephalography},author={Pernet, C. R. and Appelhoff, S. and Gorgolewski, K. J. and Flandin, G. and Phillips, C. and Delorme, A. and Oostenveld, R.},journal={Sci. Data},volume={6},number={1},pages={103},year={2019},doi={10.1038/s41597-019-0104-8}}
@article{191,title={NEMAR: {A}n open access data, tools, and compute resource operating on {N}euro{E}lectro{M}agnetic data},author={Delorme, A. and Truong, D. and Youn, C. and Sivagnanam, S. and Stirm, C. and Yoshimoto, K. and Poldrack, R. A. and Majumdar, A. and Makeig, S.},journal={Database},volume={2022},pages={baac096},year={2022},doi={10.1093/database/baac096}}
@article{192,title={{HBN-EEG}: The {FAIR} implementation of the {H}ealthy {B}rain {N}etwork ({HBN}) electroencephalography dataset},author={Shirazi, S. Y. and Franco, A. and Scopel Hoffmann, M. and Esper, N. B. and Truong, D. and Delorme, A. and Milham, M. P. and Makeig, S.},journal={bioRxiv},year={2024},doi={10.1101/2024.10.03.615261}}
@article{193,title={SeizeI{T}2: {W}earable dataset of patients with focal epilepsy},author={Bhagubai, M. and Chatzichristos, C. and Swinnen, L. and Macea, J. and Zhang, J. and Lagae, L. and Jansen, K. and Schulze-Bonhage, A. and Sales, F. and Mahler, B. and Weber, Y. and Paesschen, W. V. and De Vos, M.},journal={Sci. Data},volume={12},number={1},pages={1228},year={2025},doi={10.1038/s41597-025-05580-x}}
@article{194,title={Open access dataset integrating {EEG} and {fNIRS} during Stroop tasks},author={Chen, Z. M. and Gao, C. Y. and Li, T. and Ji, X. and Liu, S. Y. and Xiao, M.},journal={Sci. Data},volume={10},number={1},pages={618},year={2023},doi={10.1038/s41597-023-02524-1}}
@article{195,title={A simultaneous {EEG}-{fNIRS} dataset of the visual cognitive motivation study in healthy adults},author={Phukhachee, T. and Angsuwatanakul, T. and Iramina, K. and Kaewkamnerdpong, B.},journal={Data Brief},volume={53},pages={110260},year={2024},doi={10.1016/j.dib.2024.110260}}
@article{196,title={A large-scale {MEG} and {EEG} dataset for object recognition in naturalistic scenes},author={Zhang, G. H. and Zhou, M. and Zhen, S. Y. and Tang, S. H. and Li, Z. and Zhen, Z. L.},journal={Sci. Data},volume={12},number={1},pages={857},year={2025},doi={10.1038/s41597-025-05174-7}}
@article{197,title={Removing electroencephalographic artifacts by blind source separation},author={Jung, T. P. and Makeig, S. and Humphries, C. and Lee, T. W. and McKeown, M. J. and Iragui, V. and Sejnowski, T. J.},journal={Psychophysiology},volume={37},number={2},pages={163--178},year={2000},doi={10.1111/1469-8986.3720163}}
@article{198,title={Learning a robust unified domain adaptation framework for cross-subject {EEG}-based emotion recognition},author={Jiménez-Guarneros, M. and Fuentes-Pineda, G.},journal={Biomed. Signal Process. Control},volume={86},pages={105138},year={2023},doi={10.1016/j.bspc.2023.105138}}
@article{199,title={Quad{TP}at: {Q}uadruple Transition Pattern-based explainable feature engineering model for stress detection using {EEG} signals},author={Cambay, V. Y. and Tasci, I. and Tasci, G. and Hajiyeva, R. and Dogan, S. and Tuncer, T.},journal={Sci. Rep.},volume={14},number={1},pages={27320},year={2024},doi={10.1038/s41598-024-78222-8}}
@article{200,title={Discrepancy between inter- and intra-subject variability in {EEG}-based motor imagery brain-computer interface: Evidence from multiple perspectives},author={Huang, G. and Zhao, Z. H. and Zhang, S. R. and Hu, Z. X. and Fan, J. M. and Fu, M. S. and Chen, J. L. and Xiao, Y. Q. and Wang, J. and Dan, G.},journal={Front. Neurosci.},volume={17},pages={1122661},year={2023},doi={10.3389/fnins.2023.1122661}}
@article{201,title={{META-EEG}: {M}eta-learning-based class-relevant {EEG} representation learning for zero-calibration brain–computer interfaces},author={Han, J. W. and Bak, S. and Kim, J. M. and Choi, W. and Shin, D. H. and Son, Y. H. and Kam, T. E.},journal={Expert Syst. Appl.},volume={238},pages={121986},year={2024},doi={10.1016/j.eswa.2023.121986}}
@article{202,title={On the effects of data normalization for domain adaptation on {EEG} data},author={Apicella, A. and Isgrò, F. and Pollastro, A. and Prevete, R.},journal={Eng. Appl. Artif. Intell.},volume={123},pages={106205},year={2023},doi={10.1016/j.engappai.2023.106205}}
@article{203,title={{SMOTE}: {S}ynthetic Minority Over-sampling Technique},author={Chawla, N. V. and Bowyer, K. W. and Hall, L. O. and Kegelmeyer, W. P.},journal={J. Artif. Intell. Res.},volume={16},pages={321--357},year={2002},doi={10.1613/jair.953}}
@inproceedings{204,title={{IDA-GAN}: {A} novel imbalanced data augmentation {GAN}},author={Yang, H. and Zhou, Y.},booktitle={2020 25th International Conference on Pattern Recognition (ICPR)},pages={8299--8305},year={2021},address={Milan, Italy},doi={10.1109/ICPR48806.2021.9411996}}
@article{205,title={Generative {AI} with {WGAN-GP} for boosting seizure detection accuracy},author={Abou-Abbas, L. and Henni, K. and Jemal, I. and Mezghani, N.},journal={Front. Artif. Intell.},volume={7},pages={1437315},year={2024},doi={10.3389/frai.2024.1437315}}
@article{206,title={Explainable {AI}: {A} review of applications to neuroimaging data},author={Farahani, F. V. and Fiok, K. and Lahijanian, B. and Karwowski, W. and Douglas, P. K.},journal={Front. Neurosci.},volume={16},pages={906290},year={2022},doi={10.3389/fnins.2022.906290}}
@article{207,title={Interpretable and robust {AI} in {EEG} systems: A survey},author={Zhou, X. L. and Liu, C. Y. and Zhou, J. N. and Wang, Z. R. and Zhai, L. M. and Jia, Z. Y. and Guan, C. T. and Liu, Y.},journal={arXiv},eprinttype={arXiv},eprint={2304.10755},year={2025},doi={10.48550/arXiv.2304.10755}}
@article{208,title={Perturbing {BEAMs}: {EEG} adversarial attack to deep learning models for epilepsy diagnosing},author={Yu, J. and Qiu, K. and Wang, P. and Su, C. and Fan, Y. and Cao, Y.},journal={BMC Med. Inform. Decis. Mak.},volume={23},number={1},pages={115},year={2023},doi={10.1186/s12911-023-02212-5}}
@article{209,title={An Efficient Model-Compressed {EEGNet} Accelerator for Generalized Brain-Computer Interfaces With Near Sensor Intelligence},author={Feng, L. C. and Shan, H. W. and Zhang, Y. Q. and Zhu, Z. M.},journal={IEEE Trans. Biomed. Circuits Syst.},volume={16},number={6},pages={1239--1249},year={2022},doi={10.1109/TBCAS.2022.3215962}}
@inproceedings{210,title={On-device learning of {EEGN}et-based network for wearable motor imagery brain-computer interface},author={Bian, S. and Kang, P. and Moosmann, J. and Liu, M. and Bonazzi, P. and Rosipal, R. and Magno, M.},booktitle={Proceedings of the 2024 ACM International Symposium on Wearable Computers},pages={9--16},year={2024},address={Melbourne, Australia},doi={10.1145/3675095.3676607}}
@article{211,title={Reproducible machine learning research in mental workload classification using {EEG}},author={Demirezen, G. and Taşkaya Temizel, T. and Brouwer, A.-M.},journal={Front. Neuroerg.},volume={5},pages={1346794},year={2024},doi={10.3389/fnrgo.2024.1346794}}
@inproceedings{212,title={Achieving Reproducibility in {EEG}-Based Machine Learning},author={Kinahan, S. and Saidi, P. and Daliri, A. and Liss, J. and Berisha, V.},booktitle={Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency},pages={1464--1474},year={2024},address={Rio de Janeiro, Brazil},doi={10.1145/3630106.3658983}}
@article{213,title={A systematic survey on the application of federated learning in mental state detection and human activity recognition},author={Grataloup, A. and Kurpicz-Briki, M.},journal={Front. Digit. Health},volume={6},pages={1495999},year={2024},doi={10.3389/fdgth.2024.1495999}}
@article{214,title={{AFLEMP}: {A}ttention-based federated learning for emotion recognition using multi-modal physiological data},author={Gahlan, N. and Sethia, D.},journal={Biomed. Signal Process. Control},volume={94},pages={106353},year={2024},doi={10.1016/j.bspc.2024.106353}}
@inproceedings{215,title={{EEG}-based emotion recognition with prototype-based data representation},author={Wang, Y. X. and Qiu, S. and Zhao, C. and Yang, W. J. and Li, J. P. and Ma, X. L. and He, H. G.},booktitle={2019 41st annual international conference of the IEEE engineering in medicine and biology society (EMBC)},pages={684--689},year={2019},organization={IEEE},address={Berlin, Germany},doi={10.1109/EMBC.2019.8857340}}
@article{216,title={Multi-layer prototype learning with Dirichlet mixup for open-set {EEG} recognition},author={Han, D.-K. and Lee, M. and Lee, S.-W.},journal={Expert Syst. Appl.},volume={266},pages={126047},year={2025},doi={10.1016/j.eswa.2024.126047}}
@article{217,title={Proto{EEGN}et: {A}n interpretable approach for detecting interictal epileptiform discharges},author={Tang, D. and Willard, F. and Tegerdine, R. and Triplett, L. and Donnelly, J. and Moffett, L. and Semenova, L. and Barnett, A. J. and Jing, J. and Rudin, C. and Westover, B.},journal={arXiv},year={2023},doi={10.48550/arXiv.2312.10056},url={}}. [Crossref]
@article{218,title={Exploration of an intrinsically explainable self-attention based model for prototype generation on single-channel {EEG} sleep stage classification},author={Adey, B. and Habib, A. and Karmakar, C.},journal={Sci. Rep.},volume={14},number={1},pages={27612},year={2024},doi={10.1038/s41598-024-79139-y},url={}}. [Crossref]
@article{219,title={An empirical comparison of deep learning explainability approaches for {EEG} using simulated ground truth},author={Sujatha Ravindran, A. and Contreras-Vidal, J.},journal={Sci. Rep.},volume={13},number={1},pages={17709},year={2023},doi={10.1038/s41598-023-43871-8},url={}}. [Crossref]
@article{220,title={Towards best practice of interpreting deep learning models for {EEG}-based brain computer interfaces},author={Cui, J. and Yuan, L. Q. and Wang, Z. X. and Li, R. L. and Jiang, T. Z.},journal={Front. Comput. Neurosci.},volume={17},pages={1232925},year={2023},doi={10.3389/fncom.2023.1232925},url={}}. [Crossref]
@article{221,title={Strategic integration: {A} cross-disciplinary review of the f{NIRS}-{EEG} dual-modality imaging system for delivering multimodal neuroimaging to applications},author={Chen, J. F. and Chen, K. W. and Yu, Y. F. and Bi, X. and Ji, X. and Zhang, D. W.},journal={Brain Sci.},volume={14},number={10},pages={1022},year={2024},doi={10.3390/brainsci14101022},url={}}. [Crossref]
@article{222,
  title   = {Adaptive extreme edge computing for wearable devices},
  author  = {Covi, E. and Donati, E. and Liang, X. P. and Kappel, D. and Heidari, H. and Payvand, M. and Wang, W.},
  journal = {Front. Neurosci.},
  volume  = {15},
  pages   = {611300},
  year    = {2021},
  doi     = {10.3389/fnins.2021.611300},
}
@article{223,
  title   = {Wearable {EEG} electronics for a brain--{AI} closed-loop system to enhance autonomous machine decision-making},
  author  = {Shin, J. H. and Kwon, J. and Kim, J. U. and Ryu, H. and Ok, J. and Kwon, S. J. and Park, H. and Kim, T. I.},
  journal = {npj Flex. Electron.},
  volume  = {6},
  number  = {1},
  pages   = {32},
  year    = {2022},
  doi     = {10.1038/s41528-022-00164-w},
}
@misc{224,
  title        = {Mother of all {BCI} Benchmarks},
  author       = {Aristimunha, B. and Carrara, I. and Guetschel, P. and Sedlar, S. and Rodrigues, P. and Sosulski, J. and Narayanan, D. and Bjareholt, E. and Quentin, B. and Schirrmeister, R. T. and Kalunga, E. and Darmet, L. and Gregoire, C. and Hussain, A. and Gatti, R. and Goncharenko, V. and Thielen, J. and Moreau, T. and Roy, Y. and Jayaram, V. and Barachant, A. and Chevallier, S.},
  year         = {2023},
  howpublished = {Zenodo},
  doi          = {10.5281/zenodo.10034224},
}
Appendix

Table A. Glossary of key interdisciplinary terms

Term

EEG / Neuroscience Meaning

ML / AI Meaning

Clinical Relevance

Artifact

Non-neural contamination (eye blinks, EMG, and line noise)

Input noise / perturbations that mislead models

Risk of false alarms (e.g., mistaking a blink for an epileptic spike)

Nonstationarity

EEG statistics change over time, sessions, or subjects

Distribution drift/domain shift

Reduces cross-subject generalization; requires recalibration

Generalization

Cross-subject / cross-session applicability of EEG models

The model’s ability to perform on unseen data

Ensures deployment reliability across diverse populations

Sensitivity

True positive rate (detecting real seizures)

Equivalent to recall

High sensitivity avoids missed diagnoses

Specificity

True negative rate (rejecting non-seizure activity)

Related to true negative rate

High specificity avoids false alarms

Interpretability

Physiological plausibility (are features brain-relevant?)

Understanding model reasoning (saliency, SHAP, etc.)

Improves trust from clinicians and neuroscientists

XAI

EEG-specific methods: electrode-localized maps and frequency-aware attribution

General framework for making black-box models transparent

Confirms reliance on valid biomarkers (e.g., P300 and alpha rhythms)

Domain adaptation

Adjusting for subject, session, or device variability

Adapting models across datasets with shifted distributions

Reduces costly per-patient calibration

Transfer learning

Using pretrained EEG models (e.g., TUH → CHB-MIT fine-tuning)

Reuse of knowledge from large source datasets to small target datasets

Critical for rare-disease or low-sample EEG datasets

Class imbalance

Few seizure vs. many non-seizure samples

Uneven distribution across categories

Requires oversampling, GAN augmentation, or anomaly detection

Robustness

Stability under noise, electrode shifts, and artifacts

Model performance under perturbations or adversarial noise

Essential for ICU / BCI reliability

Overfitting

EEG models memorizing subject quirks rather than true markers

Poor generalization due to memorizing training data

Leads to failed clinical deployment

Multimodal fusion

EEG + fNIRS, ECG, and eye-tracking integration

Combining multiple data modalities

Improves affective computing, workload monitoring, and diagnostics

FL

Hospitals train locally and share only model updates

Distributed ML without raw data sharing

Preserves privacy while enabling large-scale EEG model training


Cite this:
APA Style
IEEE Style
BibTex Style
MLA Style
Chicago Style
GB-T-7714-2015
Mutlu, A., Doğan, Ş., & Tuncer, T. (2025). Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications. Acadlore Trans. Mach. Learn., 4(3), 157-189. https://doi.org/10.56578/ataiml040304
A. Mutlu, Ş. DOĞAN, and T. Tuncer, "Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications," Acadlore Trans. Mach. Learn., vol. 4, no. 3, pp. 157-189, 2025. https://doi.org/10.56578/ataiml040304
@article{Mutlu2025ArtificialII,
  title   = {Artificial intelligence in electroencephalography: {A} comprehensive survey of methods, challenges, and applications},
  author  = {Mutlu, Abdulvahap and Doğan, Şengül and Tuncer, Türker},
  journal = {Acadlore Transactions on AI and Machine Learning},
  volume  = {4},
  number  = {3},
  pages   = {157--189},
  year    = {2025},
  doi     = {10.56578/ataiml040304},
}
Abdulvahap Mutlu, et al. "Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications." Acadlore Transactions on AI and Machine Learning, vol. 4, no. 3, 2025, pp. 157-189. doi: https://doi.org/10.56578/ataiml040304
Abdulvahap Mutlu, Şengül Doğan and Türker Tuncer. "Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications." Acadlore Transactions on AI and Machine Learning, 4, (2025): 157-189. doi: https://doi.org/10.56578/ataiml040304
MUTLU A, DOĞAN Ş, TUNCER T. Artificial intelligence in electroencephalography: A comprehensive survey of methods, challenges, and applications[J]. Acadlore Transactions on AI and Machine Learning, 2025, 4(3): 157-189. https://doi.org/10.56578/ataiml040304
cc
©2025 by the author(s). Published by Acadlore Publishing Services Limited, Hong Kong. This article is available for free download and can be reused and cited, provided that the original published version is credited, under the CC BY 4.0 license.