% Conference paper (ICASSP 2025) — was @misc; the DOI 10.1109/ICASSP49660.2025.*
% identifies the IEEE ICASSP proceedings, so @inproceedings is the correct type.
% Citation key kept unchanged so existing \cite commands still resolve.
@inproceedings{10481/105969,
  author    = {Khanday, Owais Mujtaba and Pérez Córdoba, José Luis and Mir, Mohd Yaqub and Najar, Ashfaq Ahmad and González López, José Andrés},
  title     = {{NeuroIncept} Decoder for High-Fidelity Speech Reconstruction from Neural Activity},
  booktitle = {Proceedings of the {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})},
  year      = {2025},
  month     = apr,
  publisher = {IEEE},
  doi       = {10.1109/ICASSP49660.2025.10888547},
  url       = {https://hdl.handle.net/10481/105969},
  keywords  = {Brain-computer interfaces, speech synthesis, deep neural networks, EEG},
  abstract  = {This paper introduces a novel algorithm designed for speech synthesis from neural activity recordings obtained using invasive electroencephalography (EEG) techniques. The proposed system offers a promising communication solution for individuals with severe speech impairments. Central to our approach is the integration of time-frequency features in the high-gamma band computed from EEG recordings with an advanced NeuroIncept Decoder architecture. This neural network architecture combines Convolutional Neural Networks (CNNs) and Gated Recurrent Units (GRUs) to reconstruct audio spectrograms from neural patterns. Our model demonstrates robust mean correlation coefficients between predicted and actual spectrograms, though inter-subject variability indicates distinct neural processing mechanisms among participants. Overall, our study highlights the potential of neural decoding techniques to restore communicative abilities in individuals with speech disorders and paves the way for future advancements in brain-computer interface technologies.},
  note      = {This work was supported by grant PID2022-141378OB-C22 funded by MICIU/AEI/10.13039/501100011033 and by ERDF/EU},
}