@comment{Copyright © 2026. Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.}
@article{193737,
  author        = {E D Pavan Kumar and Sobilla Yuvasri and Paruchuri Vennela and Nindra Sowmya and Turaka Iswarya},
  title         = {Emotion Identification from Speech Signals Using Deep Learning Techniques},
  journal       = {International Journal of Innovative Research in Technology},
  year          = {2026},
  month         = mar,
  volume        = {12},
  number        = {10},
  pages         = {2275--2283},
  issn          = {2349-6002},
  url           = {https://ijirt.org/article?manuscript=193737},
  abstract      = {Emotion Identification in Speech This is a research field of special consideration in human-computer interaction, artificial intelligence, and affective computing. The Emotion Identification from Speech Signals project has a goal to design an effective and trustworthy deep learning-based system that could help identify human emotions on the basis of speech inputs automatically. The system processes audio cues and derives significant acoustic signal controls like Mel-Frequency Cepstral Coefficients (MFCC), Chroma features, Pitch, Energy, lickering and Zero-Crossing Rate that are spectral and temporal attributes of speech. Preprocessing methods such as noise reduction, normalization, resampling, and silence cutting are used to improve the quality of data and improve the work of the model. To classify the emotions depending on Happy, Sad, Angry, Fear, and Neutral, a hybrid deep learning model incorporating both the Convolutional Neural Networks (CNN) and Long Short-Term Memory (LSTM) models is used. The CNN part process deep spatial elements of spectrogram representations, whereas the LSTM is used to respond to time-based relationships in speech sequences. In the model, it is trained on a labelled emotional speech dataset and evaluated on accuracy, precision, recall, F1-score and confusion matrix. This system may be utilized in different fields such as virtual assistant, surveillance of call centers, mental health, assistive technology, and human-robot interaction offering an effective, efficient, and feasible solution to emotion-sensitive intelligent systems},
  keywords      = {Emotion Identification, Deep Learning, Convolutional Neural Networks (CNN), Long Short-Term Memory (LSTM), MFCC, Audio Signal Processing, Emotion Classification, Affective Computing},
  internal-note = {NOTE(review): author names kept in publisher's order -- given/family split for "Last, First" form cannot be confirmed from this record; verify against the publisher page. Abstract kept verbatim; "lickering" is presumably a source typo (possibly "flickering" or "jitter") -- confirm before correcting.},
}
@comment{Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.}
@comment{Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024. Submit inquiry.}