Copyright © 2026 Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{175184,
  author   = {Patil, Deep and Chaudhari, Vikram and Shaikh, Sami and Padwal, Shivam and Shekhar, Anurag},
  title    = {Speech Emotion Recognition},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2025},
  month    = apr,
  volume   = {11},
  number   = {11},
  pages    = {2103--2108},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=175184},
  abstract = {This project aims to develop a comprehensive Face and Speech Emotion Recognition system that leverages deep learning techniques to analyze and classify human emotions through facial expressions and speech signals. The facial emotion recognition module uses convolution neural networks (CNNs) to detect and interpret facial features, enabling the system to identify a range of emotions in real-time. The speech emotion recognition module applies recurrent neural networks (RNNs) or long short-term memory (LSTM) networks to analyze vocal patterns and capture emotion-related features, such as pitch, tone, and intensity. By integrating these two modalities, the system ensures more accurate emotion detection by cross-referencing visual and auditory cues. This multimodal approach enables the system to perform effectively across diverse environments and scenarios.
The system is designed to be applicable in various domains, such as healthcare for mental health monitoring, customer support for sentiment analysis, education for personalized learning, and human-computer interaction for adaptive interfaces. Special attention is given to data preprocessing and feature extraction to optimize model performance and reduce noise from input data. The system also addresses critical concerns related to privacy and bias by ensuring secure data handling and fairness in emotion classification. Overall, this project contributes to advancing emotion-aware artificial intelligence systems, fostering more empathetic and intuitive interactions between humans and machines in real-world applications.},
  keywords = {Speech Emotion Recognition (SER), Machine Learning, Deep Learning, Emotional Speech Classification},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry