Copyright © 2026 Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{182424,
  author   = {Kamble, Sarvesh and Pardeshi, Khushi and Pate, Vaidehi and Shah, Karishma and Deshpande, Priyanka},
  title    = {Multimodal Emotion Recognition Using {Transformers} and Cross-Modal Attention},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2025},
  month    = jul,
  volume   = {12},
  number   = {2},
  pages    = {1910--1915},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=182424},
  abstract = {Technologies under affective computing require multimodal emotion recognition through audio-visual information on system interfaces to detect emotions. Transformer and Cross-Modal Attention present a new architecture designed for Multimodal Emotion Recognition which facilitates semantic pattern connecting and temporal pattern retrieval between face and voice signals. The system employs RAVDESS and FER+ datasets for training and evaluation purposes to evaluate emotional states in various conditions. The system achieves long-term dependencies in each individual information stream using transformer encoders while connecting important features between audio and visual sections through cross-modal attention. Emotion classification methods require multiple set of modal representation data to be merged into unified representation packages by using fusion algorithms. Multimodal learning with attention-based emotion detection achieves superior performance than single-mode benchmarks according to the design structure. The system overcome temporal mismatches together with inconsistent attributes between different modalities using attention-guided refinement in joint optimization procedures. The real-time recognition system provides practical solutions through systematic methods that prove useful for human-machine interaction control as well as healthcare surveillance and health monitoring.},
  keywords = {Multimodal Feature Extraction, Emotion Classification, Temporal Emotion Dynamics, Attention Mechanism, Speech Emotion Recognition, Facial Expression Analysis, Cross-Modal Transformer, Audio-Visual Synchronization, Deep Neural Networks, Auto-Encoders, Emotion Intensity Prediction, Valence-Arousal Modeling, Real-Time Emotion Detection, Human-Computer Interaction.},
  internal-note = {Key kept as the IJIRT manuscript number (182424) for backward compatibility with existing citations; author surname "Pate" copied verbatim from the record -- possibly "Patel", verify against the published paper.},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024
Submit inquiry