Copyright © 2025. The authors retain the copyright of this article. This article is an open-access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
Cite This Article

@article{184244,
  author   = {Harshitha B A and Yashaswini Y},
  title    = {Facial Emotion Based Music Player Using Haar-Cascade},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2025},
  volume   = {12},
  number   = {4},
  pages    = {840--850},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=184244},
  month    = {September},
  keywords = {Facial Emotion Recognition, Haar Cascade, Music Recommendation, Human-Computer Interaction, Computer Vision, Affective Computing, Real-time Processing},
  abstract = {The integration of computer vision and affective computing has opened new avenues for creating intelligent human-computer interaction systems that respond to human emotions in real-time. This paper presents EmotiTune, a novel facial emotion-based music player system that utilizes Haar cascade classifiers for real-time emotion detection and automatic music recommendation. Traditional music players rely on manual selection and static playlists, failing to adapt to users' dynamic emotional states. Our proposed system addresses this limitation by implementing a comprehensive framework that combines computer vision techniques for facial emotion recognition with intelligent music recommendation algorithms. The system employs Haar cascade classifiers trained on facial features to detect and classify seven primary emotions (happiness, sadness, anger, surprise, fear, disgust, and neutral) in real-time video streams. Based on the detected emotional state, the system automatically selects and plays music from curated playlists designed to either complement or enhance the user's current mood. Experimental validation conducted on a dataset of 500 participants across diverse demographic groups demonstrates that EmotiTune achieves 92.8% accuracy in emotion detection and 89.3% user satisfaction in music recommendation relevance. The system maintains real-time performance with an average processing latency of 150ms and demonstrates robust performance under varying lighting conditions and facial orientations. Performance analysis reveals superior user engagement metrics compared to traditional music players, with users reporting 78% higher satisfaction rates and 65% longer listening sessions when using the emotion-adaptive system.},
}
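The abstract describes a two-stage pipeline: Haar cascade based face detection on a live video stream, followed by emotion classification and selection of a mood-matched playlist. As a rough illustration only, the sketch below wires together OpenCV's stock frontal-face Haar cascade with a placeholder classify_emotion() function and an invented emotion-to-playlist mapping; the paper's actual trained classifier, curated playlists, and 150 ms real-time pipeline are not reproduced here, and all names in the sketch are assumptions.

```python
# Minimal sketch of the emotion-to-playlist pipeline outlined in the abstract.
# Assumptions: OpenCV's stock frontal-face Haar cascade handles face detection;
# classify_emotion() stands in for the paper's trained emotion classifier;
# the playlist paths are illustrative placeholders, not the authors' data.
import cv2

# Stock Haar cascade shipped with OpenCV for frontal-face detection.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)

# Illustrative mapping from the seven emotions named in the abstract to playlists.
PLAYLISTS = {
    "happiness": "playlists/upbeat",
    "sadness":   "playlists/soothing",
    "anger":     "playlists/calming",
    "surprise":  "playlists/energetic",
    "fear":      "playlists/reassuring",
    "disgust":   "playlists/neutral",
    "neutral":   "playlists/ambient",
}

def classify_emotion(face_roi) -> str:
    """Placeholder for the paper's emotion classifier (not reproduced here)."""
    return "neutral"

def recommend_from_frame(frame):
    """Detect a face in one video frame and return a playlist for its emotion."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        return None  # no face found in this frame
    x, y, w, h = faces[0]
    emotion = classify_emotion(gray[y:y + h, x:x + w])
    return PLAYLISTS.get(emotion, PLAYLISTS["neutral"])

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)  # default webcam
    ok, frame = cap.read()
    if ok:
        print(recommend_from_frame(frame))
    cap.release()
```

In the system described by the paper, this loop would run continuously on the video stream so that playlist selection tracks changes in the user's detected emotional state.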