Copyright © 2026 Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{176346,
  % NOTE(review): author names as published are incomplete ("Govindavarma",
  % "prakash" appear to be given-name-only); normalised to "Last, First" form
  % and consistent capitalisation — confirm full names against the paper.
  author   = {Vuyyuru, Gopi Chand and Govindavarma and Prakash},
  % Title stored in Title Case; styles may downcase, so protected words are braced.
  title    = {Enhancing Infant Cry Analysis Using Audio-Visual Machine Learning Models},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2025},
  volume   = {11},
  number   = {11},
  pages    = {7514--7519},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=176346},
  abstract = {Infants cry to share their needs, but understanding those cries can be a real challenge for caregivers and doctors. In this work, we've crafted a new approach that listens to infant cries and watches their movements together, blending audio and video clues to pinpoint what's going on. Using deep learning, we pulled out critical details from both, then combined multiple computer models to classify cries---like hunger or tiredness---with stunning precision. Our system hit an accuracy of 99.74 percent in tests, far outpacing methods that use just sound or visuals alone. This fusion of two streams makes spotting infant behavior patterns quick and reliable, offering a game-changer for care. It's built on real data we gathered and tested, paving the way for tools that could help parents and health experts better tune into babies' needs. This leap forward shows how smart tech can transform infant care with near-perfect results.},
  keywords = {audio, behaviour, classification, deep learning, ensemble, feature extraction, health care, infant cry, blending},
  % Use the predefined month macro (unquoted) so styles/languages render it correctly.
  month    = apr,
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry