Copyright © 2025. The authors retain the copyright of this article. This is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
Cite This Article

@article{165068,
  author   = {Ayush Singh and Khushi Polshettiwar and Vedika Thakor and Aditya Tibile and Manjushri Raut},
  title    = {Deep Learning for Sign Language: Gestures to Text},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {},
  volume   = {11},
  number   = {1},
  pages    = {47-51},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=165068},
  abstract = {This project aims to address communication barriers faced by the deaf and mute community by leveraging advanced technology to facilitate real-time recognition and interpretation of sign language gestures. It utilizes deep learning techniques, particularly the YOLO object detection model, known for its speed and accuracy in identifying objects within images and videos. Data collection involves compiling a comprehensive dataset of annotated videos capturing a wide range of sign language gestures, which are then used to train the YOLO model. The project also utilizes the OpenCV library for preprocessing and postprocessing tasks, such as resizing frames and adding text annotations, to enhance gesture recognition accuracy. Performance is evaluated using metrics like precision, recall, and F1 score on a validation dataset, ensuring high accuracy and reliability in real-world scenarios.},
  keywords = {communication, deaf and mute community, deep learning, inclusivity, OpenCV, real-time recognition, sign language, YOLO},
  month    = {},
}
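As a rough illustration of the pipeline the abstract describes, the sketch below pairs a YOLO detector with OpenCV for frame resizing and text annotation. It assumes the Ultralytics YOLO package; the weights file gestures.pt, the 640x480 frame size, and the webcam index are illustrative placeholders, not details taken from the paper.

import cv2
from ultralytics import YOLO

model = YOLO("gestures.pt")  # hypothetical weights trained on the gesture dataset

cap = cv2.VideoCapture(0)    # default webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, (640, 480))   # preprocessing: resize the frame

    results = model(frame, verbose=False)   # run YOLO detection on the frame
    for box in results[0].boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        label = model.names[int(box.cls[0])]
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        # postprocessing: overlay the recognized gesture as a text annotation
        cv2.putText(frame, label, (x1, max(y1 - 10, 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    cv2.imshow("Gestures to Text", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):   # press q to quit
        break

cap.release()
cv2.destroyAllWindows()

The precision, recall, and F1 metrics mentioned in the abstract reduce to the standard formulas; the helper below computes them from raw true-positive, false-positive, and false-negative counts. The counts in the usage example are made up for illustration.

def precision_recall_f1(tp: int, fp: int, fn: int) -> tuple[float, float, float]:
    """Precision, recall, and F1 score from detection counts."""
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

# Example with made-up counts: 90 correct detections, 5 false alarms, 10 misses
p, r, f1 = precision_recall_f1(90, 5, 10)
print(f"precision={p:.3f} recall={r:.3f} f1={f1:.3f}")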