Copyright © 2026. The authors retain the copyright of this article. This article is an open-access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{182030,
  author        = {{Thenmozhi R} and {Rahulraagav M R}},
  title         = {Accessing Visual Information For The Person With Visual Disability},
  journal       = {International Journal of Innovative Research in Technology},
  year          = {2025},
  volume        = {12},
  number        = {2},
  pages         = {685--695},
  month         = jul,
  issn          = {2349-6002},
  url           = {https://ijirt.org/article?manuscript=182030},
  abstract      = {In today’s digital age, technology plays a critical role in shaping human development and how we access knowledge. Despite its widespread influence, many technological tools remain inaccessible to people with disabilities, particularly those who are visually impaired. This gap in accessibility significantly impacts their independence, quality of life, and ability to engage with modern innovations. The Vision2C project addresses this issue by providing a tailored mobile solution aimed at empowering visually challenged individuals, especially senior citizens. Vision2C is an Android-based application designed to assist the visually impaired in performing everyday tasks by integrating two core technologies: Speech Recognition and Text-to-Speech (TTS). The application allows users to convert spoken language into written text and vice versa, facilitating seamless interaction with digital content. The speech recognition component enables users to dictate notes or commands, while the TTS module reads out written content, making it accessible through audio. This project not only bridges the digital divide for visually impaired users but also enhances their autonomy and ease of communication in daily life. Through Vision2C, we aim to promote inclusivity and provide a meaningful technological advancement for a community often overlooked in the design of mainstream applications. The developed Hybrid HMM-DNN system reduced the Word Error Rate to 8.2%, showing marked improvement in recognition accuracy over traditional models. The system demonstrated efficient real-time performance with a total processing latency of 400 ms, balancing speed and accuracy across all speech processing stages. The system maintained low power usage, consuming approximately 4.7 mAh per hour, making it suitable for energy-constrained environments. Retrieval latency scaled logarithmically with the number of notes, enabling efficient access even as data volume increased. The system delivers a 37% improvement in energy-accuracy efficiency over existing solutions while supporting offline use and real-time interaction.},
  keywords      = {Assistive Technology, Visual Impairment, Speech Recognition, Text-to-Speech (TTS), Android Application, Accessibility, Elderly Care, Human-Computer Interaction, Voice-to-Text, Inclusive Design.},
  internal-note = {NOTE(review): author names braced whole ("{Thenmozhi R}") because the bare form "Thenmozhi R" would be split as First=Thenmozhi/Last=R; confirm whether "R" is a surname initial (South Indian convention) before converting to "Last, First" form. Fixed: pages single hyphen -> double hyphen; month {July} -> jul macro.},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry